From: DongHun Kwak Date: Mon, 16 Oct 2017 10:59:28 +0000 (+0900) Subject: Imported Upstream version 3.5.1 X-Git-Tag: upstream/3.6.3~7 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4051fa229f5deaffdaec4591dc166e93303f6cb4;p=platform%2Fupstream%2Fpython3.git Imported Upstream version 3.5.1 Change-Id: I323aeec16b97765c499f0b829700355a72a8e4a8 Signed-off-by: DongHun Kwak --- diff --git a/Doc/c-api/buffer.rst b/Doc/c-api/buffer.rst index 6cb474e8..45c94887 100644 --- a/Doc/c-api/buffer.rst +++ b/Doc/c-api/buffer.rst @@ -40,7 +40,7 @@ protocol `. This protocol has two sides: Simple objects such as :class:`bytes` and :class:`bytearray` expose their underlying buffer in byte-oriented form. Other forms are possible; for example, -the elements exposed by a :class:`array.array` can be multi-byte values. +the elements exposed by an :class:`array.array` can be multi-byte values. An example consumer of the buffer interface is the :meth:`~io.BufferedIOBase.write` method of file objects: any object that can export a series of bytes through @@ -133,15 +133,15 @@ a buffer, see :c:func:`PyObject_GetBuffer`. called on non-NULL :c:member:`~Py_buffer.format` values. Important exception: If a consumer requests a buffer without the - :c:macro:`PyBUF_FORMAT` flag, :c:member:`~Py_Buffer.format` will + :c:macro:`PyBUF_FORMAT` flag, :c:member:`~Py_buffer.format` will be set to *NULL*, but :c:member:`~Py_buffer.itemsize` still has the value for the original format. - If :c:member:`~Py_Buffer.shape` is present, the equality + If :c:member:`~Py_buffer.shape` is present, the equality ``product(shape) * itemsize == len`` still holds and the consumer can use :c:member:`~Py_buffer.itemsize` to navigate the buffer. - If :c:member:`~Py_Buffer.shape` is *NULL* as a result of a :c:macro:`PyBUF_SIMPLE` + If :c:member:`~Py_buffer.shape` is *NULL* as a result of a :c:macro:`PyBUF_SIMPLE` or a :c:macro:`PyBUF_WRITABLE` request, the consumer must disregard :c:member:`~Py_buffer.itemsize` and assume ``itemsize == 1``. @@ -156,7 +156,7 @@ a buffer, see :c:func:`PyObject_GetBuffer`. .. c:member:: int ndim The number of dimensions the memory represents as an n-dimensional array. - If it is 0, :c:member:`~Py_Buffer.buf` points to a single item representing + If it is 0, :c:member:`~Py_buffer.buf` points to a single item representing a scalar. In this case, :c:member:`~Py_buffer.shape`, :c:member:`~Py_buffer.strides` and :c:member:`~Py_buffer.suboffsets` MUST be *NULL*. diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst index 57e8072d..9c935633 100644 --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -29,7 +29,7 @@ bound into a function. .. c:function:: int PyCode_Check(PyObject *co) - Return true if *co* is a :class:`code` object + Return true if *co* is a :class:`code` object. .. c:function:: int PyCode_GetNumFree(PyCodeObject *co) diff --git a/Doc/c-api/gen.rst b/Doc/c-api/gen.rst index 3ab073b1..1efbae4f 100644 --- a/Doc/c-api/gen.rst +++ b/Doc/c-api/gen.rst @@ -17,7 +17,7 @@ than explicitly calling :c:func:`PyGen_New` or :c:func:`PyGen_NewWithQualName`. .. c:var:: PyTypeObject PyGen_Type - The type object corresponding to generator objects + The type object corresponding to generator objects. .. c:function:: int PyGen_Check(PyObject *ob) diff --git a/Doc/c-api/typeobj.rst b/Doc/c-api/typeobj.rst index ed83e82d..b5113aae 100644 --- a/Doc/c-api/typeobj.rst +++ b/Doc/c-api/typeobj.rst @@ -94,7 +94,7 @@ type objects) *must* have the :attr:`ob_size` field. This field is not inherited by subtypes. -.. 
c:member:: char* PyTypeObject.tp_name +.. c:member:: const char* PyTypeObject.tp_name Pointer to a NUL-terminated string containing the name of the type. For types that are accessible as module globals, the string should be the full module @@ -372,7 +372,7 @@ type objects) *must* have the :attr:`ob_size` field. inherited individually. -.. c:member:: long PyTypeObject.tp_flags +.. c:member:: unsigned long PyTypeObject.tp_flags This field is a bit mask of various flags. Some flags indicate variant semantics for certain situations; others are used to indicate that certain @@ -472,7 +472,7 @@ type objects) *must* have the :attr:`ob_size` field. .. versionadded:: 3.4 -.. c:member:: char* PyTypeObject.tp_doc +.. c:member:: const char* PyTypeObject.tp_doc An optional pointer to a NUL-terminated C string giving the docstring for this type object. This is exposed as the :attr:`__doc__` attribute on the type and @@ -619,7 +619,7 @@ type objects) *must* have the :attr:`ob_size` field. +----------------+------------+ -.. c:member:: long PyTypeObject.tp_weaklistoffset +.. c:member:: Py_ssize_t PyTypeObject.tp_weaklistoffset If the instances of this type are weakly referenceable, this field is greater than zero and contains the offset in the instance structure of the weak @@ -786,7 +786,7 @@ type objects) *must* have the :attr:`ob_size` field. .. XXX explain. -.. c:member:: long PyTypeObject.tp_dictoffset +.. c:member:: Py_ssize_t PyTypeObject.tp_dictoffset If the instances of this type have a dictionary containing instance variables, this field is non-zero and contains the offset in the instances of the type of diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst index f7ed4c77..f8aaf0f6 100644 --- a/Doc/c-api/veryhigh.rst +++ b/Doc/c-api/veryhigh.rst @@ -201,7 +201,7 @@ the same library that the Python runtime is using. .. c:function:: struct _node* PyParser_SimpleParseFile(FILE *fp, const char *filename, int start) This is a simplified interface to :c:func:`PyParser_SimpleParseFileFlags` below, - leaving *flags* set to ``0`` + leaving *flags* set to ``0``. .. c:function:: struct _node* PyParser_SimpleParseFileFlags(FILE *fp, const char *filename, int start, int flags) diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst index d0f89471..554d2c87 100644 --- a/Doc/distutils/apiref.rst +++ b/Doc/distutils/apiref.rst @@ -521,7 +521,7 @@ This module provides the following functions. .. method:: CCompiler.library_option(lib) - Return the compiler option to add *dir* to the list of libraries linked into the + Return the compiler option to add *lib* to the list of libraries linked into the shared library or executable. @@ -928,7 +928,7 @@ timestamp dependency analysis. Walk two filename lists in parallel, testing if each source is newer than its corresponding target. Return a pair of lists (*sources*, *targets*) where - source is newer than target, according to the semantics of :func:`newer` + source is newer than target, according to the semantics of :func:`newer`. .. % % equivalent to a listcomp... diff --git a/Doc/distutils/index.rst b/Doc/distutils/index.rst index 90d1c1ab..335f804d 100644 --- a/Doc/distutils/index.rst +++ b/Doc/distutils/index.rst @@ -15,10 +15,9 @@ very little overhead for build/release/install mechanics. .. note:: This guide only covers the basic tools for building and distributing - extensions that are provided as part of this version of Python. Third - party tools offer easier to use and more secure alternatives. 
Refer to the - `quick recommendations section - `__ + extensions that are provided as part of this version of Python. Third party + tools offer easier to use and more secure alternatives. Refer to the `quick + recommendations section `__ in the Python Packaging User Guide for more information. diff --git a/Doc/distutils/packageindex.rst b/Doc/distutils/packageindex.rst index daf93452..2d7daef7 100644 --- a/Doc/distutils/packageindex.rst +++ b/Doc/distutils/packageindex.rst @@ -167,7 +167,7 @@ follows:: username: password: -The *distutils* section defines a *index-servers* variable that lists the +The *distutils* section defines an *index-servers* variable that lists the name of all sections describing a repository. Each section describing a repository defines three variables: diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst index b7e35f45..f60e208e 100644 --- a/Doc/extending/newtypes.rst +++ b/Doc/extending/newtypes.rst @@ -893,20 +893,20 @@ fields in the right order! It's often easiest to find an example that includes all the fields you need (even if they're initialized to ``0``) and then change the values to suit your new type. :: - char *tp_name; /* For printing */ + const char *tp_name; /* For printing */ The name of the type - as mentioned in the last section, this will appear in various places, almost entirely for diagnostic purposes. Try to choose something that will be helpful in such a situation! :: - int tp_basicsize, tp_itemsize; /* For allocation */ + Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */ These fields tell the runtime how much memory to allocate when new objects of this type are created. Python has some built-in support for variable length structures (think: strings, lists) which is where the :c:member:`~PyTypeObject.tp_itemsize` field comes in. This will be dealt with later. :: - char *tp_doc; + const char *tp_doc; Here you can put a string (or its address) that you want returned when the Python script references ``obj.__doc__`` to retrieve the doc string. diff --git a/Doc/extending/windows.rst b/Doc/extending/windows.rst index f0c69b85..67bdd475 100644 --- a/Doc/extending/windows.rst +++ b/Doc/extending/windows.rst @@ -37,149 +37,9 @@ There are two approaches to building extension modules on Windows, just as there are on Unix: use the :mod:`distutils` package to control the build process, or do things manually. The distutils approach works well for most extensions; documentation on using :mod:`distutils` to build and package extension modules -is available in :ref:`distutils-index`. This section describes the manual -approach to building Python extensions written in C or C++. - -To build extensions using these instructions, you need to have a copy of the -Python sources of the same version as your installed Python. You will need -Microsoft Visual C++ "Developer Studio"; project files are supplied for VC++ -version 7.1, but you can use older versions of VC++. Notice that you should use -the same version of VC++that was used to build Python itself. The example files -described here are distributed with the Python sources in the -:file:`PC\\example_nt\\` directory. - -#. **Copy the example files** --- The :file:`example_nt` directory is a - subdirectory of the :file:`PC` directory, in order to keep all the PC-specific - files under the same directory in the source distribution. However, the - :file:`example_nt` directory can't actually be used from this location. 
You - first need to copy or move it up one level, so that :file:`example_nt` is a - sibling of the :file:`PC` and :file:`Include` directories. Do all your work - from within this new location. - -#. **Open the project** --- From VC++, use the :menuselection:`File --> Open - Solution` dialog (not :menuselection:`File --> Open`!). Navigate to and select - the file :file:`example.sln`, in the *copy* of the :file:`example_nt` directory - you made above. Click Open. - -#. **Build the example DLL** --- In order to check that everything is set up - right, try building: - -#. Select a configuration. This step is optional. Choose - :menuselection:`Build --> Configuration Manager --> Active Solution Configuration` - and select either :guilabel:`Release` or :guilabel:`Debug`. If you skip this - step, VC++ will use the Debug configuration by default. - -#. Build the DLL. Choose :menuselection:`Build --> Build Solution`. This - creates all intermediate and result files in a subdirectory called either - :file:`Debug` or :file:`Release`, depending on which configuration you selected - in the preceding step. - -#. **Testing the debug-mode DLL** --- Once the Debug build has succeeded, bring - up a DOS box, and change to the :file:`example_nt\\Debug` directory. You should - now be able to repeat the following session (``C>`` is the DOS prompt, ``>>>`` - is the Python prompt; note that build information and various debug output from - Python may not match this screen dump exactly):: - - C>..\..\PCbuild\python_d - Adding parser accelerators ... - Done. - Python 2.2 (#28, Dec 19 2001, 23:26:37) [MSC 32 bit (Intel)] on win32 - Type "copyright", "credits" or "license" for more information. - >>> import example - [4897 refs] - >>> example.foo() - Hello, world - [4903 refs] - >>> - - Congratulations! You've successfully built your first Python extension module. - -#. **Creating your own project** --- Choose a name and create a directory for - it. Copy your C sources into it. Note that the module source file name does - not necessarily have to match the module name, but the name of the - initialization function should match the module name --- you can only import a - module :mod:`spam` if its initialization function is called :c:func:`PyInit_spam`, - (see :ref:`building`, or use the minimal :file:`Modules/xxmodule.c` as a guide). - By convention, it lives in a file called :file:`spam.c` or :file:`spammodule.c`. - The output file should be called :file:`spam.pyd` (in Release mode) or - :file:`spam_d.pyd` (in Debug mode). The extension :file:`.pyd` was chosen - to avoid confusion with a system library :file:`spam.dll` to which your module - could be a Python interface. - - Now your options are: - -#. Copy :file:`example.sln` and :file:`example.vcproj`, rename them to - :file:`spam.\*`, and edit them by hand, or - -#. Create a brand new project; instructions are below. - - In either case, copy :file:`example_nt\\example.def` to :file:`spam\\spam.def`, - and edit the new :file:`spam.def` so its second line contains the string - '``initspam``'. If you created a new project yourself, add the file - :file:`spam.def` to the project now. (This is an annoying little file with only - two lines. An alternative approach is to forget about the :file:`.def` file, - and add the option :option:`/export:initspam` somewhere to the Link settings, by - manually editing the setting in Project Properties dialog). - -#. 
**Creating a brand new project** --- Use the :menuselection:`File --> New - --> Project` dialog to create a new Project Workspace. Select :guilabel:`Visual - C++ Projects/Win32/ Win32 Project`, enter the name (``spam``), and make sure the - Location is set to parent of the :file:`spam` directory you have created (which - should be a direct subdirectory of the Python build tree, a sibling of - :file:`Include` and :file:`PC`). Select Win32 as the platform (in my version, - this is the only choice). Make sure the Create new workspace radio button is - selected. Click OK. - - You should now create the file :file:`spam.def` as instructed in the previous - section. Add the source files to the project, using :menuselection:`Project --> - Add Existing Item`. Set the pattern to ``*.*`` and select both :file:`spam.c` - and :file:`spam.def` and click OK. (Inserting them one by one is fine too.) - - Now open the :menuselection:`Project --> spam properties` dialog. You only need - to change a few settings. Make sure :guilabel:`All Configurations` is selected - from the :guilabel:`Settings for:` dropdown list. Select the C/C++ tab. Choose - the General category in the popup menu at the top. Type the following text in - the entry box labeled :guilabel:`Additional Include Directories`:: - - ..\Include,..\PC - - Then, choose the General category in the Linker tab, and enter :: - - ..\PCbuild - - in the text box labelled :guilabel:`Additional library Directories`. - - Now you need to add some mode-specific settings: - - Select :guilabel:`Release` in the :guilabel:`Configuration` dropdown list. - Choose the :guilabel:`Link` tab, choose the :guilabel:`Input` category, and - append ``pythonXY.lib`` to the list in the :guilabel:`Additional Dependencies` - box. - - Select :guilabel:`Debug` in the :guilabel:`Configuration` dropdown list, and - append ``pythonXY_d.lib`` to the list in the :guilabel:`Additional Dependencies` - box. Then click the C/C++ tab, select :guilabel:`Code Generation`, and select - :guilabel:`Multi-threaded Debug DLL` from the :guilabel:`Runtime library` - dropdown list. - - Select :guilabel:`Release` again from the :guilabel:`Configuration` dropdown - list. Select :guilabel:`Multi-threaded DLL` from the :guilabel:`Runtime - library` dropdown list. - -If your module creates a new type, you may have trouble with this line:: - - PyVarObject_HEAD_INIT(&PyType_Type, 0) - -Static type object initializers in extension modules may cause -compiles to fail with an error message like "initializer not a -constant". This shows up when building DLL under MSVC. Change it to:: - - PyVarObject_HEAD_INIT(NULL, 0) - -and add the following to the module initialization function:: - - if (PyType_Ready(&MyObject_Type) < 0) - return NULL; +is available in :ref:`distutils-index`. If you find you really need to do +things manually, it may be instructive to study the project file for the +:source:`winsound ` standard library module. .. _dynamic-linking: diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst index fea5a576..7bb4dc2e 100644 --- a/Doc/faq/extending.rst +++ b/Doc/faq/extending.rst @@ -348,7 +348,7 @@ complete example using the GNU readline library (you may want to ignore { line = readline (prompt); - if (NULL == line) /* CTRL-D pressed */ + if (NULL == line) /* Ctrl-D pressed */ { done = 1; } @@ -443,8 +443,8 @@ extension module using g++ (e.g., ``g++ -shared -o mymodule.so mymodule.o``). Can I create an object class with some methods implemented in C and others in Python (e.g. through inheritance)? 
---------------------------------------------------------------------------------------------------------------- -In Python 2.2, you can inherit from built-in classes such as :class:`int`, -:class:`list`, :class:`dict`, etc. +Yes, you can inherit from built-in classes such as :class:`int`, :class:`list`, +:class:`dict`, etc. The Boost Python Library (BPL, http://www.boost.org/libs/python/doc/index.html) provides a way of doing this from C++ (i.e. you can inherit from an extension diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst index 12a0337c..2221f149 100644 --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -151,8 +151,8 @@ after a final minor release is made, the Subversion trunk is incremented to the next minor version, which becomes the "a0" version, e.g. "2.4a0". -See also the documentation for ``sys.version``, ``sys.hexversion``, and -``sys.version_info``. +See also the documentation for :data:`sys.version`, :data:`sys.hexversion`, and +:data:`sys.version_info`. How do I obtain a copy of the Python source? diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst index 2d3cb1ca..295445ed 100644 --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -849,7 +849,7 @@ How do I modify a string in place? You can't, because strings are immutable. In most situations, you should simply construct a new string from the various parts you want to assemble it from. However, if you need an object with the ability to modify in-place -unicode data, try using a :class:`io.StringIO` object or the :mod:`array` +unicode data, try using an :class:`io.StringIO` object or the :mod:`array` module:: >>> import io @@ -1164,6 +1164,8 @@ analogue of lisp car is ``lisp_list[0]`` and the analogue of cdr is usually a lot slower than using Python lists. +.. _faq-multidimensional-list: + How do I create a multidimensional list? ---------------------------------------- diff --git a/Doc/faq/windows.rst b/Doc/faq/windows.rst index 7cac8a97..6db6637e 100644 --- a/Doc/faq/windows.rst +++ b/Doc/faq/windows.rst @@ -81,14 +81,14 @@ by entering a few expressions of your choice and seeing the results:: 'HelloHelloHello' Many people use the interactive mode as a convenient yet highly programmable -calculator. When you want to end your interactive Python session, hold the Ctrl -key down while you enter a Z, then hit the "Enter" key to get back to your +calculator. When you want to end your interactive Python session, hold the :kbd:`Ctrl` +key down while you enter a :kbd:`Z`, then hit the ":kbd:`Enter`" key to get back to your Windows command prompt. You may also find that you have a Start-menu entry such as :menuselection:`Start --> Programs --> Python 3.3 --> Python (command line)` that results in you seeing the ``>>>`` prompt in a new window. If so, the window will disappear -after you enter the Ctrl-Z character; Windows is running a single "python" +after you enter the :kbd:`Ctrl-Z` character; Windows is running a single "python" command in the window, and closes it when you terminate the interpreter. If the ``python`` command, instead of displaying the interpreter prompt ``>>>``, @@ -131,8 +131,8 @@ you should make sure that entering the command :: c:\Python33\python -starts up the interpreter as above (and don't forget you'll need a "CTRL-Z" and -an "Enter" to get out of it). Once you have verified the directory, you can +starts up the interpreter as above (and don't forget you'll need a ":kbd:`Ctrl-Z`" and +an ":kbd:`Enter`" to get out of it). 
Once you have verified the directory, you can add it to the system path to make it easier to start Python by just running the ``python`` command. This is currently an option in the installer as of CPython 3.3. @@ -327,7 +327,7 @@ Prior to Python 2.7 and 3.2, to terminate a process, you can use :mod:`ctypes`:: return (0 != kernel32.TerminateProcess(handle, 0)) In 2.7 and 3.2, :func:`os.kill` is implemented similar to the above function, -with the additional feature of being able to send CTRL+C and CTRL+BREAK +with the additional feature of being able to send :kbd:`Ctrl+C` and :kbd:`Ctrl+Break` to console subprocesses which are designed to handle those signals. See :func:`os.kill` for further details. diff --git a/Doc/glossary.rst b/Doc/glossary.rst index 846c01ae..6808e7ac 100644 --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -241,6 +241,14 @@ Glossary keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods. Called a hash in Perl. + dictionary view + The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and + :meth:`dict.items` are called dictionary views. They provide a dynamic + view on the dictionary’s entries, which means that when the dictionary + changes, the view reflects these changes. To force the + dictionary view to become a full list use ``list(dictview)``. See + :ref:`dict-views`. + docstring A string literal which appears as the first expression in a class, function or module. While ignored when the suite is executed, it is @@ -934,20 +942,13 @@ Glossary ``'\r'``. See :pep:`278` and :pep:`3116`, as well as :func:`bytes.splitlines` for an additional use. - view - The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and - :meth:`dict.items` are called dictionary views. They are lazy sequences - that will see changes in the underlying dictionary. To force the - dictionary view to become a full list use ``list(dictview)``. See - :ref:`dict-views`. - virtual environment A cooperatively isolated runtime environment that allows Python users and applications to install and upgrade Python distribution packages without interfering with the behaviour of other Python applications running on the same system. - See also :ref:`scripts-pyvenv` + See also :ref:`scripts-pyvenv`. virtual machine A computer defined entirely in software. Python's virtual machine diff --git a/Doc/howto/clinic.rst b/Doc/howto/clinic.rst index 7524c4ad..b04edea1 100644 --- a/Doc/howto/clinic.rst +++ b/Doc/howto/clinic.rst @@ -1249,18 +1249,18 @@ Here's the simplest example of a custom converter, from ``Modules/zlibmodule.c`` /*[python input] - class uint_converter(CConverter): + class capped_uint_converter(CConverter): type = 'unsigned int' - converter = 'uint_converter' + converter = 'capped_uint_converter' [python start generated code]*/ - /*[python end generated code: checksum=da39a3ee5e6b4b0d3255bfef95601890afd80709]*/ + /*[python end generated code: output=da39a3ee5e6b4b0d input=35521e4e733823c7]*/ -This block adds a converter to Argument Clinic named ``uint``. Parameters -declared as ``uint`` will be declared as type ``unsigned int``, and will -be parsed by the ``'O&'`` format unit, which will call the ``uint_converter`` -converter function. -``uint`` variables automatically support default values. +This block adds a converter to Argument Clinic named ``capped_uint``. Parameters +declared as ``capped_uint`` will be declared as type ``unsigned int``, and will +be parsed by the ``'O&'`` format unit, which will call the +``capped_uint_converter`` converter function. 
``capped_uint`` variables +automatically support default values. More sophisticated custom converters can insert custom C code to handle initialization and cleanup. diff --git a/Doc/howto/descriptor.rst b/Doc/howto/descriptor.rst index f018b0e8..530f34b8 100644 --- a/Doc/howto/descriptor.rst +++ b/Doc/howto/descriptor.rst @@ -319,7 +319,7 @@ Non-data descriptors provide a simple mechanism for variations on the usual patterns of binding functions into methods. To recap, functions have a :meth:`__get__` method so that they can be converted -to a method when accessed as attributes. The non-data descriptor transforms a +to a method when accessed as attributes. The non-data descriptor transforms an ``obj.f(*args)`` call into ``f(obj, *args)``. Calling ``klass.f(*args)`` becomes ``f(*args)``. diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst index 813bc0f7..b979aa7d 100644 --- a/Doc/howto/logging-cookbook.rst +++ b/Doc/howto/logging-cookbook.rst @@ -2151,3 +2151,206 @@ The above approach can, of course, be adapted to other TTS systems and even other systems altogether which can process messages via external programs run from a command line. + +.. _buffered-logging: + +Buffering logging messages and outputting them conditionally +------------------------------------------------------------ + +There might be situations where you want to log messages in a temporary area +and only output them if a certain condition occurs. For example, you may want to +start logging debug events in a function, and if the function completes without +errors, you don't want to clutter the log with the collected debug information, +but if there is an error, you want all the debug information to be output as well +as the error. + +Here is an example which shows how you could do this using a decorator for your +functions where you want logging to behave this way. It makes use of the +:class:`logging.handlers.MemoryHandler`, which allows buffering of logged events +until some condition occurs, at which point the buffered events are ``flushed`` +- passed to another handler (the ``target`` handler) for processing. By default, +the ``MemoryHandler`` flushed when its buffer gets filled up or an event whose +level is greater than or equal to a specified threshold is seen. You can use this +recipe with a more specialised subclass of ``MemoryHandler`` if you want custom +flushing behavior. + +The example script has a simple function, ``foo``, which just cycles through +all the logging levels, writing to ``sys.stderr`` to say what level it's about +to log at, and then actually logging a message that that level. You can pass a +parameter to ``foo`` which, if true, will log at ERROR and CRITICAL levels - +otherwise, it only logs at DEBUG, INFO and WARNING levels. + +The script just arranges to decorate ``foo`` with a decorator which will do the +conditional logging that's required. The decorator takes a logger as a parameter +and attaches a memory handler for the duration of the call to the decorated +function. The decorator can be additionally parameterised using a target handler, +a level at which flushing should occur, and a capacity for the buffer. These +default to a :class:`~logging.StreamHandler` which writes to ``sys.stderr``, +``logging.ERROR`` and ``100`` respectively. 
+ +Here's the script:: + + import logging + from logging.handlers import MemoryHandler + import sys + + logger = logging.getLogger(__name__) + logger.addHandler(logging.NullHandler()) + + def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None): + if target_handler is None: + target_handler = logging.StreamHandler() + if flush_level is None: + flush_level = logging.ERROR + if capacity is None: + capacity = 100 + handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler) + + def decorator(fn): + def wrapper(*args, **kwargs): + logger.addHandler(handler) + try: + return fn(*args, **kwargs) + except Exception: + logger.exception('call failed') + raise + finally: + super(MemoryHandler, handler).flush() + logger.removeHandler(handler) + return wrapper + + return decorator + + def write_line(s): + sys.stderr.write('%s\n' % s) + + def foo(fail=False): + write_line('about to log at DEBUG ...') + logger.debug('Actually logged at DEBUG') + write_line('about to log at INFO ...') + logger.info('Actually logged at INFO') + write_line('about to log at WARNING ...') + logger.warning('Actually logged at WARNING') + if fail: + write_line('about to log at ERROR ...') + logger.error('Actually logged at ERROR') + write_line('about to log at CRITICAL ...') + logger.critical('Actually logged at CRITICAL') + return fail + + decorated_foo = log_if_errors(logger)(foo) + + if __name__ == '__main__': + logger.setLevel(logging.DEBUG) + write_line('Calling undecorated foo with False') + assert not foo(False) + write_line('Calling undecorated foo with True') + assert foo(True) + write_line('Calling decorated foo with False') + assert not decorated_foo(False) + write_line('Calling decorated foo with True') + assert decorated_foo(True) + +When this script is run, the following output should be observed:: + + Calling undecorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling undecorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + about to log at CRITICAL ... + Calling decorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling decorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + Actually logged at DEBUG + Actually logged at INFO + Actually logged at WARNING + Actually logged at ERROR + about to log at CRITICAL ... + Actually logged at CRITICAL + +As you can see, actual logging output only occurs when an event is logged whose +severity is ERROR or greater, but in that case, any previous events at lower +severities are also logged. + +You can of course use the conventional means of decoration:: + + @log_if_errors(logger) + def foo(fail=False): + ... + + +.. _utc-formatting: + +Formatting times using UTC (GMT) via configuration +-------------------------------------------------- + +Sometimes you want to format times using UTC, which can be done using a class +such as `UTCFormatter`, shown below:: + + import logging + import time + + class UTCFormatter(logging.Formatter): + converter = time.gmtime + +and you can then use the ``UTCFormatter`` in your code instead of +:class:`~logging.Formatter`. 
If you want to do that via configuration, you can +use the :func:`~logging.config.dictConfig` API with an approach illustrated by +the following complete example:: + + import logging + import logging.config + import time + + class UTCFormatter(logging.Formatter): + converter = time.gmtime + + LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'utc': { + '()': UTCFormatter, + 'format': '%(asctime)s %(message)s', + }, + 'local': { + 'format': '%(asctime)s %(message)s', + } + }, + 'handlers': { + 'console1': { + 'class': 'logging.StreamHandler', + 'formatter': 'utc', + }, + 'console2': { + 'class': 'logging.StreamHandler', + 'formatter': 'local', + }, + }, + 'root': { + 'handlers': ['console1', 'console2'], + } + } + + if __name__ == '__main__': + logging.config.dictConfig(LOGGING) + logging.warning('The local time is %s', time.asctime()) + +When this script is run, it should print something like:: + + 2015-10-17 12:53:29,501 The local time is Sat Oct 17 13:53:29 2015 + 2015-10-17 13:53:29,501 The local time is Sat Oct 17 13:53:29 2015 + +showing how the time is formatted both as local time and UTC, one for each +handler. diff --git a/Doc/howto/urllib2.rst b/Doc/howto/urllib2.rst index 01ae5136..e3d77142 100644 --- a/Doc/howto/urllib2.rst +++ b/Doc/howto/urllib2.rst @@ -174,7 +174,7 @@ Explorer [#]_. :: import urllib.request url = 'http://www.someserver.com/cgi-bin/register.cgi' - user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' + user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)' values = {'name' : 'Michael Foord', 'location' : 'Northampton', 'language' : 'Python' } @@ -572,8 +572,7 @@ Footnotes This document was reviewed and revised by John Lee. -.. [#] Like Google for example. The *proper* way to use google from a program - is to use `PyGoogle `_ of course. +.. [#] Google for example. .. [#] Browser sniffing is a very bad practise for website design - building sites using web standards is much more sensible. Unfortunately a lot of sites still send different versions to different browsers. diff --git a/Doc/includes/typestruct.h b/Doc/includes/typestruct.h index 5c9f8f5d..9f47899a 100644 --- a/Doc/includes/typestruct.h +++ b/Doc/includes/typestruct.h @@ -1,7 +1,7 @@ typedef struct _typeobject { PyObject_VAR_HEAD - char *tp_name; /* For printing, in format "." */ - int tp_basicsize, tp_itemsize; /* For allocation */ + const char *tp_name; /* For printing, in format "." 
*/ + Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */ /* Methods to implement standard operations */ @@ -9,7 +9,8 @@ typedef struct _typeobject { printfunc tp_print; getattrfunc tp_getattr; setattrfunc tp_setattr; - PyAsyncMethods *tp_as_async; + PyAsyncMethods *tp_as_async; /* formerly known as tp_compare (Python 2) + or tp_reserved (Python 3) */ reprfunc tp_repr; /* Method suites for standard classes */ @@ -30,9 +31,9 @@ typedef struct _typeobject { PyBufferProcs *tp_as_buffer; /* Flags to define presence of optional/expanded features */ - long tp_flags; + unsigned long tp_flags; - char *tp_doc; /* Documentation string */ + const char *tp_doc; /* Documentation string */ /* call function for all accessible objects */ traverseproc tp_traverse; @@ -44,7 +45,7 @@ typedef struct _typeobject { richcmpfunc tp_richcompare; /* weak reference enabler */ - long tp_weaklistoffset; + Py_ssize_t tp_weaklistoffset; /* Iterators */ getiterfunc tp_iter; @@ -58,7 +59,7 @@ typedef struct _typeobject { PyObject *tp_dict; descrgetfunc tp_descr_get; descrsetfunc tp_descr_set; - long tp_dictoffset; + Py_ssize_t tp_dictoffset; initproc tp_init; allocfunc tp_alloc; newfunc tp_new; @@ -69,7 +70,6 @@ typedef struct _typeobject { PyObject *tp_cache; PyObject *tp_subclasses; PyObject *tp_weaklist; - destructor tp_del; /* Type attribute cache version tag. Added in version 2.6 */ diff --git a/Doc/install/index.rst b/Doc/install/index.rst index 876f350f..febdb7c8 100644 --- a/Doc/install/index.rst +++ b/Doc/install/index.rst @@ -28,16 +28,16 @@ modules and extensions. .. note:: - This guide only covers the basic tools for installing extensions that are - provided as part of this version of Python. Third party tools offer easier - to use and more secure alternatives. Refer to the - `quick recommendations section - `__ + This guide only covers the basic tools for building and distributing + extensions that are provided as part of this version of Python. Third party + tools offer easier to use and more secure alternatives. Refer to the `quick + recommendations section `__ in the Python Packaging User Guide for more information. .. _inst-intro: + Introduction ============ @@ -148,7 +148,7 @@ into. For example, if you've just downloaded a module source distribution On Windows, you'd probably download :file:`foo-1.0.zip`. If you downloaded the archive file to :file:`C:\\Temp`, then it would unpack into -:file:`C:\\Temp\\foo-1.0`; you can use either a archive manipulator with a +:file:`C:\\Temp\\foo-1.0`; you can use either an archive manipulator with a graphical user interface (such as WinZip) or a command-line tool (such as :program:`unzip` or :program:`pkunzip`) to unpack the archive. Then, open a command prompt window and run:: diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst index 64738618..31f681d7 100644 --- a/Doc/library/2to3.rst +++ b/Doc/library/2to3.rst @@ -271,7 +271,7 @@ and off individually. They are described here in more detail. .. 2to3fixer:: input - Converts ``input(prompt)`` to ``eval(input(prompt))`` + Converts ``input(prompt)`` to ``eval(input(prompt))``. .. 2to3fixer:: intern diff --git a/Doc/library/_thread.rst b/Doc/library/_thread.rst index a787e2fd..7122861c 100644 --- a/Doc/library/_thread.rst +++ b/Doc/library/_thread.rst @@ -93,7 +93,8 @@ It defines the following constants and functions: Return the thread stack size used when creating new threads. 
The optional *size* argument specifies the stack size to be used for subsequently created threads, and must be 0 (use platform or configured default) or a positive - integer value of at least 32,768 (32 KiB). If changing the thread stack size is + integer value of at least 32,768 (32 KiB). If *size* is not specified, + 0 is used. If changing the thread stack size is unsupported, a :exc:`RuntimeError` is raised. If the specified stack size is invalid, a :exc:`ValueError` is raised and the stack size is unmodified. 32 KiB is currently the minimum supported stack size value to guarantee sufficient diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst index 72095a82..2877437b 100644 --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -530,7 +530,7 @@ allow_abbrev ^^^^^^^^^^^^ Normally, when you pass an argument list to the -:meth:`~ArgumentParser.parse_args` method of a :class:`ArgumentParser`, +:meth:`~ArgumentParser.parse_args` method of an :class:`ArgumentParser`, it :ref:`recognizes abbreviations ` of long options. This feature can be disabled by setting ``allow_abbrev`` to ``False``:: @@ -2011,4 +2011,4 @@ A partial upgrade path from :mod:`optparse` to :mod:`argparse`: ``%(default)s`` and ``%(prog)s``. * Replace the OptionParser constructor ``version`` argument with a call to - ``parser.add_argument('--version', action='version', version='')`` + ``parser.add_argument('--version', action='version', version='')``. diff --git a/Doc/library/ast.rst b/Doc/library/ast.rst index 1ee51102..5373acd5 100644 --- a/Doc/library/ast.rst +++ b/Doc/library/ast.rst @@ -104,7 +104,7 @@ The abstract grammar is currently defined as follows: :mod:`ast` Helpers ------------------ -Apart from the node classes, :mod:`ast` module defines these utility functions +Apart from the node classes, the :mod:`ast` module defines these utility functions and classes for traversing abstract syntax trees: .. function:: parse(source, filename='', mode='exec') diff --git a/Doc/library/asyncio-dev.rst b/Doc/library/asyncio-dev.rst index e27cef88..156c5c06 100644 --- a/Doc/library/asyncio-dev.rst +++ b/Doc/library/asyncio-dev.rst @@ -14,14 +14,14 @@ This page lists common traps and explains how to avoid them. Debug mode of asyncio --------------------- -The implementation of :mod:`asyncio` module has been written for performances. -To development with asyncio, it's required to enable the debug checks to ease -the development of asynchronous code. +The implementation of :mod:`asyncio` has been written for performance. +In order to ease the development of asynchronous code, you may wish to +enable *debug mode*. -Setup an application to enable all debug checks: +To enable all debug checks for an application: * Enable the asyncio debug mode globally by setting the environment variable - :envvar:`PYTHONASYNCIODEBUG` to ``1`` + :envvar:`PYTHONASYNCIODEBUG` to ``1``, or by calling :meth:`BaseEventLoop.set_debug`. * Set the log level of the :ref:`asyncio logger ` to :py:data:`logging.DEBUG`. For example, call ``logging.basicConfig(level=logging.DEBUG)`` at startup. @@ -96,10 +96,9 @@ the same thread. But when the task uses ``yield from``, the task is suspended and the event loop executes the next task. To schedule a callback from a different thread, the -:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example to -schedule a coroutine from a different thread:: +:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. 
Example:: - loop.call_soon_threadsafe(asyncio.ensure_future, coro_func()) + loop.call_soon_threadsafe(callback, *args) Most asyncio objects are not thread safe. You should only worry if you access objects outside the event loop. For example, to cancel a future, don't call @@ -110,6 +109,13 @@ directly its :meth:`Future.cancel` method, but:: To handle signals and to execute subprocesses, the event loop must be run in the main thread. +To schedule a coroutine object from a different thread, the +:func:`run_coroutine_threadsafe` function should be used. It returns a +:class:`concurrent.futures.Future` to access the result:: + + future = asyncio.run_coroutine_threadsafe(coro_func(), loop) + result = future.result(timeout) # Wait for the result with a timeout + The :meth:`BaseEventLoop.run_in_executor` method can be used with a thread pool executor to execute a callback in different thread to not block the thread of the event loop. diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst index 7d7caa8d..dbe351d5 100644 --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -6,7 +6,7 @@ Base Event Loop =============== The event loop is the central execution device provided by :mod:`asyncio`. -It provides multiple facilities, amongst which: +It provides multiple facilities, including: * Registering, executing and cancelling delayed calls (timeouts). @@ -29,7 +29,16 @@ Run an event loop .. method:: BaseEventLoop.run_forever() - Run until :meth:`stop` is called. + Run until :meth:`stop` is called. If :meth:`stop` is called before + :meth:`run_forever()` is called, this polls the I/O selector once + with a timeout of zero, runs all callbacks scheduled in response to + I/O events (and those that were already scheduled), and then exits. + If :meth:`stop` is called while :meth:`run_forever` is running, + this will run the current batch of callbacks and then exit. Note + that callbacks scheduled by callbacks will not run in that case; + they will run the next time :meth:`run_forever` is called. + + .. versionchanged:: 3.5.1 .. method:: BaseEventLoop.run_until_complete(future) @@ -48,10 +57,10 @@ Run an event loop Stop running the event loop. - Every callback scheduled before :meth:`stop` is called will run. - Callbacks scheduled after :meth:`stop` is called will not run. - However, those callbacks will run if :meth:`run_forever` is called - again later. + This causes :meth:`run_forever` to exit at the next suitable + opportunity (see there for more details). + + .. versionchanged:: 3.5.1 .. method:: BaseEventLoop.is_closed() @@ -61,7 +70,8 @@ Run an event loop .. method:: BaseEventLoop.close() - Close the event loop. The loop must not be running. + Close the event loop. The loop must not be running. Pending + callbacks will be lost. This clears the queues and shuts down the executor, but does not wait for the executor to finish. @@ -275,7 +285,9 @@ Creating connections to bind the socket to locally. The *local_host* and *local_port* are looked up using getaddrinfo(), similarly to *host* and *port*. - On Windows with :class:`ProactorEventLoop`, SSL/TLS is not supported. + .. versionchanged:: 3.5 + + On Windows with :class:`ProactorEventLoop`, SSL/TLS is now supported. .. seealso:: @@ -283,17 +295,50 @@ Creating connections (:class:`StreamReader`, :class:`StreamWriter`) instead of a protocol. -.. coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0) +.. 
coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0, reuse_address=None, reuse_port=None, allow_broadcast=None, sock=None) Create datagram connection: socket family :py:data:`~socket.AF_INET` or :py:data:`~socket.AF_INET6` depending on *host* (or *family* if specified), - socket type :py:data:`~socket.SOCK_DGRAM`. + socket type :py:data:`~socket.SOCK_DGRAM`. *protocol_factory* must be a + callable returning a :ref:`protocol ` instance. This method is a :ref:`coroutine ` which will try to establish the connection in the background. When successful, the coroutine returns a ``(transport, protocol)`` pair. - See the :meth:`BaseEventLoop.create_connection` method for parameters. + Options changing how the connection is created: + + * *local_addr*, if given, is a ``(local_host, local_port)`` tuple used + to bind the socket to locally. The *local_host* and *local_port* + are looked up using :meth:`getaddrinfo`. + + * *remote_addr*, if given, is a ``(remote_host, remote_port)`` tuple used + to connect the socket to a remote address. The *remote_host* and + *remote_port* are looked up using :meth:`getaddrinfo`. + + * *family*, *proto*, *flags* are the optional address family, protocol + and flags to be passed through to :meth:`getaddrinfo` for *host* + resolution. If given, these should all be integers from the + corresponding :mod:`socket` module constants. + + * *reuse_address* tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on Windows + and some UNIX's. If the :py:data:`~socket.SO_REUSEPORT` constant is not + defined then this capability is unsupported. + + * *allow_broadcast* tells the kernel to allow this endpoint to send + messages to the broadcast address. + + * *sock* can optionally be specified in order to use a preexisting, + already connected, :class:`socket.socket` object to be used by the + transport. If specified, *local_addr* and *remote_addr* should be omitted + (must be :const:`None`). On Windows with :class:`ProactorEventLoop`, this method is not supported. @@ -320,7 +365,7 @@ Creating connections Creating listening connections ------------------------------ -.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None) +.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None, reuse_port=None) Create a TCP server (socket type :data:`~socket.SOCK_STREAM`) bound to *host* and *port*. @@ -331,9 +376,12 @@ Creating listening connections Parameters: - * If *host* is an empty string or ``None``, all interfaces are assumed - and a list of multiple sockets will be returned (most likely - one for IPv4 and another one for IPv6). + * The *host* parameter can be a string, in that case the TCP server is + bound to *host* and *port*. The *host* parameter can also be a sequence + of strings and in that case the TCP server is bound to all hosts of the + sequence. 
If *host* is an empty string or ``None``, all interfaces are + assumed and a list of multiple sockets will be returned (most likely one + for IPv4 and another one for IPv6). * *family* can be set to either :data:`socket.AF_INET` or :data:`~socket.AF_INET6` to force the socket to use IPv4 or IPv6. If not set @@ -356,15 +404,26 @@ Creating listening connections expire. If not specified will automatically be set to True on UNIX. + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on + Windows. + This method is a :ref:`coroutine `. - On Windows with :class:`ProactorEventLoop`, SSL/TLS is not supported. + .. versionchanged:: 3.5 + + On Windows with :class:`ProactorEventLoop`, SSL/TLS is now supported. .. seealso:: The function :func:`start_server` creates a (:class:`StreamReader`, :class:`StreamWriter`) pair and calls back a function with this pair. + .. versionchanged:: 3.5.1 + + The *host* parameter can now be a sequence of strings. + .. coroutinemethod:: BaseEventLoop.create_unix_server(protocol_factory, path=None, \*, sock=None, backlog=100, ssl=None) @@ -575,14 +634,14 @@ Call a function in an :class:`~concurrent.futures.Executor` (pool of threads or pool of processes). By default, an event loop uses a thread pool executor (:class:`~concurrent.futures.ThreadPoolExecutor`). -.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, callback, \*args) +.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, func, \*args) - Arrange for a callback to be called in the specified executor. + Arrange for a *func* to be called in the specified executor. The *executor* argument should be an :class:`~concurrent.futures.Executor` instance. The default executor is used if *executor* is ``None``. - :ref:`Use functools.partial to pass keywords to the callback + :ref:`Use functools.partial to pass keywords to the *func* `. This method is a :ref:`coroutine `. @@ -853,7 +912,7 @@ the :meth:`BaseEventLoop.add_signal_handler` method:: loop.add_signal_handler(getattr(signal, signame), functools.partial(ask_exit, signame)) - print("Event loop running forever, press CTRL+c to interrupt.") + print("Event loop running forever, press Ctrl+C to interrupt.") print("pid %s: send SIGINT or SIGTERM to exit." % os.getpid()) try: loop.run_forever() diff --git a/Doc/library/asyncio-eventloops.rst b/Doc/library/asyncio-eventloops.rst index ae3bf902..b2e7d7c9 100644 --- a/Doc/library/asyncio-eventloops.rst +++ b/Doc/library/asyncio-eventloops.rst @@ -57,9 +57,9 @@ asyncio currently provides two implementations of event loops: Example to use a :class:`ProactorEventLoop` on Windows:: - import asyncio, os + import asyncio, sys - if os.name == 'nt': + if sys.platform == 'win32': loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) @@ -198,4 +198,3 @@ Access to the global loop policy Set the current event loop policy. If *policy* is ``None``, the default policy is restored. - diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst index e68c2349..fffd3e8d 100644 --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -41,6 +41,11 @@ BaseTransport protocol's :meth:`connection_lost` method will be called with :const:`None` as its argument. + .. method:: is_closing(self) + + Return ``True`` if the transport is closing or is closed. + + .. versionadded:: 3.5.1 .. 
method:: get_extra_info(name, default=None) @@ -71,6 +76,8 @@ BaseTransport - ``'peercert'``: peer certificate; result of :meth:`ssl.SSLSocket.getpeercert` - ``'sslcontext'``: :class:`ssl.SSLContext` instance + - ``'ssl_object'``: :class:`ssl.SSLObject` or :class:`ssl.SSLSocket` + instance * pipe: @@ -80,6 +87,9 @@ BaseTransport - ``'subprocess'``: :class:`subprocess.Popen` instance + .. versionchanged:: 3.5.1 + ``'ssl_object'`` info was added to SSL sockets. + ReadTransport ------------- @@ -143,7 +153,7 @@ WriteTransport high-water limit. Neither *high* nor *low* can be negative. The defaults are implementation-specific. If only the - high-water limit is given, the low-water limit defaults to a + high-water limit is given, the low-water limit defaults to an implementation-specific value less than or equal to the high-water limit. Setting *high* to zero forces *low* to zero as well, and causes :meth:`pause_writing` to be called whenever the @@ -227,7 +237,7 @@ BaseSubprocessTransport .. method:: kill(self) - Kill the subprocess, as in :meth:`subprocess.Popen.kill` + Kill the subprocess, as in :meth:`subprocess.Popen.kill`. On POSIX systems, the function sends SIGKILL to the subprocess. On Windows, this method is an alias for :meth:`terminate`. @@ -484,7 +494,7 @@ data and wait until the connection is closed:: def connection_lost(self, exc): print('The server closed the connection') - print('Stop the event lop') + print('Stop the event loop') self.loop.stop() loop = asyncio.get_event_loop() @@ -539,7 +549,7 @@ received data and close the connection:: coro = loop.create_server(EchoServerClientProtocol, '127.0.0.1', 8888) server = loop.run_until_complete(coro) - # Serve requests until CTRL+c is pressed + # Serve requests until Ctrl+C is pressed print('Serving on {}'.format(server.sockets[0].getsockname())) try: loop.run_forever() diff --git a/Doc/library/asyncio-stream.rst b/Doc/library/asyncio-stream.rst index e9638e36..171fd862 100644 --- a/Doc/library/asyncio-stream.rst +++ b/Doc/library/asyncio-stream.rst @@ -9,6 +9,13 @@ Streams (high-level API) Stream functions ================ +.. note:: + + The top-level functions in this module are meant convenience wrappers + only; there's really nothing special there, and if they don't do + exactly what you want, feel free to copy their code. + + .. coroutinefunction:: open_connection(host=None, port=None, \*, loop=None, limit=None, \*\*kwds) A wrapper for :meth:`~BaseEventLoop.create_connection()` returning a (reader, @@ -26,10 +33,6 @@ Stream functions instance to use) and *limit* (to set the buffer limit passed to the :class:`StreamReader`). - (If you want to customize the :class:`StreamReader` and/or - :class:`StreamReaderProtocol` classes, just copy the code -- there's really - nothing special here except some convenience.) - This function is a :ref:`coroutine `. .. coroutinefunction:: start_server(client_connected_cb, host=None, port=None, \*, loop=None, limit=None, \*\*kwds) @@ -228,8 +231,8 @@ StreamReaderProtocol (This is a helper class instead of making :class:`StreamReader` itself a :class:`Protocol` subclass, because the :class:`StreamReader` has other - potential uses, and to prevent the user of the :class:`StreamReader` to - accidentally call inappropriate methods of the protocol.) + potential uses, and to prevent the user of the :class:`StreamReader` from + accidentally calling inappropriate methods of the protocol.) 
IncompleteReadError @@ -312,7 +315,7 @@ TCP echo server using the :func:`asyncio.start_server` function:: coro = asyncio.start_server(handle_echo, '127.0.0.1', 8888, loop=loop) server = loop.run_until_complete(coro) - # Serve requests until CTRL+c is pressed + # Serve requests until Ctrl+C is pressed print('Serving on {}'.format(server.sockets[0].getsockname())) try: loop.run_forever() diff --git a/Doc/library/asyncio-subprocess.rst b/Doc/library/asyncio-subprocess.rst index 3c9e3cbd..21dae54e 100644 --- a/Doc/library/asyncio-subprocess.rst +++ b/Doc/library/asyncio-subprocess.rst @@ -12,9 +12,9 @@ On Windows, the default event loop is :class:`SelectorEventLoop` which does not support subprocesses. :class:`ProactorEventLoop` should be used instead. Example to use it on Windows:: - import asyncio, os + import asyncio, sys - if os.name == 'nt': + if sys.platform == 'win32': loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) @@ -303,7 +303,7 @@ Process .. _asyncio-subprocess-threads: Subprocess and threads -====================== +---------------------- asyncio supports running subprocesses from different threads, but there are limits: @@ -322,10 +322,10 @@ The :class:`asyncio.subprocess.Process` class is not thread safe. Subprocess examples -=================== +------------------- Subprocess using transport and protocol ---------------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Example of a subprocess protocol using to get the output of a subprocess and to wait for the subprocess exit. The subprocess is created by the @@ -381,7 +381,7 @@ wait for the subprocess exit. The subprocess is created by the Subprocess using streams ------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^ Example using the :class:`~asyncio.subprocess.Process` class to control the subprocess and the :class:`StreamReader` class to read from the standard diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst index 166ab738..051ae09b 100644 --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -683,3 +683,42 @@ Task functions .. versionchanged:: 3.4.3 If the wait is cancelled, the future *fut* is now also cancelled. + +.. function:: run_coroutine_threadsafe(coro, loop) + + Submit a :ref:`coroutine object ` to a given event loop. + + Return a :class:`concurrent.futures.Future` to access the result. + + This function is meant to be called from a different thread than the one + where the event loop is running. Usage:: + + # Create a coroutine + coro = asyncio.sleep(1, result=3) + # Submit the coroutine to a given loop + future = asyncio.run_coroutine_threadsafe(coro, loop) + # Wait for the result with an optional timeout argument + assert future.result(timeout) == 3 + + If an exception is raised in the coroutine, the returned future will be + notified. It can also be used to cancel the task in the event loop:: + + try: + result = future.result(timeout) + except asyncio.TimeoutError: + print('The coroutine took too long, cancelling the task...') + future.cancel() + except Exception as exc: + print('The coroutine raised an exception: {!r}'.format(exc)) + else: + print('The coroutine returned: {!r}'.format(result)) + + See the :ref:`concurrency and multithreading ` + section of the documentation. + + .. note:: + + Unlike the functions above, :func:`run_coroutine_threadsafe` requires the + *loop* argument to be passed explicitely. + + .. 
versionadded:: 3.4.4, 3.5.1 diff --git a/Doc/library/asyncio.rst b/Doc/library/asyncio.rst index b292ae91..9b4d65e5 100644 --- a/Doc/library/asyncio.rst +++ b/Doc/library/asyncio.rst @@ -4,6 +4,13 @@ .. module:: asyncio :synopsis: Asynchronous I/O, event loop, coroutines and tasks. +.. note:: + + The asyncio package has been included in the standard library on a + :term:`provisional basis `. Backwards incompatible + changes (up to and including removal of the module) may occur if deemed + necessary by the core developers. + .. versionadded:: 3.4 **Source code:** :source:`Lib/asyncio/` diff --git a/Doc/library/bdb.rst b/Doc/library/bdb.rst index 7229087b..8ee99215 100644 --- a/Doc/library/bdb.rst +++ b/Doc/library/bdb.rst @@ -231,7 +231,7 @@ The :mod:`bdb` module also defines two classes: .. method:: set_until(frame) Stop when the line with the line no greater than the current one is - reached or when returning from current frame + reached or when returning from current frame. .. method:: set_trace([frame]) diff --git a/Doc/library/binascii.rst b/Doc/library/binascii.rst index 3f7df74d..e3f134b5 100644 --- a/Doc/library/binascii.rst +++ b/Doc/library/binascii.rst @@ -59,7 +59,7 @@ The :mod:`binascii` module defines the following functions: should be at most 57 to adhere to the base64 standard. -.. function:: a2b_qp(string, header=False) +.. function:: a2b_qp(data, header=False) Convert a block of quoted-printable data back to binary and return the binary data. More than one line may be passed at a time. If the optional argument diff --git a/Doc/library/cgi.rst b/Doc/library/cgi.rst index 0a9e8842..7e496ca3 100644 --- a/Doc/library/cgi.rst +++ b/Doc/library/cgi.rst @@ -292,7 +292,7 @@ algorithms implemented in this module in other circumstances. .. function:: parse_qsl(qs, keep_blank_values=False, strict_parsing=False) - This function is deprecated in this module. Use :func:`urllib.parse.parse_qs` + This function is deprecated in this module. Use :func:`urllib.parse.parse_qsl` instead. It is maintained here only for backward compatibility. .. function:: parse_multipart(fp, pdict) diff --git a/Doc/library/chunk.rst b/Doc/library/chunk.rst index 50b6979f..a90e9f80 100644 --- a/Doc/library/chunk.rst +++ b/Doc/library/chunk.rst @@ -47,7 +47,7 @@ Usually an IFF-type file consists of one or more chunks. The proposed usage of the :class:`Chunk` class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end of the file, creating a new -instance will fail with a :exc:`EOFError` exception. +instance will fail with an :exc:`EOFError` exception. .. 
class:: Chunk(file, align=True, bigendian=True, inclheader=False) diff --git a/Doc/library/codecs.rst b/Doc/library/codecs.rst index dae556e1..46d72b5e 100644 --- a/Doc/library/codecs.rst +++ b/Doc/library/codecs.rst @@ -1148,6 +1148,8 @@ particular, the following variants typically exist: +-----------------+--------------------------------+--------------------------------+ | iso8859_10 | iso-8859-10, latin6, L6 | Nordic languages | +-----------------+--------------------------------+--------------------------------+ +| iso8859_11 | iso-8859-11, thai | Thai languages | ++-----------------+--------------------------------+--------------------------------+ | iso8859_13 | iso-8859-13, latin7, L7 | Baltic languages | +-----------------+--------------------------------+--------------------------------+ | iso8859_14 | iso-8859-14, latin8, L8 | Celtic languages | @@ -1310,9 +1312,9 @@ to :class:`bytes` mappings. They are not supported by :meth:`bytes.decode` +----------------------+------------------+------------------------------+------------------------------+ | Codec | Aliases | Purpose | Encoder / decoder | +======================+==================+==============================+==============================+ -| base64_codec [#b64]_ | base64, base_64 | Convert operand to MIME | :meth:`base64.b64encode` / | -| | | base64 (the result always | :meth:`base64.b64decode` | -| | | includes a trailing | | +| base64_codec [#b64]_ | base64, base_64 | Convert operand to multiline | :meth:`base64.encodebytes` / | +| | | MIME base64 (the result | :meth:`base64.decodebytes` | +| | | always includes a trailing | | | | | ``'\n'``) | | | | | | | | | | .. versionchanged:: 3.4 | | @@ -1324,14 +1326,14 @@ to :class:`bytes` mappings. They are not supported by :meth:`bytes.decode` | bz2_codec | bz2 | Compress the operand | :meth:`bz2.compress` / | | | | using bz2 | :meth:`bz2.decompress` | +----------------------+------------------+------------------------------+------------------------------+ -| hex_codec | hex | Convert operand to | :meth:`base64.b16encode` / | -| | | hexadecimal | :meth:`base64.b16decode` | +| hex_codec | hex | Convert operand to | :meth:`binascii.b2a_hex` / | +| | | hexadecimal | :meth:`binascii.a2b_hex` | | | | representation, with two | | | | | digits per byte | | +----------------------+------------------+------------------------------+------------------------------+ -| quopri_codec | quopri, | Convert operand to MIME | :meth:`quopri.encodestring` /| -| | quotedprintable, | quoted printable | :meth:`quopri.decodestring` | -| | quoted_printable | | | +| quopri_codec | quopri, | Convert operand to MIME | :meth:`quopri.encode` with | +| | quotedprintable, | quoted printable | ``quotetabs=True`` / | +| | quoted_printable | | :meth:`quopri.decode` | +----------------------+------------------+------------------------------+------------------------------+ | uu_codec | uu | Convert the operand using | :meth:`uu.encode` / | | | | uuencode | :meth:`uu.decode` | diff --git a/Doc/library/collections.abc.rst b/Doc/library/collections.abc.rst index 563c1bc4..d9b93ad2 100644 --- a/Doc/library/collections.abc.rst +++ b/Doc/library/collections.abc.rst @@ -150,7 +150,7 @@ ABC Inherits from Abstract Methods Mixin KeysView ValuesView - ABCs for mapping, items, keys, and values :term:`views `. + ABCs for mapping, items, keys, and values :term:`views `. .. 
class:: Awaitable diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst index f2befe7c..2ab44135 100644 --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -842,10 +842,10 @@ field names, the method and attribute names start with an underscore. .. method:: somenamedtuple._asdict() Return a new :class:`OrderedDict` which maps field names to their corresponding - values. Note, this method is no longer needed now that the same effect can - be achieved by using the built-in :func:`vars` function:: + values:: - >>> vars(p) + >>> p = Point(x=11, y=22) + >>> p._asdict() OrderedDict([('x', 11), ('y', 22)]) .. versionchanged:: 3.1 @@ -905,15 +905,15 @@ functionality with a subclass. Here is how to add a calculated field and a fixed-width print format: >>> class Point(namedtuple('Point', 'x y')): - __slots__ = () - @property - def hypot(self): - return (self.x ** 2 + self.y ** 2) ** 0.5 - def __str__(self): - return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) + __slots__ = () + @property + def hypot(self): + return (self.x ** 2 + self.y ** 2) ** 0.5 + def __str__(self): + return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) >>> for p in Point(3, 4), Point(14, 5/7): - print(p) + print(p) Point: x= 3.000 y= 4.000 hypot= 5.000 Point: x=14.000 y= 0.714 hypot=14.018 @@ -929,10 +929,10 @@ Docstrings can be customized by making direct assignments to the ``__doc__`` fields: >>> Book = namedtuple('Book', ['id', 'title', 'authors']) - >>> Book.__doc__ = 'Hardcover book in active collection' + >>> Book.__doc__ += ': Hardcover book in active collection' >>> Book.id.__doc__ = '13-digit ISBN' >>> Book.title.__doc__ = 'Title of first printing' - >>> Book.author.__doc__ = 'List of authors sorted by last name' + >>> Book.authors.__doc__ = 'List of authors sorted by last name' Default values can be implemented by using :meth:`_replace` to customize a prototype instance: @@ -942,16 +942,6 @@ customize a prototype instance: >>> johns_account = default_account._replace(owner='John') >>> janes_account = default_account._replace(owner='Jane') -Enumerated constants can be implemented with named tuples, but it is simpler -and more efficient to use a simple :class:`~enum.Enum`: - - >>> Status = namedtuple('Status', 'open pending closed')._make(range(3)) - >>> Status.open, Status.pending, Status.closed - (0, 1, 2) - >>> from enum import Enum - >>> class Status(Enum): - ... open, pending, closed = range(3) - .. seealso:: @@ -1015,11 +1005,11 @@ anywhere a regular dictionary is used. The :class:`OrderedDict` constructor and :meth:`update` method both accept keyword arguments, but their order is lost because Python's function call -semantics pass-in keyword arguments using a regular unordered dictionary. +semantics pass in keyword arguments using a regular unordered dictionary. .. versionchanged:: 3.5 - The items, keys, and values :term:`views ` of :class:`OrderedDict` now - support reverse iteration using :func:`reversed`. + The items, keys, and values :term:`views ` + of :class:`OrderedDict` now support reverse iteration using :func:`reversed`. :class:`OrderedDict` Examples and Recipes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1157,3 +1147,7 @@ attribute. be an instance of :class:`bytes`, :class:`str`, :class:`UserString` (or a subclass) or an arbitrary sequence which can be converted into a string using the built-in :func:`str` function. + + .. 
versionchanged:: 3.5 + New methods ``__getnewargs__``, ``__rmod__``, ``casefold``, + ``format_map``, ``isprintable``, and ``maketrans``. diff --git a/Doc/library/compileall.rst b/Doc/library/compileall.rst index 0325f1ac..c5736f20 100644 --- a/Doc/library/compileall.rst +++ b/Doc/library/compileall.rst @@ -88,14 +88,9 @@ compile Python sources. Added the ``-i``, ``-b`` and ``-h`` options. .. versionchanged:: 3.5 - Added the ``-j`` and ``-r`` options. - -.. versionchanged:: 3.5 - ``-q`` option was changed to a multilevel value. - -.. versionchanged:: 3.5 - ``-b`` will always produce a byte-code file ending in ``.pyc``, never - ``.pyo``. + Added the ``-j``, ``-r``, and ``-qq`` options. ``-q`` option + was changed to a multilevel value. ``-b`` will always produce a + byte-code file ending in ``.pyc``, never ``.pyo``. There is no command-line option to control the optimization level used by the diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst index 11b39168..8bb70a50 100644 --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -84,13 +84,13 @@ Executor Objects e.submit(shutil.copy, 'src1.txt', 'dest1.txt') e.submit(shutil.copy, 'src2.txt', 'dest2.txt') e.submit(shutil.copy, 'src3.txt', 'dest3.txt') - e.submit(shutil.copy, 'src3.txt', 'dest4.txt') + e.submit(shutil.copy, 'src4.txt', 'dest4.txt') ThreadPoolExecutor ------------------ -:class:`ThreadPoolExecutor` is a :class:`Executor` subclass that uses a pool of +:class:`ThreadPoolExecutor` is an :class:`Executor` subclass that uses a pool of threads to execute calls asynchronously. Deadlocks can occur when the callable associated with a :class:`Future` waits on @@ -304,7 +304,7 @@ The :class:`Future` class encapsulates the asynchronous execution of a callable. Added callables are called in the order that they were added and are always called in a thread belonging to the process that added them. If - the callable raises a :exc:`Exception` subclass, it will be logged and + the callable raises an :exc:`Exception` subclass, it will be logged and ignored. If the callable raises a :exc:`BaseException` subclass, the behavior is undefined. diff --git a/Doc/library/contextlib.rst b/Doc/library/contextlib.rst index 550b3476..9e8463df 100644 --- a/Doc/library/contextlib.rst +++ b/Doc/library/contextlib.rst @@ -142,7 +142,7 @@ Functions and classes provided: is hardwired to stdout. For example, the output of :func:`help` normally is sent to *sys.stdout*. - You can capture that output in a string by redirecting the output to a + You can capture that output in a string by redirecting the output to an :class:`io.StringIO` object:: f = io.StringIO() @@ -553,7 +553,7 @@ advance:: Due to the way the decorator protocol works, a callback function declared this way cannot take any parameters. Instead, any resources to -be released must be accessed as closure variables +be released must be accessed as closure variables. Using a context manager as a function decorator diff --git a/Doc/library/copy.rst b/Doc/library/copy.rst index 1db5c2df..02ef0e5e 100644 --- a/Doc/library/copy.rst +++ b/Doc/library/copy.rst @@ -67,8 +67,8 @@ of lists by assigning a slice of the entire list, for example, Classes can use the same interfaces to control copying that they use to control pickling. See the description of module :mod:`pickle` for information on these -methods. In fact, :mod:`copy` module uses the registered pickle functions from -:mod:`copyreg` module. +methods. 
In fact, the :mod:`copy` module uses the registered +pickle functions from the :mod:`copyreg` module. .. index:: single: __copy__() (copy protocol) diff --git a/Doc/library/csv.rst b/Doc/library/csv.rst index 602e9091..4fcfaef2 100644 --- a/Doc/library/csv.rst +++ b/Doc/library/csv.rst @@ -325,7 +325,7 @@ Dialects support the following attributes: .. attribute:: Dialect.doublequote - Controls how instances of *quotechar* appearing inside a field should be + Controls how instances of *quotechar* appearing inside a field should themselves be quoted. When :const:`True`, the character is doubled. When :const:`False`, the *escapechar* is used as a prefix to the *quotechar*. It defaults to :const:`True`. diff --git a/Doc/library/ctypes.rst b/Doc/library/ctypes.rst index 588ac7c1..630d2791 100644 --- a/Doc/library/ctypes.rst +++ b/Doc/library/ctypes.rst @@ -39,7 +39,7 @@ loads libraries which export functions using the standard ``cdecl`` calling convention, while *windll* libraries call functions using the ``stdcall`` calling convention. *oledll* also uses the ``stdcall`` calling convention, and assumes the functions return a Windows :c:type:`HRESULT` error code. The error -code is used to automatically raise a :class:`OSError` exception when the +code is used to automatically raise an :class:`OSError` exception when the function call fails. .. versionchanged:: 3.3 diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst index 5d7ffffb..2de0ea08 100644 --- a/Doc/library/decimal.rst +++ b/Doc/library/decimal.rst @@ -841,7 +841,7 @@ Decimal objects Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. For example, converts - ``Decimal('123E+1')`` to ``Decimal('1.23E+3')`` + ``Decimal('123E+1')`` to ``Decimal('1.23E+3')``. .. method:: to_integral(rounding=None, context=None) diff --git a/Doc/library/difflib.rst b/Doc/library/difflib.rst index 38245e77..e7e5df63 100644 --- a/Doc/library/difflib.rst +++ b/Doc/library/difflib.rst @@ -98,9 +98,9 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. *wrapcolumn* is an optional keyword to specify column number where lines are broken and wrapped, defaults to ``None`` where lines are not wrapped. - *linejunk* and *charjunk* are optional keyword arguments passed into ``ndiff()`` + *linejunk* and *charjunk* are optional keyword arguments passed into :func:`ndiff` (used by :class:`HtmlDiff` to generate the side by side HTML differences). See - ``ndiff()`` documentation for argument default values and descriptions. + :func:`ndiff` documentation for argument default values and descriptions. The following methods are public: @@ -278,7 +278,7 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. generating the delta lines) in unified diff format. Unified diffs are a compact way of showing just the lines that have changed plus - a few lines of context. The changes are shown in a inline style (instead of + a few lines of context. The changes are shown in an inline style (instead of separate before/after blocks). The number of context lines is set by *n* which defaults to three. diff --git a/Doc/library/dis.rst b/Doc/library/dis.rst index 7a214edc..1bcb3a4a 100644 --- a/Doc/library/dis.rst +++ b/Doc/library/dis.rst @@ -946,8 +946,8 @@ the more significant byte last. Creates a new function object, sets its *__closure__* slot, and pushes it on the stack. 
TOS is the :term:`qualified name` of the function, TOS1 is the code associated with the function, and TOS2 is the tuple containing cells for - the closure's free variables. The function also has *argc* default - parameters, which are found below the cells. + the closure's free variables. *argc* is interpreted as in ``MAKE_FUNCTION``; + the annotations and defaults are also in the same order below TOS2. .. opcode:: BUILD_SLICE (argc) diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst index 9aa9ea66..9f7d12c6 100644 --- a/Doc/library/doctest.rst +++ b/Doc/library/doctest.rst @@ -914,15 +914,10 @@ and :ref:`doctest-simple-testfile`. above, except that *globs* defaults to ``m.__dict__``. -There's also a function to run the doctests associated with a single object. -This function is provided for backward compatibility. There are no plans to -deprecate it, but it's rarely useful: - - .. function:: run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0) - Test examples associated with object *f*; for example, *f* may be a module, - function, or class object. + Test examples associated with object *f*; for example, *f* may be a string, + a module, a function, or a class object. A shallow copy of dictionary argument *globs* is used for the execution context. @@ -1815,6 +1810,27 @@ several options for organizing tests: * Define a ``__test__`` dictionary mapping from regression test topics to docstrings containing test cases. +When you have placed your tests in a module, the module can itself be the test +runner. When a test fails, you can arrange for your test runner to re-run only +the failing doctest while you debug the problem. Here is a minimal example of +such a test runner:: + + if __name__ == '__main__': + import doctest + flags = doctest.REPORT_NDIFF|doctest.FAIL_FAST + if len(sys.argv) > 1: + name = sys.argv[1] + if name in globals(): + obj = globals()[name] + else: + obj = __test__[name] + doctest.run_docstring_examples(obj, globals(), name=name, + optionflags=flags) + else: + fail, total = doctest.testmod(optionflags=flags) + print("{} failures out of {} tests".format(fail, total)) + + .. rubric:: Footnotes .. [#] Examples containing both expected output and an exception are not supported. diff --git a/Doc/library/email.charset.rst b/Doc/library/email.charset.rst index 19a69532..80ef3d62 100644 --- a/Doc/library/email.charset.rst +++ b/Doc/library/email.charset.rst @@ -234,5 +234,5 @@ new entries to the global character set, alias, and codec registries: *charset* is the canonical name of a character set. *codecname* is the name of a Python codec, as appropriate for the second argument to the :class:`str`'s - :meth:`~str.encode` method + :meth:`~str.encode` method. diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst index ec74fe02..177adc62 100644 --- a/Doc/library/email.parser.rst +++ b/Doc/library/email.parser.rst @@ -146,7 +146,7 @@ have the same API as the :class:`Parser` and :class:`BytesParser` classes. methods on file-like objects. The text contained in *fp* must be formatted as a block of :rfc:`2822` - style headers and header continuation lines, optionally preceded by a + style headers and header continuation lines, optionally preceded by an envelope header. The header block is terminated either by the end of the data or by a blank line. Following the header block is the body of the message (which may contain MIME-encoded subparts). 
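To make the header-block/body layout described above concrete, here is a small sketch
(the message text is invented) that feeds a text file-like object to :class:`Parser`
and inspects the result::

    import io
    from email.parser import Parser

    raw = ("From: alice@example.com\n"
           "Subject: Greetings\n"
           "\n"                      # blank line terminates the header block
           "Hello, world!\n")

    msg = Parser().parse(io.StringIO(raw))
    print(msg['Subject'])        # Greetings
    print(msg.get_payload())     # Hello, world!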
@@ -189,7 +189,7 @@ have the same API as the :class:`Parser` and :class:`BytesParser` classes. methods on file-like objects. The bytes contained in *fp* must be formatted as a block of :rfc:`2822` - style headers and header continuation lines, optionally preceded by a + style headers and header continuation lines, optionally preceded by an envelope header. The header block is terminated either by the end of the data or by a blank line. Following the header block is the body of the message (which may contain MIME-encoded subparts, including subparts diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst index d3b838c0..81f97b31 100644 --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -466,7 +466,7 @@ The complete signature is:: :type: type to mix in to new Enum class. -:start: number to start counting at if only names are passed in +:start: number to start counting at if only names are passed in. .. versionchanged:: 3.5 The *start* parameter was added. @@ -714,7 +714,7 @@ allow one to do things with an :class:`Enum` class that fail on a typical class, such as `list(Color)` or `some_var in Color`. :class:`EnumMeta` is responsible for ensuring that various other methods on the final :class:`Enum` class are correct (such as :meth:`__new__`, :meth:`__getnewargs__`, -:meth:`__str__` and :meth:`__repr__`) +:meth:`__str__` and :meth:`__repr__`). Enum Members (aka instances) @@ -730,18 +730,24 @@ member instances. Finer Points ^^^^^^^^^^^^ -Enum members are instances of an Enum class, and even though they are -accessible as `EnumClass.member`, they are not accessible directly from -the member:: +:class:`Enum` members are instances of an :class:`Enum` class, and even +though they are accessible as `EnumClass.member`, they should not be accessed +directly from the member as that lookup may fail or, worse, return something +besides the :class:`Enum` member you looking for:: - >>> Color.red - - >>> Color.red.blue - Traceback (most recent call last): + >>> class FieldTypes(Enum): + ... name = 0 + ... value = 1 + ... size = 2 ... - AttributeError: 'Color' object has no attribute 'blue' + >>> FieldTypes.value.size + + >>> FieldTypes.size.value + 2 + +.. versionchanged:: 3.5 -Likewise, the :attr:`__members__` is only available on the class. +The :attr:`__members__` attribute is only available on the class. If you give your :class:`Enum` subclass extra methods, like the `Planet`_ class above, those methods will show up in a :func:`dir` of the member, diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst index 0a422b23..ba6122e2 100644 --- a/Doc/library/exceptions.rst +++ b/Doc/library/exceptions.rst @@ -232,44 +232,71 @@ The following exceptions are the exceptions that are usually raised. classes to override the method. -.. exception:: OSError +.. exception:: OSError([arg]) + OSError(errno, strerror[, filename[, winerror[, filename2]]]) .. index:: module: errno This exception is raised when a system function returns a system-related error, including I/O failures such as "file not found" or "disk full" - (not for illegal argument types or other incidental errors). Often a - subclass of :exc:`OSError` will actually be raised as described in - `OS exceptions`_ below. The :attr:`errno` attribute is a numeric error - code from the C variable :c:data:`errno`. + (not for illegal argument types or other incidental errors). - Under Windows, the :attr:`winerror` attribute gives you the native - Windows error code. 
The :attr:`errno` attribute is then an approximate - translation, in POSIX terms, of that native error code. + The second form of the constructor sets the corresponding attributes, + described below. The attributes default to :const:`None` if not + specified. For backwards compatibility, if three arguments are passed, + the :attr:`~BaseException.args` attribute contains only a 2-tuple + of the first two constructor arguments. - Under all platforms, the :attr:`strerror` attribute is the corresponding - error message as provided by the operating system (as formatted by the C - functions :c:func:`perror` under POSIX, and :c:func:`FormatMessage` - Windows). + The constructor often actually returns a subclass of :exc:`OSError`, as + described in `OS exceptions`_ below. The particular subclass depends on + the final :attr:`.errno` value. This behaviour only occurs when + constructing :exc:`OSError` directly or via an alias, and is not + inherited when subclassing. - For exceptions that involve a file system path (such as :func:`open` or - :func:`os.unlink`), the exception instance will contain an additional - attribute, :attr:`filename`, which is the file name passed to the function. - For functions that involve two file system paths (such as - :func:`os.rename`), the exception instance will contain a second - :attr:`filename2` attribute corresponding to the second file name passed - to the function. + .. attribute:: errno + + A numeric error code from the C variable :c:data:`errno`. + + .. attribute:: winerror + + Under Windows, this gives you the native + Windows error code. The :attr:`.errno` attribute is then an approximate + translation, in POSIX terms, of that native error code. + + Under Windows, if the *winerror* constructor argument is an integer, + the :attr:`.errno` attribute is determined from the Windows error code, + and the *errno* argument is ignored. On other platforms, the + *winerror* argument is ignored, and the :attr:`winerror` attribute + does not exist. + + .. attribute:: strerror + + The corresponding error message, as provided by + the operating system. It is formatted by the C + functions :c:func:`perror` under POSIX, and :c:func:`FormatMessage` + under Windows. + + .. attribute:: filename + filename2 + + For exceptions that involve a file system path (such as :func:`open` or + :func:`os.unlink`), :attr:`filename` is the file name passed to the function. + For functions that involve two file system paths (such as + :func:`os.rename`), :attr:`filename2` corresponds to the second + file name passed to the function. .. versionchanged:: 3.3 :exc:`EnvironmentError`, :exc:`IOError`, :exc:`WindowsError`, :exc:`VMSError`, :exc:`socket.error`, :exc:`select.error` and - :exc:`mmap.error` have been merged into :exc:`OSError`. + :exc:`mmap.error` have been merged into :exc:`OSError`, and the + constructor may return a subclass. .. versionchanged:: 3.4 The :attr:`filename` attribute is now the original file name passed to the function, instead of the name encoded to or decoded from the - filesystem encoding. Also, the :attr:`filename2` attribute was added. + filesystem encoding. Also, the *filename2* constructor argument and + attribute was added. .. exception:: OverflowError @@ -635,7 +662,7 @@ module for more information. .. exception:: SyntaxWarning - Base class for warnings about dubious syntax + Base class for warnings about dubious syntax. .. 
exception:: RuntimeWarning diff --git a/Doc/library/fcntl.rst b/Doc/library/fcntl.rst index 432140f2..f8bb8340 100644 --- a/Doc/library/fcntl.rst +++ b/Doc/library/fcntl.rst @@ -16,13 +16,13 @@ interface to the :c:func:`fcntl` and :c:func:`ioctl` Unix routines. All functions in this module take a file descriptor *fd* as their first argument. This can be an integer file descriptor, such as returned by -``sys.stdin.fileno()``, or a :class:`io.IOBase` object, such as ``sys.stdin`` +``sys.stdin.fileno()``, or an :class:`io.IOBase` object, such as ``sys.stdin`` itself, which provides a :meth:`~io.IOBase.fileno` that returns a genuine file descriptor. .. versionchanged:: 3.3 - Operations in this module used to raise a :exc:`IOError` where they now - raise a :exc:`OSError`. + Operations in this module used to raise an :exc:`IOError` where they now + raise an :exc:`OSError`. The module defines the following functions: @@ -83,6 +83,8 @@ The module defines the following functions: buffer 1024 bytes long which is then passed to :func:`ioctl` and copied back into the supplied buffer. + If the :c:func:`ioctl` fails, an :exc:`IOError` exception is raised. + An example:: >>> import array, fcntl, struct, termios, os @@ -104,6 +106,8 @@ The module defines the following functions: :manpage:`flock(2)` for details. (On some systems, this function is emulated using :c:func:`fcntl`.) + If the :c:func:`flock` fails, an :exc:`IOError` exception is raised. + .. function:: lockf(fd, cmd, len=0, start=0, whence=0) diff --git a/Doc/library/formatter.rst b/Doc/library/formatter.rst index a515f74f..8e8e201a 100644 --- a/Doc/library/formatter.rst +++ b/Doc/library/formatter.rst @@ -5,9 +5,8 @@ :synopsis: Generic output formatter and device interface. :deprecated: -.. deprecated-removed:: 3.4 3.6 - Due to lack of usage, the formatter module has been deprecated and is slated - for removal in Python 3.6. +.. deprecated:: 3.4 + Due to lack of usage, the formatter module has been deprecated. This module supports two interface definitions, each with multiple diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst index e0f06824..d24f80ac 100644 --- a/Doc/library/fractions.rst +++ b/Doc/library/fractions.rst @@ -97,7 +97,7 @@ another rational number, or from a string. This class method constructs a :class:`Fraction` representing the exact value of *flt*, which must be a :class:`float`. Beware that - ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)`` + ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)``. .. note:: diff --git a/Doc/library/ftplib.rst b/Doc/library/ftplib.rst index 3b9f50ca..f06e678e 100644 --- a/Doc/library/ftplib.rst +++ b/Doc/library/ftplib.rst @@ -52,8 +52,7 @@ The module defines the following items: will be used). *source_address* is a 2-tuple ``(host, port)`` for the socket to bind to as its source address before connecting. - :class:`FTP` class supports the :keyword:`with` statement. Here is a sample - on how using it: + The :class:`FTP` class supports the :keyword:`with` statement, e.g.: >>> from ftplib import FTP >>> with FTP("ftp1.at.proftpd.org") as ftp: @@ -287,9 +286,9 @@ followed by ``lines`` for the text version or ``binary`` for the binary version. .. method:: FTP.transfercmd(cmd, rest=None) - Initiate a transfer over the data connection. If the transfer is active, send a + Initiate a transfer over the data connection. 
If the transfer is active, send an ``EPRT`` or ``PORT`` command and the transfer command specified by *cmd*, and - accept the connection. If the server is passive, send a ``EPSV`` or ``PASV`` + accept the connection. If the server is passive, send an ``EPSV`` or ``PASV`` command, connect to it, and start the transfer command. Either way, return the socket for the connection. @@ -418,8 +417,8 @@ FTP_TLS Objects .. method:: FTP_TLS.auth() - Set up secure control connection by using TLS or SSL, depending on what - specified in :meth:`ssl_version` attribute. + Set up a secure control connection by using TLS or SSL, depending on what + is specified in the :attr:`ssl_version` attribute. .. versionchanged:: 3.4 The method now supports hostname check with diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst index 409f6c4f..c8de0628 100644 --- a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -703,10 +703,10 @@ are always available. They are listed here in alphabetical order. Return true if the *object* argument is an instance of the *classinfo* argument, or of a (direct, indirect or :term:`virtual `) subclass thereof. If *object* is not - an object of the given type, the function always returns false. If - *classinfo* is not a class (type object), it may be a tuple of type objects, - or may recursively contain other such tuples (other sequence types are not - accepted). If *classinfo* is not a type or tuple of types and such tuples, + an object of the given type, the function always returns false. + If *classinfo* is a tuple of type objects (or recursively, other such + tuples), return true if *object* is an instance of any of the types. + If *classinfo* is not a type or tuple of types and such tuples, a :exc:`TypeError` exception is raised. @@ -1032,9 +1032,9 @@ are always available. They are listed here in alphabetical order. :class:`io.TextIOBase` (specifically :class:`io.TextIOWrapper`). When used to open a file in a binary mode with buffering, the returned class is a subclass of :class:`io.BufferedIOBase`. The exact class varies: in read - binary mode, it returns a :class:`io.BufferedReader`; in write binary and - append binary modes, it returns a :class:`io.BufferedWriter`, and in - read/write mode, it returns a :class:`io.BufferedRandom`. When buffering is + binary mode, it returns an :class:`io.BufferedReader`; in write binary and + append binary modes, it returns an :class:`io.BufferedWriter`, and in + read/write mode, it returns an :class:`io.BufferedRandom`. When buffering is disabled, the raw stream, a subclass of :class:`io.RawIOBase`, :class:`io.FileIO`, is returned. diff --git a/Doc/library/glob.rst b/Doc/library/glob.rst index 50f38a4d..4c01bf6f 100644 --- a/Doc/library/glob.rst +++ b/Doc/library/glob.rst @@ -12,13 +12,13 @@ -------------- The :mod:`glob` module finds all the pathnames matching a specified pattern -according to the rules used by the Unix shell. No tilde expansion is done, but -``*``, ``?``, and character ranges expressed with ``[]`` will be correctly -matched. This is done by using the :func:`os.listdir` and -:func:`fnmatch.fnmatch` functions in concert, and not by actually invoking a -subshell. Note that unlike :func:`fnmatch.fnmatch`, :mod:`glob` treats -filenames beginning with a dot (``.``) as special cases. (For tilde and shell -variable expansion, use :func:`os.path.expanduser` and +according to the rules used by the Unix shell, although results are returned in +arbitrary order. 
No tilde expansion is done, but ``*``, ``?``, and character +ranges expressed with ``[]`` will be correctly matched. This is done by using +the :func:`os.listdir` and :func:`fnmatch.fnmatch` functions in concert, and +not by actually invoking a subshell. Note that unlike :func:`fnmatch.fnmatch`, +:mod:`glob` treats filenames beginning with a dot (``.``) as special cases. +(For tilde and shell variable expansion, use :func:`os.path.expanduser` and :func:`os.path.expandvars`.) For a literal match, wrap the meta-characters in brackets. @@ -38,7 +38,7 @@ For example, ``'[?]'`` matches the character ``'?'``. symlinks are included in the results (as in the shell). If *recursive* is true, the pattern "``**``" will match any files and zero or - more directories and subdirectories. If the pattern is followed by a + more directories and subdirectories. If the pattern is followed by an ``os.sep``, only directories and subdirectories match. .. note:: diff --git a/Doc/library/gzip.rst b/Doc/library/gzip.rst index 04c41d58..792d57a8 100644 --- a/Doc/library/gzip.rst +++ b/Doc/library/gzip.rst @@ -64,7 +64,7 @@ The module defines the following items: method. At least one of *fileobj* and *filename* must be given a non-trivial value. - The new class instance is based on *fileobj*, which can be a regular file, a + The new class instance is based on *fileobj*, which can be a regular file, an :class:`io.BytesIO` object, or any other object which simulates a file. It defaults to ``None``, in which case *filename* is opened to provide a file object. @@ -96,7 +96,7 @@ The module defines the following items: Calling a :class:`GzipFile` object's :meth:`close` method does not close *fileobj*, since you might wish to append more material after the compressed - data. This also allows you to pass a :class:`io.BytesIO` object opened for + data. This also allows you to pass an :class:`io.BytesIO` object opened for writing as *fileobj*, and retrieve the resulting memory buffer using the :class:`io.BytesIO` object's :meth:`~io.BytesIO.getvalue` method. diff --git a/Doc/library/hashlib.rst b/Doc/library/hashlib.rst index e0a877a7..769f96f0 100644 --- a/Doc/library/hashlib.rst +++ b/Doc/library/hashlib.rst @@ -176,8 +176,8 @@ A hash object has the following methods: compute the digests of data sharing a common initial substring. -Key Derivation Function ------------------------ +Key derivation +-------------- Key derivation and key stretching algorithms are designed for secure password hashing. Naive algorithms such as ``sha1(password)`` are not resistant against @@ -227,7 +227,7 @@ include a `salt `_. http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf The FIPS 180-2 publication on Secure Hash Algorithms. - http://en.wikipedia.org/wiki/Cryptographic_hash_function#Cryptographic_hash_algorithms + https://en.wikipedia.org/wiki/Cryptographic_hash_function#Cryptographic_hash_algorithms Wikipedia article with information on which algorithms have known issues and what that means regarding their use. diff --git a/Doc/library/html.parser.rst b/Doc/library/html.parser.rst index b84c60b7..824995ed 100644 --- a/Doc/library/html.parser.rst +++ b/Doc/library/html.parser.rst @@ -185,7 +185,7 @@ implementations do nothing (except for :meth:`~HTMLParser.handle_startendtag`): The content of Internet Explorer conditional comments (condcoms) will also be sent to this method, so, for ````, - this method will receive ``'[if IE 9]>IE-specific contentIE9-specific content +.. 
moduleauthor:: Guido van Rossum -IDLE is the Python IDE built with the :mod:`tkinter` GUI toolkit. +IDLE is Python's Integrated Development and Learning Environment. IDLE has the following features: * coded in 100% pure Python, using the :mod:`tkinter` GUI toolkit -* cross-platform: works on Windows, Unix, and Mac OS X +* cross-platform: works mostly the same on Windows, Unix, and Mac OS X + +* Python shell window (interactive interpreter) with colorizing + of code input, output, and error messages * multi-window text editor with multiple undo, Python colorizing, - smart indent, call tips, and many other features + smart indent, call tips, auto completion, and other features -* Python shell window (a.k.a. interactive interpreter) +* search within any window, replace within editor windows, and search + through multiple files (grep) -* debugger (not complete, but you can set breakpoints, view and step) +* debugger with persistent breakpoints, stepping, and viewing + of global and local namespaces +* configuration, browsers, and other dialogs Menus ----- @@ -200,7 +206,12 @@ Check Module Run Module Do Check Module (above). If no error, restart the shell to clean the - environment, then execute the module. + environment, then execute the module. Output is displayed in the Shell + window. Note that output requires use of ``print`` or ``write``. + When execution is complete, the Shell retains focus and displays a prompt. + At this point, one may interactively explore the result of execution. + This is similar to executing a file with ``python -i file`` at a command + line. Shell menu (Shell window only) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -241,17 +252,16 @@ Options menu (Shell and Editor) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configure IDLE - Open a configuration dialog. Fonts, indentation, keybindings, and color - themes may be altered. Startup Preferences may be set, and additional - help sources can be specified. Non-default user setting are saved in a - .idlerc directory in the user's home directory. Problems caused by bad user - configuration files are solved by editing or deleting one or more of the - files in .idlerc. On OS X, open the configuration dialog by selecting - Preferences in the application menu. - -Configure Extensions - Open a configuration dialog for setting preferences for extensions - (discussed below). See note above about the location of user settings. + Open a configuration dialog and change preferences for the following: + fonts, indentation, keybindings, text color themes, startup windows and + size, additional help sources, and extensions (see below). On OS X, + open the configuration dialog by selecting Preferences in the application + menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, + save it as a new custom theme. + + Non-default user settings are saved in a .idlerc directory in the user's + home directory. Problems caused by bad user configuration files are solved + by editing or deleting one or more of the files in .idlerc. Code Context (toggle)(Editor Window only) Open a pane at the top of the edit window which shows the block context @@ -330,8 +340,8 @@ Go to file/line Editing and navigation ---------------------- -In this section, 'C' refers to the Control key on Windows and Unix and -the Command key on Mac OSX. +In this section, 'C' refers to the :kbd:`Control` key on Windows and Unix and +the :kbd:`Command` key on Mac OSX. 
* :kbd:`Backspace` deletes to the left; :kbd:`Del` deletes to the right @@ -424,9 +434,35 @@ Note that IDLE itself places quite a few modules in sys.modules, so much can be found by default, e.g. the re module. If you don't like the ACW popping up unbidden, simply make the delay -longer or disable the extension. Or another option is the delay could -be set to zero. Another alternative to preventing ACW popups is to -disable the call tips extension. +longer or disable the extension. + +Calltips +^^^^^^^^ + +A calltip is shown when one types :kbd:`(` after the name of an *acccessible* +function. A name expression may include dots and subscripts. A calltip +remains until it is clicked, the cursor is moved out of the argument area, +or :kbd:`)` is typed. When the cursor is in the argument part of a definition, +the menu or shortcut display a calltip. + +A calltip consists of the function signature and the first line of the +docstring. For builtins without an accessible signature, the calltip +consists of all lines up the fifth line or the first blank line. These +details may change. + +The set of *accessible* functions depends on what modules have been imported +into the user process, including those imported by Idle itself, +and what definitions have been run, all since the last restart. + +For example, restart the Shell and enter ``itertools.count(``. A calltip +appears because Idle imports itertools into the user process for its own use. +(This could change.) Enter ``turtle.write(`` and nothing appears. Idle does +not import turtle. The menu or shortcut do nothing either. Enter +``import turtle`` and then ``turtle.write(`` will work. + +In an editor, import statements have no effect until one runs the file. One +might want to run a file after writing the import statements at the top, +or immediately run an existing file before editing. Python Shell window ^^^^^^^^^^^^^^^^^^^ @@ -447,42 +483,24 @@ Python Shell window * :kbd:`Return` while on any previous command retrieves that command -Syntax colors -------------- - -The coloring is applied in a background "thread," so you may occasionally see -uncolorized text. To change the color scheme, edit the ``[Colors]`` section in -:file:`config.txt`. - -Python syntax colors: - Keywords - orange - - Strings - green - - Comments - red - - Definitions - blue - -Shell colors: - Console output - brown - - stdout - blue +Text colors +^^^^^^^^^^^ - stderr - dark green +Idle defaults to black on white text, but colors text with special meanings. +For the shell, these are shell output, shell error, user output, and +user error. For Python code, at the shell prompt or in an editor, these are +keywords, builtin class and function names, names following ``class`` and +``def``, strings, and comments. For any text window, these are the cursor (when +present), found text (when possible), and selected text. - stdin - black +Text coloring is done in the background, so uncolorized text is occasionally +visible. To change the color scheme, use the Configure IDLE dialog +Highlighting tab. The marking of debugger breakpoint lines in the editor and +text in popups and dialogs is not user-configurable. -Startup -------- +Startup and code execution +-------------------------- Upon startup with the ``-s`` option, IDLE will execute the file referenced by the environment variables :envvar:`IDLESTARTUP` or :envvar:`PYTHONSTARTUP`. @@ -504,31 +522,60 @@ Command line usage :: - idle.py [-c command] [-d] [-e] [-s] [-t title] [arg] ... 
+ idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ... - -c command run this command - -d enable debugger - -e edit mode; arguments are files to be edited - -s run $IDLESTARTUP or $PYTHONSTARTUP first + -c command run command in the shell window + -d enable debugger and open shell window + -e open editor window + -h print help message with legal combinatios and exit + -i open shell window + -r file run file in shell window + -s run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window -t title set title of shell window + - run stdin in shell (- must be last option before args) If there are arguments: -#. If ``-e`` is used, arguments are files opened for editing and - ``sys.argv`` reflects the arguments passed to IDLE itself. +* If ``-``, ``-c``, or ``r`` is used, all arguments are placed in + ``sys.argv[1:...]`` and ``sys.argv[0]`` is set to ``''``, ``'-c'``, + or ``'-r'``. No editor window is opened, even if that is the default + set in the Options dialog. + +* Otherwise, arguments are files opened for editing and + ``sys.argv`` reflects the arguments passed to IDLE itself. -#. Otherwise, if ``-c`` is used, all arguments are placed in - ``sys.argv[1:...]``, with ``sys.argv[0]`` set to ``'-c'``. -#. Otherwise, if neither ``-e`` nor ``-c`` is used, the first - argument is a script which is executed with the remaining arguments in - ``sys.argv[1:...]`` and ``sys.argv[0]`` set to the script name. If the - script name is '-', no script is executed but an interactive Python session - is started; the arguments are still available in ``sys.argv``. +IDLE-console differences +^^^^^^^^^^^^^^^^^^^^^^^^ + +As much as possible, the result of executing Python code with IDLE is the +same as executing the same code in a console window. However, the different +interface and operation occasionally affects results. + +For instance, IDLE normally executes user code in a separate process from +the IDLE GUI itself. The IDLE versions of sys.stdin, .stdout, and .stderr in the +execution process get input from and send output to the GUI process, +which keeps control of the keyboard and screen. This is normally transparent, +but code that access these object will see different attribute values. +Also, functions that directly access the keyboard and screen will not work. + +With IDLE's Shell, one enters, edits, and recalls complete statements. +Some consoles only work with a single physical line at a time. Running without a subprocess ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +By default, IDLE executes user code in a separate subprocess via a socket, +which uses the internal loopback interface. This connection is not +externally visible and no data is sent to or received from the Internet. +If firewall software complains anyway, you can ignore it. + +If the attempt to make the socket connection fails, Idle will notify you. +Such failures are sometimes transient, but if persistent, the problem +may be either a firewall blocking the connecton or misconfiguration of +a particular system. Until the problem is fixed, one can run Idle with +the -n command line switch. + If IDLE is started with the -n command line switch it will run in a single process and will not create the subprocess which runs the RPC Python execution server. 
This can be useful if Python cannot create diff --git a/Doc/library/imghdr.rst b/Doc/library/imghdr.rst index c60df24c..f11f6dcf 100644 --- a/Doc/library/imghdr.rst +++ b/Doc/library/imghdr.rst @@ -54,10 +54,8 @@ from :func:`what`: +------------+-----------------------------------+ .. versionadded:: 3.5 - The *exr* format was added. + The *exr* and *webp* formats were added. -.. versionchanged:: 3.5 - The *webp* type was added. You can extend the list of file types :mod:`imghdr` can recognize by appending to this variable: diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst index 632df75d..da613534 100644 --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -747,7 +747,7 @@ find and load modules. modules recognized by the standard import machinery. This is a helper for code which simply needs to know if a filesystem path potentially refers to a module without needing any details on the kind - of module (for example, :func:`inspect.getmodulename`) + of module (for example, :func:`inspect.getmodulename`). .. versionadded:: 3.3 diff --git a/Doc/library/inspect.rst b/Doc/library/inspect.rst index 3b62e7f5..23e559ca 100644 --- a/Doc/library/inspect.rst +++ b/Doc/library/inspect.rst @@ -88,6 +88,12 @@ attributes: | | __globals__ | global namespace in which | | | | this function was defined | +-----------+-----------------+---------------------------+ +| | __annotations__ | mapping of parameters | +| | | names to annotations; | +| | | ``"return"`` key is | +| | | reserved for return | +| | | annotations. | ++-----------+-----------------+---------------------------+ | traceback | tb_frame | frame object at this | | | | level | +-----------+-----------------+---------------------------+ @@ -178,6 +184,10 @@ attributes: +-----------+-----------------+---------------------------+ | | gi_code | code | +-----------+-----------------+---------------------------+ +| | gi_yieldfrom | object being iterated by | +| | | ``yield from``, or | +| | | ``None`` | ++-----------+-----------------+---------------------------+ | coroutine | __name__ | name | +-----------+-----------------+---------------------------+ | | __qualname__ | qualified name | @@ -191,10 +201,6 @@ attributes: +-----------+-----------------+---------------------------+ | | cr_code | code | +-----------+-----------------+---------------------------+ -| | gi_yieldfrom | object being iterated by | -| | | ``yield from``, or | -| | | ``None`` | -+-----------+-----------------+---------------------------+ | builtin | __doc__ | documentation string | +-----------+-----------------+---------------------------+ | | __name__ | original name of this | @@ -209,9 +215,10 @@ attributes: .. versionchanged:: 3.5 - Add ``__qualname__`` attribute to generators. The ``__name__`` attribute of - generators is now set from the function name, instead of the code name, and - it can now be modified. + Add ``__qualname__`` and ``gi_yieldfrom`` attributes to generators. + + The ``__name__`` attribute of generators is now set from the function + name, instead of the code name, and it can now be modified. .. function:: getmembers(object[, predicate]) @@ -1038,6 +1045,11 @@ line. returned list represents *frame*; the last entry represents the outermost call on *frame*'s stack. + .. versionchanged:: 3.5 + A list of :term:`named tuples ` + ``FrameInfo(frame, filename, lineno, function, code_context, index)`` + is returned. + .. function:: getinnerframes(traceback, context=1) @@ -1046,6 +1058,11 @@ line. 
list represents *traceback*; the last entry represents where the exception was raised. + .. versionchanged:: 3.5 + A list of :term:`named tuples ` + ``FrameInfo(frame, filename, lineno, function, code_context, index)`` + is returned. + .. function:: currentframe() @@ -1065,6 +1082,11 @@ line. returned list represents the caller; the last entry represents the outermost call on the stack. + .. versionchanged:: 3.5 + A list of :term:`named tuples ` + ``FrameInfo(frame, filename, lineno, function, code_context, index)`` + is returned. + .. function:: trace(context=1) @@ -1073,6 +1095,11 @@ line. entry in the list represents the caller; the last entry represents where the exception was raised. + .. versionchanged:: 3.5 + A list of :term:`named tuples ` + ``FrameInfo(frame, filename, lineno, function, code_context, index)`` + is returned. + Fetching attributes statically ------------------------------ diff --git a/Doc/library/io.rst b/Doc/library/io.rst index f009c65c..cb3e9edf 100644 --- a/Doc/library/io.rst +++ b/Doc/library/io.rst @@ -301,11 +301,11 @@ I/O Base Classes Note that it's already possible to iterate on file objects using ``for line in file: ...`` without calling ``file.readlines()``. - .. method:: seek(offset, whence=SEEK_SET) + .. method:: seek(offset[, whence]) Change the stream position to the given byte *offset*. *offset* is - interpreted relative to the position indicated by *whence*. Values for - *whence* are: + interpreted relative to the position indicated by *whence*. The default + value for *whence* is :data:`SEEK_SET`. Values for *whence* are: * :data:`SEEK_SET` or ``0`` -- start of the stream (the default); *offset* should be zero or positive @@ -487,7 +487,7 @@ I/O Base Classes .. method:: readinto1(b) - Read up to ``len(b)`` bytes into bytearray *b*, ,using at most one call to + Read up to ``len(b)`` bytes into bytearray *b*, using at most one call to the underlying raw stream's :meth:`~RawIOBase.read` (or :meth:`~RawIOBase.readinto`) method. Return the number of bytes read. @@ -783,10 +783,11 @@ Text I/O If *size* is specified, at most *size* characters will be read. - .. method:: seek(offset, whence=SEEK_SET) + .. method:: seek(offset[, whence]) - Change the stream position to the given *offset*. Behaviour depends - on the *whence* parameter: + Change the stream position to the given *offset*. Behaviour depends on + the *whence* parameter. The default value for *whence* is + :data:`SEEK_SET`. * :data:`SEEK_SET` or ``0``: seek from the start of the stream (the default); *offset* must either be a number returned by @@ -888,10 +889,16 @@ Text I/O An in-memory stream for text I/O. The text buffer is discarded when the :meth:`~IOBase.close` method is called. - The initial value of the buffer (an empty string by default) can be set by - providing *initial_value*. The *newline* argument works like that of - :class:`TextIOWrapper`. The default is to consider only ``\n`` characters - as end of lines and to do no newline translation. + The initial value of the buffer can be set by providing *initial_value*. + If newline translation is enabled, newlines will be encoded as if by + :meth:`~TextIOBase.write`. The stream is positioned at the start of + the buffer. + + The *newline* argument works like that of :class:`TextIOWrapper`. + The default is to consider only ``\n`` characters as ends of lines and + to do no newline translation. 
If *newline* is set to ``None``, + newlines are written as ``\n`` on all platforms, but universal + newline decoding is still performed when reading. :class:`StringIO` provides this method in addition to those from :class:`TextIOBase` and its parents: @@ -899,6 +906,8 @@ Text I/O .. method:: getvalue() Return a ``str`` containing the entire contents of the buffer. + Newlines are decoded as if by :meth:`~TextIOBase.read`, although + the stream position is not changed. Example usage:: diff --git a/Doc/library/ipaddress.rst b/Doc/library/ipaddress.rst index 7b594408..90fcc748 100644 --- a/Doc/library/ipaddress.rst +++ b/Doc/library/ipaddress.rst @@ -575,7 +575,7 @@ so to avoid duplication they are only documented for :class:`IPv4Network`. single-address network, with the network address being *address* and the mask being ``/128``. - 3. An integer packed into a :class:`bytes` object of length 16, bit-endian. + 3. An integer packed into a :class:`bytes` object of length 16, big-endian. The interpretation is similar to an integer *address*. 4. A two-tuple of an address description and a netmask, where the address diff --git a/Doc/library/json.rst b/Doc/library/json.rst index 49bb0905..d62f14be 100644 --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -339,7 +339,7 @@ Encoders and Decoders .. method:: decode(s) Return the Python representation of *s* (a :class:`str` instance - containing a JSON document) + containing a JSON document). :exc:`JSONDecodeError` will be raised if the given JSON document is not valid. diff --git a/Doc/library/linecache.rst b/Doc/library/linecache.rst index 6c92cc58..3f14885b 100644 --- a/Doc/library/linecache.rst +++ b/Doc/library/linecache.rst @@ -49,7 +49,7 @@ The :mod:`linecache` module defines the following functions: .. function:: lazycache(filename, module_globals) - Capture enough detail about a non-file based module to permit getting its + Capture enough detail about a non-file-based module to permit getting its lines later via :func:`getline` even if *module_globals* is None in the later call. This avoids doing I/O until a line is actually needed, without having to carry the module globals around indefinitely. diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst index bc7f5f93..61b79faf 100644 --- a/Doc/library/locale.rst +++ b/Doc/library/locale.rst @@ -204,7 +204,7 @@ The :mod:`locale` module defines the following exception and functions: .. data:: RADIXCHAR - Get the radix character (decimal dot, decimal comma, etc.) + Get the radix character (decimal dot, decimal comma, etc.). .. data:: THOUSEP diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst index 67403a9b..0edc942c 100644 --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -230,7 +230,7 @@ need to override. renamed to the destination. :param source: The source filename. This is normally the base - filename, e.g. 'test.log' + filename, e.g. 'test.log'. :param dest: The destination filename. This is normally what the source is rotated to, e.g. 'test.log.1'. diff --git a/Doc/library/mailbox.rst b/Doc/library/mailbox.rst index 6334bd6e..d29902dc 100644 --- a/Doc/library/mailbox.rst +++ b/Doc/library/mailbox.rst @@ -674,7 +674,7 @@ Supported mailbox formats are Maildir, mbox, MH, Babyl, and MMDF. In Babyl mailboxes, the headers of a message are not stored contiguously with the body of the message. 
To generate a file-like representation, the - headers and body are copied together into a :class:`io.BytesIO` instance, + headers and body are copied together into an :class:`io.BytesIO` instance, which has an API identical to that of a file. As a result, the file-like object is truly independent of the underlying mailbox but does not save memory compared to a string diff --git a/Doc/library/modulefinder.rst b/Doc/library/modulefinder.rst index 0656b37d..e84a4964 100644 --- a/Doc/library/modulefinder.rst +++ b/Doc/library/modulefinder.rst @@ -53,7 +53,7 @@ report of the imported modules will be printed. .. attribute:: modules A dictionary mapping module names to modules. See - :ref:`modulefinder-example` + :ref:`modulefinder-example`. .. _modulefinder-example: diff --git a/Doc/library/msvcrt.rst b/Doc/library/msvcrt.rst index 9d23720e..fadaf05a 100644 --- a/Doc/library/msvcrt.rst +++ b/Doc/library/msvcrt.rst @@ -18,7 +18,7 @@ documentation. The module implements both the normal and wide char variants of the console I/O api. The normal API deals only with ASCII characters and is of limited use for internationalized applications. The wide char API should be used where -ever possible +ever possible. .. versionchanged:: 3.3 Operations in this module now raise :exc:`OSError` where :exc:`IOError` diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst index 8703f8f5..8575fb50 100644 --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -65,8 +65,7 @@ To show the individual process IDs involved, here is an expanded example:: def info(title): print(title) print('module name:', __name__) - if hasattr(os, 'getppid'): # only available on Unix - print('parent process:', os.getppid()) + print('parent process:', os.getppid()) print('process id:', os.getpid()) def f(name): @@ -1042,7 +1041,7 @@ Connection objects are usually created using :func:`Pipe` -- see also readable. .. versionchanged:: 3.3 - This function used to raise a :exc:`IOError`, which is now an + This function used to raise :exc:`IOError`, which is now an alias of :exc:`OSError`. @@ -1130,10 +1129,15 @@ object -- see :ref:`multiprocessing-managers`. .. class:: BoundedSemaphore([value]) - A bounded semaphore object: a clone of :class:`threading.BoundedSemaphore`. + A bounded semaphore object: a close analog of + :class:`threading.BoundedSemaphore`. - (On Mac OS X, this is indistinguishable from :class:`Semaphore` because - ``sem_getvalue()`` is not implemented on that platform). + A solitary difference from its close analog exists: its ``acquire`` method's + first argument is named *block*, as is consistent with :meth:`Lock.acquire`. + + .. note:: + On Mac OS X, this is indistinguishable from :class:`Semaphore` because + ``sem_getvalue()`` is not implemented on that platform. .. class:: Condition([lock]) @@ -1149,32 +1153,134 @@ object -- see :ref:`multiprocessing-managers`. A clone of :class:`threading.Event`. + .. class:: Lock() - A non-recursive lock object: a clone of :class:`threading.Lock`. + A non-recursive lock object: a close analog of :class:`threading.Lock`. + Once a process or thread has acquired a lock, subsequent attempts to + acquire it from any process or thread will block until it is released; + any process or thread may release it. The concepts and behaviors of + :class:`threading.Lock` as it applies to threads are replicated here in + :class:`multiprocessing.Lock` as it applies to either processes or threads, + except as noted. 
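+
+   As an illustrative sketch of serializing updates to shared data from
+   several processes (the worker function and variable names below are
+   hypothetical, not part of this API)::
+
+      from multiprocessing import Lock, Process, Value
+
+      def add_one(lock, counter):
+          # Any process may hold the lock; ``with`` acquires and releases it.
+          with lock:
+              counter.value += 1   # read-modify-write guarded by the lock
+
+      if __name__ == '__main__':
+          lock = Lock()
+          counter = Value('i', 0)  # a shared integer
+          workers = [Process(target=add_one, args=(lock, counter))
+                     for _ in range(4)]
+          for w in workers:
+              w.start()
+          for w in workers:
+              w.join()
+          print(counter.value)     # prints 4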
+ + Note that :class:`Lock` is actually a factory function which returns an + instance of ``multiprocessing.synchronize.Lock`` initialized with a + default context. + + :class:`Lock` supports the :term:`context manager` protocol and thus may be + used in :keyword:`with` statements. + + .. method:: acquire(block=True, timeout=None) + + Acquire a lock, blocking or non-blocking. + + With the *block* argument set to ``True`` (the default), the method call + will block until the lock is in an unlocked state, then set it to locked + and return ``True``. Note that the name of this first argument differs + from that in :meth:`threading.Lock.acquire`. + + With the *block* argument set to ``False``, the method call does not + block. If the lock is currently in a locked state, return ``False``; + otherwise set the lock to a locked state and return ``True``. + + When invoked with a positive, floating-point value for *timeout*, block + for at most the number of seconds specified by *timeout* as long as + the lock can not be acquired. Invocations with a negative value for + *timeout* are equivalent to a *timeout* of zero. Invocations with a + *timeout* value of ``None`` (the default) set the timeout period to + infinite. Note that the treatment of negative or ``None`` values for + *timeout* differs from the implemented behavior in + :meth:`threading.Lock.acquire`. The *timeout* argument has no practical + implications if the *block* argument is set to ``False`` and is thus + ignored. Returns ``True`` if the lock has been acquired or ``False`` if + the timeout period has elapsed. + + + .. method:: release() + + Release a lock. This can be called from any process or thread, not only + the process or thread which originally acquired the lock. + + Behavior is the same as in :meth:`threading.Lock.release` except that + when invoked on an unlocked lock, a :exc:`ValueError` is raised. + .. class:: RLock() - A recursive lock object: a clone of :class:`threading.RLock`. + A recursive lock object: a close analog of :class:`threading.RLock`. A + recursive lock must be released by the process or thread that acquired it. + Once a process or thread has acquired a recursive lock, the same process + or thread may acquire it again without blocking; that process or thread + must release it once for each time it has been acquired. + + Note that :class:`RLock` is actually a factory function which returns an + instance of ``multiprocessing.synchronize.RLock`` initialized with a + default context. + + :class:`RLock` supports the :term:`context manager` protocol and thus may be + used in :keyword:`with` statements. + + + .. method:: acquire(block=True, timeout=None) + + Acquire a lock, blocking or non-blocking. + + When invoked with the *block* argument set to ``True``, block until the + lock is in an unlocked state (not owned by any process or thread) unless + the lock is already owned by the current process or thread. The current + process or thread then takes ownership of the lock (if it does not + already have ownership) and the recursion level inside the lock increments + by one, resulting in a return value of ``True``. Note that there are + several differences in this first argument's behavior compared to the + implementation of :meth:`threading.RLock.acquire`, starting with the name + of the argument itself. + + When invoked with the *block* argument set to ``False``, do not block. 
+ If the lock has already been acquired (and thus is owned) by another + process or thread, the current process or thread does not take ownership + and the recursion level within the lock is not changed, resulting in + a return value of ``False``. If the lock is in an unlocked state, the + current process or thread takes ownership and the recursion level is + incremented, resulting in a return value of ``True``. + + Use and behaviors of the *timeout* argument are the same as in + :meth:`Lock.acquire`. Note that some of these behaviors of *timeout* + differ from the implemented behaviors in :meth:`threading.RLock.acquire`. + + + .. method:: release() + + Release a lock, decrementing the recursion level. If after the + decrement the recursion level is zero, reset the lock to unlocked (not + owned by any process or thread) and if any other processes or threads + are blocked waiting for the lock to become unlocked, allow exactly one + of them to proceed. If after the decrement the recursion level is still + nonzero, the lock remains locked and owned by the calling process or + thread. + + Only call this method when the calling process or thread owns the lock. + An :exc:`AssertionError` is raised if this method is called by a process + or thread other than the owner or if the lock is in an unlocked (unowned) + state. Note that the type of exception raised in this situation + differs from the implemented behavior in :meth:`threading.RLock.release`. + .. class:: Semaphore([value]) - A semaphore object: a clone of :class:`threading.Semaphore`. + A semaphore object: a close analog of :class:`threading.Semaphore`. -.. note:: + A solitary difference from its close analog exists: its ``acquire`` method's + first argument is named *block*, as is consistent with :meth:`Lock.acquire`. - The :meth:`acquire` and :meth:`wait` methods of each of these types - treat negative timeouts as zero timeouts. This differs from - :mod:`threading` where, since version 3.2, the equivalent - :meth:`acquire` methods treat negative timeouts as infinite - timeouts. +.. note:: On Mac OS X, ``sem_timedwait`` is unsupported, so calling ``acquire()`` with a timeout will emulate that function's behavior using a sleeping loop. .. note:: - If the SIGINT signal generated by Ctrl-C arrives while the main thread is + If the SIGINT signal generated by :kbd:`Ctrl-C` arrives while the main thread is blocked by a call to :meth:`BoundedSemaphore.acquire`, :meth:`Lock.acquire`, :meth:`RLock.acquire`, :meth:`Semaphore.acquire`, :meth:`Condition.acquire` or :meth:`Condition.wait` then the call will be immediately interrupted and @@ -1833,7 +1939,7 @@ itself. This means, for example, that one shared object can contain a second: raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has - not been *exposed* + not been *exposed*. An example of the usage of :meth:`_callmethod`: @@ -1939,7 +2045,7 @@ with the :class:`Pool` class. If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then @@ -1964,7 +2070,7 @@ with the :class:`Pool` class. If *callback* is specified then it should be a callable which accepts a single argument. 
When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then diff --git a/Doc/library/nntplib.rst b/Doc/library/nntplib.rst index 3943f2c2..9fb1b459 100644 --- a/Doc/library/nntplib.rst +++ b/Doc/library/nntplib.rst @@ -69,9 +69,9 @@ The module itself defines the following classes: connecting to an NNTP server on the local machine and intend to call reader-specific commands, such as ``group``. If you get unexpected :exc:`NNTPPermanentError`\ s, you might need to set *readermode*. - :class:`NNTP` class supports the :keyword:`with` statement to + The :class:`NNTP` class supports the :keyword:`with` statement to unconditionally consume :exc:`OSError` exceptions and to close the NNTP - connection when done. Here is a sample on how using it: + connection when done, e.g.: >>> from nntplib import NNTP >>> with NNTP('news.gmane.org') as n: @@ -341,7 +341,7 @@ tuples or objects that the method normally returns will be empty. .. method:: NNTP.over(message_spec, *, file=None) - Send a ``OVER`` command, or a ``XOVER`` command on legacy servers. + Send an ``OVER`` command, or an ``XOVER`` command on legacy servers. *message_spec* can be either a string representing a message id, or a ``(first, last)`` tuple of numbers indicating a range of articles in the current group, or a ``(first, None)`` tuple indicating a range of diff --git a/Doc/library/optparse.rst b/Doc/library/optparse.rst index 72145aa5..160c29d3 100644 --- a/Doc/library/optparse.rst +++ b/Doc/library/optparse.rst @@ -1324,7 +1324,7 @@ where the input parameters are the list of arguments to process (default: ``sys.argv[1:]``) ``values`` - a :class:`optparse.Values` object to store option arguments in (default: a + an :class:`optparse.Values` object to store option arguments in (default: a new instance of :class:`Values`) -- if you give an existing object, the option defaults will not be initialized on it diff --git a/Doc/library/os.path.rst b/Doc/library/os.path.rst index e4fe44ed..4b4e0b42 100644 --- a/Doc/library/os.path.rst +++ b/Doc/library/os.path.rst @@ -82,8 +82,21 @@ the :mod:`glob` module.) Return the longest path prefix (taken character-by-character) that is a prefix of all paths in *list*. If *list* is empty, return the empty string - (``''``). Note that this may return invalid paths because it works a - character at a time. To obtain a valid path, see :func:`commonpath`. + (``''``). + + .. note:: + + This function may return invalid paths because it works a + character at a time. To obtain a valid path, see + :func:`commonpath`. + + :: + + >>> os.path.commonprefix(['/usr/lib', '/usr/local/lib']) + '/usr/l' + + >>> os.path.commonpath(['/usr/lib', '/usr/local/lib']) + '/usr' .. function:: dirname(path) @@ -276,7 +289,7 @@ the :mod:`glob` module.) Return ``True`` if both pathname arguments refer to the same file or directory. This is determined by the device number and i-node number and raises an - exception if a :func:`os.stat` call on either pathname fails. + exception if an :func:`os.stat` call on either pathname fails. Availability: Unix, Windows. diff --git a/Doc/library/os.rst b/Doc/library/os.rst index d4032e0c..0cc9d9c1 100644 --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -857,9 +857,9 @@ as internal buffering of data. :data:`os.SEEK_HOLE` or :data:`os.SEEK_DATA`. -.. 
function:: open(file, flags, mode=0o777, *, dir_fd=None) +.. function:: open(path, flags, mode=0o777, *, dir_fd=None) - Open the file *file* and set various flags according to *flags* and possibly + Open the file *path* and set various flags according to *flags* and possibly its mode according to *mode*. When computing *mode*, the current umask value is first masked out. Return the file descriptor for the newly opened file. The new file descriptor is :ref:`non-inheritable `. @@ -1071,10 +1071,10 @@ or `the MSDN `_ on Window :exc:`InterruptedError` exception (see :pep:`475` for the rationale). -.. function:: sendfile(out, in, offset, nbytes) - sendfile(out, in, offset, nbytes, headers=None, trailers=None, flags=0) +.. function:: sendfile(out, in, offset, count) + sendfile(out, in, offset, count, [headers], [trailers], flags=0) - Copy *nbytes* bytes from file descriptor *in* to file descriptor *out* + Copy *count* bytes from file descriptor *in* to file descriptor *out* starting at *offset*. Return the number of bytes sent. When EOF is reached return 0. @@ -1088,7 +1088,7 @@ or `the MSDN `_ on Window *trailers* are arbitrary sequences of buffers that are written before and after the data from *in* is written. It returns the same as the first case. - On Mac OS X and FreeBSD, a value of 0 for *nbytes* specifies to send until + On Mac OS X and FreeBSD, a value of 0 for *count* specifies to send until the end of *in* is reached. All platforms support sockets as *out* file descriptor, and some platforms @@ -1102,7 +1102,7 @@ or `the MSDN `_ on Window .. note:: For a higher-level wrapper of :func:`sendfile`, see - :mod:`socket.socket.sendfile`. + :meth:`socket.socket.sendfile`. .. versionadded:: 3.3 @@ -1690,10 +1690,10 @@ features: The *dir_fd* argument. -.. function:: mknod(filename, mode=0o600, device=0, *, dir_fd=None) +.. function:: mknod(path, mode=0o600, device=0, *, dir_fd=None) Create a filesystem node (file, device special file or named pipe) named - *filename*. *mode* specifies both the permissions to use and the type of node + *path*. *mode* specifies both the permissions to use and the type of node to be created, being combined (bitwise OR) with one of ``stat.S_IFREG``, ``stat.S_IFCHR``, ``stat.S_IFBLK``, and ``stat.S_IFIFO`` (those constants are available in :mod:`stat`). For ``stat.S_IFCHR`` and ``stat.S_IFBLK``, @@ -2389,9 +2389,9 @@ features: .. versionadded:: 3.3 -.. function:: symlink(source, link_name, target_is_directory=False, *, dir_fd=None) +.. function:: symlink(src, dst, target_is_directory=False, *, dir_fd=None) - Create a symbolic link pointing to *source* named *link_name*. + Create a symbolic link pointing to *src* named *dst*. On Windows, a symlink represents either a file or a directory, and does not morph to the target dynamically. If the target is present, the type of the @@ -2461,20 +2461,20 @@ features: The *dir_fd* parameter. -.. function:: utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) +.. function:: utime(path, times=None, *[, ns], dir_fd=None, follow_symlinks=True) Set the access and modified times of the file specified by *path*. :func:`utime` takes two optional parameters, *times* and *ns*. These specify the times set on *path* and are used as follows: - - If *ns* is not ``None``, + - If *ns* is specified, it must be a 2-tuple of the form ``(atime_ns, mtime_ns)`` where each member is an int expressing nanoseconds. 
- If *times* is not ``None``,
     it must be a 2-tuple of the form ``(atime, mtime)``
     where each member is an int or float expressing seconds.
-   - If *times* and *ns* are both ``None``,
+   - If *times* is ``None`` and *ns* is unspecified,
     this is equivalent to specifying ``ns=(atime_ns, mtime_ns)``
     where both times are the current time.

@@ -2529,9 +2529,9 @@ features:
   recurse into the subdirectories whose names remain in *dirnames*; this can be
   used to prune the search, impose a specific order of visiting, or even to inform
   :func:`walk` about directories the caller creates or renames before it resumes
-   :func:`walk` again.  Modifying *dirnames* when *topdown* is ``False`` is
-   ineffective, because in bottom-up mode the directories in *dirnames* are
-   generated before *dirpath* itself is generated.
+   :func:`walk` again.  Modifying *dirnames* when *topdown* is ``False`` has
+   no effect on the behavior of the walk, because in bottom-up mode the directories
+   in *dirnames* are generated before *dirpath* itself is generated.

   By default, errors from the :func:`listdir` call are ignored.  If optional
   argument *onerror* is specified, it should be a function; it will be called with
@@ -3023,9 +3023,10 @@ written in Python, such as a mail server's external command delivery program.
   Availability: Unix.


-.. function:: popen(command, mode='r', buffering=-1)
+.. function:: popen(cmd, mode='r', buffering=-1)

-   Open a pipe to or from *command*.  The return value is an open file object
+   Open a pipe to or from command *cmd*.
+   The return value is an open file object
   connected to the pipe, which can be read or written depending on whether *mode*
   is ``'r'`` (default) or ``'w'``. The *buffering* argument has the same meaning as
   the corresponding argument to the built-in :func:`open` function. The
diff --git a/Doc/library/pdb.rst b/Doc/library/pdb.rst
index 6e526f43..c144db6c 100644
--- a/Doc/library/pdb.rst
+++ b/Doc/library/pdb.rst
@@ -156,8 +156,8 @@ access further features, you have to do this yourself:
   that matches one of these patterns. [1]_

   By default, Pdb sets a handler for the SIGINT signal (which is sent when the
-   user presses Ctrl-C on the console) when you give a ``continue`` command.
-   This allows you to break into the debugger again by pressing Ctrl-C.  If you
+   user presses :kbd:`Ctrl-C` on the console) when you give a ``continue`` command.
+   This allows you to break into the debugger again by pressing :kbd:`Ctrl-C`.  If you
   want Pdb not to touch the SIGINT handler, set *nosigint* to true.

   Example call to enable tracing with *skip*::

diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst
index a92526f5..7e09b03c 100644
--- a/Doc/library/pickle.rst
+++ b/Doc/library/pickle.rst
@@ -192,7 +192,7 @@ process more convenient:
   number is specified, :data:`HIGHEST_PROTOCOL` is selected.

   The *file* argument must have a write() method that accepts a single bytes
-   argument.  It can thus be an on-disk file opened for binary writing, a
+   argument.  It can thus be an on-disk file opened for binary writing, an
   :class:`io.BytesIO` instance, or any other custom object that meets this
   interface.

@@ -221,7 +221,7 @@ process more convenient:
   The argument *file* must have two methods, a read() method that takes an
   integer argument, and a readline() method that requires no arguments.  Both
   methods should return bytes.
Thus *file* can be an on-disk file opened for - binary reading, a :class:`io.BytesIO` object, or any other custom object + binary reading, an :class:`io.BytesIO` object, or any other custom object that meets this interface. Optional keyword arguments are *fix_imports*, *encoding* and *errors*, @@ -235,7 +235,7 @@ process more convenient: .. function:: loads(bytes_object, \*, fix_imports=True, encoding="ASCII", errors="strict") Read a pickled object hierarchy from a :class:`bytes` object and return the - reconstituted object hierarchy specified therein + reconstituted object hierarchy specified therein. The protocol version of the pickle is detected automatically, so no protocol argument is needed. Bytes past the pickled object's @@ -288,7 +288,7 @@ The :mod:`pickle` module exports two classes, :class:`Pickler` and number is specified, :data:`HIGHEST_PROTOCOL` is selected. The *file* argument must have a write() method that accepts a single bytes - argument. It can thus be an on-disk file opened for binary writing, a + argument. It can thus be an on-disk file opened for binary writing, an :class:`io.BytesIO` instance, or any other custom object that meets this interface. @@ -357,7 +357,7 @@ The :mod:`pickle` module exports two classes, :class:`Pickler` and The argument *file* must have two methods, a read() method that takes an integer argument, and a readline() method that requires no arguments. Both methods should return bytes. Thus *file* can be an on-disk file object - opened for binary reading, a :class:`io.BytesIO` object, or any other + opened for binary reading, an :class:`io.BytesIO` object, or any other custom object that meets this interface. Optional keyword arguments are *fix_imports*, *encoding* and *errors*, diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst index 679cc6b2..e679317f 100644 --- a/Doc/library/platform.rst +++ b/Doc/library/platform.rst @@ -117,7 +117,7 @@ Cross Platform .. function:: python_version() - Returns the Python version as string ``'major.minor.patchlevel'`` + Returns the Python version as string ``'major.minor.patchlevel'``. Note that unlike the Python ``sys.version``, the returned value will always include the patchlevel (it defaults to 0). diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst index 41655911..2c1f3dd7 100644 --- a/Doc/library/plistlib.rst +++ b/Doc/library/plistlib.rst @@ -194,7 +194,7 @@ The following classes are available: It has one attribute, :attr:`data`, that can be used to retrieve the Python bytes object stored in it. - .. deprecated:: 3.4 Use a :class:`bytes` object instead + .. deprecated:: 3.4 Use a :class:`bytes` object instead. The following constants are available: diff --git a/Doc/library/profile.rst b/Doc/library/profile.rst index 2928821d..959d9b98 100644 --- a/Doc/library/profile.rst +++ b/Doc/library/profile.rst @@ -33,7 +33,7 @@ profiling interface: 2. :mod:`profile`, a pure Python module whose interface is imitated by :mod:`cProfile`, but which adds significant overhead to profiled programs. If you're trying to extend the profiler in some way, the task might be easier - with this module. + with this module. Originally designed and written by Jim Roskind. .. note:: diff --git a/Doc/library/pyexpat.rst b/Doc/library/pyexpat.rst index 78aa99c4..620ffb1c 100644 --- a/Doc/library/pyexpat.rst +++ b/Doc/library/pyexpat.rst @@ -763,7 +763,7 @@ The ``errors`` module has the following attributes: .. data:: XML_ERROR_UNDEFINED_ENTITY - A reference was made to a entity which was not defined. 
+   A reference was made to an entity which was not defined.

.. data:: XML_ERROR_UNKNOWN_ENCODING

diff --git a/Doc/library/re.rst b/Doc/library/re.rst
index 88845844..a9f81616 100644
--- a/Doc/library/re.rst
+++ b/Doc/library/re.rst
@@ -556,13 +556,15 @@ form.
.. data:: X
          VERBOSE

-   This flag allows you to write regular expressions that look nicer. Whitespace
-   within the pattern is ignored, except when in a character class or preceded by
-   an unescaped backslash, and, when a line contains a ``'#'`` neither in a
-   character class or preceded by an unescaped backslash, all characters from the
-   leftmost such ``'#'`` through the end of the line are ignored.
-
-   That means that the two following regular expression objects that match a
+   This flag allows you to write regular expressions that look nicer and are
+   more readable by allowing you to visually separate logical sections of the
+   pattern and add comments.  Whitespace within the pattern is ignored, except
+   when in a character class or when preceded by an unescaped backslash.
+   When a line contains a ``#`` that is not in a character class and is not
+   preceded by an unescaped backslash, all characters from the leftmost such
+   ``#`` through the end of the line are ignored.
+
+   This means that the two following regular expression objects that match a
   decimal number are functionally equal::

     a = re.compile(r"""\d +  # the integral part
diff --git a/Doc/library/readline.rst b/Doc/library/readline.rst
index 3864f0d2..b3d765c5 100644
--- a/Doc/library/readline.rst
+++ b/Doc/library/readline.rst
@@ -222,7 +222,7 @@ sessions, by only appending the new history. ::

   import atexit
   import os
-   import realine
+   import readline

   histfile = os.path.join(os.path.expanduser("~"), ".python_history")
   try:
diff --git a/Doc/library/resource.rst b/Doc/library/resource.rst
index 7c0e4caf..a7b229e9 100644
--- a/Doc/library/resource.rst
+++ b/Doc/library/resource.rst
@@ -316,10 +316,7 @@ These functions are used to retrieve resource usage information:
.. function:: getpagesize()

   Returns the number of bytes in a system page. (This need not be the same as the
-   hardware page size.) This function is useful for determining the number of bytes
-   of memory a process is using. The third element of the tuple returned by
-   :func:`getrusage` describes memory usage in pages; multiplying by page size
-   produces number of bytes.
+   hardware page size.)

The following :const:`RUSAGE_\*` symbols are passed to the :func:`getrusage`
function to specify which processes' information should be provided for.

diff --git a/Doc/library/selectors.rst b/Doc/library/selectors.rst
index f6ef24b6..56cfc6bd 100644
--- a/Doc/library/selectors.rst
+++ b/Doc/library/selectors.rst
@@ -50,8 +50,8 @@ Classes hierarchy::

In the following, *events* is a bitwise mask indicating which I/O events should
-be waited for on a given file object.  It can be a combination of the constants
-below:
+be waited for on a given file object.  It can be a combination of the module's
+constants below:

+-----------------------+-----------------------------------------------+
| Constant              | Meaning                                       |
+-----------------------+-----------------------------------------------+
diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst
index 3b467e01..904627f4 100644
--- a/Doc/library/shutil.rst
+++ b/Doc/library/shutil.rst
@@ -207,8 +207,8 @@ Directory and files operations
   and metadata of the linked files are copied to the new tree.
When *symlinks* is false, if the file pointed by the symlink doesn't - exist, a exception will be added in the list of errors raised in - a :exc:`Error` exception at the end of the copy process. + exist, an exception will be added in the list of errors raised in + an :exc:`Error` exception at the end of the copy process. You can set the optional *ignore_dangling_symlinks* flag to true if you want to silence this exception. Notice that this option has no effect on platforms that don't support :func:`os.symlink`. @@ -497,7 +497,7 @@ provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules. .. function:: get_archive_formats() Return a list of supported formats for archiving. - Each element of the returned sequence is a tuple ``(name, description)`` + Each element of the returned sequence is a tuple ``(name, description)``. By default :mod:`shutil` provides these formats: diff --git a/Doc/library/signal.rst b/Doc/library/signal.rst index 95498383..8f814df8 100644 --- a/Doc/library/signal.rst +++ b/Doc/library/signal.rst @@ -105,7 +105,7 @@ The variables defined in the :mod:`signal` module are: .. data:: CTRL_C_EVENT - The signal corresponding to the CTRL+C keystroke event. This signal can + The signal corresponding to the :kbd:`Ctrl+C` keystroke event. This signal can only be used with :func:`os.kill`. Availability: Windows. @@ -115,7 +115,7 @@ The variables defined in the :mod:`signal` module are: .. data:: CTRL_BREAK_EVENT - The signal corresponding to the CTRL+BREAK keystroke event. This signal can + The signal corresponding to the :kbd:`Ctrl+Break` keystroke event. This signal can only be used with :func:`os.kill`. Availability: Windows. diff --git a/Doc/library/smtplib.rst b/Doc/library/smtplib.rst index a71ee585..8cbd20d4 100644 --- a/Doc/library/smtplib.rst +++ b/Doc/library/smtplib.rst @@ -22,7 +22,7 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions). .. class:: SMTP(host='', port=0, local_hostname=None[, timeout], source_address=None) - A :class:`SMTP` instance encapsulates an SMTP connection. It has methods + An :class:`SMTP` instance encapsulates an SMTP connection. It has methods that support a full repertoire of SMTP and ESMTP operations. If the optional host and port parameters are given, the SMTP :meth:`connect` method is called with those parameters during initialization. If specified, @@ -69,7 +69,7 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions). certfile=None [, timeout], context=None, \ source_address=None) - A :class:`SMTP_SSL` instance behaves exactly the same as instances of + An :class:`SMTP_SSL` instance behaves exactly the same as instances of :class:`SMTP`. :class:`SMTP_SSL` should be used for situations where SSL is required from the beginning of the connection and using :meth:`starttls` is not appropriate. If *host* is not specified, the local host is used. If diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst index 1dcdb2fb..a0817562 100644 --- a/Doc/library/socket.rst +++ b/Doc/library/socket.rst @@ -106,8 +106,30 @@ created. Socket addresses are represented as follows: .. versionadded:: 3.3 -- Certain other address families (:const:`AF_BLUETOOTH`, :const:`AF_PACKET`, - :const:`AF_CAN`) support specific representations. +- :const:`AF_BLUETOOTH` supports the following protocols and address + formats: + + - :const:`BTPROTO_L2CAP` accepts ``(bdaddr, psm)`` where ``bdaddr`` is + the Bluetooth address as a string and ``psm`` is an integer. 
+ + - :const:`BTPROTO_RFCOMM` accepts ``(bdaddr, channel)`` where ``bdaddr`` + is the Bluetooth address as a string and ``channel`` is an integer. + + - :const:`BTPROTO_HCI` accepts ``(device_id,)`` where ``device_id`` is + either an integer or a string with the Bluetooth address of the + interface. (This depends on your OS; NetBSD and DragonFlyBSD expect + a Bluetooth address while everything else expects an integer.) + + .. versionchanged:: 3.2 + NetBSD and DragonFlyBSD support added. + + - :const:`BTPROTO_SCO` accepts ``bdaddr`` where ``bdaddr`` is a + :class:`bytes` object containing the Bluetooth address in a + string format. (ex. ``b'12:23:34:45:56:67'``) This protocol is not + supported under FreeBSD. + +- Certain other address families (:const:`AF_PACKET`, :const:`AF_CAN`) + support specific representations. .. XXX document them! @@ -327,6 +349,22 @@ Constants This constant contains a boolean value which indicates if IPv6 is supported on this platform. +.. data:: BDADDR_ANY + BDADDR_LOCAL + + These are string constants containing Bluetooth addresses with special + meanings. For example, :const:`BDADDR_ANY` can be used to indicate + any address when specifying the binding socket with + :const:`BTPROTO_RFCOMM`. + +.. data:: HCI_FILTER + HCI_TIME_STAMP + HCI_DATA_DIR + + For use with :const:`BTPROTO_HCI`. :const:`HCI_FILTER` is not + available for NetBSD or DragonFlyBSD. :const:`HCI_TIME_STAMP` and + :const:`HCI_DATA_DIR` are not available for FreeBSD, NetBSD, or + DragonFlyBSD. Functions ^^^^^^^^^ @@ -346,7 +384,11 @@ The following functions all create :ref:`socket objects `. :const:`SOCK_DGRAM`, :const:`SOCK_RAW` or perhaps one of the other ``SOCK_`` constants. The protocol number is usually zero and may be omitted or in the case where the address family is :const:`AF_CAN` the protocol should be one - of :const:`CAN_RAW` or :const:`CAN_BCM`. + of :const:`CAN_RAW` or :const:`CAN_BCM`. If *fileno* is specified, the other + arguments are ignored, causing the socket with the specified file descriptor + to return. Unlike :func:`socket.fromfd`, *fileno* will return the same + socket and not a duplicate. This may help close a detached socket using + :meth:`socket.close()`. The newly created socket is :ref:`non-inheritable `. @@ -673,7 +715,7 @@ The :mod:`socket` module also offers various network-related services: Supported values for *address_family* are currently :const:`AF_INET` and :const:`AF_INET6`. If the bytes object *packed_ip* is not the correct length for the specified address family, :exc:`ValueError` will be raised. - A :exc:`OSError` is raised for errors from the call to :func:`inet_ntop`. + :exc:`OSError` is raised for errors from the call to :func:`inet_ntop`. Availability: Unix (maybe not all platforms), Windows. @@ -744,7 +786,7 @@ The :mod:`socket` module also offers various network-related services: .. function:: sethostname(name) - Set the machine's hostname to *name*. This will raise a + Set the machine's hostname to *name*. This will raise an :exc:`OSError` if you don't have enough rights. Availability: Unix. @@ -776,7 +818,7 @@ The :mod:`socket` module also offers various network-related services: .. function:: if_indextoname(if_index) - Return a network interface name corresponding to a + Return a network interface name corresponding to an interface index number. :exc:`OSError` if no interface with the given index exists. @@ -972,7 +1014,7 @@ to sockets. interpreted the same way as by the built-in :func:`open` function. 
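+
+   As an illustrative sketch of wrapping a connected socket in a file
+   object to speak a line-oriented protocol (the host, port and request
+   below are placeholder values, not part of this API)::
+
+      import socket
+
+      with socket.create_connection(('example.org', 80)) as sock:
+          # mode, encoding, etc. are interpreted as for the built-in open()
+          f = sock.makefile('rwb')
+          f.write(b'HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n')
+          f.flush()              # the file object buffers writes
+          print(f.readline())    # first line of the reply, if the host answers
+          f.close()
+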
The socket must be in blocking mode; it can have a timeout, but the file - object's internal buffer may end up in a inconsistent state if a timeout + object's internal buffer may end up in an inconsistent state if a timeout occurs. Closing the file object returned by :meth:`makefile` won't close the diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst index fc69a804..b14ea722 100644 --- a/Doc/library/sqlite3.rst +++ b/Doc/library/sqlite3.rst @@ -615,7 +615,7 @@ Cursor Objects .. attribute:: lastrowid This read-only attribute provides the rowid of the last modified row. It is - only set if you issued a ``INSERT`` statement using the :meth:`execute` + only set if you issued an ``INSERT`` statement using the :meth:`execute` method. For operations other than ``INSERT`` or when :meth:`executemany` is called, :attr:`lastrowid` is set to :const:`None`. diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst index 40751347..fbb9b286 100644 --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -315,8 +315,6 @@ Random generation For almost all applications :func:`os.urandom` is preferable. - For almost all applications :func:`os.urandom` is preferable. - .. versionadded:: 3.3 .. function:: RAND_status() @@ -880,7 +878,7 @@ SSL sockets also have the following additional methods and attributes: The :meth:`~SSLSocket.read` and :meth:`~SSLSocket.write` methods are the low-level methods that read and write unencrypted, application-level data - and and decrypt/encrypt it to encrypted, wire-level data. These methods + and decrypt/encrypt it to encrypted, wire-level data. These methods require an active SSL connection, i.e. the handshake was completed and :meth:`SSLSocket.unwrap` was not called. diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst index 85734167..71fc3b50 100644 --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -854,8 +854,8 @@ operations have the same priority as the corresponding numeric operations. | ``s + t`` | the concatenation of *s* and | (6)(7) | | | *t* | | +--------------------------+--------------------------------+----------+ -| ``s * n`` or | *n* shallow copies of *s* | (2)(7) | -| ``n * s`` | concatenated | | +| ``s * n`` or | equivalent to adding *s* to | (2)(7) | +| ``n * s`` | itself *n* times | | +--------------------------+--------------------------------+----------+ | ``s[i]`` | *i*\ th item of *s*, origin 0 | \(3) | +--------------------------+--------------------------------+----------+ @@ -897,9 +897,9 @@ Notes: (2) Values of *n* less than ``0`` are treated as ``0`` (which yields an empty - sequence of the same type as *s*). Note also that the copies are shallow; - nested structures are not copied. This often haunts new Python programmers; - consider:: + sequence of the same type as *s*). Note that items in the sequence *s* + are not copied; they are referenced multiple times. This often haunts + new Python programmers; consider:: >>> lists = [[]] * 3 >>> lists @@ -909,7 +909,7 @@ Notes: [[3], [3], [3]] What has happened is that ``[[]]`` is a one-element list containing an empty - list, so all three elements of ``[[]] * 3`` are (pointers to) this single empty + list, so all three elements of ``[[]] * 3`` are references to this single empty list. Modifying any of the elements of ``lists`` modifies this single list. You can create a list of different lists this way:: @@ -920,6 +920,9 @@ Notes: >>> lists [[3], [5], [7]] + Further explanation is available in the FAQ entry + :ref:`faq-multidimensional-list`. 
+ (3) If *i* or *j* is negative, the index is relative to the end of the string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note that ``-0`` is @@ -948,7 +951,7 @@ Notes: runtime cost, you must switch to one of the alternatives below: * if concatenating :class:`str` objects, you can build a list and use - :meth:`str.join` at the end or else write to a :class:`io.StringIO` + :meth:`str.join` at the end or else write to an :class:`io.StringIO` instance and retrieve its value when complete * if concatenating :class:`bytes` objects, you can similarly use @@ -1060,10 +1063,14 @@ accepts integers that meet the value restriction ``0 <= x <= 255``). | ``s.copy()`` | creates a shallow copy of ``s``| \(5) | | | (same as ``s[:]``) | | +------------------------------+--------------------------------+---------------------+ -| ``s.extend(t)`` | extends *s* with the | | -| | contents of *t* (same as | | +| ``s.extend(t)`` or | extends *s* with the | | +| ``s += t`` | contents of *t* (for the | | +| | most part the same as | | | | ``s[len(s):len(s)] = t``) | | +------------------------------+--------------------------------+---------------------+ +| ``s *= n`` | updates *s* with its contents | \(6) | +| | repeated *n* times | | ++------------------------------+--------------------------------+---------------------+ | ``s.insert(i, x)`` | inserts *x* into *s* at the | | | | index given by *i* | | | | (same as ``s[i:i] = [x]``) | | @@ -1104,6 +1111,12 @@ Notes: .. versionadded:: 3.3 :meth:`clear` and :meth:`!copy` methods. +(6) + The value *n* is an integer, or an object implementing + :meth:`~object.__index__`. Zero and negative values of *n* clear + the sequence. Items in the sequence are not copied; they are referenced + multiple times, as explained for ``s * n`` under :ref:`typesseq-common`. + .. _typesseq-list: diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst index efcacce4..adf99ec3 100644 --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -1068,7 +1068,7 @@ Return code handling translates as follows:: if rc is not None and rc >> 8: print("There were some errors") ==> - process = Popen(cmd, 'w', stdin=PIPE) + process = Popen(cmd, stdin=PIPE) ... process.stdin.close() if process.wait() != 0: diff --git a/Doc/library/sunau.rst b/Doc/library/sunau.rst index a94ae08d..bd37ee29 100644 --- a/Doc/library/sunau.rst +++ b/Doc/library/sunau.rst @@ -54,8 +54,8 @@ The :mod:`sunau` module defines the following functions: Note that it does not allow read/write files. - A *mode* of ``'r'`` returns a :class:`AU_read` object, while a *mode* of ``'w'`` - or ``'wb'`` returns a :class:`AU_write` object. + A *mode* of ``'r'`` returns an :class:`AU_read` object, while a *mode* of ``'w'`` + or ``'wb'`` returns an :class:`AU_write` object. .. function:: openfp(file, mode) diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst index bb9bdc86..f6325cc8 100644 --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -641,7 +641,7 @@ always available. :term:`struct sequence` :data:`sys.version_info` may be used for a more human-friendly encoding of the same information. - More details of ``hexversion`` can be found at :ref:`apiabiversion` + More details of ``hexversion`` can be found at :ref:`apiabiversion`. .. data:: implementation @@ -975,6 +975,13 @@ always available. that supports a higher limit. This should be done with care, because a too-high limit can lead to a crash. + If the new limit is too low at the current recursion depth, a + :exc:`RecursionError` exception is raised. 
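+
+   For example, a minimal sketch of lowering the limit defensively (the
+   helper name below is illustrative only, not part of this API)::
+
+      import sys
+
+      def shrink_limit(new_limit):
+          # Assumption: this may be called while frames are already on the stack.
+          try:
+              sys.setrecursionlimit(new_limit)
+          except RecursionError:
+              # The requested limit is lower than the current recursion depth.
+              print('keeping the current limit of', sys.getrecursionlimit())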
+
+   .. versionchanged:: 3.5.1
+      A :exc:`RecursionError` exception is now raised if the new limit is too
+      low at the current recursion depth.
+

.. function:: setswitchinterval(interval)

diff --git a/Doc/library/tempfile.rst b/Doc/library/tempfile.rst
index 0387fb14..19e1e7fa 100644
--- a/Doc/library/tempfile.rst
+++ b/Doc/library/tempfile.rst
@@ -16,53 +16,60 @@

--------------

-This module generates temporary files and directories.  It works on all
-supported platforms. It provides three new functions,
-:func:`NamedTemporaryFile`, :func:`mkstemp`, and :func:`mkdtemp`, which should
-eliminate all remaining need to use the insecure :func:`mktemp` function.
-Temporary file names created by this module no longer contain the process ID;
-instead a string of six random characters is used.
-
-Also, all the user-callable functions now take additional arguments which
-allow direct control over the location and name of temporary files.  It is
-no longer necessary to use the global *tempdir* variable.
+This module creates temporary files and directories.  It works on all
+supported platforms. :class:`TemporaryFile`, :class:`NamedTemporaryFile`,
+:class:`TemporaryDirectory`, and :class:`SpooledTemporaryFile` are high-level
+interfaces which provide automatic cleanup and can be used as
+context managers. :func:`mkstemp` and
+:func:`mkdtemp` are lower-level functions which require manual cleanup.
+
+All the user-callable functions and constructors take additional arguments which
+allow direct control over the location and name of temporary files and
+directories. File names used by this module include a string of
+random characters which allows those files to be securely created in
+shared temporary directories.

To maintain backward compatibility, the argument order is somewhat odd; it
is recommended to use keyword arguments for clarity.

The module defines the following user-callable items:

-.. function:: TemporaryFile(mode='w+b', buffering=None, encoding=None, newline=None, suffix='', prefix='tmp', dir=None)
+.. function:: TemporaryFile(mode='w+b', buffering=None, encoding=None, newline=None, suffix=None, prefix=None, dir=None)

   Return a :term:`file-like object` that can be used as a temporary storage area.
-   The file is created using :func:`mkstemp`. It will be destroyed as soon
+   The file is created securely, using the same rules as :func:`mkstemp`. It will be destroyed as soon
   as it is closed (including an implicit close when the object is garbage
-   collected).  Under Unix, the directory entry for the file is removed
+   collected).  Under Unix, the directory entry for the file is either not created at all or is removed
   immediately after the file is created.  Other platforms do not support
   this; your code should not rely on a temporary file created using this
   function having or not having a visible name in the file system.

+   The resulting object can be used as a context manager (see
+   :ref:`tempfile-examples`).  On completion of the context or
+   destruction of the file object the temporary file will be removed
+   from the filesystem.
+
   The *mode* parameter defaults to ``'w+b'`` so that the file created can
   be read and written without being closed.  Binary mode is used so that it
   behaves consistently on all platforms without regard for the data that is
   stored.  *buffering*, *encoding* and *newline* are interpreted as for
   :func:`open`.

-   The *dir*, *prefix* and *suffix* parameters are passed to :func:`mkstemp`.
+ The *dir*, *prefix* and *suffix* parameters have the same meaning and + defaults as with :func:`mkstemp`. The returned object is a true file object on POSIX platforms. On other platforms, it is a file-like object whose :attr:`!file` attribute is the - underlying true file object. This file-like object can be used in a - :keyword:`with` statement, just like a normal file. + underlying true file object. The :py:data:`os.O_TMPFILE` flag is used if it is available and works - (Linux-specific, require Linux kernel 3.11 or later). + (Linux-specific, requires Linux kernel 3.11 or later). .. versionchanged:: 3.5 The :py:data:`os.O_TMPFILE` flag is now used if available. -.. function:: NamedTemporaryFile(mode='w+b', buffering=None, encoding=None, newline=None, suffix='', prefix='tmp', dir=None, delete=True) +.. function:: NamedTemporaryFile(mode='w+b', buffering=None, encoding=None, newline=None, suffix=None, prefix=None, dir=None, delete=True) This function operates exactly as :func:`TemporaryFile` does, except that the file is guaranteed to have a visible name in the file system (on @@ -77,7 +84,7 @@ The module defines the following user-callable items: be used in a :keyword:`with` statement, just like a normal file. -.. function:: SpooledTemporaryFile(max_size=0, mode='w+b', buffering=None, encoding=None, newline=None, suffix='', prefix='tmp', dir=None) +.. function:: SpooledTemporaryFile(max_size=0, mode='w+b', buffering=None, encoding=None, newline=None, suffix=None, prefix=None, dir=None) This function operates exactly as :func:`TemporaryFile` does, except that data is spooled in memory until the file size exceeds *max_size*, or @@ -89,7 +96,7 @@ The module defines the following user-callable items: causes the file to roll over to an on-disk file regardless of its size. The returned object is a file-like object whose :attr:`_file` attribute - is either a :class:`io.BytesIO` or :class:`io.StringIO` object (depending on + is either an :class:`io.BytesIO` or :class:`io.StringIO` object (depending on whether binary or text *mode* was specified) or a true file object, depending on whether :func:`rollover` has been called. This file-like object can be used in a :keyword:`with` statement, just like @@ -99,12 +106,11 @@ The module defines the following user-callable items: the truncate method now accepts a ``size`` argument. -.. function:: TemporaryDirectory(suffix='', prefix='tmp', dir=None) +.. function:: TemporaryDirectory(suffix=None, prefix=None, dir=None) - This function creates a temporary directory using :func:`mkdtemp` - (the supplied arguments are passed directly to the underlying function). + This function securely creates a temporary directory using the same rules as :func:`mkdtemp`. The resulting object can be used as a context manager (see - :ref:`context-managers`). On completion of the context or destruction + :ref:`tempfile-examples`). On completion of the context or destruction of the temporary directory object the newly created temporary directory and all its contents are removed from the filesystem. @@ -132,15 +138,16 @@ The module defines the following user-callable items: Unlike :func:`TemporaryFile`, the user of :func:`mkstemp` is responsible for deleting the temporary file when done with it. - If *suffix* is specified, the file name will end with that suffix, + If *suffix* is not ``None``, the file name will end with that suffix, otherwise there will be no suffix. 
:func:`mkstemp` does not put a dot between the file name and the suffix; if you need one, put it at the beginning of *suffix*. - If *prefix* is specified, the file name will begin with that prefix; - otherwise, a default prefix is used. + If *prefix* is not ``None``, the file name will begin with that prefix; + otherwise, a default prefix is used. The default is the return value of + :func:`gettempprefix` or :func:`gettempprefixb`, as appropriate. - If *dir* is specified, the file will be created in that directory; + If *dir* is not ``None``, the file will be created in that directory; otherwise, a default directory is used. The default directory is chosen from a platform-dependent list, but the user of the application can control the directory location by setting the *TMPDIR*, *TEMP* or *TMP* @@ -148,16 +155,12 @@ The module defines the following user-callable items: filename will have any nice properties, such as not requiring quoting when passed to external commands via ``os.popen()``. - *suffix*, *prefix*, and *dir* must all contain the same type, if specified. + If any of *suffix*, *prefix*, and *dir* are not + ``None``, they must be the same type. If they are bytes, the returned name will be bytes instead of str. If you want to force a bytes return value with otherwise default behavior, pass ``suffix=b''``. - A *prefix* value of ``None`` means use the return value of - :func:`gettempprefix` or :func:`gettempprefixb` as appropriate. - - A *suffix* value of ``None`` means use an appropriate empty value. - If *text* is specified, it indicates whether to open the file in binary mode (the default) or text mode. On some platforms, this makes no difference. @@ -194,49 +197,14 @@ The module defines the following user-callable items: an appropriate default value to be used. -.. function:: mktemp(suffix='', prefix='tmp', dir=None) - - .. deprecated:: 2.3 - Use :func:`mkstemp` instead. - - Return an absolute pathname of a file that did not exist at the time the - call is made. The *prefix*, *suffix*, and *dir* arguments are the same - as for :func:`mkstemp`. - - .. warning:: - - Use of this function may introduce a security hole in your program. By - the time you get around to doing anything with the file name it returns, - someone else may have beaten you to the punch. :func:`mktemp` usage can - be replaced easily with :func:`NamedTemporaryFile`, passing it the - ``delete=False`` parameter:: - - >>> f = NamedTemporaryFile(delete=False) - >>> f.name - '/tmp/tmptjujjt' - >>> f.write(b"Hello World!\n") - 13 - >>> f.close() - >>> os.unlink(f.name) - >>> os.path.exists(f.name) - False - -The module uses a global variable that tell it how to construct a -temporary name. They are initialized at the first call to any of the -functions above. The caller may change them, but this is discouraged; use -the appropriate function arguments, instead. +.. function:: gettempdir() + Return the name of the directory used for temporary files. This + defines the default value for the *dir* argument to all functions + in this module. -.. data:: tempdir - - When set to a value other than ``None``, this variable defines the - default value for the *dir* argument to all the functions defined in this - module. - - If ``tempdir`` is unset or ``None`` at any call to any of the above - functions, Python searches a standard list of directories and sets - *tempdir* to the first one which the calling user can create files in. 
-   The list is:
+   Python searches a standard list of directories to find one which
+   the calling user can create files in.  The list is:

   #. The directory named by the :envvar:`TMPDIR` environment variable.

@@ -254,12 +222,8 @@ the appropriate function arguments, instead.

   #. As a last resort, the current working directory.

-
-.. function:: gettempdir()
-
-   Return the directory currently selected to create temporary files in.  If
-   :data:`tempdir` is not ``None``, this simply returns its contents; otherwise,
-   the search described above is performed, and the result returned.
+   The result of this search is cached; see the description of
+   :data:`tempdir` below.

.. function:: gettempdirb()

@@ -274,10 +238,27 @@ the appropriate function arguments, instead.

.. function:: gettempprefixb()

-   Same as :func:`gettempprefixb` but the return value is in bytes.
+   Same as :func:`gettempprefix` but the return value is in bytes.

   .. versionadded:: 3.5

+The module uses a global variable to store the name of the directory
+used for temporary files returned by :func:`gettempdir`. It can be
+set directly to override the selection process, but this is discouraged.
+All functions in this module take a *dir* argument which can be used
+to specify the directory, and this is the recommended approach.
+
+.. data:: tempdir
+
+   When set to a value other than ``None``, this variable defines the
+   default value for the *dir* argument to all the functions defined in this
+   module.
+
+   If ``tempdir`` is unset or ``None`` at any call to any of the above
+   functions except :func:`gettempprefix`, it is initialized following the
+   algorithm described in :func:`gettempdir`.
+
+.. _tempfile-examples:

Examples
--------
@@ -311,3 +292,43 @@ Here are some examples of typical usage of the :mod:`tempfile` module::

   >>> # directory and contents have been removed

+
+Deprecated functions and variables
+----------------------------------
+
+A historical way to create temporary files was to first generate a
+file name with the :func:`mktemp` function and then create a file
+using this name. Unfortunately this is not secure, because a different
+process may create a file with this name in the time between the call
+to :func:`mktemp` and the subsequent attempt to create the file by the
+first process. The solution is to combine the two steps and create the
+file immediately. This approach is used by :func:`mkstemp` and the
+other functions described above.
+
+.. function:: mktemp(suffix='', prefix='tmp', dir=None)
+
+   .. deprecated:: 2.3
+      Use :func:`mkstemp` instead.
+
+   Return an absolute pathname of a file that did not exist at the time the
+   call is made.  The *prefix*, *suffix*, and *dir* arguments are similar
+   to those of :func:`mkstemp`, except that bytes file names, ``suffix=None``
+   and ``prefix=None`` are not supported.
+
+   .. warning::
+
+      Use of this function may introduce a security hole in your program.  By
+      the time you get around to doing anything with the file name it returns,
+      someone else may have beaten you to the punch.
:func:`mktemp` usage can + be replaced easily with :func:`NamedTemporaryFile`, passing it the + ``delete=False`` parameter:: + + >>> f = NamedTemporaryFile(delete=False) + >>> f.name + '/tmp/tmptjujjt' + >>> f.write(b"Hello World!\n") + 13 + >>> f.close() + >>> os.unlink(f.name) + >>> os.path.exists(f.name) + False diff --git a/Doc/library/test.rst b/Doc/library/test.rst index f7ad4754..85cab3b2 100644 --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -160,7 +160,7 @@ Running tests using the command-line interface The :mod:`test` package can be run as a script to drive Python's regression test suite, thanks to the :option:`-m` option: :program:`python -m test`. Under the hood, it uses :mod:`test.regrtest`; the call :program:`python -m -test.regrtest` used in previous Python versions still works). Running the +test.regrtest` used in previous Python versions still works. Running the script by itself automatically starts running all regression tests in the :mod:`test` package. It does this by finding all modules in the package whose name starts with ``test_``, importing them, and executing the function diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst index 52699947..c56d7073 100644 --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -89,7 +89,8 @@ This module defines the following functions: Return the thread stack size used when creating new threads. The optional *size* argument specifies the stack size to be used for subsequently created threads, and must be 0 (use platform or configured default) or a positive - integer value of at least 32,768 (32 KiB). If changing the thread stack size is + integer value of at least 32,768 (32 KiB). If *size* is not specified, + 0 is used. If changing the thread stack size is unsupported, a :exc:`RuntimeError` is raised. If the specified stack size is invalid, a :exc:`ValueError` is raised and the stack size is unmodified. 32 KiB is currently the minimum supported stack size value to guarantee sufficient diff --git a/Doc/library/tkinter.ttk.rst b/Doc/library/tkinter.ttk.rst index b0eefcb7..4601171b 100644 --- a/Doc/library/tkinter.ttk.rst +++ b/Doc/library/tkinter.ttk.rst @@ -110,8 +110,9 @@ All the :mod:`ttk` Widgets accepts the following options: | class | Specifies the window class. The class is used when querying | | | the option database for the window's other options, to | | | determine the default bindtags for the window, and to select | - | | the widget's default layout and style. This is a read-only | - | | which may only be specified when the window is created | + | | the widget's default layout and style. This option is | + | | read-only, and may only be specified when the window is | + | | created. | +-----------+--------------------------------------------------------------+ | cursor | Specifies the mouse cursor to be used for the widget. If set | | | to the empty string (the default), the cursor is inherited | @@ -554,9 +555,9 @@ ttk.Notebook This will extend the bindings for the toplevel window containing the notebook as follows: - * Control-Tab: selects the tab following the currently selected one. - * Shift-Control-Tab: selects the tab preceding the currently selected one. - * Alt-K: where K is the mnemonic (underlined) character of any tab, will + * :kbd:`Control-Tab`: selects the tab following the currently selected one. + * :kbd:`Shift-Control-Tab`: selects the tab preceding the currently selected one. 
+ * :kbd:`Alt-K`: where *K* is the mnemonic (underlined) character of any tab, will select that tab. Multiple notebooks in a single toplevel may be enabled for traversal, diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst index bd6b121f..c9cb5189 100644 --- a/Doc/library/tokenize.rst +++ b/Doc/library/tokenize.rst @@ -41,7 +41,7 @@ The primary entry point is a :term:`generator`: returned as a :term:`named tuple` with the field names: ``type string start end line``. - The returned :term:`named tuple` has a additional property named + The returned :term:`named tuple` has an additional property named ``exact_type`` that contains the exact operator type for :data:`token.OP` tokens. For all other token types ``exact_type`` equals the named tuple ``type`` field. diff --git a/Doc/library/tracemalloc.rst b/Doc/library/tracemalloc.rst index 13c81a7f..861be370 100644 --- a/Doc/library/tracemalloc.rst +++ b/Doc/library/tracemalloc.rst @@ -464,7 +464,7 @@ Snapshot All inclusive filters are applied at once, a trace is ignored if no inclusive filters match it. A trace is ignored if at least one exclusive - filter matchs it. + filter matches it. .. classmethod:: load(filename) diff --git a/Doc/library/types.rst b/Doc/library/types.rst index ff75de1e..eb27846a 100644 --- a/Doc/library/types.rst +++ b/Doc/library/types.rst @@ -86,14 +86,14 @@ Standard names are defined for the following types: .. data:: GeneratorType - The type of :term:`generator`-iterator objects, produced by calling a - generator function. + The type of :term:`generator`-iterator objects, created by + generator functions. .. data:: CoroutineType - The type of :term:`coroutine` objects, produced by calling a - function defined with an :keyword:`async def` statement. + The type of :term:`coroutine` objects, created by + :keyword:`async def` functions. .. versionadded:: 3.5 diff --git a/Doc/library/typing.rst b/Doc/library/typing.rst index 8089443c..0fe11671 100644 --- a/Doc/library/typing.rst +++ b/Doc/library/typing.rst @@ -35,7 +35,7 @@ Callable -------- Frameworks expecting callback functions of specific signatures might be -type hinted using `Callable[[Arg1Type, Arg2Type], ReturnType]`. +type hinted using ``Callable[[Arg1Type, Arg2Type], ReturnType]``. For example:: @@ -60,7 +60,7 @@ Since type information about objects kept in containers cannot be statically inferred in a generic way, abstract base classes have been extended to support subscription to denote expected types for container elements. -.. code-block:: python +:: from typing import Mapping, Sequence @@ -70,7 +70,7 @@ subscription to denote expected types for container elements. Generics can be parametrized by using a new factory available in typing called :class:`TypeVar`. -.. code-block:: python +:: from typing import Sequence, TypeVar @@ -85,7 +85,7 @@ User-defined generic types A user-defined class can be defined as a generic class. -.. code-block:: python +:: from typing import TypeVar, Generic from logging import Logger @@ -220,9 +220,7 @@ The module defines the following classes, functions and decorators: Type variables exist primarily for the benefit of static type checkers. They serve as the parameters for generic types as well as for generic function definitions. See class Generic for more - information on generic types. Generic functions work as follows: - - .. code-block:: python + information on generic types. 
Generic functions work as follows:: def repeat(x: T, n: int) -> Sequence[T]: """Return a list containing n references to x.""" @@ -238,13 +236,13 @@ The module defines the following classes, functions and decorators: the return type is still plain :class:`str`. At runtime, ``isinstance(x, T)`` will raise :exc:`TypeError`. In general, - :func:`isinstance` and :func:`issublass` should not be used with types. + :func:`isinstance` and :func:`issubclass` should not be used with types. Type variables may be marked covariant or contravariant by passing ``covariant=True`` or ``contravariant=True``. See :pep:`484` for more details. By default type variables are invariant. Alternatively, a type variable may specify an upper bound using ``bound=``. - This means that an actual type substituted (explicitly or implictly) + This means that an actual type substituted (explicitly or implicitly) for the type variable must be a subclass of the boundary type, see :pep:`484`. @@ -278,7 +276,7 @@ The module defines the following classes, functions and decorators: * You cannot subclass or instantiate a union. - * You cannot write ``Union[X][Y]`` + * You cannot write ``Union[X][Y]``. * You can use ``Optional[X]`` as a shorthand for ``Union[X, None]``. @@ -331,6 +329,7 @@ The module defines the following classes, functions and decorators: X = TypeVar('X') Y = TypeVar('Y') + def lookup_name(mapping: Mapping[X, Y], key: X, default: Y) -> Y: try: return mapping[key] @@ -347,26 +346,26 @@ The module defines the following classes, functions and decorators: .. class:: SupportsInt - An ABC with one abstract method `__int__`. + An ABC with one abstract method ``__int__``. .. class:: SupportsFloat - An ABC with one abstract method `__float__`. + An ABC with one abstract method ``__float__``. .. class:: SupportsAbs - An ABC with one abstract method `__abs__` that is covariant + An ABC with one abstract method ``__abs__`` that is covariant in its return type. .. class:: SupportsRound - An ABC with one abstract method `__round__` + An ABC with one abstract method ``__round__`` that is covariant in its return type. .. class:: Reversible - An ABC with one abstract method `__reversed__` returning - an `Iterator[T_co]`. + An ABC with one abstract method ``__reversed__`` returning + an ``Iterator[T_co]``. .. class:: Container(Generic[T_co]) @@ -479,7 +478,7 @@ The module defines the following classes, functions and decorators: Usage:: - Employee = typing.NamedTuple('Employee', [('name', str), 'id', int)]) + Employee = typing.NamedTuple('Employee', [('name', str), ('id', int)]) This is equivalent to:: @@ -503,9 +502,9 @@ The module defines the following classes, functions and decorators: Return type hints for a function or method object. - This is often the same as obj.__annotations__, but it handles + This is often the same as ``obj.__annotations__``, but it handles forward references encoded as string literals, and if necessary - adds Optional[t] if a default value equal to None is set. + adds ``Optional[t]`` if a default value equal to None is set. .. decorator:: no_type_check(arg) @@ -519,7 +518,7 @@ The module defines the following classes, functions and decorators: .. decorator:: no_type_check_decorator(decorator) - Decorator to give another decorator the @no_type_check effect. + Decorator to give another decorator the :func:`no_type_check` effect. This wraps the decorator with something that wraps the decorated - function in @no_type_check. + function in :func:`no_type_check`. 
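As a brief, hedged sketch of the :func:`get_type_hints` behaviour described above (the ``Employee`` tuple mirrors the usage example; ``find`` is a made-up function)::

   from typing import NamedTuple, get_type_hints

   Employee = NamedTuple('Employee', [('name', str), ('id', int)])

   def find(name: str, default: Employee = None) -> Employee:
       return default

   # Because 'default' defaults to None, get_type_hints() reports its hint
   # as Optional[Employee]; 'name' and the return type are reported as-is.
   print(get_type_hints(find))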
diff --git a/Doc/library/unittest.mock.rst b/Doc/library/unittest.mock.rst index 0f621c84..9a51194b 100644 --- a/Doc/library/unittest.mock.rst +++ b/Doc/library/unittest.mock.rst @@ -552,7 +552,7 @@ the *new_callable* argument to :func:`patch`. keyword arguments (or an empty dictionary). >>> mock = Mock(return_value=None) - >>> print mock.call_args + >>> print(mock.call_args) None >>> mock() >>> mock.call_args @@ -747,7 +747,7 @@ apply to method calls on the mock object. >>> with patch('__main__.Foo.foo', new_callable=PropertyMock) as mock_foo: ... mock_foo.return_value = 'mockity-mock' ... this_foo = Foo() - ... print this_foo.foo + ... print(this_foo.foo) ... this_foo.foo = 6 ... mockity-mock @@ -1131,11 +1131,11 @@ you wanted a :class:`NonCallableMock` to be used: ... TypeError: 'NonCallableMock' object is not callable -Another use case might be to replace an object with a :class:`io.StringIO` instance: +Another use case might be to replace an object with an :class:`io.StringIO` instance: >>> from io import StringIO >>> def foo(): - ... print 'Something' + ... print('Something') ... >>> @patch('sys.stdout', new_callable=StringIO) ... def test(mock_stdout): @@ -1249,7 +1249,7 @@ ends. >>> import os >>> with patch.dict('os.environ', {'newkey': 'newvalue'}): - ... print os.environ['newkey'] + ... print(os.environ['newkey']) ... newvalue >>> assert 'newkey' not in os.environ @@ -1462,9 +1462,9 @@ inform the patchers of the different prefix by setting ``patch.TEST_PREFIX``: >>> @patch('__main__.value', 'not three') ... class Thing: ... def foo_one(self): - ... print value + ... print(value) ... def foo_two(self): - ... print value + ... print(value) ... >>> >>> Thing().foo_one() diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst index 50857031..ff9cc22d 100644 --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -204,8 +204,8 @@ Command-line options .. cmdoption:: -c, --catch - Control-C during the test run waits for the current test to end and then - reports all the results so far. A second control-C raises the normal + :kbd:`Control-C` during the test run waits for the current test to end and then + reports all the results so far. A second :kbd:`Control-C` raises the normal :exc:`KeyboardInterrupt` exception. See `Signal Handling`_ for the functions that provide this functionality. @@ -278,8 +278,8 @@ The :option:`-s`, :option:`-p`, and :option:`-t` options can be passed in as positional arguments in that order. The following two command lines are equivalent:: - python -m unittest discover -s project_directory -p '*_test.py' - python -m unittest discover project_directory '*_test.py' + python -m unittest discover -s project_directory -p "*_test.py" + python -m unittest discover project_directory "*_test.py" As well as being a path it is possible to pass a package name, for example ``myproject.subpackage.test``, as the start directory. The package name you @@ -471,7 +471,7 @@ Skipping tests and expected failures .. versionadded:: 3.1 Unittest supports skipping individual test methods and even whole classes of -tests. In addition, it supports marking a test as a "expected failure," a test +tests. In addition, it supports marking a test as an "expected failure," a test that is broken and will fail, but shouldn't be counted as a failure on a :class:`TestResult`. 
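A small sketch of those two mechanisms (the test names and assertions are illustrative)::

   import unittest

   class ExampleTests(unittest.TestCase):

       @unittest.skip("demonstrating skipping")
       def test_skipped(self):
           self.fail("never executed")

       @unittest.expectedFailure
       def test_known_bug(self):
           self.assertEqual(1, 2)   # reported as an expected failure, not an error

   if __name__ == '__main__':
       unittest.main()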
diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst index 349ba70d..87551551 100644 --- a/Doc/library/urllib.request.rst +++ b/Doc/library/urllib.request.rst @@ -202,7 +202,7 @@ The following classes are provided: ``"Python-urllib/2.6"`` (on Python 2.6). An example of using ``Content-Type`` header with *data* argument would be - sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}`` + sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}``. The final two arguments are only of interest for correct handling of third-party HTTP cookies: @@ -1168,7 +1168,7 @@ The code for the sample CGI used in the above example is:: #!/usr/bin/env python import sys data = sys.stdin.read() - print('Content-type: text-plain\n\nGot Data: "%s"' % data) + print('Content-type: text/plain\n\nGot Data: "%s"' % data) Here is an example of doing a ``PUT`` request using :class:`Request`:: @@ -1383,7 +1383,7 @@ some point in the future. .. method:: retrieve(url, filename=None, reporthook=None, data=None) Retrieves the contents of *url* and places it in *filename*. The return value - is a tuple consisting of a local filename and either a + is a tuple consisting of a local filename and either an :class:`email.message.Message` object containing the response headers (for remote URLs) or ``None`` (for local URLs). The caller must then open and read the contents of *filename*. If *filename* is not given and the URL refers to a diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst index de74c302..8c091f6b 100644 --- a/Doc/library/wsgiref.rst +++ b/Doc/library/wsgiref.rst @@ -506,7 +506,7 @@ input, output, and error streams. Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin servers. If you are writing an HTTP server implementation, you will probably - want to subclass this instead of :class:`BaseCGIHandler` + want to subclass this instead of :class:`BaseCGIHandler`. This class is a subclass of :class:`BaseHandler`. It overrides the :meth:`__init__`, :meth:`get_stdin`, :meth:`get_stderr`, :meth:`add_cgi_vars`, diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst index ff5c270d..6762e917 100644 --- a/Doc/library/xml.dom.minidom.rst +++ b/Doc/library/xml.dom.minidom.rst @@ -15,7 +15,7 @@ Model interface, with an API similar to that in other languages. It is intended to be simpler than the full DOM and also significantly smaller. Users who are not already proficient with the DOM should consider using the -:mod:`xml.etree.ElementTree` module for their XML processing instead +:mod:`xml.etree.ElementTree` module for their XML processing instead. .. warning:: @@ -54,7 +54,7 @@ instead: .. function:: parseString(string, parser=None) - Return a :class:`Document` that represents the *string*. This method creates a + Return a :class:`Document` that represents the *string*. This method creates an :class:`io.StringIO` object for the string and passes that on to :func:`parse`. 
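For instance, a minimal sketch (the XML snippet is illustrative)::

   from xml.dom.minidom import parseString

   dom = parseString('<greeting lang="en">Hello</greeting>')
   root = dom.documentElement
   print(root.tagName)               # greeting
   print(root.getAttribute('lang'))  # en
   print(root.firstChild.data)       # Hello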
Both functions return a :class:`Document` object representing the content of the diff --git a/Doc/library/xml.dom.pulldom.rst b/Doc/library/xml.dom.pulldom.rst index a9c9f67a..a3b8bc16 100644 --- a/Doc/library/xml.dom.pulldom.rst +++ b/Doc/library/xml.dom.pulldom.rst @@ -47,7 +47,7 @@ Example:: * :data:`PROCESSING_INSTRUCTION` * :data:`IGNORABLE_WHITESPACE` -``node`` is a object of type :class:`xml.dom.minidom.Document`, +``node`` is an object of type :class:`xml.dom.minidom.Document`, :class:`xml.dom.minidom.Element` or :class:`xml.dom.minidom.Text`. Since the document is treated as a "flat" stream of events, the document "tree" diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst index 4914738d..a432202e 100644 --- a/Doc/library/xml.dom.rst +++ b/Doc/library/xml.dom.rst @@ -304,7 +304,7 @@ All of the components of an XML document are subclasses of :class:`Node`. .. attribute:: Node.prefix The part of the :attr:`tagName` preceding the colon if there is one, else the - empty string. The value is a string, or ``None`` + empty string. The value is a string, or ``None``. .. attribute:: Node.namespaceURI diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst index eef1b583..dc0274eb 100644 --- a/Doc/library/xml.etree.elementtree.rst +++ b/Doc/library/xml.etree.elementtree.rst @@ -651,21 +651,29 @@ Element Objects .. attribute:: text + tail - The *text* attribute can be used to hold additional data associated with - the element. As the name implies this attribute is usually a string but - may be any application-specific object. If the element is created from - an XML file the attribute will contain any text found between the element - tags. + These attributes can be used to hold additional data associated with + the element. Their values are usually strings but may be any + application-specific object. If the element is created from + an XML file, the *text* attribute holds either the text between + the element's start tag and its first child or end tag, or ``None``, and + the *tail* attribute holds either the text between the element's + end tag and the next tag, or ``None``. For the XML data + .. code-block:: xml - .. attribute:: tail + 1234 - The *tail* attribute can be used to hold additional data associated with - the element. This attribute is usually a string but may be any - application-specific object. If the element is created from an XML file - the attribute will contain any text found after the element's end tag and - before the next tag. + the *a* element has ``None`` for both *text* and *tail* attributes, + the *b* element has *text* ``"1"`` and *tail* ``"4"``, + the *c* element has *text* ``"2"`` and *tail* ``None``, + and the *d* element has *text* ``None`` and *tail* ``"3"``. + + To collect the inner text of an element, see :meth:`itertext`, for + example ``"".join(element.itertext())``. + + Applications may store arbitrary objects in these attributes. .. attribute:: attrib @@ -883,7 +891,7 @@ ElementTree Objects Creates and returns a tree iterator for the root element. The iterator loops over all elements in this tree, in section order. *tag* is the tag - to look for (default is to return all elements) + to look for (default is to return all elements). .. method:: iterfind(match, namespaces=None) diff --git a/Doc/library/zlib.rst b/Doc/library/zlib.rst index b178fe10..58fc31b0 100644 --- a/Doc/library/zlib.rst +++ b/Doc/library/zlib.rst @@ -30,7 +30,7 @@ The available exception and functions in this module are: .. 
function:: adler32(data[, value]) - Computes a Adler-32 checksum of *data*. (An Adler-32 checksum is almost as + Computes an Adler-32 checksum of *data*. (An Adler-32 checksum is almost as reliable as a CRC32 but can be computed much more quickly.) If *value* is present, it is used as the starting value of the checksum; otherwise, a fixed default value is used. This allows computing a running checksum over the @@ -58,7 +58,7 @@ The available exception and functions in this module are: Raises the :exc:`error` exception if any error occurs. -.. function:: compressobj(level=-1, method=DEFLATED, wbits=15, memlevel=8, strategy=Z_DEFAULT_STRATEGY[, zdict]) +.. function:: compressobj(level=-1, method=DEFLATED, wbits=15, memLevel=8, strategy=Z_DEFAULT_STRATEGY[, zdict]) Returns a compression object, to be used for compressing data streams that won't fit into memory at once. @@ -75,9 +75,9 @@ The available exception and functions in this module are: should be an integer from ``8`` to ``15``. Higher values give better compression, but use more memory. - *memlevel* controls the amount of memory used for internal compression state. - Valid values range from ``1`` to ``9``. Higher values using more memory, - but are faster and produce smaller output. + The *memLevel* argument controls the amount of memory used for the + internal compression state. Valid values range from ``1`` to ``9``. + Higher values use more memory, but are faster and produce smaller output. *strategy* is used to tune the compression algorithm. Possible values are ``Z_DEFAULT_STRATEGY``, ``Z_FILTERED``, and ``Z_HUFFMAN_ONLY``. @@ -230,7 +230,7 @@ Decompression objects support the following methods and attributes: :meth:`decompress` method. Some of the input data may be preserved in internal buffers for later processing. - If the optional parameter *max_length* is supplied then the return value will be + If the optional parameter *max_length* is non-zero then the return value will be no longer than *max_length*. This may mean that not all of the compressed input can be processed; and unconsumed data will be stored in the attribute :attr:`unconsumed_tail`. This bytestring must be passed to a subsequent call to diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst index e1d71b81..764c4916 100644 --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -1320,7 +1320,7 @@ Basic customization object's :meth:`__hash__` must interoperate on builds of different bit sizes, be sure to check the width on all supported builds. An easy way to do this is with - ``python -c "import sys; print(sys.hash_info.width)"`` + ``python -c "import sys; print(sys.hash_info.width)"``. If a class does not define an :meth:`__eq__` method it should not define a :meth:`__hash__` operation either; if it defines :meth:`__eq__` but not @@ -1341,7 +1341,7 @@ Basic customization :meth:`__hash__` method of a class is ``None``, instances of the class will raise an appropriate :exc:`TypeError` when a program attempts to retrieve their hash value, and will also be correctly identified as unhashable when - checking ``isinstance(obj, collections.Hashable``). + checking ``isinstance(obj, collections.Hashable)``. 
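A hedged sketch of that behaviour (the class is illustrative)::

   import collections.abc

   class Point:
       def __init__(self, x, y):
           self.x, self.y = x, y

       # Defining __eq__ without defining __hash__ sets __hash__ to None.
       def __eq__(self, other):
           return isinstance(other, Point) and (self.x, self.y) == (other.x, other.y)

   p = Point(1, 2)
   print(isinstance(p, collections.abc.Hashable))   # False
   # hash(p) would raise TypeError: unhashable type: 'Point'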
If a class that overrides :meth:`__eq__` needs to retain the implementation of :meth:`__hash__` from a parent class, the interpreter must be told this @@ -2358,7 +2358,7 @@ An *asynchronous iterable* is able to call asynchronous code in its ``__aiter__`` implementation, and an *asynchronous iterator* can call asynchronous code in its ``__anext__`` method. -Asynchronous iterators can be used in a :keyword:`async for` statement. +Asynchronous iterators can be used in an :keyword:`async for` statement. .. method:: object.__aiter__(self) @@ -2393,7 +2393,7 @@ Asynchronous Context Managers An *asynchronous context manager* is a *context manager* that is able to suspend execution in its ``__aenter__`` and ``__aexit__`` methods. -Asynchronous context managers can be used in a :keyword:`async with` statement. +Asynchronous context managers can be used in an :keyword:`async with` statement. .. method:: object.__aenter__(self) diff --git a/Doc/reference/executionmodel.rst b/Doc/reference/executionmodel.rst index 5dfa0d2e..5f1ea92e 100644 --- a/Doc/reference/executionmodel.rst +++ b/Doc/reference/executionmodel.rst @@ -11,8 +11,8 @@ Execution model .. _prog_structure: -Structure of a programm -======================= +Structure of a program +====================== .. index:: block @@ -270,4 +270,3 @@ and :keyword:`raise` statement in section :ref:`raise`. .. [#] This limitation occurs because the code that is executed by these operations is not available at the time the module is compiled. - diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst index 3a51f977..99fa037b 100644 --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -371,7 +371,7 @@ the yield expression. It can be either set explicitly when raising (by returning a value from the sub-generator). .. versionchanged:: 3.3 - Added ``yield from `` to delegate control flow to a subiterator + Added ``yield from `` to delegate control flow to a subiterator. The parentheses may be omitted when the yield expression is the sole expression on the right hand side of an assignment statement. @@ -1036,10 +1036,6 @@ must be integers. .. _comparisons: -.. _is: -.. _is not: -.. _in: -.. _not in: Comparisons =========== @@ -1075,66 +1071,183 @@ Note that ``a op1 b op2 c`` doesn't imply any kind of comparison between *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal (though perhaps not pretty). +Value comparisons +----------------- + The operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare the -values of two objects. The objects need not have the same type. If both are -numbers, they are converted to a common type. Otherwise, the ``==`` and ``!=`` -operators *always* consider objects of different types to be unequal, while the -``<``, ``>``, ``>=`` and ``<=`` operators raise a :exc:`TypeError` when -comparing objects of different types that do not implement these operators for -the given pair of types. You can control comparison behavior of objects of -non-built-in types by defining rich comparison methods like :meth:`__gt__`, -described in section :ref:`customization`. - -Comparison of objects of the same type depends on the type: - -* Numbers are compared arithmetically. - -* The values :const:`float('NaN')` and :const:`Decimal('NaN')` are special. - They are identical to themselves, ``x is x`` but are not equal to themselves, - ``x != x``. Additionally, comparing any value to a not-a-number value +values of two objects. The objects do not need to have the same type. 
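A quick, hedged illustration of these defaults in an interactive session (the exact wording of the error message may vary between versions)::

   >>> 1 == 1.0            # numbers compare by mathematical value across types
   True
   >>> [1, 2] == (1, 2)    # unrelated types are simply unequal
   False
   >>> [1, 2] < (1, 2)     # ...and not orderable
   Traceback (most recent call last):
     ...
   TypeError: unorderable types: list() < tuple()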
+ +Chapter :ref:`objects` states that objects have a value (in addition to type +and identity). The value of an object is a rather abstract notion in Python: +For example, there is no canonical access method for an object's value. Also, +there is no requirement that the value of an object should be constructed in a +particular way, e.g. comprised of all its data attributes. Comparison operators +implement a particular notion of what the value of an object is. One can think +of them as defining the value of an object indirectly, by means of their +comparison implementation. + +Because all types are (direct or indirect) subtypes of :class:`object`, they +inherit the default comparison behavior from :class:`object`. Types can +customize their comparison behavior by implementing +:dfn:`rich comparison methods` like :meth:`__lt__`, described in +:ref:`customization`. + +The default behavior for equality comparison (``==`` and ``!=``) is based on +the identity of the objects. Hence, equality comparison of instances with the +same identity results in equality, and equality comparison of instances with +different identities results in inequality. A motivation for this default +behavior is the desire that all objects should be reflexive (i.e. ``x is y`` +implies ``x == y``). + +A default order comparison (``<``, ``>``, ``<=``, and ``>=``) is not provided; +an attempt raises :exc:`TypeError`. A motivation for this default behavior is +the lack of a similar invariant as for equality. + +The behavior of the default equality comparison, that instances with different +identities are always unequal, may be in contrast to what types will need that +have a sensible definition of object value and value-based equality. Such +types will need to customize their comparison behavior, and in fact, a number +of built-in types have done that. + +The following list describes the comparison behavior of the most important +built-in types. + +* Numbers of built-in numeric types (:ref:`typesnumeric`) and of the standard + library types :class:`fractions.Fraction` and :class:`decimal.Decimal` can be + compared within and across their types, with the restriction that complex + numbers do not support order comparison. Within the limits of the types + involved, they compare mathematically (algorithmically) correct without loss + of precision. + + The not-a-number values :const:`float('NaN')` and :const:`Decimal('NaN')` + are special. They are identical to themselves (``x is x`` is true) but + are not equal to themselves (``x == x`` is false). Additionally, + comparing any number to a not-a-number value will return ``False``. For example, both ``3 < float('NaN')`` and ``float('NaN') < 3`` will return ``False``. -* Bytes objects are compared lexicographically using the numeric values of their - elements. +* Binary sequences (instances of :class:`bytes` or :class:`bytearray`) can be + compared within and across their types. They compare lexicographically using + the numeric values of their elements. + +* Strings (instances of :class:`str`) compare lexicographically using the + numerical Unicode code points (the result of the built-in function + :func:`ord`) of their characters. [#]_ + + Strings and binary sequences cannot be directly compared. + +* Sequences (instances of :class:`tuple`, :class:`list`, or :class:`range`) can + be compared only within each of their types, with the restriction that ranges + do not support order comparison. 
Equality comparison across these types + results in unequality, and ordering comparison across these types raises + :exc:`TypeError`. + + Sequences compare lexicographically using comparison of corresponding + elements, whereby reflexivity of the elements is enforced. + + In enforcing reflexivity of elements, the comparison of collections assumes + that for a collection element ``x``, ``x == x`` is always true. Based on + that assumption, element identity is compared first, and element comparison + is performed only for distinct elements. This approach yields the same + result as a strict element comparison would, if the compared elements are + reflexive. For non-reflexive elements, the result is different than for + strict element comparison, and may be surprising: The non-reflexive + not-a-number values for example result in the following comparison behavior + when used in a list:: + + >>> nan = float('NaN') + >>> nan is nan + True + >>> nan == nan + False <-- the defined non-reflexive behavior of NaN + >>> [nan] == [nan] + True <-- list enforces reflexivity and tests identity first + + Lexicographical comparison between built-in collections works as follows: + + - For two collections to compare equal, they must be of the same type, have + the same length, and each pair of corresponding elements must compare + equal (for example, ``[1,2] == (1,2)`` is false because the type is not the + same). + + - Collections that support order comparison are ordered the same as their + first unequal elements (for example, ``[1,2,x] <= [1,2,y]`` has the same + value as ``x <= y``). If a corresponding element does not exist, the + shorter collection is ordered first (for example, ``[1,2] < [1,2,3]`` is + true). + +* Mappings (instances of :class:`dict`) compare equal if and only if they have + equal `(key, value)` pairs. Equality comparison of the keys and elements + enforces reflexivity. + + Order comparisons (``<``, ``>``, ``<=``, and ``>=``) raise :exc:`TypeError`. + +* Sets (instances of :class:`set` or :class:`frozenset`) can be compared within + and across their types. + + They define order + comparison operators to mean subset and superset tests. Those relations do + not define total orderings (for example, the two sets ``{1,2}`` and ``{2,3}`` + are not equal, nor subsets of one another, nor supersets of one + another). Accordingly, sets are not appropriate arguments for functions + which depend on total ordering (for example, :func:`min`, :func:`max`, and + :func:`sorted` produce undefined results given a list of sets as inputs). -* Strings are compared lexicographically using the numeric equivalents (the - result of the built-in function :func:`ord`) of their characters. [#]_ String - and bytes object can't be compared! + Comparison of sets enforces reflexivity of its elements. -* Tuples and lists are compared lexicographically using comparison of - corresponding elements. This means that to compare equal, each element must - compare equal and the two sequences must be of the same type and have the same - length. +* Most other built-in types have no comparison methods implemented, so they + inherit the default comparison behavior. - If not equal, the sequences are ordered the same as their first differing - elements. For example, ``[1,2,x] <= [1,2,y]`` has the same value as - ``x <= y``. If the corresponding element does not exist, the shorter - sequence is ordered first (for example, ``[1,2] < [1,2,3]``). 
+User-defined classes that customize their comparison behavior should follow +some consistency rules, if possible: -* Mappings (dictionaries) compare equal if and only if they have the same - ``(key, value)`` pairs. Order comparisons ``('<', '<=', '>=', '>')`` - raise :exc:`TypeError`. +* Equality comparison should be reflexive. + In other words, identical objects should compare equal: -* Sets and frozensets define comparison operators to mean subset and superset - tests. Those relations do not define total orderings (the two sets ``{1,2}`` - and ``{2,3}`` are not equal, nor subsets of one another, nor supersets of one - another). Accordingly, sets are not appropriate arguments for functions - which depend on total ordering. For example, :func:`min`, :func:`max`, and - :func:`sorted` produce undefined results given a list of sets as inputs. + ``x is y`` implies ``x == y`` + +* Comparison should be symmetric. + In other words, the following expressions should have the same result: + + ``x == y`` and ``y == x`` + + ``x != y`` and ``y != x`` + + ``x < y`` and ``y > x`` + + ``x <= y`` and ``y >= x`` + +* Comparison should be transitive. + The following (non-exhaustive) examples illustrate that: + + ``x > y and y > z`` implies ``x > z`` -* Most other objects of built-in types compare unequal unless they are the same - object; the choice whether one object is considered smaller or larger than - another one is made arbitrarily but consistently within one execution of a - program. + ``x < y and y <= z`` implies ``x < z`` -Comparison of objects of differing types depends on whether either of the -types provide explicit support for the comparison. Most numeric types can be -compared with one another. When cross-type comparison is not supported, the -comparison method returns ``NotImplemented``. +* Inverse comparison should result in the boolean negation. + In other words, the following expressions should have the same result: + ``x == y`` and ``not x != y`` + + ``x < y`` and ``not x >= y`` (for total ordering) + + ``x > y`` and ``not x <= y`` (for total ordering) + + The last two expressions apply to totally ordered collections (e.g. to + sequences, but not to sets or mappings). See also the + :func:`~functools.total_ordering` decorator. + +Python does not enforce these consistency rules. In fact, the not-a-number +values are an example for not following these rules. + + +.. _in: +.. _not in: .. _membership-test-details: +Membership test operations +-------------------------- + The operators :keyword:`in` and :keyword:`not in` test for membership. ``x in s`` evaluates to true if *x* is a member of *s*, and false otherwise. ``x not in s`` returns the negation of ``x in s``. All built-in sequences and set types @@ -1176,6 +1289,13 @@ The operator :keyword:`not in` is defined to have the inverse true value of operator: is not pair: identity; test + +.. _is: +.. _is not: + +Identity comparisons +-------------------- + The operators :keyword:`is` and :keyword:`is not` test for object identity: ``x is y`` is true if and only if *x* and *y* are the same object. ``x is not y`` yields the inverse truth value. [#]_ @@ -1405,12 +1525,24 @@ precedence and have a left-to-right chaining feature as described in the cases, Python returns the latter result, in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very close to ``x``. -.. [#] While comparisons between strings make sense at the byte level, they may - be counter-intuitive to users. 
For example, the strings ``"\u00C7"`` and - ``"\u0043\u0327"`` compare differently, even though they both represent the - same unicode character (LATIN CAPITAL LETTER C WITH CEDILLA). To compare - strings in a human recognizable way, compare using - :func:`unicodedata.normalize`. +.. [#] The Unicode standard distinguishes between :dfn:`code points` + (e.g. U+0041) and :dfn:`abstract characters` (e.g. "LATIN CAPITAL LETTER A"). + While most abstract characters in Unicode are only represented using one + code point, there is a number of abstract characters that can in addition be + represented using a sequence of more than one code point. For example, the + abstract character "LATIN CAPITAL LETTER C WITH CEDILLA" can be represented + as a single :dfn:`precomposed character` at code position U+00C7, or as a + sequence of a :dfn:`base character` at code position U+0043 (LATIN CAPITAL + LETTER C), followed by a :dfn:`combining character` at code position U+0327 + (COMBINING CEDILLA). + + The comparison operators on strings compare at the level of Unicode code + points. This may be counter-intuitive to humans. For example, + ``"\u00C7" == "\u0043\u0327"`` is ``False``, even though both strings + represent the same abstract character "LATIN CAPITAL LETTER C WITH CEDILLA". + + To compare strings at the level of abstract characters (that is, in a way + intuitive to humans), use :func:`unicodedata.normalize`. .. [#] Due to automatic garbage-collection, free lists, and the dynamic nature of descriptors, you may notice seemingly unusual behaviour in certain uses of diff --git a/Doc/tools/susp-ignored.csv b/Doc/tools/susp-ignored.csv index 7664f92a..2a95fca3 100644 --- a/Doc/tools/susp-ignored.csv +++ b/Doc/tools/susp-ignored.csv @@ -10,7 +10,6 @@ extending/embedding,,:numargs,"if(!PyArg_ParseTuple(args, "":numargs""))" extending/extending,,:myfunction,"PyArg_ParseTuple(args, ""D:myfunction"", &c);" extending/extending,,:set,"if (PyArg_ParseTuple(args, ""O:set_callback"", &temp)) {" extending/newtypes,,:call,"if (!PyArg_ParseTuple(args, ""sss:call"", &arg1, &arg2, &arg3)) {" -extending/windows,,:initspam,/export:initspam faq/programming,,:chr,">=4.0) or 1+f(xc,yc,x*x-y*y+xc,2.0*x*y+yc,k-1,f):f(xc,yc,x,y,k,f):chr(" faq/programming,,::,for x in sequence[::-1]: faq/programming,,:reduce,"print((lambda Ru,Ro,Iu,Io,IM,Sx,Sy:reduce(lambda x,y:x+y,map(lambda y," @@ -288,6 +287,11 @@ library/xml.etree.elementtree,332,:character,"for char in actor.findall('role:ch library/zipapp,31,:main,"$ python -m zipapp myapp -m ""myapp:main""" library/zipapp,82,:fn,"argument should have the form ""pkg.mod:fn"", where ""pkg.mod"" is a" library/zipapp,155,:callable,"""pkg.module:callable"" and the archive will be run by importing" -library/stdtypes,3767,::,>>> m[::2].tolist() -library/sys,1115,`,# ``wrapper`` creates a ``wrap(coro)`` coroutine: +library/stdtypes,,::,>>> m[::2].tolist() +library/sys,,`,# ``wrapper`` creates a ``wrap(coro)`` coroutine: tutorial/venv,77,:c7b9645a6f35,"Python 3.4.3+ (3.4:c7b9645a6f35+, May 22 2015, 09:31:25)" +whatsnew/3.5,,:root,'WARNING:root:warning\n' +whatsnew/3.5,,:warning,'WARNING:root:warning\n' +whatsnew/3.5,,::,>>> addr6 = ipaddress.IPv6Address('::1') +whatsnew/3.5,,:root,ERROR:root:exception +whatsnew/3.5,,:exception,ERROR:root:exception diff --git a/Doc/tutorial/appendix.rst b/Doc/tutorial/appendix.rst index 67262a1d..e04459b2 100644 --- a/Doc/tutorial/appendix.rst +++ b/Doc/tutorial/appendix.rst @@ -25,7 +25,7 @@ some cases of running out of memory. 
All error messages are written to the standard error stream; normal output from executed commands is written to standard output. -Typing the interrupt character (usually Control-C or DEL) to the primary or +Typing the interrupt character (usually :kbd:`Control-C` or :kbd:`Delete`) to the primary or secondary prompt cancels the input and returns to the primary prompt. [#]_ Typing an interrupt while a command is executing raises the :exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` diff --git a/Doc/tutorial/datastructures.rst b/Doc/tutorial/datastructures.rst index a2031ed8..0d514801 100644 --- a/Doc/tutorial/datastructures.rst +++ b/Doc/tutorial/datastructures.rst @@ -612,18 +612,18 @@ returns a new sorted list while leaving the source unaltered. :: orange pear -To change a sequence you are iterating over while inside the loop (for -example to duplicate certain items), it is recommended that you first make -a copy. Looping over a sequence does not implicitly make a copy. The slice -notation makes this especially convenient:: - - >>> words = ['cat', 'window', 'defenestrate'] - >>> for w in words[:]: # Loop over a slice copy of the entire list. - ... if len(w) > 6: - ... words.insert(0, w) +It is sometimes tempting to change a list while you are looping over it; +however, it is often simpler and safer to create a new list instead. :: + + >>> import math + >>> raw_data = [56.2, float('NaN'), 51.7, 55.3, 52.5, float('NaN'), 47.8] + >>> filtered_data = [] + >>> for value in raw_data: + ... if not math.isnan(value): + ... filtered_data.append(value) ... - >>> words - ['defenestrate', 'cat', 'window', 'defenestrate'] + >>> filtered_data + [56.2, 51.7, 55.3, 52.5, 47.8] .. _tut-conditions: diff --git a/Doc/tutorial/errors.rst b/Doc/tutorial/errors.rst index d048ae9e..351ee52b 100644 --- a/Doc/tutorial/errors.rst +++ b/Doc/tutorial/errors.rst @@ -336,7 +336,7 @@ example:: A *finally clause* is always executed before leaving the :keyword:`try` statement, whether an exception has occurred or not. When an exception has occurred in the :keyword:`try` clause and has not been handled by an -:keyword:`except` clause (or it has occurred in a :keyword:`except` or +:keyword:`except` clause (or it has occurred in an :keyword:`except` or :keyword:`else` clause), it is re-raised after the :keyword:`finally` clause has been executed. The :keyword:`finally` clause is also executed "on the way out" when any other clause of the :keyword:`try` statement is left via a diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst index d5789a62..e966085f 100644 --- a/Doc/tutorial/interpreter.rst +++ b/Doc/tutorial/interpreter.rst @@ -38,7 +38,7 @@ following command: ``quit()``. The interpreter's line-editing features include interactive editing, history substitution and code completion on systems that support readline. Perhaps the quickest check to see whether command line editing is supported is typing -Control-P to the first Python prompt you get. If it beeps, you have command +:kbd:`Control-P` to the first Python prompt you get. If it beeps, you have command line editing; see Appendix :ref:`tut-interacting` for an introduction to the keys. 
If nothing appears to happen, or if ``^P`` is echoed, command line editing isn't available; you'll only be able to use backspace to remove diff --git a/Doc/tutorial/introduction.rst b/Doc/tutorial/introduction.rst index 531d06b4..c5c13438 100644 --- a/Doc/tutorial/introduction.rst +++ b/Doc/tutorial/introduction.rst @@ -303,7 +303,7 @@ For non-negative indices, the length of a slice is the difference of the indices, if both are within bounds. For example, the length of ``word[1:3]`` is 2. -Attempting to use a index that is too large will result in an error:: +Attempting to use an index that is too large will result in an error:: >>> word[42] # the word only has 6 characters Traceback (most recent call last): diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst index 701c6241..c7210a01 100644 --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -41,7 +41,7 @@ additional methods of invocation: * When called with standard input connected to a tty device, it prompts for commands and executes them until an EOF (an end-of-file character, you can - produce that with *Ctrl-D* on UNIX or *Ctrl-Z, Enter* on Windows) is read. + produce that with :kbd:`Ctrl-D` on UNIX or :kbd:`Ctrl-Z, Enter` on Windows) is read. * When called with a file name argument or with a file as standard input, it reads and executes a script from that file. * When called with a directory name argument, it reads and executes an diff --git a/Doc/using/mac.rst b/Doc/using/mac.rst index ede864d7..ef091e52 100644 --- a/Doc/using/mac.rst +++ b/Doc/using/mac.rst @@ -102,8 +102,9 @@ Configuration Python on OS X honors all standard Unix environment variables such as :envvar:`PYTHONPATH`, but setting these variables for programs started from the Finder is non-standard as the Finder does not read your :file:`.profile` or -:file:`.cshrc` at startup. You need to create a file :file:`~ -/.MacOSX/environment.plist`. See Apple's Technical Document QA1067 for details. +:file:`.cshrc` at startup. You need to create a file +:file:`~/.MacOSX/environment.plist`. See Apple's Technical Document QA1067 for +details. For more information on installation Python packages in MacPython, see section :ref:`mac-package-manager`. diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst index 5c19b81a..2116c106 100644 --- a/Doc/using/windows.rst +++ b/Doc/using/windows.rst @@ -77,8 +77,8 @@ machines without user interaction. These options may also be set without suppressing the UI in order to change some of the defaults. To completely hide the installer UI and install Python silently, pass the -``/quiet`` (``/q``) option. To skip past the user interaction but still display -progress and errors, pass the ``/passive`` (``/p``) option. The ``/uninstall`` +``/quiet`` option. To skip past the user interaction but still display +progress and errors, pass the ``/passive`` option. The ``/uninstall`` option may be passed to immediately begin removing Python - no prompt will be displayed. @@ -89,7 +89,7 @@ of available options is shown below. +---------------------------+--------------------------------------+--------------------------+ | Name | Description | Default | +===========================+======================================+==========================+ -| InstallAllUsers | Perform a system-wide installation. | 1 | +| InstallAllUsers | Perform a system-wide installation. 
| 0 | +---------------------------+--------------------------------------+--------------------------+ | TargetDir | The installation directory | Selected based on | | | | InstallAllUsers | diff --git a/Doc/whatsnew/2.0.rst b/Doc/whatsnew/2.0.rst index 2c952acd..10bb29ee 100644 --- a/Doc/whatsnew/2.0.rst +++ b/Doc/whatsnew/2.0.rst @@ -1174,8 +1174,8 @@ partial list: * In the editor window, there is now a line/column bar at the bottom. -* Three new keystroke commands: Check module (Alt-F5), Import module (F5) and - Run script (Ctrl-F5). +* Three new keystroke commands: Check module (:kbd:`Alt-F5`), Import module (:kbd:`F5`) and + Run script (:kbd:`Ctrl-F5`). .. ====================================================================== diff --git a/Doc/whatsnew/2.1.rst b/Doc/whatsnew/2.1.rst index ff156622..6de5bf57 100644 --- a/Doc/whatsnew/2.1.rst +++ b/Doc/whatsnew/2.1.rst @@ -555,7 +555,7 @@ will include metadata, making it possible to build automated cataloguing systems and experiment with them. With the result experience, perhaps it'll be possible to design a really good catalog and then build support for it into Python 2.2. For example, the Distutils :command:`sdist` and :command:`bdist_\*` commands -could support a ``upload`` option that would automatically upload your +could support an ``upload`` option that would automatically upload your package to a catalog server. You can start creating packages containing :file:`PKG-INFO` even if you're not diff --git a/Doc/whatsnew/2.3.rst b/Doc/whatsnew/2.3.rst index f478c090..9d99074f 100644 --- a/Doc/whatsnew/2.3.rst +++ b/Doc/whatsnew/2.3.rst @@ -411,7 +411,7 @@ PEP 279: enumerate() A new built-in function, :func:`enumerate`, will make certain loops a bit clearer. ``enumerate(thing)``, where *thing* is either an iterator or a -sequence, returns a iterator that will return ``(0, thing[0])``, ``(1, +sequence, returns an iterator that will return ``(0, thing[0])``, ``(1, thing[1])``, ``(2, thing[2])``, and so forth. A common idiom to change every element of a list looks like this:: diff --git a/Doc/whatsnew/2.5.rst b/Doc/whatsnew/2.5.rst index f8f7ca04..cb92e08d 100644 --- a/Doc/whatsnew/2.5.rst +++ b/Doc/whatsnew/2.5.rst @@ -827,7 +827,7 @@ inheritance relationships are:: This rearrangement was done because people often want to catch all exceptions that indicate program errors. :exc:`KeyboardInterrupt` and :exc:`SystemExit` aren't errors, though, and usually represent an explicit action such as the user -hitting Control-C or code calling :func:`sys.exit`. A bare ``except:`` will +hitting :kbd:`Control-C` or code calling :func:`sys.exit`. A bare ``except:`` will catch all exceptions, so you commonly need to list :exc:`KeyboardInterrupt` and :exc:`SystemExit` in order to re-raise them. The usual pattern is:: diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst index ed1446c2..3966cb43 100644 --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -1118,7 +1118,7 @@ changes, or look through the Subversion logs for all the details. (Fixed by Daniel Stutzbach; :issue:`8729`.) * Constructors for the parsing classes in the :mod:`ConfigParser` module now - take a *allow_no_value* parameter, defaulting to false; if true, + take an *allow_no_value* parameter, defaulting to false; if true, options without values will be allowed. For example:: >>> import ConfigParser, StringIO @@ -2320,7 +2320,7 @@ Port-Specific Changes: Windows * The :func:`os.kill` function now works on Windows. 
The signal value can be the constants :const:`CTRL_C_EVENT`, :const:`CTRL_BREAK_EVENT`, or any integer. The first two constants - will send Control-C and Control-Break keystroke events to + will send :kbd:`Control-C` and :kbd:`Control-Break` keystroke events to subprocesses; any other value will use the :c:func:`TerminateProcess` API. (Contributed by Miki Tebeka; :issue:`1220212`.) diff --git a/Doc/whatsnew/3.1.rst b/Doc/whatsnew/3.1.rst index f272da4d..c1a16879 100644 --- a/Doc/whatsnew/3.1.rst +++ b/Doc/whatsnew/3.1.rst @@ -336,7 +336,7 @@ New, Improved, and Deprecated Modules (Contributed by David Laban; :issue:`4739`.) * The :mod:`unittest` module now supports skipping individual tests or classes - of tests. And it supports marking a test as a expected failure, a test that + of tests. And it supports marking a test as an expected failure, a test that is known to be broken, but shouldn't be counted as a failure on a TestResult:: diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst index 5171f3c1..58225045 100644 --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -788,7 +788,7 @@ functools :issue:`8814`.) * To help write classes with rich comparison methods, a new decorator - :func:`functools.total_ordering` will use a existing equality and inequality + :func:`functools.total_ordering` will use existing equality and inequality methods to fill in the remaining methods. For example, supplying *__eq__* and *__lt__* will enable @@ -1399,7 +1399,7 @@ Aides and Brian Curtin in :issue:`9962`, :issue:`1675951`, :issue:`7471` and Also, the :class:`zipfile.ZipExtFile` class was reworked internally to represent files stored inside an archive. The new implementation is significantly faster -and can be wrapped in a :class:`io.BufferedReader` object for more speedups. It +and can be wrapped in an :class:`io.BufferedReader` object for more speedups. It also solves an issue where interleaved calls to *read* and *readline* gave the wrong results. diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst index 1d4ce723..094eff87 100644 --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -1421,7 +1421,7 @@ can be used to directly manage when the accumlated headers are sent. :class:`http.client.HTTPResponse` now has a :meth:`~http.client.HTTPResponse.readinto` method, which means it can be used -as a :class:`io.RawIOBase` class. (Contributed by John Kuhn in +as an :class:`io.RawIOBase` class. (Contributed by John Kuhn in :issue:`13464`.) @@ -2155,7 +2155,7 @@ Major performance enhancements have been added: * encode an ASCII string to UTF-8 doesn't need to encode characters anymore, the UTF-8 representation is shared with the ASCII representation * the UTF-8 encoder has been optimized - * repeating a single ASCII letter and getting a substring of a ASCII strings + * repeating a single ASCII letter and getting a substring of an ASCII string is 4 times faster * UTF-8 is now 2x to 4x faster. UTF-16 encoding is now up to 10x faster. diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst index 5176b9a7..7d9bc0d8 100644 --- a/Doc/whatsnew/3.4.rst +++ b/Doc/whatsnew/3.4.rst @@ -974,7 +974,7 @@ Since idlelib implements the IDLE shell and editor and is not intended for import by other programs, it gets improvements with every release. See :file:`Lib/idlelib/NEWS.txt` for a cumulative list of changes since 3.3.0, as well as changes made in future 3.4.x releases. This file is also available -from the IDLE Help -> About Idle dialog. +from the IDLE :menuselection:`Help --> About IDLE` dialog. 
importlib @@ -2076,7 +2076,7 @@ using ``-Wd``). Deprecations in the Python API ------------------------------ -* As mentioned in :ref:`whatsnew-pep-451`, a number of :mod:`importilb` +* As mentioned in :ref:`whatsnew-pep-451`, a number of :mod:`importlib` methods and functions are deprecated: :meth:`importlib.find_loader` is replaced by :func:`importlib.util.find_spec`; :meth:`importlib.machinery.PathFinder.find_module` is replaced by diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst index 30aed1e9..73c851dc 100644 --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -2,9 +2,7 @@ What's New In Python 3.5 **************************** -:Release: |release| -:Date: |today| -:Editors: Elvis Pranskevichus , Yury Selivanov +:Editors: Elvis Pranskevichus , Yury Selivanov .. Rules for maintenance: @@ -46,11 +44,10 @@ This saves the maintainer the effort of going through the Mercurial log when researching a change. -Python 3.5 was released on September 13, 2015. - This article explains the new features in Python 3.5, compared to 3.4. -For full details, see the -`changelog `_. +Python 3.5 was released on September 13, 2015.  See the +`changelog `_ for a full +list of changes. .. seealso:: @@ -60,82 +57,81 @@ For full details, see the Summary -- Release highlights ============================= -.. This section singles out the most important changes in Python 3.5. - Brevity is key. - New syntax features: -* :pep:`492`, coroutines with async and await syntax. -* :pep:`465`, a new matrix multiplication operator: ``a @ b``. -* :pep:`448`, additional unpacking generalizations. +* :ref:`PEP 492 `, coroutines with async and await syntax. +* :ref:`PEP 465 `, a new matrix multiplication operator: ``a @ b``. +* :ref:`PEP 448 `, additional unpacking generalizations. + New library modules: -* :mod:`zipapp`: :ref:`Improving Python ZIP Application Support - ` (:pep:`441`). +* :mod:`typing`: :ref:`PEP 484 -- Type Hints `. +* :mod:`zipapp`: :ref:`PEP 441 Improving Python ZIP Application Support + `. + New built-in features: -* ``bytes % args``, ``bytearray % args``: :pep:`461` - Adding ``%`` formatting - to bytes and bytearray. +* ``bytes % args``, ``bytearray % args``: :ref:`PEP 461 ` -- + Adding ``%`` formatting to bytes and bytearray. -* ``b'\xf0\x9f\x90\x8d'.hex()``, ``bytearray(b'\xf0\x9f\x90\x8d').hex()``, - ``memoryview(b'\xf0\x9f\x90\x8d').hex()``: :issue:`9951` - A ``hex`` method - has been added to bytes, bytearray, and memoryview. +* New :meth:`bytes.hex`, :meth:`bytearray.hex` and :meth:`memoryview.hex` + methods. (Contributed by Arnon Yaari in :issue:`9951`.) -* :class:`memoryview` (including multi-dimensional) now supports tuple indexing. +* :class:`memoryview` now supports tuple indexing (including multi-dimensional). (Contributed by Antoine Pitrou in :issue:`23632`.) -* Generators have new ``gi_yieldfrom`` attribute, which returns the +* Generators have a new ``gi_yieldfrom`` attribute, which returns the object being iterated by ``yield from`` expressions. (Contributed by Benno Leslie and Yury Selivanov in :issue:`24450`.) -* New :exc:`RecursionError` exception. (Contributed by Georg Brandl +* A new :exc:`RecursionError` exception is now raised when maximum + recursion depth is reached. (Contributed by Georg Brandl in :issue:`19235`.) -* New :exc:`StopAsyncIteration` exception. (Contributed by - Yury Selivanov in :issue:`24017`. See also :pep:`492`.) 
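Two of the new built-in features listed above, sketched as an interactive session::

   >>> b'\xf0\x9f\x90\x8d'.hex()
   'f09f908d'
   >>> m = memoryview(bytes(range(12))).cast('B', shape=[3, 4])
   >>> m[2, 1]              # multi-dimensional tuple indexing
   9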
CPython implementation improvements: * When the ``LC_TYPE`` locale is the POSIX locale (``C`` locale), - :py:data:`sys.stdin` and :py:data:`sys.stdout` are now using the + :py:data:`sys.stdin` and :py:data:`sys.stdout` now use the ``surrogateescape`` error handler, instead of the ``strict`` error handler. (Contributed by Victor Stinner in :issue:`19977`.) * ``.pyo`` files are no longer used and have been replaced by a more flexible - scheme that inclides the optimization level explicitly in ``.pyc`` name. - (:pep:`488`) + scheme that includes the optimization level explicitly in ``.pyc`` name. + (See :ref:`PEP 488 overview `.) * Builtin and extension modules are now initialized in a multi-phase process, - which is similar to how Python modules are loaded. (:pep:`489`). + which is similar to how Python modules are loaded. + (See :ref:`PEP 489 overview `.) + -Significantly Improved Library Modules: +Significant improvements in the standard library: -* :class:`collections.OrderedDict` is now implemented in C, which makes it - 4 to 100 times faster. (Contributed by Eric Snow in :issue:`16991`.) +* :class:`collections.OrderedDict` is now + :ref:`implemented in C `, which makes it + 4 to 100 times faster. -* You may now pass bytes to the :mod:`tempfile` module's APIs and it will - return the temporary pathname as :class:`bytes` instead of :class:`str`. - It also accepts a value of ``None`` on parameters where only str was - accepted in the past to do the right thing based on the types of the - other inputs. Two functions, :func:`gettempdirb` and - :func:`gettempprefixb`, have been added to go along with this. - This behavior matches that of the :mod:`os` APIs. - (Contributed by Gregory P. Smith in :issue:`24230`.) +* The :mod:`ssl` module gained + :ref:`support for Memory BIO `, which decouples SSL + protocol handling from network IO. -* :mod:`ssl` module gained support for Memory BIO, which decouples SSL - protocol handling from network IO. (Contributed by Geert Jansen in - :issue:`21965`.) +* The new :func:`os.scandir` function provides a + :ref:`better and significantly faster way ` + of directory traversal. -* :mod:`traceback` has new lightweight and convenient to work with - classes :class:`~traceback.TracebackException`, - :class:`~traceback.StackSummary`, and :class:`~traceback.FrameSummary`. - (Contributed by Robert Collins in :issue:`17911`.) +* :func:`functools.lru_cache` has been mostly + :ref:`reimplemented in C `, yielding much better + performance. + +* The new :func:`subprocess.run` function provides a + :ref:`streamlined way to run subprocesses `. + +* The :mod:`traceback` module has been significantly + :ref:`enhanced ` for improved + performance and developer convenience. -* Most of :func:`functools.lru_cache` machinery is now implemented in C. - (Contributed by Matt Joiner, Alexey Kachayev, and Serhiy Storchaka - in :issue:`14373`.) Security improvements: @@ -148,6 +144,7 @@ Security improvements: against potential injection attacks. (Contributed by Antoine Pitrou in :issue:`22796`.) + Windows improvements: * A new installer for Windows has replaced the old MSI. @@ -156,6 +153,7 @@ Windows improvements: * Windows builds now use Microsoft Visual C++ 14.0, and extension modules should use the same. + Please read on for a comprehensive list of user-facing changes, including many other smaller improvements, CPython optimizations, deprecations, and potential porting issues. @@ -188,7 +186,7 @@ defining the :meth:`__await__` method. 
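Before the fuller HTTP client example that follows, a minimal, hedged sketch of the new syntax (the coroutine below is made up for illustration)::

   import asyncio

   async def double(x):
       await asyncio.sleep(0.1)     # any awaitable may follow 'await'
       return x * 2

   loop = asyncio.get_event_loop()
   print(loop.run_until_complete(double(21)))   # 42
   loop.close()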
PEP 492 also adds :keyword:`async for` statement for convenient iteration over asynchronous iterables. -An example of a simple HTTP client written using the new syntax:: +An example of a rudimentary HTTP client written using the new syntax:: import asyncio @@ -234,7 +232,7 @@ context managers. The following script:: finally: loop.close() -will print:: +will output:: coro 2: waiting for lock coro 2: holding the lock @@ -247,7 +245,7 @@ Note that both :keyword:`async for` and :keyword:`async with` can only be used inside a coroutine function declared with :keyword:`async def`. Coroutine functions are intended to be run inside a compatible event loop, -such as :class:`asyncio.Loop`. +such as the :ref:`asyncio loop `. .. seealso:: @@ -278,7 +276,7 @@ instead of:: S = dot((dot(H, beta) - r).T, dot(inv(dot(dot(H, V), H.T)), dot(H, beta) - r)) -An upcoming release of NumPy 1.10 will add support for the new operator:: +NumPy 1.10 has support for the new operator:: >>> import numpy @@ -348,22 +346,29 @@ unpackings:: PEP 461 - % formatting support for bytes and bytearray ------------------------------------------------------ -PEP 461 adds % formatting to :class:`bytes` and :class:`bytearray`, aiding in -handling data that is a mixture of binary and ASCII compatible text. This -feature also eases porting such code from Python 2. +:pep:`461` adds support for the ``%`` +:ref:`interpolation operator ` to :class:`bytes` +and :class:`bytearray`. + +While interpolation is usually thought of as a string operation, there are +cases where interpolation on ``bytes`` or ``bytearrays`` makes sense, and the +work needed to make up for this missing functionality detracts from the +overall readability of the code. This issue is particularly important when +dealing with wire format protocols, which are often a mixture of binary +and ASCII compatible text. Examples:: - >>> b'Hello %s!' % b'World' + >>> b'Hello %b!' % b'World' b'Hello World!' >>> b'x=%i y=%f' % (1, 2.5) b'x=1 y=2.500000' -Unicode is not allowed for ``%s``, but it is accepted by ``%a`` (equivalent of +Unicode is not allowed for ``%b``, but it is accepted by ``%a`` (equivalent of ``repr(obj).encode('ascii', 'backslashreplace')``):: - >>> b'Hello %s!' % 'World' + >>> b'Hello %b!' % 'World' Traceback (most recent call last): File "", line 1, in TypeError: %b requires bytes, or an object that implements __bytes__, not 'str' @@ -371,6 +376,9 @@ Unicode is not allowed for ``%s``, but it is accepted by ``%a`` (equivalent of >>> b'price: %a' % '10€' b"price: '10\\u20ac'" +Note that ``%s`` and ``%r`` conversion types, although supported, should +only be used in codebases that need compatibility with Python 2. + .. seealso:: :pep:`461` -- Adding % formatting to bytes and bytearray @@ -383,9 +391,17 @@ Unicode is not allowed for ``%s``, but it is accepted by ``%a`` (equivalent of PEP 484 - Type Hints -------------------- -This PEP introduces a provisional module to provide these standard -definitions and tools, along with some conventions for situations -where annotations are not available. +Function annotation syntax has been a Python feature since version 3.0 +(:pep:`3107`), however the semantics of annotations has been left undefined. + +Experience has shown that the majority of function annotation +uses were to provide type hints to function parameters and return values. It +became evident that it would be beneficial for Python users, if the +standard library included the base definitions and tools for type annotations. 
+ +:pep:`484` introduces a :term:`provisional module ` to +provide these standard definitions and tools, along with some conventions +for situations where annotations are not available. For example, here is a simple function whose argument and return type are declared in the annotations:: @@ -393,9 +409,15 @@ are declared in the annotations:: def greeting(name: str) -> str: return 'Hello ' + name +While these annotations are available at runtime through the usual +:attr:`__annotations__` attribute, *no automatic type checking happens at +runtime*. Instead, it is assumed that a separate off-line type checker +(e.g. `mypy `_) will be used for on-demand +source code analysis. + The type system supports unions, generic types, and a special type -named ``Any`` which is consistent with (i.e. assignable to and from) all -types. +named :class:`~typing.Any` which is consistent with (i.e. assignable to +and from) all types. .. seealso:: @@ -403,6 +425,8 @@ types. * :pep:`484` -- Type Hints PEP written by Guido van Rossum, Jukka Lehtosalo, and Łukasz Langa; implemented by Guido van Rossum. + * :pep:`483` -- The Theory of Type Hints + PEP written by Guido van Rossum .. _whatsnew-pep-471: @@ -412,8 +436,23 @@ PEP 471 - os.scandir() function -- a better and faster directory iterator :pep:`471` adds a new directory iteration function, :func:`os.scandir`, to the standard library. Additionally, :func:`os.walk` is now -implemented using :func:`os.scandir`, which speeds it up by 3-5 times -on POSIX systems and by 7-20 times on Windows systems. +implemented using ``scandir``, which makes it 3 to 5 times faster +on POSIX systems and 7 to 20 times faster on Windows systems. This is +largely achieved by greatly reducing the number of calls to :func:`os.stat` +required to walk a directory tree. + +Additionally, ``scandir`` returns an iterator, as opposed to returning +a list of file names, which improves memory efficiency when iterating +over very large directories. + +The following example shows a simple use of :func:`os.scandir` to display all +the files (excluding directories) in the given *path* that don't start with +``'.'``. The :meth:`entry.is_file() ` call will generally +not make an additional system call:: + + for entry in os.scandir(path): + if not entry.name.startswith('.') and entry.is_file(): + print(entry.name) .. seealso:: @@ -426,45 +465,71 @@ on POSIX systems and by 7-20 times on Windows systems. PEP 475: Retry system calls failing with EINTR ---------------------------------------------- -:pep:`475` adds support for automatic retry of system calls failing with -:py:data:`~errno.EINTR`: this means that user code doesn't have to deal with -EINTR or :exc:`InterruptedError` manually, and should make it more robust -against asynchronous signal reception. +An :py:data:`errno.EINTR` error code is returned whenever a system call, that +is waiting for I/O, is interrupted by a signal. Previously, Python would +raise :exc:`InterruptedError` in such cases. This meant that, when writing a +Python application, the developer had two choices: -Examples of functions which are now retried when interrupted by a signal -instead of raising :exc:`InterruptedError` if the Python signal handler does -not raise an exception: +#. Ignore the ``InterruptedError``. +#. Handle the ``InterruptedError`` and attempt to restart the interrupted + system call at every call site. -* :func:`open`, :func:`os.open`, :func:`io.open`; +The first option makes an application fail intermittently. 
+The second option adds a large amount of boilerplate that makes the +code nearly unreadable. Compare:: + + print("Hello World") + +and:: + + while True: + try: + print("Hello World") + break + except InterruptedError: + continue + +:pep:`475` implements automatic retry of system calls on +``EINTR``. This removes the burden of dealing with ``EINTR`` +or :exc:`InterruptedError` in user code in most situations and makes +Python programs, including the standard library, more robust. Note that +the system call is only retried if the signal handler does not raise an +exception. + +Below is a list of functions which are now retried when interrupted +by a signal: + +* :func:`open` and :func:`io.open`; * functions of the :mod:`faulthandler` module; * :mod:`os` functions: :func:`~os.fchdir`, :func:`~os.fchmod`, :func:`~os.fchown`, :func:`~os.fdatasync`, :func:`~os.fstat`, :func:`~os.fstatvfs`, :func:`~os.fsync`, :func:`~os.ftruncate`, - :func:`~os.mkfifo`, :func:`~os.mknod`, :func:`~os.posix_fadvise`, - :func:`~os.posix_fallocate`, :func:`~os.pread`, :func:`~os.pwrite`, - :func:`~os.read`, :func:`~os.readv`, :func:`~os.sendfile`, + :func:`~os.mkfifo`, :func:`~os.mknod`, :func:`~os.open`, + :func:`~os.posix_fadvise`, :func:`~os.posix_fallocate`, :func:`~os.pread`, + :func:`~os.pwrite`, :func:`~os.read`, :func:`~os.readv`, :func:`~os.sendfile`, :func:`~os.wait3`, :func:`~os.wait4`, :func:`~os.wait`, :func:`~os.waitid`, :func:`~os.waitpid`, :func:`~os.write`, :func:`~os.writev`; * special cases: :func:`os.close` and :func:`os.dup2` now ignore - :py:data:`~errno.EINTR` error, the syscall is not retried (see the PEP + :py:data:`~errno.EINTR` errors; the syscall is not retried (see the PEP for the rationale); -* :mod:`select` functions: :func:`~select.devpoll.poll`, - :func:`~select.epoll.poll`, :func:`~select.kqueue.control`, - :func:`~select.poll.poll`, :func:`~select.select`; +* :mod:`select` functions: :func:`devpoll.poll() `, + :func:`epoll.poll() `, + :func:`kqueue.control() `, + :func:`poll.poll() `, :func:`~select.select`; -* :func:`socket.socket` methods: :meth:`~socket.socket.accept`, +* methods of the :class:`~socket.socket` class: :meth:`~socket.socket.accept`, :meth:`~socket.socket.connect` (except for non-blocking sockets), :meth:`~socket.socket.recv`, :meth:`~socket.socket.recvfrom`, :meth:`~socket.socket.recvmsg`, :meth:`~socket.socket.send`, :meth:`~socket.socket.sendall`, :meth:`~socket.socket.sendmsg`, :meth:`~socket.socket.sendto`; -* :func:`signal.sigtimedwait`, :func:`signal.sigwaitinfo`; +* :func:`signal.sigtimedwait` and :func:`signal.sigwaitinfo`; * :func:`time.sleep`. @@ -472,7 +537,7 @@ not raise an exception: :pep:`475` -- Retry system calls failing with EINTR PEP and implementation written by Charles-François Natali and - Victor Stinner, with the help of Antoine Pitrou (the french connection). + Victor Stinner, with the help of Antoine Pitrou (the French connection). .. _whatsnew-pep-479: @@ -480,15 +545,42 @@ not raise an exception: PEP 479: Change StopIteration handling inside generators -------------------------------------------------------- -:pep:`479` changes the behavior of generators: when a :exc:`StopIteration` +The interaction of generators and :exc:`StopIteration` in Python 3.4 and +earlier was sometimes surprising, and could conceal obscure bugs. Previously, +``StopIteration`` raised accidentally inside a generator function was +interpreted as the end of the iteration by the loop construct driving the +generator. 
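The following sketch (a hypothetical pairing helper) shows how easily this
could mask a bug under the old semantics: the stray ``StopIteration`` from the
second ``next()`` call simply ends the generator, and the trailing element is
dropped without any error::

    def pairs(iterable):
        it = iter(iterable)
        while True:
            a = next(it)    # unguarded next() calls
            b = next(it)
            yield a, b

    print(list(pairs([1, 2, 3])))   # -> [(1, 2)]; the 3 is silently lost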
+ +:pep:`479` changes the behavior of generators: when a ``StopIteration`` exception is raised inside a generator, it is replaced with a -:exc:`RuntimeError`. To enable the feature a ``__future__`` import should -be used:: +:exc:`RuntimeError` before it exits the generator frame. The main goal of +this change is to ease debugging in the situation where an unguarded +:func:`next` call raises ``StopIteration`` and causes the iteration controlled +by the generator to terminate silently. This is particularly pernicious in +combination with the ``yield from`` construct. + +This is a backwards incompatible change, so to enable the new behavior, +a :term:`__future__` import is necessary:: - from __future__ import generator_stop + >>> from __future__ import generator_stop + + >>> def gen(): + ... next(iter([])) + ... yield + ... + >>> next(gen()) + Traceback (most recent call last): + File "", line 2, in gen + StopIteration + + The above exception was the direct cause of the following exception: + + Traceback (most recent call last): + File "", line 1, in + RuntimeError: generator raised StopIteration Without a ``__future__`` import, a :exc:`PendingDeprecationWarning` will be -raised. +raised whenever a ``StopIteration`` exception is raised inside a generator. .. seealso:: @@ -497,6 +589,44 @@ raised. Chris Angelico, Yury Selivanov and Nick Coghlan. +.. _whatsnew-pep-485: + +PEP 485: A function for testing approximate equality +---------------------------------------------------- + +:pep:`485` adds the :func:`math.isclose` and :func:`cmath.isclose` +functions which tell whether two values are approximately equal or +"close" to each other. Whether or not two values are considered +close is determined according to given absolute and relative tolerances. +Relative tolerance is the maximum allowed difference between ``isclose`` +arguments, relative to the larger absolute value:: + + >>> import math + >>> a = 5.0 + >>> b = 4.99998 + >>> math.isclose(a, b, rel_tol=1e-5) + True + >>> math.isclose(a, b, rel_tol=1e-6) + False + +It is also possible to compare two values using absolute tolerance, which +must be a non-negative value:: + + >>> import math + >>> a = 5.0 + >>> b = 4.99998 + >>> math.isclose(a, b, abs_tol=0.00003) + True + >>> math.isclose(a, b, abs_tol=0.00001) + False + +.. seealso:: + + :pep:`485` -- A function for testing approximate equality + PEP written by Christopher Barker; implemented by Chris Barker and + Tal Einat. + + .. _whatsnew-pep-486: PEP 486: Make the Python Launcher aware of virtual environments @@ -554,45 +684,25 @@ rather than being restricted to ASCII. implemented by Petr Viktorin. -.. _whatsnew-pep-485: - -PEP 485: A function for testing approximate equality ----------------------------------------------------- - -:pep:`485` adds the :func:`math.isclose` and :func:`cmath.isclose` -functions which tell whether two values are approximately equal or -"close" to each other. Whether or not two values are considered -close is determined according to given absolute and relative tolerances. - -.. seealso:: - - :pep:`485` -- A function for testing approximate equality - PEP written by Christopher Barker; implemented by Chris Barker and - Tal Einat. - - Other Language Changes ====================== Some smaller changes made to the core Python language are: * Added the ``"namereplace"`` error handlers. The ``"backslashreplace"`` - error handlers now works with decoding and translating. + error handlers now work with decoding and translating. 
(Contributed by Serhiy Storchaka in :issue:`19676` and :issue:`22286`.) * The :option:`-b` option now affects comparisons of :class:`bytes` with :class:`int`. (Contributed by Serhiy Storchaka in :issue:`23681`.) -* New Kazakh :ref:`codec ` ``kz1048``. (Contributed by - Serhiy Storchaka in :issue:`22682`.) +* New Kazakh ``kz1048`` and Tajik ``koi8_t`` :ref:`codecs `. + (Contributed by Serhiy Storchaka in :issue:`22682` and :issue:`22681`.) * Property docstrings are now writable. This is especially useful for :func:`collections.namedtuple` docstrings. (Contributed by Berker Peksag in :issue:`24064`.) -* New Tajik :ref:`codec ` ``koi8_t``. (Contributed by - Serhiy Storchaka in :issue:`22681`.) - * Circular imports involving relative imports are now supported. (Contributed by Brett Cannon and Antoine Pitrou in :issue:`17636`.) @@ -600,6 +710,13 @@ Some smaller changes made to the core Python language are: New Modules =========== +typing +------ + +The new :mod:`typing` :term:`provisional ` module +provides standard definitions and tools for function type annotations. +See :ref:`Type Hints ` for more information. + .. _whatsnew-zipapp: zipapp @@ -637,6 +754,63 @@ The :class:`~argparse.ArgumentParser` class now allows to disable Steven Bethard, paul j3 and Daniel Eriksson in :issue:`14910`.) +asyncio +------- + +Since the :mod:`asyncio` module is :term:`provisional `, +all changes introduced in Python 3.5 have also been backported to Python 3.4.x. + +Notable changes in the :mod:`asyncio` module since Python 3.4.0: + +* New debugging APIs: :meth:`loop.set_debug() ` + and :meth:`loop.get_debug() ` methods. + (Contributed by Victor Stinner.) + +* The proactor event loop now supports SSL. + (Contributed by Antoine Pitrou and Victor Stinner in :issue:`22560`.) + +* A new :meth:`loop.is_closed() ` method to + check if the event loop is closed. + (Contributed by Victor Stinner in :issue:`21326`.) + +* A new :meth:`loop.create_task() ` + to conveniently create and schedule a new :class:`~asyncio.Task` + for a coroutine. The ``create_task`` method is also used by all + asyncio functions that wrap coroutines into tasks, such as + :func:`asyncio.wait`, :func:`asyncio.gather`, etc. + (Contributed by Victor Stinner.) + +* A new :meth:`transport.get_write_buffer_limits() ` + method to inquire for *high-* and *low-* water limits of the flow + control. + (Contributed by Victor Stinner.) + +* The :func:`~asyncio.async` function is deprecated in favor of + :func:`~asyncio.ensure_future`. + (Contributed by Yury Selivanov.) + +* New :meth:`loop.set_task_factory() ` + and :meth:`loop.set_task_factory() ` + methods to customize the task factory that + :meth:`loop.create_task() ` method uses. + (Contributed by Yury Selivanov.) + +* New :meth:`Queue.join() ` and + :meth:`Queue.task_done() ` queue methods. + (Contributed by Victor Stinner.) + +* The ``JoinableQueue`` class was removed, in favor of the + :class:`asyncio.Queue` class. + (Contributed by Victor Stinner.) + +Updates in 3.5.1: + +* The :func:`~asyncio.ensure_future` function and all functions that + use it, such as :meth:`loop.run_until_complete() `, + now accept all kinds of :term:`awaitable objects `. + (Contributed by Yury Selivanov.) + + bz2 --- @@ -648,7 +822,7 @@ size of decompressed data. (Contributed by Nikolaus Rath in :issue:`15955`.) cgi --- -The :class:`~cgi.FieldStorage` class now supports the context management +The :class:`~cgi.FieldStorage` class now supports the :term:`context manager` protocol. 
(Contributed by Berker Peksag in :issue:`20289`.) @@ -656,13 +830,13 @@ cmath ----- A new function :func:`~cmath.isclose` provides a way to test for approximate -equality. (Contributed by Chris Barker and Tal Einat in :issue:`24270`.) +equality. (Contributed by Chris Barker and Tal Einat in :issue:`24270`.) code ---- -The :func:`InteractiveInterpreter.showtraceback ` +The :func:`InteractiveInterpreter.showtraceback() ` method now prints the full chained traceback, just like the interactive interpreter. (Contributed by Claudiu Popa in :issue:`17442`.) @@ -670,58 +844,71 @@ interpreter. (Contributed by Claudiu Popa in :issue:`17442`.) collections ----------- +.. _whatsnew-ordereddict: + The :class:`~collections.OrderedDict` class is now implemented in C, which makes it 4 to 100 times faster. (Contributed by Eric Snow in :issue:`16991`.) -:meth:`OrderedDict.items `, -:meth:`OrderedDict.keys `, -:meth:`OrderedDict.values ` views now support +:meth:`OrderedDict.items() `, +:meth:`OrderedDict.keys() `, +:meth:`OrderedDict.values() ` views now support :func:`reversed` iteration. (Contributed by Serhiy Storchaka in :issue:`19505`.) The :class:`~collections.deque` class now defines :meth:`~collections.deque.index`, :meth:`~collections.deque.insert`, and -:meth:`~collections.deque.copy`, as well as supports ``+`` and ``*`` operators. +:meth:`~collections.deque.copy`, and supports the ``+`` and ``*`` operators. This allows deques to be recognized as a :class:`~collections.abc.MutableSequence` and improves their substitutability for lists. -(Contributed by Raymond Hettinger :issue:`23704`.) +(Contributed by Raymond Hettinger in :issue:`23704`.) Docstrings produced by :func:`~collections.namedtuple` can now be updated:: Point = namedtuple('Point', ['x', 'y']) - Point.__doc__ = 'ordered pair' + Point.__doc__ += ': Cartesian coodinate' Point.x.__doc__ = 'abscissa' Point.y.__doc__ = 'ordinate' (Contributed by Berker Peksag in :issue:`24064`.) -The :class:`~collections.UserString` class now implements +The :class:`~collections.UserString` class now implements the :meth:`__getnewargs__`, :meth:`__rmod__`, :meth:`~str.casefold`, :meth:`~str.format_map`, :meth:`~str.isprintable`, and :meth:`~str.maketrans` -methods to match corresponding methods of :class:`str`. +methods to match the corresponding methods of :class:`str`. (Contributed by Joe Jevnik in :issue:`22189`.) collections.abc --------------- +The :meth:`Sequence.index() ` method now +accepts *start* and *stop* arguments to match the corresponding methods +of :class:`tuple`, :class:`list`, etc. +(Contributed by Devin Jeanpierre in :issue:`23086`.) + A new :class:`~collections.abc.Generator` abstract base class. (Contributed by Stefan Behnel in :issue:`24018`.) -New :class:`~collections.abc.Coroutine`, +New :class:`~collections.abc.Awaitable`, :class:`~collections.abc.Coroutine`, :class:`~collections.abc.AsyncIterator`, and :class:`~collections.abc.AsyncIterable` abstract base classes. (Contributed by Yury Selivanov in :issue:`24184`.) +For earlier Python versions, a backport of the new ABCs is available in an +external `PyPI package `_. + compileall ---------- -A new :mod:`compileall` option, ``-j N``, allows to run ``N`` workers +A new :mod:`compileall` option, :samp:`-j {N}`, allows to run *N* workers sumultaneously to perform parallel bytecode compilation. The :func:`~compileall.compile_dir` function has a corresponding ``workers`` parameter. (Contributed by Claudiu Popa in :issue:`16104`.) 
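For instance, a project tree can be byte-compiled in parallel from Python as
well (the directory name is illustrative)::

    import compileall

    compileall.compile_dir('myproject/', workers=4)

The equivalent command line invocation is ``python -m compileall -j 4 myproject/``.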
+Another new option, ``-r``, allows to control the maximum recursion +level for subdirectories. (Contributed by Claudiu Popa in :issue:`19628`.) + The ``-q`` command line option can now be specified more than once, in which case all output, including errors, will be suppressed. The corresponding ``quiet`` parameter in :func:`~compileall.compile_dir`, @@ -733,35 +920,92 @@ accept an integer value indicating the level of output suppression. concurrent.futures ------------------ -The :meth:`Executor.map ` method now accepts a +The :meth:`Executor.map() ` method now accepts a *chunksize* argument to allow batching of tasks to improve performance when :meth:`~concurrent.futures.ProcessPoolExecutor` is used. (Contributed by Dan O'Reilly in :issue:`11271`.) +The number of workers in the :class:`~concurrent.futures.ThreadPoolExecutor` +constructor is optional now. The default value is 5 times the number of CPUs. +(Contributed by Claudiu Popa in :issue:`21527`.) + + +configparser +------------ + +:mod:`configparser` now provides a way to customize the conversion +of values by specifying a dictionary of converters in the +:class:`~configparser.ConfigParser` constructor, or by defining them +as methods in ``ConfigParser`` subclasses. Converters defined in +a parser instance are inherited by its section proxies. + +Example:: + + >>> import configparser + >>> conv = {} + >>> conv['list'] = lambda v: [e.strip() for e in v.split() if e.strip()] + >>> cfg = configparser.ConfigParser(converters=conv) + >>> cfg.read_string(""" + ... [s] + ... list = a b c d e f g + ... """) + >>> cfg.get('s', 'list') + 'a b c d e f g' + >>> cfg.getlist('s', 'list') + ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + >>> section = cfg['s'] + >>> section.getlist('list') + ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + +(Contributed by Łukasz Langa in :issue:`18159`.) + contextlib ---------- -The new :func:`~contextlib.redirect_stderr` context manager (similar to +The new :func:`~contextlib.redirect_stderr` :term:`context manager` (similar to :func:`~contextlib.redirect_stdout`) makes it easier for utility scripts to handle inflexible APIs that write their output to :data:`sys.stderr` and -don't provide any options to redirect it. (Contributed by Berker Peksag in -:issue:`22389`.) +don't provide any options to redirect it:: + + >>> import contextlib, io, logging + >>> f = io.StringIO() + >>> with contextlib.redirect_stderr(f): + ... logging.warning('warning') + ... + >>> f.getvalue() + 'WARNING:root:warning\n' + +(Contributed by Berker Peksag in :issue:`22389`.) + + +csv +--- + +The :meth:`~csv.csvwriter.writerow` method now supports arbitrary iterables, +not just sequences. (Contributed by Serhiy Storchaka in :issue:`23171`.) curses ------ -The new :func:`~curses.update_lines_cols` function updates :envvar:`LINES` +The new :func:`~curses.update_lines_cols` function updates the :envvar:`LINES` and :envvar:`COLS` environment variables. This is useful for detecting -manual screen resize. (Contributed by Arnon Yaari in :issue:`4254`.) +manual screen resizing. (Contributed by Arnon Yaari in :issue:`4254`.) + + +dbm +--- + +:func:`dumb.open ` always creates a new database when the flag +has the value ``"n"``. (Contributed by Claudiu Popa in :issue:`18039`.) difflib ------- The charset of HTML documents generated by -:meth:`HtmlDiff.make_file ` +:meth:`HtmlDiff.make_file() ` can now be customized by using a new *charset* keyword-only argument. The default charset of HTML document changed from ``"ISO-8859-1"`` to ``"utf-8"``. 
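A short sketch of restoring the previous default (the input lines are
illustrative)::

    import difflib

    old = ['spam\n', 'eggs\n']
    new = ['spam\n', 'ham\n']
    page = difflib.HtmlDiff().make_file(old, new, charset='iso-8859-1')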
@@ -775,7 +1019,7 @@ strings. This fixes a regression from Python 2. distutils --------- -Both ``build`` and ``build_ext`` commands now accept a ``-j`` option to +Both the ``build`` and ``build_ext`` commands now accept a ``-j`` option to enable parallel building of extension modules. (Contributed by Antoine Pitrou in :issue:`5309`.) @@ -788,7 +1032,7 @@ doctest ------- The :func:`~doctest.DocTestSuite` function returns an empty -:class:`unittest.TestSuite` if *module* contains no docstrings instead of +:class:`unittest.TestSuite` if *module* contains no docstrings, instead of raising :exc:`ValueError`. (Contributed by Glenn Jones in :issue:`15916`.) @@ -802,7 +1046,7 @@ prefixed with a ``">"`` character by generators. The default is ``True`` for (Contributed by Milan Oberkirch in :issue:`20098`.) A new -:meth:`Message.get_content_disposition ` +:meth:`Message.get_content_disposition() ` method provides easy access to a canonical value for the :mailheader:`Content-Disposition` header. (Contributed by Abhilash Raj in :issue:`21083`.) @@ -814,6 +1058,10 @@ of using encoded words. This allows ``Messages`` to be formatted according to ``SMTPUTF8`` extension. (Contributed by R. David Murray in :issue:`24211`.) +The :class:`mime.text.MIMEText ` constructor now +accepts a :class:`charset.Charset ` instance. +(Contributed by Claude Paroz and Berker Peksag in :issue:`16324`.) + enum ---- @@ -833,7 +1081,7 @@ specify the initial number of enum values if only *names* are provided:: faulthandler ------------ -:func:`~faulthandler.enable`, :func:`~faulthandler.register`, +The :func:`~faulthandler.enable`, :func:`~faulthandler.register`, :func:`~faulthandler.dump_traceback` and :func:`~faulthandler.dump_traceback_later` functions now accept file descriptors in addition to file-like objects. @@ -843,7 +1091,9 @@ descriptors in addition to file-like objects. functools --------- -Most of :func:`~functools.lru_cache` machinery is now implemented in C, making +.. _whatsnew-lrucache: + +Most of the :func:`~functools.lru_cache` machinery is now implemented in C, making it significantly faster. (Contributed by Matt Joiner, Alexey Kachayev, and Serhiy Storchaka in :issue:`14373`.) @@ -851,18 +1101,36 @@ Serhiy Storchaka in :issue:`14373`.) glob ---- -:func:`~glob.iglob` and :func:`~glob.glob` functions now support recursive -search in subdirectories using the ``"**"`` pattern. +The :func:`~glob.iglob` and :func:`~glob.glob` functions now support recursive +search in subdirectories, using the ``"**"`` pattern. (Contributed by Serhiy Storchaka in :issue:`13968`.) +gzip +---- + +The *mode* argument of the :class:`~gzip.GzipFile` constructor now +accepts ``"x"`` to request exclusive creation. +(Contributed by Tim Heaney in :issue:`19222`.) + + heapq ----- Element comparison in :func:`~heapq.merge` can now be customized by -passing a :term:`key function` in a new optional ``key`` keyword argument. -A new optional ``reverse`` keyword argument can be used to reverse element -comparison. (Contributed by Raymond Hettinger in :issue:`13742`.) +passing a :term:`key function` in a new optional *key* keyword argument, +and a new optional *reverse* keyword argument can be used to reverse element +comparison:: + + >>> import heapq + >>> a = ['9', '777', '55555'] + >>> b = ['88', '6666'] + >>> list(heapq.merge(a, b, key=len)) + ['9', '88', '777', '6666', '55555'] + >>> list(heapq.merge(reversed(a), reversed(b), key=len, reverse=True)) + ['55555', '6666', '777', '88', '9'] + +(Contributed by Raymond Hettinger in :issue:`13742`.) 
http @@ -873,6 +1141,28 @@ HTTP status codes, reason phrases and long descriptions written in English. (Contributed by Demian Brecht in :issue:`21793`.) +http.client +----------- + +:meth:`HTTPConnection.getresponse() ` +now raises a :exc:`~http.client.RemoteDisconnected` exception when a +remote server connection is closed unexpectedly. Additionally, if a +:exc:`ConnectionError` (of which ``RemoteDisconnected`` +is a subclass) is raised, the client socket is now closed automatically, +and will reconnect on the next request:: + + import http.client + conn = http.client.HTTPConnection('www.python.org') + for retries in range(3): + try: + conn.request('GET', '/') + resp = conn.getresponse() + except http.client.RemoteDisconnected: + pass + +(Contributed by Martin Panter in :issue:`3566`.) + + idlelib and IDLE ---------------- @@ -880,21 +1170,21 @@ Since idlelib implements the IDLE shell and editor and is not intended for import by other programs, it gets improvements with every release. See :file:`Lib/idlelib/NEWS.txt` for a cumulative list of changes since 3.4.0, as well as changes made in future 3.5.x releases. This file is also available -from the IDLE Help -> About Idle dialog. +from the IDLE :menuselection:`Help --> About IDLE` dialog. imaplib ------- -The :class:`~imaplib.IMAP4` class now supports context manager protocol. +The :class:`~imaplib.IMAP4` class now supports the :term:`context manager` protocol. When used in a :keyword:`with` statement, the IMAP4 ``LOGOUT`` command will be called automatically at the end of the block. (Contributed by Tarek Ziadé and Serhiy Storchaka in :issue:`4972`.) The :mod:`imaplib` module now supports :rfc:`5161` (ENABLE Extension) -and :rfc:`6855` (UTF-8 Support) via the :meth:`IMAP4.enable ` +and :rfc:`6855` (UTF-8 Support) via the :meth:`IMAP4.enable() ` method. A new :attr:`IMAP4.utf8_enabled ` -attribute, tracks whether or not :rfc:`6855` support is enabled. +attribute tracks whether or not :rfc:`6855` support is enabled. (Contributed by Milan Oberkirch, R. David Murray, and Maciej Szulik in :issue:`21800`.) @@ -920,13 +1210,13 @@ The :class:`util.LazyLoader ` class allows for lazy loading of modules in applications where startup time is important. (Contributed by Brett Cannon in :issue:`17621`.) -The :func:`abc.InspectLoader.source_to_code ` +The :func:`abc.InspectLoader.source_to_code() ` method is now a static method. This makes it easier to initialize a module object with code compiled from a string by running ``exec(code, module.__dict__)``. (Contributed by Brett Cannon in :issue:`21156`.) -The new :func:`util.module_from_spec ` +The new :func:`util.module_from_spec() ` function is now the preferred way to create a new module. As opposed to creating a :class:`types.ModuleType` instance directly, this new function will set the various import-controlled attributes based on the passed-in @@ -936,21 +1226,28 @@ spec object. (Contributed by Brett Cannon in :issue:`20383`.) inspect ------- -Both :class:`~inspect.Signature` and :class:`~inspect.Parameter` classes are +Both the :class:`~inspect.Signature` and :class:`~inspect.Parameter` classes are now picklable and hashable. (Contributed by Yury Selivanov in :issue:`20726` and :issue:`20334`.) A new -:meth:`BoundArguments.apply_defaults ` -method provides a way to set default values for missing arguments. 
+:meth:`BoundArguments.apply_defaults() ` +method provides a way to set default values for missing arguments:: + + >>> def foo(a, b='ham', *args): pass + >>> ba = inspect.signature(foo).bind('spam') + >>> ba.apply_defaults() + >>> ba.arguments + OrderedDict([('a', 'spam'), ('b', 'ham'), ('args', ())]) + (Contributed by Yury Selivanov in :issue:`24190`.) A new class method -:meth:`Signature.from_callable ` makes +:meth:`Signature.from_callable() ` makes subclassing of :class:`~inspect.Signature` easier. (Contributed by Yury Selivanov and Eric Snow in :issue:`17373`.) -The :func:`~inspect.signature` function now accepts a ``follow_wrapped`` +The :func:`~inspect.signature` function now accepts a *follow_wrapped* optional keyword argument, which, when set to ``False``, disables automatic following of ``__wrapped__`` links. (Contributed by Yury Selivanov in :issue:`20691`.) @@ -963,7 +1260,7 @@ A set of new functions to inspect and :func:`~inspect.getcoroutinestate`. (Contributed by Yury Selivanov in :issue:`24017` and :issue:`24400`.) -:func:`~inspect.stack`, :func:`~inspect.trace`, +The :func:`~inspect.stack`, :func:`~inspect.trace`, :func:`~inspect.getouterframes`, and :func:`~inspect.getinnerframes` functions now return a list of named tuples. (Contributed by Daniel Shahaf in :issue:`16808`.) @@ -972,24 +1269,40 @@ functions now return a list of named tuples. io -- -A new :meth:`BufferedIOBase.readinto1 ` +A new :meth:`BufferedIOBase.readinto1() ` method, that uses at most one call to the underlying raw stream's -:meth:`RawIOBase.read ` (or -:meth:`RawIOBase.readinto `) method. +:meth:`RawIOBase.read() ` or +:meth:`RawIOBase.readinto() ` methods. (Contributed by Nikolaus Rath in :issue:`20578`.) ipaddress --------- -Both :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network` classes +Both the :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network` classes now accept an ``(address, netmask)`` tuple argument, so as to easily construct -network objects from existing addresses. (Contributed by Peter Moody -and Antoine Pitrou in :issue:`16531`.) +network objects from existing addresses:: + + >>> import ipaddress + >>> ipaddress.IPv4Network(('127.0.0.0', 8)) + IPv4Network('127.0.0.0/8') + >>> ipaddress.IPv4Network(('127.0.0.0', '255.0.0.0')) + IPv4Network('127.0.0.0/8') -A new :attr:`~ipaddress.IPv4Network.reverse_pointer>` attribute for +(Contributed by Peter Moody and Antoine Pitrou in :issue:`16531`.) + +A new :attr:`~ipaddress.IPv4Network.reverse_pointer` attribute for the :class:`~ipaddress.IPv4Network` and :class:`~ipaddress.IPv6Network` classes -returns the name of the reverse DNS PTR record. +returns the name of the reverse DNS PTR record:: + + >>> import ipaddress + >>> addr = ipaddress.IPv4Address('127.0.0.1') + >>> addr.reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> addr6 = ipaddress.IPv6Address('::1') + >>> addr6.reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa' + (Contributed by Leon Weber in :issue:`20480`.) @@ -1001,15 +1314,37 @@ JSON objects passed in input. The new ``--sort-keys`` option can be used to sort the keys alphabetically. (Contributed by Berker Peksag in :issue:`21650`.) -JSON decoder now raises :exc:`json.JSONDecodeError` instead of -:exc:`ValueError`. (Contributed by Serhiy Storchaka in :issue:`19361`.) +JSON decoder now raises :exc:`~json.JSONDecodeError` instead of +:exc:`ValueError` to provide better context information about the error. +(Contributed by Serhiy Storchaka in :issue:`19361`.) 
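Since :exc:`~json.JSONDecodeError` is a subclass of :exc:`ValueError`, existing
``except ValueError`` code keeps working; new code can inspect the extra
attributes, as in this rough sketch::

    >>> import json
    >>> try:
    ...     json.loads('{"spam": }')
    ... except json.JSONDecodeError as exc:
    ...     print(exc.msg, exc.lineno, exc.colno)
    ...
    Expecting value 1 10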
+ + +linecache +--------- + +A new :func:`~linecache.lazycache` function can be used to capture information +about a non-file-based module to permit getting its lines later via +:func:`~linecache.getline`. This avoids doing I/O until a line is actually +needed, without having to carry the module globals around indefinitely. +(Contributed by Robert Collins in :issue:`17911`.) locale ------ A new :func:`~locale.delocalize` function can be used to convert a string into -a normalized number string, taking the ``LC_NUMERIC`` settings into account. +a normalized number string, taking the ``LC_NUMERIC`` settings into account:: + + >>> import locale + >>> locale.setlocale(locale.LC_NUMERIC, 'de_DE.UTF-8') + 'de_DE.UTF-8' + >>> locale.delocalize('1.234,56') + '1234.56' + >>> locale.setlocale(locale.LC_NUMERIC, 'en_US.UTF-8') + 'en_US.UTF-8' + >>> locale.delocalize('1,234.56') + '1234.56' + (Contributed by Cédric Krier in :issue:`13918`.) @@ -1019,8 +1354,17 @@ logging All logging methods (:class:`~logging.Logger` :meth:`~logging.Logger.log`, :meth:`~logging.Logger.exception`, :meth:`~logging.Logger.critical`, :meth:`~logging.Logger.debug`, etc.), now accept exception instances -as an ``exc_info`` argument, in addition to boolean values and exception -tuples. (Contributed by Yury Selivanov in :issue:`20537`.) +as an *exc_info* argument, in addition to boolean values and exception +tuples:: + + >>> import logging + >>> try: + ... 1/0 + ... except ZeroDivisionError as ex: + ... logging.error('exception', exc_info=ex) + ERROR:root:exception + +(Contributed by Yury Selivanov in :issue:`20537`.) The :class:`handlers.HTTPHandler ` class now accepts an optional :class:`ssl.SSLContext` instance to configure SSL @@ -1036,7 +1380,7 @@ will pass messages to handlers taking handler levels into account. lzma ---- -The :meth:`LZMADecompressor.decompress ` +The :meth:`LZMADecompressor.decompress() ` method now accepts an optional *max_length* argument to limit the maximum size of decompressed data. (Contributed by Martin Panter in :issue:`15955`.) @@ -1056,6 +1400,14 @@ function is now deprecated. (Contributed by Mark Dickinson and Serhiy Storchaka in :issue:`22486`.) +multiprocessing +--------------- + +:func:`sharedctypes.synchronized() ` +objects now support the :term:`context manager` protocol. +(Contributed by Charles-François Natali in :issue:`21565`.) + + operator -------- @@ -1080,17 +1432,17 @@ of Victor Stinner in :issue:`22524`.) On Windows, a new :attr:`stat_result.st_file_attributes ` -attribute is now available. It corresponds to ``dwFileAttributes`` member of -the ``BY_HANDLE_FILE_INFORMATION`` structure returned by +attribute is now available. It corresponds to the ``dwFileAttributes`` member +of the ``BY_HANDLE_FILE_INFORMATION`` structure returned by ``GetFileInformationByHandle()``. (Contributed by Ben Hoyt in :issue:`21719`.) -The :func:`~os.urandom` function now uses ``getrandom()`` syscall on Linux 3.17 +The :func:`~os.urandom` function now uses the ``getrandom()`` syscall on Linux 3.17 or newer, and ``getentropy()`` on OpenBSD 5.6 and newer, removing the need to use ``/dev/urandom`` and avoiding failures due to potential file descriptor exhaustion. (Contributed by Victor Stinner in :issue:`22181`.) New :func:`~os.get_blocking` and :func:`~os.set_blocking` functions allow to -get and set a file descriptor blocking mode (:data:`~os.O_NONBLOCK`.) +get and set a file descriptor's blocking mode (:data:`~os.O_NONBLOCK`.) (Contributed by Victor Stinner in :issue:`22054`.) 
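A quick sketch, using a pipe as a convenient file descriptor (these functions
are only available on Unix)::

    >>> import os
    >>> r, w = os.pipe()
    >>> os.get_blocking(r)
    True
    >>> os.set_blocking(r, False)
    >>> os.get_blocking(r)
    False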
The :func:`~os.truncate` and :func:`~os.ftruncate` functions are now supported @@ -1099,35 +1451,59 @@ on Windows. (Contributed by Steve Dower in :issue:`23668`.) There is a new :func:`os.path.commonpath` function returning the longest common sub-path of each passed pathname. Unlike the :func:`os.path.commonprefix` function, it always returns a valid -path. (Contributed by Rafik Draoui and Serhiy Storchaka in :issue:`10395`.) +path:: + + >>> os.path.commonprefix(['/usr/lib', '/usr/local/lib']) + '/usr/l' + + >>> os.path.commonpath(['/usr/lib', '/usr/local/lib']) + '/usr' + +(Contributed by Rafik Draoui and Serhiy Storchaka in :issue:`10395`.) pathlib ------- -The new :meth:`Path.samefile ` method can be used -to check whether the path points to the same file as other path, which can be -either an another :class:`~pathlib.Path` object, or a string. +The new :meth:`Path.samefile() ` method can be used +to check whether the path points to the same file as another path, which can +be either another :class:`~pathlib.Path` object, or a string:: + + >>> import pathlib + >>> p1 = pathlib.Path('/etc/hosts') + >>> p2 = pathlib.Path('/etc/../etc/hosts') + >>> p1.samefile(p2) + True + (Contributed by Vajrasky Kok and Antoine Pitrou in :issue:`19775`.) -The :meth:`Path.mkdir ` method how accepts a new optional -``exist_ok`` argument to match ``mkdir -p`` and :func:`os.makrdirs` +The :meth:`Path.mkdir() ` method now accepts a new optional +*exist_ok* argument to match ``mkdir -p`` and :func:`os.makedirs` functionality. (Contributed by Berker Peksag in :issue:`21539`.) -There is a new :meth:`Path.expanduser ` method to +There is a new :meth:`Path.expanduser() ` method to expand ``~`` and ``~user`` prefixes. (Contributed by Serhiy Storchaka and Claudiu Popa in :issue:`19776`.) -A new :meth:`Path.home ` class method can be used to get -an instance of :class:`~pathlib.Path` object representing the user’s home +A new :meth:`Path.home() ` class method can be used to get +a :class:`~pathlib.Path` instance representing the user’s home directory. (Contributed by Victor Salgado and Mayank Tripathi in :issue:`19777`.) -New :meth:`Path.write_text `, -:meth:`Path.read_text `, -:meth:`Path.write_bytes `, -:meth:`Path.read_bytes ` methods to simplify +New :meth:`Path.write_text() `, +:meth:`Path.read_text() `, +:meth:`Path.write_bytes() `, +:meth:`Path.read_bytes() ` methods to simplify read/write operations on files. + +The following code snippet will create or rewrite existing file +``~/spam42``:: + + >>> import pathlib + >>> p = pathlib.Path('~/spam42') + >>> p.expanduser().write_text('ham') + 3 + (Contributed by Christopher Welborn in :issue:`20218`.) @@ -1143,7 +1519,7 @@ Storchaka in :issue:`23611`.) poplib ------ -A new :meth:`POP3.utf8 ` command enables :rfc:`6856` +A new :meth:`POP3.utf8() ` command enables :rfc:`6856` (Internationalized Email) support, if a POP server supports it. (Contributed by Milan OberKirch in :issue:`21804`.) @@ -1151,18 +1527,39 @@ A new :meth:`POP3.utf8 ` command enables :rfc:`6856` re -- -The number of capturing groups in regular expression is no longer limited by +References and conditional references to groups with fixed length are now +allowed in lookbehind assertions:: + + >>> import re + >>> pat = re.compile(r'(a|b).(?<=\1)c') + >>> pat.match('aac') + <_sre.SRE_Match object; span=(0, 3), match='aac'> + >>> pat.match('bbc') + <_sre.SRE_Match object; span=(0, 3), match='bbc'> + +(Contributed by Serhiy Storchaka in :issue:`9179`.) 
+ +The number of capturing groups in regular expressions is no longer limited to 100. (Contributed by Serhiy Storchaka in :issue:`22437`.) The :func:`~re.sub` and :func:`~re.subn` functions now replace unmatched groups with empty strings instead of raising an exception. (Contributed by Serhiy Storchaka in :issue:`1519638`.) -The :class:`re.error` exceptions have new attributes: +The :class:`re.error` exceptions have new attributes, :attr:`~re.error.msg`, :attr:`~re.error.pattern`, :attr:`~re.error.pos`, :attr:`~re.error.lineno`, -and :attr:`~re.error.colno` that provide better context -information about the error. +and :attr:`~re.error.colno`, that provide better context +information about the error:: + + >>> re.compile(""" + ... (?x) + ... .++ + ... """) + Traceback (most recent call last): + ... + sre_constants.error: multiple repeat at position 16 (line 3, column 7) + (Contributed by Serhiy Storchaka in :issue:`22578`.) @@ -1210,31 +1607,31 @@ during debugging, instead of integer "magic numbers". smtpd ----- -Both :class:`~smtpd.SMTPServer` and :class:`~smtpd.SMTPChannel` classes now +Both the :class:`~smtpd.SMTPServer` and :class:`~smtpd.SMTPChannel` classes now accept a *decode_data* keyword argument to determine if the ``DATA`` portion of the SMTP transaction is decoded using the ``"utf-8"`` codec or is instead provided to the -:meth:`SMTPServer.process_message ` +:meth:`SMTPServer.process_message() ` method as a byte string. The default is ``True`` for backward compatibility reasons, but will change to ``False`` in Python 3.6. If *decode_data* is set -to ``False``, the :meth:`~smtpd.SMTPServer.process_message` method must -be prepared to accept keyword arguments. +to ``False``, the ``process_message`` method must be prepared to accept keyword +arguments. (Contributed by Maciej Szulik in :issue:`19662`.) The :class:`~smtpd.SMTPServer` class now advertises the ``8BITMIME`` extension (:rfc:`6152`) if *decode_data* has been set ``True``. If the client specifies ``BODY=8BITMIME`` on the ``MAIL`` command, it is passed to -:meth:`SMTPServer.process_message ` -via the ``mail_options`` keyword. +:meth:`SMTPServer.process_message() ` +via the *mail_options* keyword. (Contributed by Milan Oberkirch and R. David Murray in :issue:`21795`.) The :class:`~smtpd.SMTPServer` class now also supports the ``SMTPUTF8`` extension (:rfc:`6531`: Internationalized Email). If the client specified ``SMTPUTF8 BODY=8BITMIME`` on the ``MAIL`` command, they are passed to -:meth:`SMTPServer.process_message ` -via the ``mail_options`` keyword. It is the responsibility of the -:meth:`~smtpd.SMTPServer.process_message` method to correctly handle the -``SMTPUTF8`` data. (Contributed by Milan Oberkirch in :issue:`21725`.) +:meth:`SMTPServer.process_message() ` +via the *mail_options* keyword. It is the responsibility of the +``process_message`` method to correctly handle the ``SMTPUTF8`` data. +(Contributed by Milan Oberkirch in :issue:`21725`.) It is now possible to provide, directly or via name resolution, IPv6 addresses in the :class:`~smtpd.SMTPServer` constructor, and have it @@ -1244,16 +1641,16 @@ successfully connect. (Contributed by Milan Oberkirch in :issue:`14758`.) smtplib ------- -A new :meth:`SMTP.auth ` method provides a convenient way to +A new :meth:`SMTP.auth() ` method provides a convenient way to implement custom authentication mechanisms. (Contributed by Milan Oberkirch in :issue:`15014`.) 
-The :meth:`SMTP.set_debuglevel ` method now +The :meth:`SMTP.set_debuglevel() ` method now accepts an additional debuglevel (2), which enables timestamps in debug messages. (Contributed by Gavin Chappell and Maciej Szulik in :issue:`16914`.) -Both :meth:`SMTP.sendmail ` and -:meth:`SMTP.send_message ` methods now +Both the :meth:`SMTP.sendmail() ` and +:meth:`SMTP.send_message() ` methods now support support :rfc:`6531` (SMTPUTF8). (Contributed by Milan Oberkirch and R. David Murray in :issue:`22027`.) @@ -1261,14 +1658,39 @@ support support :rfc:`6531` (SMTPUTF8). sndhdr ------ -:func:`~sndhdr.what` and :func:`~sndhdr.whathdr` functions now return +The :func:`~sndhdr.what` and :func:`~sndhdr.whathdr` functions now return a :func:`~collections.namedtuple`. (Contributed by Claudiu Popa in :issue:`18615`.) +socket +------ + +Functions with timeouts now use a monotonic clock, instead of a system clock. +(Contributed by Victor Stinner in :issue:`22043`.) + +A new :meth:`socket.sendfile() ` method allows to +send a file over a socket by using the high-performance :func:`os.sendfile` +function on UNIX, resulting in uploads being from 2 to 3 times faster than when +using plain :meth:`socket.send() `. +(Contributed by Giampaolo Rodola' in :issue:`17552`.) + +The :meth:`socket.sendall() ` method no longer resets the +socket timeout every time bytes are received or sent. The socket timeout is +now the maximum total duration to send all data. +(Contributed by Victor Stinner in :issue:`23853`.) + +The *backlog* argument of the :meth:`socket.listen() ` +method is now optional. By default it is set to +:data:`SOMAXCONN ` or to ``128``, whichever is less. +(Contributed by Charles-François Natali in :issue:`21455`.) + + ssl --- +.. _whatsnew-sslmemorybio: + Memory BIO Support ~~~~~~~~~~~~~~~~~~ @@ -1276,7 +1698,7 @@ Memory BIO Support The new :class:`~ssl.SSLObject` class has been added to provide SSL protocol support for cases when the network I/O capabilities of :class:`~ssl.SSLSocket` -are not necessary or suboptimal. :class:`~ssl.SSLObject` represents +are not necessary or are suboptimal. ``SSLObject`` represents an SSL protocol instance, but does not implement any network I/O methods, and instead provides a memory buffer interface. The new :class:`~ssl.MemoryBIO` class can be used to pass data between Python and an SSL protocol instance. @@ -1285,8 +1707,8 @@ The memory BIO SSL support is primarily intended to be used in frameworks implementing asynchronous I/O for which :class:`~ssl.SSLSocket`'s readiness model ("select/poll") is inefficient. -A new :meth:`SSLContext.wrap_bio ` method can be used -to create a new :class:`~ssl.SSLObject` instance. +A new :meth:`SSLContext.wrap_bio() ` method can be used +to create a new ``SSLObject`` instance. Application-Layer Protocol Negotiation Support @@ -1294,33 +1716,33 @@ Application-Layer Protocol Negotiation Support (Contributed by Benjamin Peterson in :issue:`20188`.) -Where OpenSSL support is present, :mod:`ssl` module now implements -*Application-Layer Protocol Negotiation* TLS extension as described +Where OpenSSL support is present, the :mod:`ssl` module now implements +the *Application-Layer Protocol Negotiation* TLS extension as described in :rfc:`7301`. -The new :meth:`SSLContext.set_alpn_protocols ` +The new :meth:`SSLContext.set_alpn_protocols() ` can be used to specify which protocols a socket should advertise during the TLS handshake. 
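For example (the protocol identifiers below are just an illustration; ALPN
also requires a sufficiently recent OpenSSL, see :data:`~ssl.HAS_ALPN`)::

    import ssl

    ctx = ssl.create_default_context()
    ctx.set_alpn_protocols(['h2', 'http/1.1'])   # preferred protocol first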
The new -:meth:`SSLSocket.selected_alpn_protocol ` +:meth:`SSLSocket.selected_alpn_protocol() ` returns the protocol that was selected during the TLS handshake. -:data:`~ssl.HAS_ALPN` flag indicates whether APLN support is present. +The :data:`~ssl.HAS_ALPN` flag indicates whether ALPN support is present. Other Changes ~~~~~~~~~~~~~ -There is a new :meth:`SSLSocket.version ` method to query -the actual protocol version in use. +There is a new :meth:`SSLSocket.version() ` method to +query the actual protocol version in use. (Contributed by Antoine Pitrou in :issue:`20421`.) The :class:`~ssl.SSLSocket` class now implements -a :meth:`SSLSocket.sendfile ` method. +a :meth:`SSLSocket.sendfile() ` method. (Contributed by Giampaolo Rodola' in :issue:`17552`.) -The :meth:`SSLSocket.send ` method now raises either -:exc:`ssl.SSLWantReadError` or :exc:`ssl.SSLWantWriteError` exception on a +The :meth:`SSLSocket.send() ` method now raises either +the :exc:`ssl.SSLWantReadError` or :exc:`ssl.SSLWantWriteError` exception on a non-blocking socket if the operation would block. Previously, it would return ``0``. (Contributed by Nikolaus Rath in :issue:`20951`.) @@ -1328,15 +1750,15 @@ The :func:`~ssl.cert_time_to_seconds` function now interprets the input time as UTC and not as local time, per :rfc:`5280`. Additionally, the return value is always an :class:`int`. (Contributed by Akira Li in :issue:`19940`.) -New :meth:`SSLObject.shared_ciphers ` and -:meth:`SSLSocket.shared_ciphers ` methods return +New :meth:`SSLObject.shared_ciphers() ` and +:meth:`SSLSocket.shared_ciphers() ` methods return the list of ciphers sent by the client during the handshake. (Contributed by Benjamin Peterson in :issue:`23186`.) -The :meth:`SSLSocket.do_handshake `, -:meth:`SSLSocket.read `, -:meth:`SSLSocket.shutdown `, and -:meth:`SSLSocket.write ` methods of :class:`ssl.SSLSocket` +The :meth:`SSLSocket.do_handshake() `, +:meth:`SSLSocket.read() `, +:meth:`SSLSocket.shutdown() `, and +:meth:`SSLSocket.write() ` methods of the :class:`~ssl.SSLSocket` class no longer reset the socket timeout every time bytes are received or sent. The socket timeout is now the maximum total duration of the method. (Contributed by Victor Stinner in :issue:`23853`.) @@ -1345,49 +1767,42 @@ The :func:`~ssl.match_hostname` function now supports matching of IP addresses. (Contributed by Antoine Pitrou in :issue:`23239`.) -socket ------- - -Functions with timeouts now use a monotonic clock, instead of a system clock. -(Contributed by Victor Stinner in :issue:`22043`.) - -A new :meth:`socket.sendfile ` method allows to -send a file over a socket by using the high-performance :func:`os.sendfile` -function on UNIX resulting in uploads being from 2 to 3 times faster than when -using plain :meth:`socket.send `. -(Contributed by Giampaolo Rodola' in :issue:`17552`.) - -The :meth:`socket.sendall ` method no longer resets the -socket timeout every time bytes are received or sent. The socket timeout is -now the maximum total duration to send all data. -(Contributed by Victor Stinner in :issue:`23853`.) - -The *backlog* argument of the :meth:`socket.listen ` -method is now optional. By default it is set to -:data:`SOMAXCONN ` or to ``128`` whichever is less. -(Contributed by Charles-François Natali in :issue:`21455`.) - - sqlite3 ------- -The :class:`~sqlite3.Row` class now fully supports sequence protocol, +The :class:`~sqlite3.Row` class now fully supports the sequence protocol, in particular :func:`reversed` iteration and slice indexing. 
(Contributed by Claudiu Popa in :issue:`10203`; by Lucas Sinclair, Jessica McKellar, and Serhiy Storchaka in :issue:`13583`.) +.. _whatsnew-subprocess: + subprocess ---------- The new :func:`~subprocess.run` function has been added. -It runs the specified command and and returns a +It runs the specified command and returns a :class:`~subprocess.CompletedProcess` object, which describes a finished process. The new API is more consistent and is the recommended approach to invoking subprocesses in Python code that does not need to maintain compatibility with earlier Python versions. (Contributed by Thomas Kluyver in :issue:`23342`.) +Examples:: + + >>> subprocess.run(["ls", "-l"]) # doesn't capture output + CompletedProcess(args=['ls', '-l'], returncode=0) + + >>> subprocess.run("exit 1", shell=True, check=True) + Traceback (most recent call last): + ... + subprocess.CalledProcessError: Command 'exit 1' returned non-zero exit status 1 + + >>> subprocess.run(["ls", "-l", "/dev/null"], stdout=subprocess.PIPE) + CompletedProcess(args=['ls', '-l', '/dev/null'], returncode=0, + stdout=b'crw-rw-rw- 1 root root 1, 3 Jan 23 16:23 /dev/null\n') + sys --- @@ -1396,8 +1811,9 @@ A new :func:`~sys.set_coroutine_wrapper` function allows setting a global hook that will be called whenever a :term:`coroutine object ` is created by an :keyword:`async def` function. A corresponding :func:`~sys.get_coroutine_wrapper` can be used to obtain a currently set -wrapper. Both functions are provisional, and are intended for debugging -purposes only. (Contributed by Yury Selivanov in :issue:`24017`.) +wrapper. Both functions are :term:`provisional `, +and are intended for debugging purposes only. (Contributed by Yury Selivanov +in :issue:`24017`.) A new :func:`~sys.is_finalizing` function can be used to check if the Python interpreter is :term:`shutting down `. @@ -1408,7 +1824,7 @@ sysconfig --------- The name of the user scripts directory on Windows now includes the first -two components of Python version. (Contributed by Paul Moore +two components of the Python version. (Contributed by Paul Moore in :issue:`23437`.) @@ -1418,20 +1834,25 @@ tarfile The *mode* argument of the :func:`~tarfile.open` function now accepts ``"x"`` to request exclusive creation. (Contributed by Berker Peksag in :issue:`21717`.) -:meth:`TarFile.extractall ` and -:meth:`TarFile.extract ` methods now take a keyword +The :meth:`TarFile.extractall() ` and +:meth:`TarFile.extract() ` methods now take a keyword argument *numeric_only*. If set to ``True``, the extracted files and directories will be owned by the numeric ``uid`` and ``gid`` from the tarfile. If set to ``False`` (the default, and the behavior in versions prior to 3.5), they will be owned by the named user and group in the tarfile. (Contributed by Michael Vogt and Eric Smith in :issue:`23193`.) +The :meth:`TarFile.list() ` now accepts an optional +*members* keyword argument that can be set to a subset of the list returned +by :meth:`TarFile.getmembers() `. +(Contributed by Serhiy Storchaka in :issue:`21549`.) + threading --------- -Both :meth:`Lock.acquire ` and -:meth:`RLock.acquire ` methods +Both the :meth:`Lock.acquire() ` and +:meth:`RLock.acquire() ` methods now use a monotonic clock for timeout management. (Contributed by Victor Stinner in :issue:`22043`.) @@ -1446,7 +1867,7 @@ The :func:`~time.monotonic` function is now always available. 
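A monotonic clock is the natural choice for measuring elapsed time, since it
is unaffected by system clock updates; a minimal sketch::

    import time

    start = time.monotonic()
    time.sleep(0.5)                     # stand-in for real work
    elapsed = time.monotonic() - start  # roughly 0.5 seconds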
timeit ------ -A new command line option ``-u`` or ``--unit=U`` can be used to specify the time +A new command line option ``-u`` or :samp:`--unit={U}` can be used to specify the time unit for the timer output. Supported options are ``usec``, ``msec``, or ``sec``. (Contributed by Julian Gindi in :issue:`18983`.) @@ -1464,6 +1885,8 @@ module which makes no permanent changes to environment variables. (Contributed by Zachary Ware in :issue:`20035`.) +.. _whatsnew-traceback: + traceback --------- @@ -1472,10 +1895,10 @@ functions to conveniently traverse frame and traceback objects. (Contributed by Robert Collins in :issue:`17911`.) New lightweight classes: :class:`~traceback.TracebackException`, -:class:`~traceback.StackSummary`, and :class:`traceback.FrameSummary`. +:class:`~traceback.StackSummary`, and :class:`~traceback.FrameSummary`. (Contributed by Robert Collins in :issue:`17911`.) -Both :func:`~traceback.print_tb` and :func:`~traceback.print_stack` functions +Both the :func:`~traceback.print_tb` and :func:`~traceback.print_stack` functions now support negative values for the *limit* argument. (Contributed by Dmitry Kazakov in :issue:`22619`.) @@ -1489,38 +1912,11 @@ A new :func:`~types.coroutine` function to transform :term:`awaitables `. (Contributed by Yury Selivanov in :issue:`24017`.) -A new :class:`~types.CoroutineType` is the type of :term:`coroutine` objects -created by :keyword:`async def` functions. +A new type called :class:`~types.CoroutineType`, which is used for +:term:`coroutine` objects created by :keyword:`async def` functions. (Contributed by Yury Selivanov in :issue:`24400`.) -urllib ------- - -A new -:class:`request.HTTPPasswordMgrWithPriorAuth ` -class allows HTTP Basic Authentication credentials to be managed so as to -eliminate unnecessary ``401`` response handling, or to unconditionally send -credentials on the first request in order to communicate with servers that -return a ``404`` response instead of a ``401`` if the ``Authorization`` header -is not sent. (Contributed by Matej Cepl in :issue:`19494` and Akshit Khurana in -:issue:`7159`.) - -A new *quote_via* argument for the -:func:`parse.urlencode ` -function provides a way to control the encoding of query parts if needed. -(Contributed by Samwyse and Arnon Yaari in :issue:`13866`.) - -The :func:`request.urlopen ` function accepts an -:class:`ssl.SSLContext` object as a *context* argument, which will be used for -the HTTPS connection. (Contributed by Alex Gaynor in :issue:`22366`.) - -The :func:`parse.urljoin ` was updated to use the -:rfc:`3986` semantics for the resolution of relative URLs, rather than -:rfc:`1808` and :rfc:`2396`. -(Contributed by Demian Brecht and Senthil Kumaran in :issue:`22118`.) - - unicodedata ----------- @@ -1531,6 +1927,18 @@ The :mod:`unicodedata` module now uses data from `Unicode 8.0.0 unittest -------- +The :meth:`TestLoader.loadTestsFromModule() ` +method now accepts a keyword-only argument *pattern* which is passed to +``load_tests`` as the third argument. Found packages are now checked for +``load_tests`` regardless of whether their path matches *pattern*, because it +is impossible for a package name to match the default pattern. +(Contributed by Robert Collins and Barry A. Warsaw in :issue:`16662`.) + +Unittest discovery errors now are exposed in the +:data:`TestLoader.errors ` attribute of the +:class:`~unittest.TestLoader` instance. +(Contributed by Robert Collins in :issue:`19746`.) + A new command line option ``--locals`` to show local variables in tracebacks. 
(Contributed by Robert Collins in :issue:`22936`.) @@ -1538,14 +1946,14 @@ tracebacks. (Contributed by Robert Collins in :issue:`22936`.) unittest.mock ------------- -The :class:`~unittest.mock.Mock` has the following improvements: +The :class:`~unittest.mock.Mock` class has the following improvements: -* Class constructor has a new *unsafe* parameter, which causes mock +* The class constructor has a new *unsafe* parameter, which causes mock objects to raise :exc:`AttributeError` on attribute names starting with ``"assert"``. (Contributed by Kushal Das in :issue:`21238`.) -* A new :meth:`Mock.assert_not_called ` +* A new :meth:`Mock.assert_not_called() ` method to check if the mock object was called. (Contributed by Kushal Das in :issue:`21262`.) @@ -1554,6 +1962,37 @@ The :class:`~unittest.mock.MagicMock` class now supports :meth:`__truediv__`, (Contributed by Johannes Baiter in :issue:`20968`, and HÃ¥kan Lövdahl in :issue:`23581` and :issue:`23568`.) +It is no longer necessary to explicitly pass ``create=True`` to the +:func:`~unittest.mock.patch` function when patching builtin names. +(Contributed by Kushal Das in :issue:`17660`.) + + +urllib +------ + +A new +:class:`request.HTTPPasswordMgrWithPriorAuth ` +class allows HTTP Basic Authentication credentials to be managed so as to +eliminate unnecessary ``401`` response handling, or to unconditionally send +credentials on the first request in order to communicate with servers that +return a ``404`` response instead of a ``401`` if the ``Authorization`` header +is not sent. (Contributed by Matej Cepl in :issue:`19494` and Akshit Khurana in +:issue:`7159`.) + +A new *quote_via* argument for the +:func:`parse.urlencode() ` +function provides a way to control the encoding of query parts if needed. +(Contributed by Samwyse and Arnon Yaari in :issue:`13866`.) + +The :func:`request.urlopen() ` function accepts an +:class:`ssl.SSLContext` object as a *context* argument, which will be used for +the HTTPS connection. (Contributed by Alex Gaynor in :issue:`22366`.) + +The :func:`parse.urljoin() ` was updated to use the +:rfc:`3986` semantics for the resolution of relative URLs, rather than +:rfc:`1808` and :rfc:`2396`. +(Contributed by Demian Brecht and Senthil Kumaran in :issue:`22118`.) + wsgiref ------- @@ -1566,11 +2005,11 @@ class constructor is now optional. xmlrpc ------ -The :class:`client.ServerProxy ` class is now a -:term:`context manager`. +The :class:`client.ServerProxy ` class now supports +the :term:`context manager` protocol. (Contributed by Claudiu Popa in :issue:`20627`.) -:class:`client.ServerProxy ` constructor now accepts +The :class:`client.ServerProxy ` constructor now accepts an optional :class:`ssl.SSLContext` instance. (Contributed by Alex Gaynor in :issue:`22960`.) @@ -1582,6 +2021,9 @@ SAX parsers now support a character stream of the :class:`xmlreader.InputSource ` object. (Contributed by Serhiy Storchaka in :issue:`2175`.) +:func:`~xml.sax.parseString` now accepts a :class:`str` instance. +(Contributed by Serhiy Storchaka in :issue:`10590`.) + zipfile ------- @@ -1589,7 +2031,7 @@ zipfile ZIP output can now be written to unseekable streams. (Contributed by Serhiy Storchaka in :issue:`23252`.) -The *mode* argument of :meth:`ZipFile.open ` method now +The *mode* argument of :meth:`ZipFile.open() ` method now accepts ``"x"`` to request exclusive creation. (Contributed by Serhiy Storchaka in :issue:`21717`.) @@ -1597,7 +2039,7 @@ accepts ``"x"`` to request exclusive creation. 
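To round off the module changes above, here is a minimal sketch of the new
exclusive-creation (``"x"``) mode mentioned for :mod:`tarfile` and :mod:`zipfile`;
the archive names are made up for the example, and the :class:`zipfile.ZipFile`
constructor is used for brevity::

    import tarfile
    import zipfile

    # "x" means exclusive creation: fail with FileExistsError rather than
    # silently overwriting an existing archive.
    with zipfile.ZipFile("backup.zip", "x") as zf:
        zf.writestr("notes.txt", "created exclusively")

    with tarfile.open("backup.tar", "x") as tf:
        tf.add("backup.zip")

    # A second attempt with "x" raises FileExistsError.
    try:
        zipfile.ZipFile("backup.zip", "x")
    except FileExistsError:
        print("backup.zip already exists")
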
Other module-level changes ========================== -Many functions in :mod:`mmap`, :mod:`ossaudiodev`, :mod:`socket`, +Many functions in the :mod:`mmap`, :mod:`ossaudiodev`, :mod:`socket`, :mod:`ssl`, and :mod:`codecs` modules now accept writable :term:`bytes-like objects `. (Contributed by Serhiy Storchaka in :issue:`23001`.) @@ -1656,7 +2098,7 @@ Method caching was slightly improved, yielding up to 5% performance improvement in some benchmarks. (Contributed by Antoine Pitrou in :issue:`22847`.) -Objects from :mod:`random` module now use two times less memory on 64-bit +Objects from the :mod:`random` module now use 50% less memory on 64-bit builds. (Contributed by Serhiy Storchaka in :issue:`23488`.) The :func:`property` getter calls are up to 25% faster. @@ -1666,7 +2108,7 @@ Instantiation of :class:`fractions.Fraction` is now up to 30% faster. (Contributed by Stefan Behnel in :issue:`22464`.) String methods :meth:`~str.find`, :meth:`~str.rfind`, :meth:`~str.split`, -:meth:`~str.partition` and :keyword:`in` string operator are now significantly +:meth:`~str.partition` and the :keyword:`in` string operator are now significantly faster for searching 1-character substrings. (Contributed by Serhiy Storchaka in :issue:`23573`.) @@ -1676,17 +2118,17 @@ Build and C API Changes New ``calloc`` functions were added: - * :c:func:`PyMem_RawCalloc`, - * :c:func:`PyMem_Calloc`, - * :c:func:`PyObject_Calloc`, - * :c:func:`_PyObject_GC_Calloc`. +* :c:func:`PyMem_RawCalloc`, +* :c:func:`PyMem_Calloc`, +* :c:func:`PyObject_Calloc`, +* :c:func:`_PyObject_GC_Calloc`. (Contributed by Victor Stinner in :issue:`21233`.) New encoding/decoding helper functions: - * :c:func:`Py_DecodeLocale` (replaced ``_Py_char2wchar()``), - * :c:func:`Py_EncodeLocale` (replaced ``_Py_wchar2char()``). +* :c:func:`Py_DecodeLocale` (replaced ``_Py_char2wchar()``), +* :c:func:`Py_EncodeLocale` (replaced ``_Py_wchar2char()``). (Contributed by Victor Stinner in :issue:`18395`.) @@ -1702,8 +2144,8 @@ A new :c:data:`PyExc_RecursionError` exception. (Contributed by Georg Brandl in :issue:`19235`.) New :c:func:`PyModule_FromDefAndSpec`, :c:func:`PyModule_FromDefAndSpec2`, -and :c:func:`PyModule_ExecDef` introduced by :pep:`489` -- multi-phase -extension module initialization. +and :c:func:`PyModule_ExecDef` functions introduced by :pep:`489` -- +multi-phase extension module initialization. (Contributed by Petr Viktorin in :issue:`24268`.) New :c:func:`PyNumber_MatrixMultiply` and @@ -1712,14 +2154,14 @@ multiplication. (Contributed by Benjamin Peterson in :issue:`21176`. See also :pep:`465` for details.) -The :c:member:`PyTypeObject.tp_finalize` slot is now part of stable ABI. +The :c:member:`PyTypeObject.tp_finalize` slot is now part of the stable ABI. Windows builds now require Microsoft Visual C++ 14.0, which is available as part of `Visual Studio 2015 `_. -Extension modules now include platform information tag in their filename on +Extension modules now include a platform information tag in their filename on some platforms (the tag is optional, and CPython will import extensions without -it; although if the tag is present and mismatched, the extension won't be +it, although if the tag is present and mismatched, the extension won't be loaded): * On Linux, extension module filenames end with @@ -1772,6 +2214,16 @@ function or module names. Introduced by :pep:`492` in Python 3.5, they will become proper keywords in Python 3.7. 
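For example, the following assignments are still accepted by Python 3.5 but rely
on ``async`` and ``await`` being ordinary identifiers, and will stop working once
they become true keywords (the names are purely illustrative)::

    # Valid in Python 3.5, discouraged, and a SyntaxError from Python 3.7 on:
    async = "pending jobs"
    await = lambda fut: fut

    # Prefer neutral names instead:
    pending_jobs = "pending jobs"
    wait_for = lambda fut: fut
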
+Deprecated Python Behavior +-------------------------- + +Raising the :exc:`StopIteration` exception inside a generator will now generate a silent +:exc:`PendingDeprecationWarning`, which will become a non-silent deprecation +warning in Python 3.6 and will trigger a :exc:`RuntimeError` in Python 3.7. +See :ref:`PEP 479: Change StopIteration handling inside generators ` +for details. + + Unsupported Operating Systems ----------------------------- @@ -1796,14 +2248,15 @@ with an appropriate value to avoid the deprecation warning. Directly assigning values to the :attr:`~http.cookies.Morsel.key`, :attr:`~http.cookies.Morsel.value` and -:attr:`~http.cookies.Morsel.coded_value` of :class:`~http.cookies.Morsel` -objects is deprecated. Use the :func:`~http.cookies.Morsel.set` method +:attr:`~http.cookies.Morsel.coded_value` of :class:`http.cookies.Morsel` +objects is deprecated. Use the :meth:`~http.cookies.Morsel.set` method instead. In addition, the undocumented *LegalChars* parameter of -:func:`~http.cookies.Morsel.set` is deprecated, and is now ignored. +:meth:`~http.cookies.Morsel.set` is deprecated, and is now ignored. Passing a format string as keyword argument *format_string* to the :meth:`~string.Formatter.format` method of the :class:`string.Formatter` class has been deprecated. +(Contributed by Serhiy Storchaka in :issue:`23671`.) The :func:`platform.dist` and :func:`platform.linux_distribution` functions are now deprecated and will be removed in Python 3.7. Linux distributions use @@ -1812,9 +2265,9 @@ left to a package. (Contributed by Vajrasky Kok and Berker Peksag in :issue:`1322`.) The previously undocumented ``from_function`` and ``from_builtin`` methods of -:class:`inspect.Signature` are deprecated. Use new -:meth:`inspect.Signature.from_callable` instead. (Contributed by Yury -Selivanov in :issue:`24248`.) +:class:`inspect.Signature` are deprecated. Use the new +:meth:`Signature.from_callable() ` +method instead. (Contributed by Yury Selivanov in :issue:`24248`.) The :func:`inspect.getargspec` function is deprecated and scheduled to be removed in Python 3.6. (See :issue:`20438` for details.) @@ -1823,12 +2276,22 @@ The :mod:`inspect` :func:`~inspect.getfullargspec`, :func:`~inspect.getargvalues`, :func:`~inspect.getcallargs`, :func:`~inspect.getargvalues`, :func:`~inspect.formatargspec`, and :func:`~inspect.formatargvalues` functions are deprecated in favor of -:func:`inspect.signature` API. +the :func:`inspect.signature` API. (Contributed by Yury Selivanov in :issue:`20438`.) -Use of ``re.LOCALE`` flag with str patterns or ``re.ASCII`` is now +Use of :const:`re.LOCALE` flag with str patterns or :const:`re.ASCII` is now deprecated. (Contributed by Serhiy Storchaka in :issue:`22407`.) +Use of unrecognized special sequences consisting of ``'\'`` and an ASCII letter +in regular expression patterns and replacement patterns now raises a +deprecation warning and will be forbidden in Python 3.6. +(Contributed by Serhiy Storchaka in :issue:`23622`.) + +The undocumented and unofficial *use_load_tests* default argument of the +:meth:`unittest.TestLoader.loadTestsFromModule` method now is +deprecated and ignored. +(Contributed by Robert Collins and Barry A. Warsaw in :issue:`16662`.) + Removed ======= @@ -1849,8 +2312,8 @@ removed: * The concept of ``.pyo`` files has been removed. -* The JoinableQueue class in the provisional asyncio module was deprecated - in 3.4.4 and is now removed. 
+* The JoinableQueue class in the provisional :mod:`asyncio` module was + deprecated in 3.4.4 and is now removed. (Contributed by A. Jesse Jiryu Davis in :issue:`23464`.) @@ -1860,6 +2323,20 @@ Porting to Python 3.5 This section lists previously described changes and other bugfixes that may require changes to your code. + +Changes in Python behavior +-------------------------- + +* Due to an oversight, earlier Python versions erroneously accepted the + following syntax:: + + f(1 for x in [1], *args) + f(1 for x in [1], **kwargs) + + Python 3.5 now correctly raises a :exc:`SyntaxError`, as generator + expressions must be put in parentheses if not a sole argument to a function. + + Changes in the Python API ------------------------- @@ -1877,7 +2354,7 @@ Changes in the Python API on a non-blocking socket if the operation would block. Previously, it would return ``0``. (Contributed by Nikolaus Rath in :issue:`20951`.) -* The ``__name__`` attribute of generator is now set from the function name, +* The ``__name__`` attribute of generators is now set from the function name, instead of being set from the code name. Use ``gen.gi_code.co_name`` to retrieve the code name. Generators also have a new ``__qualname__`` attribute, the qualified name, which is now used for the representation @@ -1899,7 +2376,7 @@ Changes in the Python API * If the current directory is set to a directory that no longer exists then :exc:`FileNotFoundError` will no longer be raised and instead :meth:`~importlib.machinery.FileFinder.find_spec` will return ``None`` - **without** caching ``None`` in :data:`sys.path_importer_cache` which is + **without** caching ``None`` in :data:`sys.path_importer_cache`, which is different than the typical case (:issue:`22834`). * HTTP status code and messages from :mod:`http.client` and :mod:`http.server` @@ -1907,7 +2384,7 @@ Changes in the Python API :mod:`http.client` and :mod:`http.server` remain available for backwards compatibility. (Contributed by Demian Brecht in :issue:`21793`.) -* When an import loader defines :meth:`~importlib.machinery.Loader.exec_module` +* When an import loader defines :meth:`importlib.machinery.Loader.exec_module` it is now expected to also define :meth:`~importlib.machinery.Loader.create_module` (raises a :exc:`DeprecationWarning` now, will be an error in Python 3.6). If the loader @@ -1918,11 +2395,12 @@ Changes in the Python API * The :func:`re.split` function always ignored empty pattern matches, so the ``"x*"`` pattern worked the same as ``"x+"``, and the ``"\b"`` pattern never worked. Now :func:`re.split` raises a warning if the pattern could match - an empty string. For compatibility use patterns that never match an empty + an empty string. For compatibility, use patterns that never match an empty string (e.g. ``"x+"`` instead of ``"x*"``). Patterns that could only match an empty string (such as ``"\b"``) now raise an error. + (Contributed by Serhiy Storchaka in :issue:`22818`.) -* The :class:`~http.cookies.Morsel` dict-like interface has been made self +* The :class:`http.cookies.Morsel` dict-like interface has been made self consistent: morsel comparison now takes the :attr:`~http.cookies.Morsel.key` and :attr:`~http.cookies.Morsel.value` into account, :meth:`~http.cookies.Morsel.copy` now results in a @@ -1946,16 +2424,16 @@ Changes in the Python API * The :mod:`socket` module now exports the :data:`~socket.CAN_RAW_FD_FRAMES` constant on linux 3.6 and greater. 
-* The :func:`~ssl.cert_time_to_seconds` function now interprets the input time +* The :func:`ssl.cert_time_to_seconds` function now interprets the input time as UTC and not as local time, per :rfc:`5280`. Additionally, the return value is always an :class:`int`. (Contributed by Akira Li in :issue:`19940`.) * The ``pygettext.py`` Tool now uses the standard +NNNN format for timezones in the POT-Creation-Date header. -* The :mod:`smtplib` module now uses :data:`sys.stderr` instead of previous - module level :data:`stderr` variable for debug output. If your (test) - program depends on patching the module level variable to capture the debug +* The :mod:`smtplib` module now uses :data:`sys.stderr` instead of the previous + module-level :data:`stderr` variable for debug output. If your (test) + program depends on patching the module-level variable to capture the debug output, you will need to update it to capture sys.stderr instead. * The :meth:`str.startswith` and :meth:`str.endswith` methods no longer return @@ -1970,6 +2448,12 @@ Changes in the Python API module and the :func:`help` function. (Contributed by Serhiy Storchaka in :issue:`15582`.) +* Nested :func:`functools.partial` calls are now flattened. If you were + relying on the previous behavior, you can now either add an attribute to a + :func:`functools.partial` object or you can create a subclass of + :func:`functools.partial`. + (Contributed by Alexander Belopolsky in :issue:`7830`.) + Changes in the C API -------------------- @@ -1987,11 +2471,12 @@ Changes in the C API (Contributed by Serhiy Storchaka in :issue:`22453`.) * Because the lack of the :attr:`__module__` attribute breaks pickling and - introspection, a deprecation warning now is raised for builtin type without - the :attr:`__module__` attribute. Would be an AttributeError in future. + introspection, a deprecation warning is now raised for builtin types without + the :attr:`__module__` attribute. This would be an AttributeError in + the future. (Contributed by Serhiy Storchaka in :issue:`20204`.) -* As part of :pep:`492` implementation, ``tp_reserved`` slot of +* As part of the :pep:`492` implementation, the ``tp_reserved`` slot of :c:type:`PyTypeObject` was replaced with a :c:member:`tp_as_async` slot. Refer to :ref:`coro-objects` for new types, structures and functions. diff --git a/Include/Python-ast.h b/Include/Python-ast.h index 3bc015fe..2d3eacb9 100644 --- a/Include/Python-ast.h +++ b/Include/Python-ast.h @@ -585,8 +585,9 @@ excepthandler_ty _Py_ExceptHandler(expr_ty type, identifier name, asdl_seq * arguments_ty _Py_arguments(asdl_seq * args, arg_ty vararg, asdl_seq * kwonlyargs, asdl_seq * kw_defaults, arg_ty kwarg, asdl_seq * defaults, PyArena *arena); -#define arg(a0, a1, a2) _Py_arg(a0, a1, a2) -arg_ty _Py_arg(identifier arg, expr_ty annotation, PyArena *arena); +#define arg(a0, a1, a2, a3, a4) _Py_arg(a0, a1, a2, a3, a4) +arg_ty _Py_arg(identifier arg, expr_ty annotation, int lineno, int col_offset, + PyArena *arena); #define keyword(a0, a1, a2) _Py_keyword(a0, a1, a2) keyword_ty _Py_keyword(identifier arg, expr_ty value, PyArena *arena); #define alias(a0, a1, a2) _Py_alias(a0, a1, a2) diff --git a/Include/abstract.h b/Include/abstract.h index 83dbf94f..6afba099 100644 --- a/Include/abstract.h +++ b/Include/abstract.h @@ -95,7 +95,7 @@ Proposal numeric, sequence, and mapping. Each protocol consists of a collection of related operations. 
If an operation that is not provided by a particular type is invoked, then a standard exception, - NotImplementedError is raised with a operation name as an argument. + NotImplementedError is raised with an operation name as an argument. In addition, for convenience this interface defines a set of constructors for building objects of built-in types. This is needed so new objects can be returned from C functions that otherwise treat diff --git a/Include/ceval.h b/Include/ceval.h index eb1ee434..b5373a9c 100644 --- a/Include/ceval.h +++ b/Include/ceval.h @@ -94,10 +94,16 @@ PyAPI_DATA(int) _Py_CheckRecursionLimit; # define _Py_MakeRecCheck(x) (++(x) > _Py_CheckRecursionLimit) #endif +/* Compute the "lower-water mark" for a recursion limit. When + * Py_LeaveRecursiveCall() is called with a recursion depth below this mark, + * the overflowed flag is reset to 0. */ +#define _Py_RecursionLimitLowerWaterMark(limit) \ + (((limit) > 200) \ + ? ((limit) - 50) \ + : (3 * ((limit) >> 2))) + #define _Py_MakeEndRecCheck(x) \ - (--(x) < ((_Py_CheckRecursionLimit > 100) \ - ? (_Py_CheckRecursionLimit - 50) \ - : (3 * (_Py_CheckRecursionLimit >> 2)))) + (--(x) < _Py_RecursionLimitLowerWaterMark(_Py_CheckRecursionLimit)) #define Py_ALLOW_RECURSION \ do { unsigned char _old = PyThreadState_GET()->recursion_critical;\ diff --git a/Include/codecs.h b/Include/codecs.h index 9e4f3050..f8275a12 100644 --- a/Include/codecs.h +++ b/Include/codecs.h @@ -165,14 +165,14 @@ PyAPI_FUNC(PyObject *) PyCodec_Decoder( const char *encoding ); -/* Get a IncrementalEncoder object for the given encoding. */ +/* Get an IncrementalEncoder object for the given encoding. */ PyAPI_FUNC(PyObject *) PyCodec_IncrementalEncoder( const char *encoding, const char *errors ); -/* Get a IncrementalDecoder object function for the given encoding. */ +/* Get an IncrementalDecoder object function for the given encoding. 
*/ PyAPI_FUNC(PyObject *) PyCodec_IncrementalDecoder( const char *encoding, diff --git a/Include/dictobject.h b/Include/dictobject.h index 320f9ecf..80bd3302 100644 --- a/Include/dictobject.h +++ b/Include/dictobject.h @@ -72,6 +72,10 @@ PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key, PyObject *item, Py_hash_t hash); #endif PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key, + Py_hash_t hash); +#endif PyAPI_FUNC(void) PyDict_Clear(PyObject *mp); PyAPI_FUNC(int) PyDict_Next( PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value); diff --git a/Include/object.h b/Include/object.h index 8afcbe96..4d286efe 100644 --- a/Include/object.h +++ b/Include/object.h @@ -351,7 +351,8 @@ typedef struct _typeobject { printfunc tp_print; getattrfunc tp_getattr; setattrfunc tp_setattr; - PyAsyncMethods *tp_as_async; /* formerly known as tp_compare or tp_reserved */ + PyAsyncMethods *tp_as_async; /* formerly known as tp_compare (Python 2) + or tp_reserved (Python 3) */ reprfunc tp_repr; /* Method suites for standard classes */ diff --git a/Include/patchlevel.h b/Include/patchlevel.h index 226f9cfd..bca4b3f8 100644 --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -18,12 +18,12 @@ /*--start constants--*/ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 5 -#define PY_MICRO_VERSION 0 +#define PY_MICRO_VERSION 1 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.5.0" +#define PY_VERSION "3.5.1" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Include/pyatomic.h b/Include/pyatomic.h index 99816a5b..892a217d 100644 --- a/Include/pyatomic.h +++ b/Include/pyatomic.h @@ -1,8 +1,6 @@ -/* Issue #23644: is incompatible with C++, see: - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932 */ -#if !defined(Py_LIMITED_API) && !defined(__cplusplus) #ifndef Py_ATOMIC_H #define Py_ATOMIC_H +#ifdef Py_BUILD_CORE #include "dynamic_annotations.h" @@ -248,5 +246,5 @@ _Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order) #define _Py_atomic_load_relaxed(ATOMIC_VAL) \ _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed) +#endif /* Py_BUILD_CORE */ #endif /* Py_ATOMIC_H */ -#endif /* Py_LIMITED_API */ diff --git a/Include/pystate.h b/Include/pystate.h index a2fd8031..6000b81a 100644 --- a/Include/pystate.h +++ b/Include/pystate.h @@ -177,20 +177,13 @@ PyAPI_FUNC(int) PyThreadState_SetAsyncExc(long, PyObject *); /* Variable and macro for in-line access to current thread state */ /* Assuming the current thread holds the GIL, this is the - PyThreadState for the current thread. - - Issue #23644: pyatomic.h is incompatible with C++ (yet). Disable - PyThreadState_GET() optimization: declare it as an alias to - PyThreadState_Get(), as done for limited API. */ -#if !defined(Py_LIMITED_API) && !defined(__cplusplus) + PyThreadState for the current thread. 
*/ +#ifdef Py_BUILD_CORE PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current; -#endif - -#if defined(Py_DEBUG) || defined(Py_LIMITED_API) || defined(__cplusplus) -#define PyThreadState_GET() PyThreadState_Get() +# define PyThreadState_GET() \ + ((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current)) #else -#define PyThreadState_GET() \ - ((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current)) +# define PyThreadState_GET() PyThreadState_Get() #endif typedef diff --git a/Include/pytime.h b/Include/pytime.h index 027c3d80..494322c5 100644 --- a/Include/pytime.h +++ b/Include/pytime.h @@ -117,6 +117,18 @@ PyAPI_FUNC(int) _PyTime_AsTimeval_noraise(_PyTime_t t, struct timeval *tv, _PyTime_round_t round); +/* Convert a timestamp to a number of seconds (secs) and microseconds (us). + us is always positive. This function is similar to _PyTime_AsTimeval() + except that secs is always a time_t type, whereas the timeval structure + uses a C long for tv_sec on Windows. + Raise an exception and return -1 if the conversion overflowed, + return 0 on success. */ +PyAPI_FUNC(int) _PyTime_AsTimevalTime_t( + _PyTime_t t, + time_t *secs, + int *us, + _PyTime_round_t round); + #if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_KQUEUE) /* Convert a timestamp to a timespec structure (nanosecond resolution). tv_nsec is always positive. diff --git a/Include/unicodeobject.h b/Include/unicodeobject.h index 4ba63280..09c9b218 100644 --- a/Include/unicodeobject.h +++ b/Include/unicodeobject.h @@ -965,7 +965,7 @@ _PyUnicodeWriter_WriteSubstring(_PyUnicodeWriter *writer, Py_ssize_t end ); -/* Append a ASCII-encoded byte string. +/* Append an ASCII-encoded byte string. Return 0 on success, raise an exception and return -1 on error. */ PyAPI_FUNC(int) _PyUnicodeWriter_WriteASCIIString(_PyUnicodeWriter *writer, @@ -2041,7 +2041,7 @@ PyAPI_FUNC(PyObject *) PyUnicode_RichCompare( int op /* Operation: Py_EQ, Py_NE, Py_GT, etc. */ ); -/* Apply a argument tuple or dictionary to a format string and return +/* Apply an argument tuple or dictionary to a format string and return the resulting Unicode string. */ PyAPI_FUNC(PyObject *) PyUnicode_Format( diff --git a/Lib/_pyio.py b/Lib/_pyio.py index 50ad9ff9..37157d53 100644 --- a/Lib/_pyio.py +++ b/Lib/_pyio.py @@ -8,12 +8,13 @@ import codecs import errno import array import stat +import sys # Import _thread instead of threading to reduce startup cost try: from _thread import allocate_lock as Lock except ImportError: from _dummy_thread import allocate_lock as Lock -if os.name == 'win32': +if sys.platform in {'win32', 'cygwin'}: from msvcrt import setmode as _setmode else: _setmode = None @@ -2511,7 +2512,7 @@ class StringIO(TextIOWrapper): def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, - # that's a implementation detail. + # that's an implementation detail. 
return object.__repr__(self) @property diff --git a/Lib/ast.py b/Lib/ast.py index 03c30f66..01704727 100644 --- a/Lib/ast.py +++ b/Lib/ast.py @@ -293,7 +293,6 @@ class NodeTransformer(NodeVisitor): def generic_visit(self, node): for field, old_value in iter_fields(node): - old_value = getattr(node, field, None) if isinstance(old_value, list): new_values = [] for value in old_value: diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py index c2054454..c5ffad40 100644 --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -18,6 +18,7 @@ import collections import concurrent.futures import heapq import inspect +import itertools import logging import os import socket @@ -69,10 +70,6 @@ def _format_pipe(fd): return repr(fd) -class _StopError(BaseException): - """Raised to stop the event loop.""" - - def _check_resolved_address(sock, address): # Ensure that the address is already resolved to avoid the trap of hanging # the entire event loop when the address requires doing a DNS lookup. @@ -117,9 +114,6 @@ def _check_resolved_address(sock, address): "got host %r: %s" % (host, err)) -def _raise_stop_error(*args): - raise _StopError - def _run_until_complete_cb(fut): exc = fut._exception @@ -128,7 +122,7 @@ def _run_until_complete_cb(fut): # Issue #22429: run_forever() already finished, no need to # stop it. return - _raise_stop_error() + fut._loop.stop() class Server(events.AbstractServer): @@ -183,6 +177,7 @@ class BaseEventLoop(events.AbstractEventLoop): def __init__(self): self._timer_cancelled_count = 0 self._closed = False + self._stopping = False self._ready = collections.deque() self._scheduled = [] self._default_executor = None @@ -297,11 +292,11 @@ class BaseEventLoop(events.AbstractEventLoop): self._thread_id = threading.get_ident() try: while True: - try: - self._run_once() - except _StopError: + self._run_once() + if self._stopping: break finally: + self._stopping = False self._thread_id = None self._set_coroutine_wrapper(False) @@ -344,11 +339,10 @@ class BaseEventLoop(events.AbstractEventLoop): def stop(self): """Stop running the event loop. - Every callback scheduled before stop() is called will run. Callbacks - scheduled after stop() is called will not run. However, those callbacks - will run if run_forever is called again later. + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. """ - self.call_soon(_raise_stop_error) + self._stopping = True def close(self): """Close the event loop. 
@@ -699,75 +693,109 @@ class BaseEventLoop(events.AbstractEventLoop): @coroutine def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): """Create datagram connection.""" - if not (local_addr or remote_addr): - if family == 0: - raise ValueError('unexpected address family') - addr_pairs_info = (((family, proto), (None, None)),) - else: - # join address by (family, protocol) - addr_infos = collections.OrderedDict() - for idx, addr in ((0, local_addr), (1, remote_addr)): - if addr is not None: - assert isinstance(addr, tuple) and len(addr) == 2, ( - '2-tuple is expected') - - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) - if not infos: - raise OSError('getaddrinfo() returned empty list') - - for fam, _, pro, _, address in infos: - key = (fam, pro) - if key not in addr_infos: - addr_infos[key] = [None, None] - addr_infos[key][idx] = address - - # each addr has to have info for each (family, proto) pair - addr_pairs_info = [ - (key, addr_pair) for key, addr_pair in addr_infos.items() - if not ((local_addr and addr_pair[0] is None) or - (remote_addr and addr_pair[1] is None))] - - if not addr_pairs_info: - raise ValueError('can not get address information') - - exceptions = [] - - for ((family, proto), - (local_address, remote_address)) in addr_pairs_info: - sock = None + if sock is not None: + if (local_addr or remote_addr or + family or proto or flags or + reuse_address or reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join( + '{}={}'.format(k, v) for k, v in opts.items() if v) + raise ValueError( + 'socket modifier keyword arguments can not be used ' + 'when sock is specified. 
({})'.format(problems)) + sock.setblocking(False) r_addr = None - try: - sock = socket.socket( - family=family, type=socket.SOCK_DGRAM, proto=proto) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.setblocking(False) - - if local_addr: - sock.bind(local_address) - if remote_addr: - yield from self.sock_connect(sock, remote_address) - r_addr = remote_address - except OSError as exc: - if sock is not None: - sock.close() - exceptions.append(exc) - except: - if sock is not None: - sock.close() - raise - else: - break else: - raise exceptions[0] + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + else: + # join address by (family, protocol) + addr_infos = collections.OrderedDict() + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') + + infos = yield from self.getaddrinfo( + *addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags) + if not infos: + raise OSError('getaddrinfo() returned empty list') + + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address + + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] + + if not addr_pairs_info: + raise ValueError('can not get address information') + + exceptions = [] + + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' + + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + yield from self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + raise exceptions[0] protocol = protocol_factory() waiter = futures.Future(loop=self) - transport = self._make_datagram_transport(sock, protocol, r_addr, - waiter) + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) if self._debug: if local_addr: logger.info("Datagram endpoint local_addr=%r remote_addr=%r " @@ -786,6 +814,15 @@ class BaseEventLoop(events.AbstractEventLoop): return transport, protocol + @coroutine + def _create_server_getaddrinfo(self, host, port, family, flags): + infos = yield from self.getaddrinfo(host, port, family=family, + type=socket.SOCK_STREAM, + flags=flags) + if not infos: + raise OSError('getaddrinfo({!r}) returned empty list'.format(host)) + return infos + @coroutine def create_server(self, protocol_factory, host=None, port=None, *, @@ -794,8 +831,15 @@ class BaseEventLoop(events.AbstractEventLoop): sock=None, backlog=100, ssl=None, 
- reuse_address=None): - """Create a TCP server bound to host and port. + reuse_address=None, + reuse_port=None): + """Create a TCP server. + + The host parameter can be a string, in that case the TCP server is bound + to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. Return a Server object which can be used to stop the service. @@ -813,13 +857,18 @@ class BaseEventLoop(events.AbstractEventLoop): reuse_address = os.name == 'posix' and sys.platform != 'cygwin' sockets = [] if host == '': - host = None + hosts = [None] + elif (isinstance(host, str) or + not isinstance(host, collections.Iterable)): + hosts = [host] + else: + hosts = host - infos = yield from self.getaddrinfo( - host, port, family=family, - type=socket.SOCK_STREAM, proto=0, flags=flags) - if not infos: - raise OSError('getaddrinfo() returned empty list') + fs = [self._create_server_getaddrinfo(host, port, family=family, + flags=flags) + for host in hosts] + infos = yield from tasks.gather(*fs, loop=self) + infos = itertools.chain.from_iterable(infos) completed = False try: @@ -836,8 +885,15 @@ class BaseEventLoop(events.AbstractEventLoop): continue sockets.append(sock) if reuse_address: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, - True) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, True) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. @@ -1131,7 +1187,7 @@ class BaseEventLoop(events.AbstractEventLoop): handle._scheduled = False timeout = None - if self._ready: + if self._ready or self._stopping: timeout = 0 elif self._scheduled: # Compute the desired timeout. diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py index 6851cd2b..73425d9b 100644 --- a/Lib/asyncio/base_subprocess.py +++ b/Lib/asyncio/base_subprocess.py @@ -87,6 +87,9 @@ class BaseSubprocessTransport(transports.SubprocessTransport): def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): raise NotImplementedError + def is_closing(self): + return self._closed + def close(self): if self._closed: return diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py index e11b21b0..3a92c7d7 100644 --- a/Lib/asyncio/coroutines.py +++ b/Lib/asyncio/coroutines.py @@ -140,7 +140,13 @@ class CoroWrapper: if compat.PY35: - __await__ = __iter__ # make compatible with 'await' expression + def __await__(self): + cr_await = getattr(self.gen, 'cr_await', None) + if cr_await is not None: + raise RuntimeError( + "Cannot await on coroutine {!r} while it's " + "awaiting for {!r}".format(self.gen, cr_await)) + return self @property def gi_yieldfrom(self): diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py index d5f0d451..176a8466 100644 --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -297,7 +297,8 @@ class AbstractEventLoop: def create_server(self, protocol_factory, host=None, port=None, *, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, - sock=None, backlog=100, ssl=None, reuse_address=None): + sock=None, backlog=100, ssl=None, reuse_address=None, + reuse_port=None): """A coroutine which creates a TCP server bound to host and port. 
The return value is a Server object which can be used to stop @@ -305,7 +306,8 @@ class AbstractEventLoop: If host is an empty string or None all interfaces are assumed and a list of multiple sockets will be returned (most likely - one for IPv4 and another one for IPv6). + one for IPv4 and another one for IPv6). The host parameter can also be a + sequence (e.g. list) of hosts to bind to. family can be set to either AF_INET or AF_INET6 to force the socket to use IPv4 or IPv6. If not set it will be determined @@ -326,6 +328,11 @@ class AbstractEventLoop: TIME_WAIT state, without waiting for its natural timeout to expire. If not specified will automatically be set to True on UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. """ raise NotImplementedError @@ -357,7 +364,37 @@ class AbstractEventLoop: def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): + """A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + """ raise NotImplementedError # Pipes and subprocesses. diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py index dbe06c4a..4dcb6546 100644 --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -154,7 +154,7 @@ class Future: if self._loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) - def _format_callbacks(self): + def __format_callbacks(self): cb = self._callbacks size = len(cb) if not size: @@ -184,7 +184,7 @@ class Future: result = reprlib.repr(self._result) info.append('result={}'.format(result)) if self._callbacks: - info.append(self._format_callbacks()) + info.append(self.__format_callbacks()) if self._source_traceback: frame = self._source_traceback[-1] info.append('created at %s:%s' % (frame[0], frame[1])) @@ -319,12 +319,6 @@ class Future: # So-called internal methods (note: no set_running_or_notify_cancel()). - def _set_result_unless_cancelled(self, result): - """Helper setting the result only if the future was not cancelled.""" - if self.cancelled(): - return - self.set_result(result) - def set_result(self, result): """Mark the future done and set its result. 
@@ -358,27 +352,6 @@ class Future: # have had a chance to call result() or exception(). self._loop.call_soon(self._tb_logger.activate) - # Truly internal methods. - - def _copy_state(self, other): - """Internal helper to copy state from another Future. - - The other Future may be a concurrent.futures.Future. - """ - assert other.done() - if self.cancelled(): - return - assert not self.done() - if other.cancelled(): - self.cancel() - else: - exception = other.exception() - if exception is not None: - self.set_exception(exception) - else: - result = other.result() - self.set_result(result) - def __iter__(self): if not self.done(): self._blocking = True @@ -390,22 +363,91 @@ class Future: __await__ = __iter__ # make compatible with 'await' expression -def wrap_future(fut, *, loop=None): - """Wrap concurrent.futures.Future object.""" - if isinstance(fut, Future): - return fut - assert isinstance(fut, concurrent.futures.Future), \ - 'concurrent.futures.Future is expected, got {!r}'.format(fut) - if loop is None: - loop = events.get_event_loop() - new_future = Future(loop=loop) +def _set_result_unless_cancelled(fut, result): + """Helper setting the result only if the future was not cancelled.""" + if fut.cancelled(): + return + fut.set_result(result) + + +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(exception) + else: + result = source.result() + concurrent.set_result(result) + + +def _copy_future_state(source, dest): + """Internal helper to copy state from another Future. - def _check_cancel_other(f): - if f.cancelled(): - fut.cancel() + The other Future may be a concurrent.futures.Future. + """ + assert source.done() + if dest.cancelled(): + return + assert not dest.done() + if source.cancelled(): + dest.cancel() + else: + exception = source.exception() + if exception is not None: + dest.set_exception(exception) + else: + result = source.result() + dest.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. - new_future.add_done_callback(_check_cancel_other) - fut.add_done_callback( - lambda future: loop.call_soon_threadsafe( - new_future._copy_state, future)) + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
+ """ + if not isinstance(source, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for source argument') + if not isinstance(destination, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for destination argument') + source_loop = source._loop if isinstance(source, Future) else None + dest_loop = destination._loop if isinstance(destination, Future) else None + + def _set_state(future, other): + if isinstance(future, Future): + _copy_future_state(other, future) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): + """Wrap concurrent.futures.Future object.""" + if isinstance(future, Future): + return future + assert isinstance(future, concurrent.futures.Future), \ + 'concurrent.futures.Future is expected, got {!r}'.format(future) + new_future = Future(loop=loop) + _chain_future(future, new_future) return new_future diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py index 7a132796..34f6bc16 100644 --- a/Lib/asyncio/locks.py +++ b/Lib/asyncio/locks.py @@ -411,6 +411,13 @@ class Semaphore(_ContextManagerMixin): extra = '{},waiters:{}'.format(extra, len(self._waiters)) return '<{} [{}]>'.format(res[1:-1], extra) + def _wake_up_next(self): + while self._waiters: + waiter = self._waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + return + def locked(self): """Returns True if semaphore can not be acquired immediately.""" return self._value == 0 @@ -425,18 +432,19 @@ class Semaphore(_ContextManagerMixin): called release() to make it larger than 0, and then return True. """ - if not self._waiters and self._value > 0: - self._value -= 1 - return True - - fut = futures.Future(loop=self._loop) - self._waiters.append(fut) - try: - yield from fut - self._value -= 1 - return True - finally: - self._waiters.remove(fut) + while self._value <= 0: + fut = futures.Future(loop=self._loop) + self._waiters.append(fut) + try: + yield from fut + except: + # See the similar code in Queue.get. + fut.cancel() + if self._value > 0 and not fut.cancelled(): + self._wake_up_next() + raise + self._value -= 1 + return True def release(self): """Release a semaphore, incrementing the internal counter by one. @@ -444,10 +452,7 @@ class Semaphore(_ContextManagerMixin): become larger than zero again, wake up that coroutine. 
""" self._value += 1 - for waiter in self._waiters: - if not waiter.done(): - waiter.set_result(True) - break + self._wake_up_next() class BoundedSemaphore(Semaphore): diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py index abe4c129..7eac41ee 100644 --- a/Lib/asyncio/proactor_events.py +++ b/Lib/asyncio/proactor_events.py @@ -41,7 +41,8 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin, self._loop.call_soon(self._protocol.connection_made, self) if waiter is not None: # only wake up the waiter when connection_made() has been called - self._loop.call_soon(waiter._set_result_unless_cancelled, None) + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) def __repr__(self): info = [self.__class__.__name__] @@ -65,6 +66,9 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin, def _set_extra(self, sock): self._extra['pipe'] = sock + def is_closing(self): + return self._closing + def close(self): if self._closing: return diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py index 021043d6..e3a1d5ed 100644 --- a/Lib/asyncio/queues.py +++ b/Lib/asyncio/queues.py @@ -47,7 +47,7 @@ class Queue: # Futures. self._getters = collections.deque() - # Futures + # Futures. self._putters = collections.deque() self._unfinished_tasks = 0 self._finished = locks.Event(loop=self._loop) @@ -67,10 +67,13 @@ class Queue: # End of the overridable methods. - def __put_internal(self, item): - self._put(item) - self._unfinished_tasks += 1 - self._finished.clear() + def _wakeup_next(self, waiters): + # Wake up the next waiter (if any) that isn't cancelled. + while waiters: + waiter = waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + break def __repr__(self): return '<{} at {:#x} {}>'.format( @@ -91,16 +94,6 @@ class Queue: result += ' tasks={}'.format(self._unfinished_tasks) return result - def _consume_done_getters(self): - # Delete waiters at the head of the get() queue who've timed out. - while self._getters and self._getters[0].done(): - self._getters.popleft() - - def _consume_done_putters(self): - # Delete waiters at the head of the put() queue who've timed out. - while self._putters and self._putters[0].done(): - self._putters.popleft() - def qsize(self): """Number of items in the queue.""" return len(self._queue) @@ -134,47 +127,31 @@ class Queue: This method is a coroutine. """ - self._consume_done_getters() - if self._getters: - assert not self._queue, ( - 'queue non-empty, why are getters waiting?') - - getter = self._getters.popleft() - self.__put_internal(item) - - # getter cannot be cancelled, we just removed done getters - getter.set_result(self._get()) - - elif self._maxsize > 0 and self._maxsize <= self.qsize(): - waiter = futures.Future(loop=self._loop) - - self._putters.append(waiter) - yield from waiter - self._put(item) - - else: - self.__put_internal(item) + while self.full(): + putter = futures.Future(loop=self._loop) + self._putters.append(putter) + try: + yield from putter + except: + putter.cancel() # Just in case putter is not done yet. + if not self.full() and not putter.cancelled(): + # We were woken up by get_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._putters) + raise + return self.put_nowait(item) def put_nowait(self, item): """Put an item into the queue without blocking. If no free slot is immediately available, raise QueueFull. 
""" - self._consume_done_getters() - if self._getters: - assert not self._queue, ( - 'queue non-empty, why are getters waiting?') - - getter = self._getters.popleft() - self.__put_internal(item) - - # getter cannot be cancelled, we just removed done getters - getter.set_result(self._get()) - - elif self._maxsize > 0 and self._maxsize <= self.qsize(): + if self.full(): raise QueueFull - else: - self.__put_internal(item) + self._put(item) + self._unfinished_tasks += 1 + self._finished.clear() + self._wakeup_next(self._getters) @coroutine def get(self): @@ -184,77 +161,30 @@ class Queue: This method is a coroutine. """ - self._consume_done_putters() - if self._putters: - assert self.full(), 'queue not full, why are putters waiting?' - putter = self._putters.popleft() - - # When a getter runs and frees up a slot so this putter can - # run, we need to defer the put for a tick to ensure that - # getters and putters alternate perfectly. See - # ChannelTest.test_wait. - self._loop.call_soon(putter._set_result_unless_cancelled, None) - - return self._get() - - elif self.qsize(): - return self._get() - else: - waiter = futures.Future(loop=self._loop) - self._getters.append(waiter) + while self.empty(): + getter = futures.Future(loop=self._loop) + self._getters.append(getter) try: - return (yield from waiter) - except futures.CancelledError: - # if we get CancelledError, it means someone cancelled this - # get() coroutine. But there is a chance that the waiter - # already is ready and contains an item that has just been - # removed from the queue. In this case, we need to put the item - # back into the front of the queue. This get() must either - # succeed without fault or, if it gets cancelled, it must be as - # if it never happened. - if waiter.done(): - self._put_it_back(waiter.result()) + yield from getter + except: + getter.cancel() # Just in case getter is not done yet. + if not self.empty() and not getter.cancelled(): + # We were woken up by put_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._getters) raise - - def _put_it_back(self, item): - """ - This is called when we have a waiter to get() an item and this waiter - gets cancelled. In this case, we put the item back: wake up another - waiter or put it in the _queue. - """ - self._consume_done_getters() - if self._getters: - assert not self._queue, ( - 'queue non-empty, why are getters waiting?') - - getter = self._getters.popleft() - self.__put_internal(item) - - # getter cannot be cancelled, we just removed done getters - getter.set_result(item) - else: - self._queue.appendleft(item) + return self.get_nowait() def get_nowait(self): """Remove and return an item from the queue. Return an item if one is immediately available, else raise QueueEmpty. """ - self._consume_done_putters() - if self._putters: - assert self.full(), 'queue not full, why are putters waiting?' - putter = self._putters.popleft() - # Wake putter on next tick. - - # getter cannot be cancelled, we just removed done putters - putter.set_result(None) - - return self._get() - - elif self.qsize(): - return self._get() - else: + if self.empty(): raise QueueEmpty + item = self._get() + self._wakeup_next(self._putters) + return item def task_done(self): """Indicate that a formerly enqueued task is complete. 
diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py index 4a996584..a05f81cd 100644 --- a/Lib/asyncio/selector_events.py +++ b/Lib/asyncio/selector_events.py @@ -556,6 +556,9 @@ class _SelectorTransport(transports._FlowControlMixin, def abort(self): self._force_close(None) + def is_closing(self): + return self._closing + def close(self): if self._closing: return @@ -633,7 +636,8 @@ class _SelectorSocketTransport(_SelectorTransport): self._sock_fd, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called - self._loop.call_soon(waiter._set_result_unless_cancelled, None) + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) def pause_reading(self): if self._closing: @@ -843,6 +847,7 @@ class _SelectorSslTransport(_SelectorTransport): self._extra.update(peercert=peercert, cipher=self._sock.cipher(), compression=self._sock.compression(), + ssl_object=self._sock, ) self._read_wants_write = False @@ -986,7 +991,8 @@ class _SelectorDatagramTransport(_SelectorTransport): self._sock_fd, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called - self._loop.call_soon(waiter._set_result_unless_cancelled, None) + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) def get_write_buffer_size(self): return sum(len(data) for data, _ in self._buffer) diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py index e5ae49a5..dde980b6 100644 --- a/Lib/asyncio/sslproto.py +++ b/Lib/asyncio/sslproto.py @@ -295,6 +295,7 @@ class _SSLProtocolTransport(transports._FlowControlMixin, def __init__(self, loop, ssl_protocol, app_protocol): self._loop = loop + # SSLProtocol instance self._ssl_protocol = ssl_protocol self._app_protocol = app_protocol self._closed = False @@ -303,6 +304,9 @@ class _SSLProtocolTransport(transports._FlowControlMixin, """Get optional transport information.""" return self._ssl_protocol._get_extra_info(name, default) + def is_closing(self): + return self._closed + def close(self): """Close the transport. @@ -348,7 +352,7 @@ class _SSLProtocolTransport(transports._FlowControlMixin, high-water limit. Neither value can be negative. The defaults are implementation-specific. If only the - high-water limit is given, the low-water limit defaults to a + high-water limit is given, the low-water limit defaults to an implementation-specific value less than or equal to the high-water limit. 
Setting high to zero forces low to zero as well, and causes pause_writing() to be called whenever the @@ -425,10 +429,12 @@ class SSLProtocol(protocols.Protocol): self._app_protocol = app_protocol self._app_transport = _SSLProtocolTransport(self._loop, self, self._app_protocol) + # _SSLPipe instance (None until the connection is made) self._sslpipe = None self._session_established = False self._in_handshake = False self._in_shutdown = False + # transport, ex: SelectorSocketTransport self._transport = None def _wakeup_waiter(self, exc=None): @@ -591,6 +597,7 @@ class SSLProtocol(protocols.Protocol): self._extra.update(peercert=peercert, cipher=sslobj.cipher(), compression=sslobj.compression(), + ssl_object=sslobj, ) self._app_protocol.connection_made(self._app_transport) self._wakeup_waiter() diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py index 6484c435..6b5e96ae 100644 --- a/Lib/asyncio/streams.py +++ b/Lib/asyncio/streams.py @@ -255,7 +255,7 @@ class StreamWriter: def __init__(self, transport, protocol, reader, loop): self._transport = transport self._protocol = protocol - # drain() expects that the reader has a exception() method + # drain() expects that the reader has an exception() method assert reader is None or isinstance(reader, StreamReader) self._reader = reader self._loop = loop @@ -301,6 +301,15 @@ class StreamWriter: exc = self._reader.exception() if exc is not None: raise exc + if self._transport is not None: + if self._transport.is_closing(): + # Yield to the event loop so connection_lost() may be + # called. Without this, _drain_helper() would return + # immediately, and code that calls + # write(...); yield from drain() + # in a loop would never call connection_lost(), so it + # would not see an error when the socket is closed. + yield yield from self._protocol._drain_helper() @@ -324,7 +333,7 @@ class StreamReader: def __repr__(self): info = ['StreamReader'] if self._buffer: - info.append('%d bytes' % len(info)) + info.append('%d bytes' % len(self._buffer)) if self._eof: info.append('eof') if self._limit != _DEFAULT_LIMIT: diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py index a235e742..e6389d82 100644 --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -3,7 +3,7 @@ __all__ = ['Task', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'wait', 'wait_for', 'as_completed', 'sleep', 'async', - 'gather', 'shield', 'ensure_future', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', ] import concurrent.futures @@ -220,9 +220,9 @@ class Task(futures.Future): self._must_cancel = True return True - def _step(self, value=None, exc=None): + def _step(self, exc=None): assert not self.done(), \ - '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc) + '_step(): already done: {!r}, {!r}'.format(self, exc) if self._must_cancel: if not isinstance(exc, futures.CancelledError): exc = futures.CancelledError() @@ -231,12 +231,14 @@ class Task(futures.Future): self._fut_waiter = None self.__class__._current_tasks[self._loop] = self - # Call either coro.throw(exc) or coro.send(value). + # Call either coro.throw(exc) or coro.send(None). try: - if exc is not None: - result = coro.throw(exc) + if exc is None: + # We use the `send` method directly, because coroutines + # don't have `__iter__` and `__next__` methods. 
+ result = coro.send(None) else: - result = coro.send(value) + result = coro.throw(exc) except StopIteration as exc: self.set_result(exc.value) except futures.CancelledError as exc: @@ -258,7 +260,7 @@ class Task(futures.Future): self._must_cancel = False else: self._loop.call_soon( - self._step, None, + self._step, RuntimeError( 'yield was used instead of yield from ' 'in task {!r} with {!r}'.format(self, result))) @@ -268,7 +270,7 @@ class Task(futures.Future): elif inspect.isgenerator(result): # Yielding a generator is just wrong. self._loop.call_soon( - self._step, None, + self._step, RuntimeError( 'yield was used instead of yield from for ' 'generator in task {!r} with {}'.format( @@ -276,7 +278,7 @@ class Task(futures.Future): else: # Yielding something else is an error. self._loop.call_soon( - self._step, None, + self._step, RuntimeError( 'Task got bad yield: {!r}'.format(result))) finally: @@ -285,12 +287,18 @@ class Task(futures.Future): def _wakeup(self, future): try: - value = future.result() + future.result() except Exception as exc: # This may also be a cancellation. - self._step(None, exc) + self._step(exc) else: - self._step(value, None) + # Don't pass the value of `future.result()` explicitly, + # as `Future.__iter__` and `Future.__await__` don't need it. + # If we call `_step(value, None)` instead of `_step()`, + # Python eval loop would use `.send(value)` method call, + # instead of `__next__()`, which is slower for futures + # that return non-generator iterators from their `__iter__`. + self._step() self = None # Needed to break cycles when an exception occurs. @@ -488,9 +496,14 @@ def as_completed(fs, *, loop=None, timeout=None): @coroutine def sleep(delay, result=None, *, loop=None): """Coroutine that completes after a given time (in seconds).""" + if delay == 0: + yield + return result + future = futures.Future(loop=loop) h = future._loop.call_later(delay, - future._set_result_unless_cancelled, result) + futures._set_result_unless_cancelled, + future, result) try: return (yield from future) finally: @@ -512,7 +525,7 @@ def async(coro_or_future, *, loop=None): def ensure_future(coro_or_future, *, loop=None): - """Wrap a coroutine in a future. + """Wrap a coroutine or an awaitable in a future. If the argument is a Future, it is returned directly. """ @@ -527,8 +540,20 @@ def ensure_future(coro_or_future, *, loop=None): if task._source_traceback: del task._source_traceback[-1] return task + elif compat.PY35 and inspect.isawaitable(coro_or_future): + return ensure_future(_wrap_awaitable(coro_or_future), loop=loop) else: - raise TypeError('A Future or coroutine is required') + raise TypeError('A Future, a coroutine or an awaitable is required') + + +@coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + """ + return (yield from awaitable.__await__()) class _GatheringFuture(futures.Future): @@ -680,3 +705,24 @@ def shield(arg, *, loop=None): inner.add_done_callback(_done_callback) return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. 
+ """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except Exception as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise + + loop.call_soon_threadsafe(callback) + return future diff --git a/Lib/asyncio/test_utils.py b/Lib/asyncio/test_utils.py index 8cee95b8..81705331 100644 --- a/Lib/asyncio/test_utils.py +++ b/Lib/asyncio/test_utils.py @@ -24,6 +24,7 @@ except ImportError: # pragma: no cover ssl = None from . import base_events +from . import compat from . import events from . import futures from . import selectors @@ -71,12 +72,13 @@ def run_until(loop, pred, timeout=30): def run_once(loop): - """loop.stop() schedules _raise_stop_error() - and run_forever() runs until _raise_stop_error() callback. - this wont work if test waits for some IO events, because - _raise_stop_error() runs before any of io events callbacks. + """Legacy API to run once through the event loop. + + This is the recommended pattern for test code. It will poll the + selector once and run all callbacks scheduled in response to I/O + events. """ - loop.stop() + loop.call_soon(loop.stop) loop.run_forever() @@ -420,6 +422,16 @@ class TestCase(unittest.TestCase): # in an except block of a generator self.assertEqual(sys.exc_info(), (None, None, None)) + if not compat.PY34: + # Python 3.3 compatibility + def subTest(self, *args, **kwargs): + class EmptyCM: + def __enter__(self): + pass + def __exit__(self, *exc): + pass + return EmptyCM() + @contextlib.contextmanager def disable_logger(): diff --git a/Lib/asyncio/transports.py b/Lib/asyncio/transports.py index 70b323f2..9a6d9197 100644 --- a/Lib/asyncio/transports.py +++ b/Lib/asyncio/transports.py @@ -19,6 +19,10 @@ class BaseTransport: """Get optional transport information.""" return self._extra.get(name, default) + def is_closing(self): + """Return True if the transport is closing or closed.""" + raise NotImplementedError + def close(self): """Close the transport. @@ -62,7 +66,7 @@ class WriteTransport(BaseTransport): high-water limit. Neither value can be negative. The defaults are implementation-specific. If only the - high-water limit is given, the low-water limit defaults to a + high-water limit is given, the low-water limit defaults to an implementation-specific value less than or equal to the high-water limit. 
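tasks.py above gains run_coroutine_threadsafe(), which submits a coroutine to a loop running in another thread and returns a concurrent.futures.Future; ensure_future() now also accepts arbitrary awaitables. A small usage sketch (the add() coroutine is hypothetical):

import asyncio
import threading

@asyncio.coroutine
def add(x, y):
    # hypothetical coroutine submitted from another thread
    yield from asyncio.sleep(0)
    return x + y

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

future = asyncio.run_coroutine_threadsafe(add(2, 3), loop)
print(future.result(timeout=5))   # 5

loop.call_soon_threadsafe(loop.stop)
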
Setting high to zero forces low to zero as well, and causes pause_writing() to be called whenever the diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py index bf3b0844..7747ff41 100644 --- a/Lib/asyncio/unix_events.py +++ b/Lib/asyncio/unix_events.py @@ -319,7 +319,8 @@ class _UnixReadPipeTransport(transports.ReadTransport): self._fileno, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called - self._loop.call_soon(waiter._set_result_unless_cancelled, None) + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) def __repr__(self): info = [self.__class__.__name__] @@ -364,6 +365,9 @@ class _UnixReadPipeTransport(transports.ReadTransport): def resume_reading(self): self._loop.add_reader(self._fileno, self._read_ready) + def is_closing(self): + return self._closing + def close(self): if not self._closing: self._close(None) @@ -439,7 +443,8 @@ class _UnixWritePipeTransport(transports._FlowControlMixin, if waiter is not None: # only wake up the waiter when connection_made() has been called - self._loop.call_soon(waiter._set_result_unless_cancelled, None) + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) def __repr__(self): info = [self.__class__.__name__] @@ -548,6 +553,9 @@ class _UnixWritePipeTransport(transports._FlowControlMixin, self._loop.remove_reader(self._fileno) self._loop.call_soon(self._call_connection_lost, None) + def is_closing(self): + return self._closing + def close(self): if self._pipe is not None and not self._closing: # write_eof is all what we needed to close the write pipe diff --git a/Lib/calendar.py b/Lib/calendar.py index 02050ea4..5244b8d1 100644 --- a/Lib/calendar.py +++ b/Lib/calendar.py @@ -142,7 +142,7 @@ class Calendar(object): def iterweekdays(self): """ - Return a iterator for one week of weekday numbers starting with the + Return an iterator for one week of weekday numbers starting with the configured first one. """ for i in range(self.firstweekday, self.firstweekday + 7): diff --git a/Lib/cgi.py b/Lib/cgi.py index 4be28ba0..26d25444 100755 --- a/Lib/cgi.py +++ b/Lib/cgi.py @@ -720,6 +720,11 @@ class FieldStorage: self.bytes_read += len(hdr_text) parser.feed(hdr_text.decode(self.encoding, self.errors)) headers = parser.close() + + # Some clients add Content-Length for part headers, ignore them + if 'content-length' in headers: + del headers['content-length'] + part = klass(self.fp, headers, ib, environ, keep_blank_values, strict_parsing,self.limit-self.bytes_read, self.encoding, self.errors) diff --git a/Lib/chunk.py b/Lib/chunk.py index 84b77cc8..d94dd398 100644 --- a/Lib/chunk.py +++ b/Lib/chunk.py @@ -21,7 +21,7 @@ Usually an IFF-type file consists of one or more chunks. The proposed usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/Lib/codecs.py b/Lib/codecs.py index 22d5f82a..39ec8454 100644 --- a/Lib/codecs.py +++ b/Lib/codecs.py @@ -258,7 +258,7 @@ class IncrementalDecoder(object): """ def __init__(self, errors='strict'): """ - Create a IncrementalDecoder instance. + Create an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. 
See the module docstring @@ -1011,7 +1011,7 @@ def iterencode(iterator, encoding, errors='strict', **kwargs): """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1029,7 +1029,7 @@ def iterdecode(iterator, encoding, errors='strict', **kwargs): """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py index 80dc4f6d..e8312a90 100644 --- a/Lib/collections/__init__.py +++ b/Lib/collections/__init__.py @@ -320,23 +320,14 @@ class {typename}(tuple): 'Return a nicely formatted representation string' return self.__class__.__name__ + '({repr_fmt})' % self - @property - def __dict__(self): - 'A new OrderedDict mapping field names to their values' - return OrderedDict(zip(self._fields, self)) - def _asdict(self): 'Return a new OrderedDict which maps field names to their values.' - return self.__dict__ + return OrderedDict(zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) - def __getstate__(self): - 'Exclude the OrderedDict from pickling' - return None - {field_defs} """ @@ -901,9 +892,8 @@ class ChainMap(MutableMapping): __copy__ = copy def new_child(self, m=None): # like Django's Context.push() - ''' - New ChainMap with a new map followed by all previous maps. If no - map is provided, an empty dict is used. + '''New ChainMap with a new map followed by all previous maps. + If no map is provided, an empty dict is used. ''' if m is None: m = {} @@ -949,7 +939,22 @@ class ChainMap(MutableMapping): class UserDict(MutableMapping): # Start by filling-out the abstract methods - def __init__(self, dict=None, **kwargs): + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'UserDict' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is deprecated", + PendingDeprecationWarning, stacklevel=2) + else: + dict = None self.data = {} if dict is not None: self.update(dict) diff --git a/Lib/concurrent/futures/_base.py b/Lib/concurrent/futures/_base.py index 9e447137..295489c9 100644 --- a/Lib/concurrent/futures/_base.py +++ b/Lib/concurrent/futures/_base.py @@ -521,7 +521,7 @@ class Executor(object): raise NotImplementedError() def map(self, fn, *iterables, timeout=None, chunksize=1): - """Returns a iterator equivalent to map(fn, iter). + """Returns an iterator equivalent to map(fn, iter). Args: fn: A callable that will take as many arguments as there are diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py index 3dd6da1f..590edba2 100644 --- a/Lib/concurrent/futures/process.py +++ b/Lib/concurrent/futures/process.py @@ -456,7 +456,7 @@ class ProcessPoolExecutor(_base.Executor): submit.__doc__ = _base.Executor.submit.__doc__ def map(self, fn, *iterables, timeout=None, chunksize=1): - """Returns a iterator equivalent to map(fn, iter). 
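The collections hunk above drops the __dict__ property (and the matching __getstate__) from namedtuple classes; _asdict() remains the supported way to get a field-name-to-value mapping. A quick check:

from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p._asdict())     # OrderedDict([('x', 1), ('y', 2)])
# vars(p) relied on the removed __dict__ property and is no longer the way
# to get this mapping
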
+ """Returns an iterator equivalent to map(fn, iter). Args: fn: A callable that will take as many arguments as there are diff --git a/Lib/configparser.py b/Lib/configparser.py index ecd06600..3a9fb56d 100644 --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -263,12 +263,9 @@ class InterpolationMissingOptionError(InterpolationError): """A string substitution required a setting which was not available.""" def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\tkey : %s\n" - "\trawval : %s\n" - % (section, option, reference, rawval)) + msg = ("Bad value substitution: option {!r} in section {!r} contains " + "an interpolation key {!r} which is not a valid option name. " + "Raw value: {!r}".format(option, section, reference, rawval)) InterpolationError.__init__(self, option, section, msg) self.reference = reference self.args = (option, section, rawval, reference) @@ -286,11 +283,11 @@ class InterpolationDepthError(InterpolationError): """Raised when substitutions are nested too deeply.""" def __init__(self, option, section, rawval): - msg = ("Value interpolation too deeply recursive:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\trawval : %s\n" - % (section, option, rawval)) + msg = ("Recursion limit exceeded in value substitution: option {!r} " + "in section {!r} contains an interpolation key which " + "cannot be substituted in {} steps. Raw value: {!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) InterpolationError.__init__(self, option, section, msg) self.args = (option, section, rawval) @@ -406,8 +403,9 @@ class BasicInterpolation(Interpolation): def _interpolate_some(self, parser, option, accum, rest, section, map, depth): + rawval = parser.get(section, option, raw=True, fallback=rest) if depth > MAX_INTERPOLATION_DEPTH: - raise InterpolationDepthError(option, section, rest) + raise InterpolationDepthError(option, section, rawval) while rest: p = rest.find("%") if p < 0: @@ -432,7 +430,7 @@ class BasicInterpolation(Interpolation): v = map[var] except KeyError: raise InterpolationMissingOptionError( - option, section, rest, var) from None + option, section, rawval, var) from None if "%" in v: self._interpolate_some(parser, option, accum, v, section, map, depth + 1) @@ -466,8 +464,9 @@ class ExtendedInterpolation(Interpolation): def _interpolate_some(self, parser, option, accum, rest, section, map, depth): + rawval = parser.get(section, option, raw=True, fallback=rest) if depth > MAX_INTERPOLATION_DEPTH: - raise InterpolationDepthError(option, section, rest) + raise InterpolationDepthError(option, section, rawval) while rest: p = rest.find("$") if p < 0: @@ -504,7 +503,7 @@ class ExtendedInterpolation(Interpolation): "More than one ':' found: %r" % (rest,)) except (KeyError, NoSectionError, NoOptionError): raise InterpolationMissingOptionError( - option, section, rest, ":".join(path)) from None + option, section, rawval, ":".join(path)) from None if "$" in v: self._interpolate_some(parser, opt, accum, v, sect, dict(parser.items(sect, raw=True)), diff --git a/Lib/ctypes/test/test_frombuffer.py b/Lib/ctypes/test/test_frombuffer.py index 6aa2d1c5..29c5a193 100644 --- a/Lib/ctypes/test/test_frombuffer.py +++ b/Lib/ctypes/test/test_frombuffer.py @@ -38,11 +38,32 @@ class Test(unittest.TestCase): del a; gc.collect(); gc.collect(); gc.collect() self.assertEqual(x[:], expected) - with self.assertRaises(TypeError): + with self.assertRaisesRegex(TypeError, "not writable"): (c_char * 
16).from_buffer(b"a" * 16) - with self.assertRaises(TypeError): + with self.assertRaisesRegex(TypeError, "not writable"): + (c_char * 16).from_buffer(memoryview(b"a" * 16)) + with self.assertRaisesRegex(TypeError, "not C contiguous"): + (c_char * 16).from_buffer(memoryview(bytearray(b"a" * 16))[::-1]) + msg = "bytes-like object is required" + with self.assertRaisesRegex(TypeError, msg): (c_char * 16).from_buffer("a" * 16) + def test_fortran_contiguous(self): + try: + import _testbuffer + except ImportError as err: + self.skipTest(str(err)) + flags = _testbuffer.ND_WRITABLE | _testbuffer.ND_FORTRAN + array = _testbuffer.ndarray( + [97] * 16, format="B", shape=[4, 4], flags=flags) + with self.assertRaisesRegex(TypeError, "not C contiguous"): + (c_char * 16).from_buffer(array) + array = memoryview(array) + self.assertTrue(array.f_contiguous) + self.assertFalse(array.c_contiguous) + with self.assertRaisesRegex(TypeError, "not C contiguous"): + (c_char * 16).from_buffer(array) + def test_from_buffer_with_offset(self): a = array.array("i", range(16)) x = (c_int * 15).from_buffer(a, sizeof(c_int)) @@ -55,6 +76,12 @@ class Test(unittest.TestCase): with self.assertRaises(ValueError): (c_int * 1).from_buffer(a, 16 * sizeof(c_int)) + def test_from_buffer_memoryview(self): + a = [c_char.from_buffer(memoryview(bytearray(b'a')))] + a.append(a) + del a + gc.collect() # Should not crash + def test_from_buffer_copy(self): a = array.array("i", range(16)) x = (c_int * 16).from_buffer_copy(a) diff --git a/Lib/ctypes/test/test_pointers.py b/Lib/ctypes/test/test_pointers.py index 40738f78..225b9d19 100644 --- a/Lib/ctypes/test/test_pointers.py +++ b/Lib/ctypes/test/test_pointers.py @@ -195,9 +195,19 @@ class PointersTestCase(unittest.TestCase): LargeNamedType = type('T' * 2 ** 25, (Structure,), {}) self.assertTrue(POINTER(LargeNamedType)) + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[LargeNamedType] + def test_pointer_type_str_name(self): large_string = 'T' * 2 ** 25 - self.assertTrue(POINTER(large_string)) + P = POINTER(large_string) + self.assertTrue(P) + + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[id(P)] + if __name__ == '__main__': unittest.main() diff --git a/Lib/ctypes/test/test_random_things.py b/Lib/ctypes/test/test_random_things.py index 4555ecd8..ee5b2128 100644 --- a/Lib/ctypes/test/test_random_things.py +++ b/Lib/ctypes/test/test_random_things.py @@ -30,7 +30,7 @@ class CallbackTracbackTestCase(unittest.TestCase): # value is printed correctly. # # Changed in 0.9.3: No longer is '(in callback)' prepended to the - # error message - instead a additional frame for the C code is + # error message - instead an additional frame for the C code is # created, then a full traceback printed. When SystemExit is # raised in a callback function, the interpreter exits. 
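The ctypes tests above pin down that from_buffer() requires a writable, C-contiguous buffer and now reports clearer TypeError messages; for example:

import ctypes

buf = bytearray(b'a' * 16)
arr = (ctypes.c_char * 16).from_buffer(buf)      # fine: writable, C contiguous

try:
    (ctypes.c_char * 16).from_buffer(b'a' * 16)  # read-only bytes object
except TypeError as exc:
    print(exc)                                   # message mentions "not writable"
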
diff --git a/Lib/ctypes/test/test_win32.py b/Lib/ctypes/test/test_win32.py index 5867b059..da162401 100644 --- a/Lib/ctypes/test/test_win32.py +++ b/Lib/ctypes/test/test_win32.py @@ -135,5 +135,9 @@ class Structures(unittest.TestCase): self.assertEqual(ret.top, top.value) self.assertEqual(ret.bottom, bottom.value) + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[RECT] + if __name__ == '__main__': unittest.main() diff --git a/Lib/datetime.py b/Lib/datetime.py index db13b12e..b9719cbb 100644 --- a/Lib/datetime.py +++ b/Lib/datetime.py @@ -1366,28 +1366,34 @@ class datetime(date): return self._tzinfo @classmethod - def fromtimestamp(cls, t, tz=None): + def _fromtimestamp(cls, t, utc, tz): """Construct a datetime from a POSIX timestamp (like time.time()). A timezone info object may be passed in as well. """ - _check_tzinfo_arg(tz) - - converter = _time.localtime if tz is None else _time.gmtime - - t, frac = divmod(t, 1.0) - us = int(frac * 1e6) - - # If timestamp is less than one microsecond smaller than a - # full second, us can be rounded up to 1000000. In this case, - # roll over to seconds, otherwise, ValueError is raised - # by the constructor. - if us == 1000000: + frac, t = _math.modf(t) + us = round(frac * 1e6) + if us >= 1000000: t += 1 - us = 0 + us -= 1000000 + elif us < 0: + t -= 1 + us += 1000000 + + converter = _time.gmtime if utc else _time.localtime y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) ss = min(ss, 59) # clamp out leap seconds if the platform has them - result = cls(y, m, d, hh, mm, ss, us, tz) + return cls(y, m, d, hh, mm, ss, us, tz) + + @classmethod + def fromtimestamp(cls, t, tz=None): + """Construct a datetime from a POSIX timestamp (like time.time()). + + A timezone info object may be passed in as well. + """ + _check_tzinfo_arg(tz) + + result = cls._fromtimestamp(t, tz is not None, tz) if tz is not None: result = tz.fromutc(result) return result @@ -1395,19 +1401,7 @@ class datetime(date): @classmethod def utcfromtimestamp(cls, t): """Construct a naive UTC datetime from a POSIX timestamp.""" - t, frac = divmod(t, 1.0) - us = int(frac * 1e6) - - # If timestamp is less than one microsecond smaller than a - # full second, us can be rounded up to 1000000. In this case, - # roll over to seconds, otherwise, ValueError is raised - # by the constructor. 
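The datetime hunk above routes fromtimestamp() and utcfromtimestamp() through a shared _fromtimestamp() helper that rounds the fractional second to the nearest microsecond instead of truncating it, rolling over into the seconds field when needed. A quick illustration:

from datetime import datetime, timezone

# previously this truncated to ...00.999999; it now rounds up to the next second
print(datetime.fromtimestamp(0.9999999, tz=timezone.utc))
# -> 1970-01-01 00:00:01+00:00
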
- if us == 1000000: - t += 1 - us = 0 - y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) - ss = min(ss, 59) # clamp out leap seconds if the platform has them - return cls(y, m, d, hh, mm, ss, us) + return cls._fromtimestamp(t, True, None) @classmethod def now(cls, tz=None): diff --git a/Lib/difflib.py b/Lib/difflib.py index 22d9145a..076bbac0 100644 --- a/Lib/difflib.py +++ b/Lib/difflib.py @@ -1515,7 +1515,7 @@ def _mdiff(fromlines, tolines, context=None, linejunk=None, yield _make_line(lines,'-',0), None, True continue elif s.startswith(('--?+', '--+', '- ')): - # in delete block and see a intraline change or unchanged line + # in delete block and see an intraline change or unchanged line # coming: yield the delete line and then blanks from_line,to_line = _make_line(lines,'-',0), None num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0 diff --git a/Lib/distutils/_msvccompiler.py b/Lib/distutils/_msvccompiler.py index 82b78a0f..10a9ffda 100644 --- a/Lib/distutils/_msvccompiler.py +++ b/Lib/distutils/_msvccompiler.py @@ -28,15 +28,17 @@ import winreg from itertools import count def _find_vcvarsall(plat_spec): - with winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) as key: - if not key: - log.debug("Visual C++ is not registered") - return None, None + try: + key = winreg.OpenKeyEx( + winreg.HKEY_LOCAL_MACHINE, + r"Software\Microsoft\VisualStudio\SxS\VC7", + access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY + ) + except OSError: + log.debug("Visual C++ is not registered") + return None, None + with key: best_version = 0 best_dir = None for i in count(): @@ -100,7 +102,7 @@ def _get_vc_env(plat_spec): (line.partition('=') for line in out.splitlines()) if key and value } - + if vcruntime: env['py_vcruntime_redist'] = vcruntime return env @@ -236,7 +238,7 @@ class MSVCCompiler(CCompiler) : '/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG' ] self.compile_options.append('/MD' if self._vcruntime_redist else '/MT') - + self.compile_options_debug = [ '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG' ] diff --git a/Lib/distutils/ccompiler.py b/Lib/distutils/ccompiler.py index b7df394e..82fd7d5c 100644 --- a/Lib/distutils/ccompiler.py +++ b/Lib/distutils/ccompiler.py @@ -752,7 +752,7 @@ class CCompiler: raise NotImplementedError def library_option(self, lib): - """Return the compiler option to add 'dir' to the list of libraries + """Return the compiler option to add 'lib' to the list of libraries linked into the shared library or executable. """ raise NotImplementedError diff --git a/Lib/distutils/cygwinccompiler.py b/Lib/distutils/cygwinccompiler.py index d28b1b36..c879646c 100644 --- a/Lib/distutils/cygwinccompiler.py +++ b/Lib/distutils/cygwinccompiler.py @@ -10,9 +10,9 @@ cygwin in no-cygwin mode). # # * if you use a msvc compiled python version (1.5.2) # 1. you have to insert a __GNUC__ section in its config.h -# 2. you have to generate a import library for its dll +# 2. 
you have to generate an import library for its dll # - create a def-file for python??.dll -# - create a import library using +# - create an import library using # dlltool --dllname python15.dll --def python15.def \ # --output-lib libpython15.a # @@ -318,7 +318,7 @@ class Mingw32CCompiler(CygwinCCompiler): self.dll_libraries = get_msvcr() # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. CONFIG_H_OK = "ok" diff --git a/Lib/distutils/tests/test_core.py b/Lib/distutils/tests/test_core.py index 41321f7d..654227ca 100644 --- a/Lib/distutils/tests/test_core.py +++ b/Lib/distutils/tests/test_core.py @@ -9,6 +9,7 @@ import test.support from test.support import captured_stdout, run_unittest import unittest from distutils.tests import support +from distutils import log # setup script that uses __file__ setup_using___file__ = """\ @@ -36,6 +37,7 @@ class CoreTestCase(support.EnvironGuard, unittest.TestCase): self.old_stdout = sys.stdout self.cleanup_testfn() self.old_argv = sys.argv, sys.argv[:] + self.addCleanup(log.set_threshold, log._global_log.threshold) def tearDown(self): sys.stdout = self.old_stdout diff --git a/Lib/distutils/tests/test_dist.py b/Lib/distutils/tests/test_dist.py index b7fd3fbf..1f104cef 100644 --- a/Lib/distutils/tests/test_dist.py +++ b/Lib/distutils/tests/test_dist.py @@ -13,6 +13,7 @@ from distutils.cmd import Command from test.support import TESTFN, captured_stdout, run_unittest from distutils.tests import support +from distutils import log class test_dist(Command): @@ -405,6 +406,7 @@ class MetadataTestCase(support.TempdirManager, support.EnvironGuard, def test_show_help(self): # smoke test, just makes sure some help is displayed + self.addCleanup(log.set_threshold, log._global_log.threshold) dist = Distribution() sys.argv = [] dist.help = 1 diff --git a/Lib/distutils/tests/test_log.py b/Lib/distutils/tests/test_log.py index ce66ee51..0c2ad7a4 100644 --- a/Lib/distutils/tests/test_log.py +++ b/Lib/distutils/tests/test_log.py @@ -14,8 +14,8 @@ class TestLog(unittest.TestCase): # error handler) old_stdout = sys.stdout old_stderr = sys.stderr + old_threshold = log.set_threshold(log.DEBUG) try: - log.set_threshold(log.DEBUG) with NamedTemporaryFile(mode="w+", encoding='ascii') as stdout, \ NamedTemporaryFile(mode="w+", encoding='ascii') as stderr: sys.stdout = stdout @@ -27,6 +27,7 @@ class TestLog(unittest.TestCase): stderr.seek(0) self.assertEqual(stderr.read().rstrip(), "fatal:\\xe9") finally: + log.set_threshold(old_threshold) sys.stdout = old_stdout sys.stderr = old_stderr diff --git a/Lib/distutils/tests/test_msvccompiler.py b/Lib/distutils/tests/test_msvccompiler.py index 874d6035..c4d911ff 100644 --- a/Lib/distutils/tests/test_msvccompiler.py +++ b/Lib/distutils/tests/test_msvccompiler.py @@ -77,7 +77,7 @@ class msvccompilerTestCase(support.TempdirManager, compiler.initialize() dll = compiler._vcruntime_redist self.assertTrue(os.path.isfile(dll)) - + compiler._copy_vcruntime(tempdir) self.assertFalse(os.path.isfile(os.path.join( diff --git a/Lib/doctest.py b/Lib/doctest.py index 96ab0c40..40947238 100644 --- a/Lib/doctest.py +++ b/Lib/doctest.py @@ -1575,7 +1575,7 @@ class OutputChecker: # If `want` contains hex-escaped character such as "\u1234", # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). 
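The distutils test changes above restore the logging threshold after each test by using the value returned from set_threshold(). The same save-and-restore idiom works in any code that temporarily raises distutils verbosity:

from distutils import log

old_threshold = log.set_threshold(log.DEBUG)   # returns the previous threshold
try:
    log.debug("now visible")
finally:
    log.set_threshold(old_threshold)           # restore, as the tests now do
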
- # On the other hand, `got` could be an another sequence of + # On the other hand, `got` could be another sequence of # characters such as [\u1234], so `want` and `got` should # be folded to hex-escaped ASCII string to compare. got = self._toAscii(got) diff --git a/Lib/encodings/quopri_codec.py b/Lib/encodings/quopri_codec.py index 0533dbe4..496cb765 100644 --- a/Lib/encodings/quopri_codec.py +++ b/Lib/encodings/quopri_codec.py @@ -11,7 +11,7 @@ def quopri_encode(input, errors='strict'): assert errors == 'strict' f = BytesIO(input) g = BytesIO() - quopri.encode(f, g, 1) + quopri.encode(f, g, quotetabs=True) return (g.getvalue(), len(input)) def quopri_decode(input, errors='strict'): diff --git a/Lib/fileinput.py b/Lib/fileinput.py index af810d1d..c41b94ab 100644 --- a/Lib/fileinput.py +++ b/Lib/fileinput.py @@ -315,7 +315,10 @@ class FileInput: return line if not self._file: if not self._files: - return "" + if 'b' in self._mode: + return b'' + else: + return '' self._filename = self._files[0] self._files = self._files[1:] self._filelineno = 0 diff --git a/Lib/formatter.py b/Lib/formatter.py index 5e8e2ff3..e2394de8 100644 --- a/Lib/formatter.py +++ b/Lib/formatter.py @@ -20,8 +20,8 @@ manage and inserting data into the output. import sys import warnings -warnings.warn('the formatter module is deprecated and will be removed in ' - 'Python 3.6', DeprecationWarning, stacklevel=2) +warnings.warn('the formatter module is deprecated', DeprecationWarning, + stacklevel=2) AS_IS = None diff --git a/Lib/ftplib.py b/Lib/ftplib.py index 54b0e2cf..c416d856 100644 --- a/Lib/ftplib.py +++ b/Lib/ftplib.py @@ -287,7 +287,7 @@ class FTP: return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -852,7 +852,7 @@ def parse227(resp): def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/Lib/functools.py b/Lib/functools.py index 09df0680..214523cb 100644 --- a/Lib/functools.py +++ b/Lib/functools.py @@ -536,7 +536,7 @@ def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear - return update_wrapper(wrapper, user_function) + return wrapper try: from _functools import _lru_cache_wrapper @@ -567,7 +567,7 @@ def _c3_merge(sequences): break # reject the current head, it appears later else: break - if not candidate: + if candidate is None: raise RuntimeError("Inconsistent hierarchy") result.append(candidate) # remove the chosen candidate diff --git a/Lib/getopt.py b/Lib/getopt.py index 3d6ecbdd..9d4cab1b 100644 --- a/Lib/getopt.py +++ b/Lib/getopt.py @@ -28,7 +28,7 @@ option involved with the exception. 
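The fileinput change above makes readline() return b'' rather than '' once the file list is exhausted and the input was opened in a binary mode, so the sentinel matches the bytes lines read earlier. A small sketch (the file name is hypothetical):

import fileinput

with fileinput.input(files=['dump.bin'], mode='rb') as stream:
    line = stream.readline()
    while line:                   # the loop ends on the empty sentinel
        line = stream.readline()
    print(repr(line))             # b'' in 3.5.1 (previously '')
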
# - RETURN_IN_ORDER option # - GNU extension with '-' as first character of option string # - optional arguments, specified by double colons -# - a option string with a W followed by semicolon should +# - an option string with a W followed by semicolon should # treat "-W foo" as "--foo" __all__ = ["GetoptError","error","getopt","gnu_getopt"] diff --git a/Lib/getpass.py b/Lib/getpass.py index 7c4e9761..be511211 100644 --- a/Lib/getpass.py +++ b/Lib/getpass.py @@ -99,7 +99,7 @@ def win_getpass(prompt='Password: ', stream=None): """Prompt for password with echo off, using Windows getch().""" if sys.stdin is not sys.__stdin__: return fallback_getpass(prompt, stream) - import msvcrt + for c in prompt: msvcrt.putwch(c) pw = "" diff --git a/Lib/glob.py b/Lib/glob.py index 56d67041..16330d81 100644 --- a/Lib/glob.py +++ b/Lib/glob.py @@ -4,7 +4,7 @@ import os import re import fnmatch -__all__ = ["glob", "iglob"] +__all__ = ["glob", "iglob", "escape"] def glob(pathname, *, recursive=False): """Return a list of paths matching a pathname pattern. @@ -30,6 +30,13 @@ def iglob(pathname, *, recursive=False): If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ + it = _iglob(pathname, recursive) + if recursive and _isrecursive(pathname): + s = next(it) # skip empty string + assert not s + return it + +def _iglob(pathname, recursive): dirname, basename = os.path.split(pathname) if not has_magic(pathname): if basename: @@ -50,7 +57,7 @@ def iglob(pathname, *, recursive=False): # drive or UNC path. Prevent an infinite recursion if a drive or UNC path # contains magic characters (i.e. r'\\?\C:'). if dirname != pathname and has_magic(dirname): - dirs = iglob(dirname, recursive=recursive) + dirs = _iglob(dirname, recursive) else: dirs = [dirname] if has_magic(basename): @@ -98,12 +105,10 @@ def glob0(dirname, basename): def glob2(dirname, pattern): assert _isrecursive(pattern) - if dirname: - yield pattern[:0] + yield pattern[:0] yield from _rlistdir(dirname) # Recursively yields relative pathnames inside a literal directory. - def _rlistdir(dirname): if not dirname: if isinstance(dirname, bytes): diff --git a/Lib/html/entities.py b/Lib/html/entities.py index 3e1778b7..91ea5da2 100644 --- a/Lib/html/entities.py +++ b/Lib/html/entities.py @@ -224,7 +224,7 @@ name2codepoint = { 'spades': 0x2660, # black spade suit, U+2660 ISOpub 'sub': 0x2282, # subset of, U+2282 ISOtech 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech - 'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb + 'sum': 0x2211, # n-ary summation, U+2211 ISOamsb 'sup': 0x2283, # superset of, U+2283 ISOtech 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum diff --git a/Lib/html/parser.py b/Lib/html/parser.py index 390d4ccc..43e6411b 100644 --- a/Lib/html/parser.py +++ b/Lib/html/parser.py @@ -139,7 +139,15 @@ class HTMLParser(_markupbase.ParserBase): if self.convert_charrefs and not self.cdata_elem: j = rawdata.find('<', i) if j < 0: - if not end: + # if we can't find the next <, either we are at the end + # or there's more text incoming. If the latter is True, + # we can't pass the text to handle_data in case we have + # a charref cut in half at end. Try to determine if + # this is the case before proceding by looking for an + # & near the end and see if it's followed by a space or ;. 
+ amppos = rawdata.rfind('&', max(i, n-34)) + if (amppos >= 0 and + not re.compile(r'[\s;]').search(rawdata, amppos)): break # wait till we get all the text j = n else: diff --git a/Lib/http/cookiejar.py b/Lib/http/cookiejar.py index b1ba72ec..ac5e667d 100644 --- a/Lib/http/cookiejar.py +++ b/Lib/http/cookiejar.py @@ -423,10 +423,10 @@ def join_header_words(lists): Takes a list of lists of (key, value) pairs and produces a single header value. Attribute values are quoted if needed. - >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]]) - 'text/plain; charset="iso-8859/1"' - >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]]) - 'text/plain, charset="iso-8859/1"' + >>> join_header_words([[("text/plain", None), ("charset", "iso-8859-1")]]) + 'text/plain; charset="iso-8859-1"' + >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859-1")]]) + 'text/plain, charset="iso-8859-1"' """ headers = [] @@ -1437,7 +1437,7 @@ class CookieJar: break # convert RFC 2965 Max-Age to seconds since epoch # XXX Strictly you're supposed to follow RFC 2616 - # age-calculation rules. Remember that zero Max-Age is a + # age-calculation rules. Remember that zero Max-Age # is a request to discard (old and new) cookie, though. k = "expires" v = self._now + v diff --git a/Lib/http/server.py b/Lib/http/server.py index fd13be37..e1b71abf 100644 --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -831,19 +831,21 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a colllapsed path. + any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. + Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.parse.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') @@ -864,6 +866,9 @@ def _url_collapse_path(path): else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) @@ -947,7 +952,7 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(urllib.parse.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: @@ -984,11 +989,7 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. 
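The http.server hunks above make the private _url_collapse_path() helper split off the query component before collapsing the path and re-attach it afterwards, so CGI dispatch no longer mangles query strings. Roughly, relying on that private helper name:

from http.server import _url_collapse_path

# '..' segments and duplicate slashes are collapsed; the query string survives
print(_url_collapse_path('/cgi-bin//sub/../test.py?param=1'))
# -> '/cgi-bin/test.py?param=1'
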
@@ -1167,8 +1168,7 @@ def test(HandlerClass=BaseHTTPRequestHandler, ServerClass=HTTPServer, protocol="HTTP/1.0", port=8000, bind=""): """Test the HTTP request handler class. - This runs an HTTP server on port 8000 (or the first command line - argument). + This runs an HTTP server on port 8000 (or the port argument). """ server_address = (bind, port) diff --git a/Lib/idlelib/AutoCompleteWindow.py b/Lib/idlelib/AutoCompleteWindow.py index f666ea62..2ee68783 100644 --- a/Lib/idlelib/AutoCompleteWindow.py +++ b/Lib/idlelib/AutoCompleteWindow.py @@ -192,6 +192,7 @@ class AutoCompleteWindow: scrollbar.config(command=listbox.yview) scrollbar.pack(side=RIGHT, fill=Y) listbox.pack(side=LEFT, fill=BOTH, expand=True) + acw.lift() # work around bug in Tk 8.5.18+ (issue #24570) # Initialize the listbox selection self.listbox.select_set(self._binary_search(self.start)) diff --git a/Lib/idlelib/Bindings.py b/Lib/idlelib/Bindings.py index 226671cc..ab25ff18 100644 --- a/Lib/idlelib/Bindings.py +++ b/Lib/idlelib/Bindings.py @@ -78,7 +78,6 @@ menudefs = [ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/Lib/idlelib/CallTipWindow.py b/Lib/idlelib/CallTipWindow.py index 170d1465..8e68a76b 100644 --- a/Lib/idlelib/CallTipWindow.py +++ b/Lib/idlelib/CallTipWindow.py @@ -72,6 +72,7 @@ class CallTip: background="#ffffe0", relief=SOLID, borderwidth=1, font = self.widget['font']) self.label.pack() + tw.lift() # work around bug in Tk 8.5.18+ (issue #24570) self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhide_event) diff --git a/Lib/idlelib/ClassBrowser.py b/Lib/idlelib/ClassBrowser.py index 5be65efb..d09c52fe 100644 --- a/Lib/idlelib/ClassBrowser.py +++ b/Lib/idlelib/ClassBrowser.py @@ -56,7 +56,7 @@ class ClassBrowser: self.settitle() top.focus_set() # create scrolled canvas - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1) sc.frame.pack(expand=1, fill="both") diff --git a/Lib/idlelib/ColorDelegator.py b/Lib/idlelib/ColorDelegator.py index 13a90103..9f313496 100644 --- a/Lib/idlelib/ColorDelegator.py +++ b/Lib/idlelib/ColorDelegator.py @@ -60,7 +60,7 @@ class ColorDelegator(Delegator): self.tag_raise('sel') def LoadTagDefs(self): - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() self.tagdefs = { "COMMENT": idleConf.GetHighlight(theme, "comment"), "KEYWORD": idleConf.GetHighlight(theme, "keyword"), diff --git a/Lib/idlelib/Debugger.py b/Lib/idlelib/Debugger.py index 68751978..250422ed 100644 --- a/Lib/idlelib/Debugger.py +++ b/Lib/idlelib/Debugger.py @@ -17,7 +17,10 @@ class Idb(bdb.Bdb): self.set_step() return message = self.__frame2message(frame) - self.gui.interaction(message, frame) + try: + self.gui.interaction(message, frame) + except TclError: # When closing debugger window with [x] in 3.x + pass def user_exception(self, frame, info): if self.in_rpc_code(frame): @@ -59,8 +62,42 @@ class Debugger: self.frame = None self.make_gui() self.interacting = 0 + self.nesting_level = 0 def run(self, *args): + # Deal with the scenario where we've already got a program running + # in the debugger and we want to start another. If that is the case, + # our second 'run' was invoked from an event dispatched not from + # the main event loop, but from the nested event loop in 'interaction' + # below. 
So our stack looks something like this: + # outer main event loop + # run() + # + # callback to debugger's interaction() + # nested event loop + # run() for second command + # + # This kind of nesting of event loops causes all kinds of problems + # (see e.g. issue #24455) especially when dealing with running as a + # subprocess, where there's all kinds of extra stuff happening in + # there - insert a traceback.print_stack() to check it out. + # + # By this point, we've already called restart_subprocess() in + # ScriptBinding. However, we also need to unwind the stack back to + # that outer event loop. To accomplish this, we: + # - return immediately from the nested run() + # - abort_loop ensures the nested event loop will terminate + # - the debugger's interaction routine completes normally + # - the restart_subprocess() will have taken care of stopping + # the running program, which will also let the outer run complete + # + # That leaves us back at the outer main event loop, at which point our + # after event can fire, and we'll come back to this routine with a + # clean stack. + if self.nesting_level > 0: + self.abort_loop() + self.root.after(100, lambda: self.run(*args)) + return try: self.interacting = 1 return self.idb.run(*args) @@ -68,6 +105,10 @@ class Debugger: self.interacting = 0 def close(self, event=None): + try: + self.quit() + except Exception: + pass if self.interacting: self.top.bell() return @@ -191,7 +232,12 @@ class Debugger: b.configure(state="normal") # self.top.wakeup() - self.root.mainloop() + # Nested main loop: Tkinter's main loop is not reentrant, so use + # Tcl's vwait facility, which reenters the event loop until an + # event handler sets the variable we're waiting on + self.nesting_level += 1 + self.root.tk.call('vwait', '::idledebugwait') + self.nesting_level -= 1 # for b in self.buttons: b.configure(state="disabled") @@ -215,23 +261,26 @@ class Debugger: def cont(self): self.idb.set_continue() - self.root.quit() + self.abort_loop() def step(self): self.idb.set_step() - self.root.quit() + self.abort_loop() def next(self): self.idb.set_next(self.frame) - self.root.quit() + self.abort_loop() def ret(self): self.idb.set_return(self.frame) - self.root.quit() + self.abort_loop() def quit(self): self.idb.set_quit() - self.root.quit() + self.abort_loop() + + def abort_loop(self): + self.root.tk.call('set', '::idledebugwait', '1') stackviewer = None diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py index 3ac68bba..b5868be3 100644 --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -21,6 +21,7 @@ from idlelib import PyParse from idlelib.configHandler import idleConf from idlelib import aboutDialog, textView, configDialog from idlelib import macosxSupport +from idlelib import help # The default tab setting for a Text widget, in average-width characters. TK_TABWIDTH_DEFAULT = 8 @@ -53,6 +54,11 @@ class HelpDialog(object): near - a Toplevel widget (e.g. 
EditorWindow or PyShell) to use as a reference for placing the help window """ + import warnings as w + w.warn("EditorWindow.HelpDialog is no longer used by Idle.\n" + "It will be removed in 3.6 or later.\n" + "It has been replaced by private help.HelpWindow\n", + DeprecationWarning, stacklevel=2) if self.dlg is None: self.show_dialog(parent) if near: @@ -79,9 +85,7 @@ class HelpDialog(object): self.dlg = None self.parent = None -helpDialog = HelpDialog() # singleton instance -def _help_dialog(parent): # wrapper for htest - helpDialog.show_dialog(parent) +helpDialog = HelpDialog() # singleton instance, no longer used class EditorWindow(object): @@ -152,6 +156,7 @@ class EditorWindow(object): 'name': 'text', 'padx': 5, 'wrap': 'none', + 'highlightthickness': 0, 'width': self.width, 'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')} @@ -171,13 +176,13 @@ class EditorWindow(object): if macosxSupport.isAquaTk(): # Command-W on editorwindows doesn't work without this. text.bind('<>', self.close_event) - # Some OS X systems have only one mouse button, - # so use control-click for pulldown menus there. - # (Note, AquaTk defines <2> as the right button if - # present and the Tk Text widget already binds <2>.) + # Some OS X systems have only one mouse button, so use + # control-click for popup context menus there. For two + # buttons, AquaTk defines <2> as the right button, not <3>. text.bind("",self.right_menu_event) + text.bind("<2>", self.right_menu_event) else: - # Elsewhere, use right-click for pulldown menus. + # Elsewhere, use right-click for popup menus. text.bind("<3>",self.right_menu_event) text.bind("<>", self.cut) text.bind("<>", self.copy) @@ -187,8 +192,6 @@ class EditorWindow(object): text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -313,36 +316,6 @@ class EditorWindow(object): self.askinteger = tkSimpleDialog.askinteger self.showerror = tkMessageBox.showerror - self._highlight_workaround() # Fix selection tags on Windows - - def _highlight_workaround(self): - # On Windows, Tk removes painting of the selection - # tags which is different behavior than on Linux and Mac. - # See issue14146 for more information. - if not sys.platform.startswith('win'): - return - - text = self.text - text.event_add("<>", "") - text.event_add("<>", "") - def highlight_fix(focus): - sel_range = text.tag_ranges("sel") - if sel_range: - if focus == 'out': - HILITE_CONFIG = idleConf.GetHighlight( - idleConf.CurrentTheme(), 'hilite') - text.tag_config("sel_fix", HILITE_CONFIG) - text.tag_raise("sel_fix") - text.tag_add("sel_fix", *sel_range) - elif focus == 'in': - text.tag_remove("sel_fix", "1.0", "end") - - text.bind("<>", - lambda ev: highlight_fix("out")) - text.bind("<>", - lambda ev: highlight_fix("in")) - - def _filename_to_unicode(self, filename): """Return filename as BMP unicode so diplayable in Tk.""" # Decode bytes to unicode. @@ -408,6 +381,7 @@ class EditorWindow(object): def set_status_bar(self): self.status_bar = self.MultiStatusBar(self.top) + sep = Frame(self.top, height=1, borderwidth=1, background='grey75') if sys.platform == "darwin": # Insert some padding to avoid obscuring some of the statusbar # by the resize widget. 
@@ -415,6 +389,7 @@ class EditorWindow(object): self.status_bar.set_label('column', 'Col: ?', side=RIGHT) self.status_bar.set_label('line', 'Ln: ?', side=RIGHT) self.status_bar.pack(side=BOTTOM, fill=X) + sep.pack(side=BOTTOM, fill=X) self.text.bind("<>", self.set_line_and_column) self.text.event_add("<>", "", "") @@ -531,19 +506,23 @@ class EditorWindow(object): return 'normal' def about_dialog(self, event=None): + "Handle Help 'About IDLE' event." + # Synchronize with macosxSupport.overrideRootMenu.about_dialog. aboutDialog.AboutDialog(self.top,'About IDLE') def config_dialog(self, event=None): + "Handle Options 'Configure IDLE' event." + # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - configDialog.ConfigExtensionsDialog(self.top) def help_dialog(self, event=None): + "Handle Help 'IDLE Help' event." + # Synchronize with macosxSupport.overrideRootMenu.help_dialog. if self.root: parent = self.root else: parent = self.top - helpDialog.display(parent, near=self.top) + help.show_idlehelp(parent) def python_docs(self, event=None): if sys.platform[:3] == 'win': @@ -763,7 +742,7 @@ class EditorWindow(object): # Called from self.filename_change_hook and from configDialog.py self._rmcolorizer() self._addcolorizer() - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() normal_colors = idleConf.GetHighlight(theme, 'normal') cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg') select_colors = idleConf.GetHighlight(theme, 'hilite') @@ -774,6 +753,9 @@ class EditorWindow(object): selectforeground=select_colors['foreground'], selectbackground=select_colors['background'], ) + if TkVersion >= 8.5: + self.text.config( + inactiveselectbackground=select_colors['background']) IDENTCHARS = string.ascii_letters + string.digits + "_" @@ -908,9 +890,11 @@ class EditorWindow(object): except OSError as err: if not getattr(self.root, "recentfilelist_error_displayed", False): self.root.recentfilelist_error_displayed = True - tkMessageBox.showerror(title='IDLE Error', - message='Unable to update Recent Files list:\n%s' - % str(err), + tkMessageBox.showwarning(title='IDLE Warning', + message="Cannot update File menu Recent Files list. " + "Your operating system says:\n%s\n" + "Select OK and IDLE will continue without updating." + % self._filename_to_unicode(str(err)), parent=self.text) # for each edit window instance, construct the recent files menu for instance in self.top.instance_dict: @@ -1398,7 +1382,7 @@ class EditorWindow(object): text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
@@ -1716,4 +1700,4 @@ def _editor_window(parent): # htest # if __name__ == '__main__': from idlelib.idle_test.htest import run - run(_help_dialog, _editor_window) + run(_editor_window) diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py index 505cc8b8..5ec9d546 100644 --- a/Lib/idlelib/IOBinding.py +++ b/Lib/idlelib/IOBinding.py @@ -1,17 +1,16 @@ +import codecs +from codecs import BOM_UTF8 import os +import re import shlex import sys -import codecs import tempfile + import tkinter.filedialog as tkFileDialog import tkinter.messagebox as tkMessageBox -import re -from tkinter import * from tkinter.simpledialog import askstring -from idlelib.configHandler import idleConf -from codecs import BOM_UTF8 # Try setting the locale, so that we can find out # what encoding to use @@ -217,7 +216,7 @@ class IOBinding: f.seek(0) bytes = f.read() except OSError as msg: - tkMessageBox.showerror("I/O Error", str(msg), master=self.text) + tkMessageBox.showerror("I/O Error", str(msg), parent=self.text) return False chars, converted = self._decode(two_lines, bytes) if chars is None: @@ -266,7 +265,7 @@ class IOBinding: title="Error loading the file", message="The encoding '%s' is not known to this Python "\ "installation. The file may not display correctly" % name, - master = self.text) + parent = self.text) enc = None except UnicodeDecodeError: return None, False @@ -321,7 +320,7 @@ class IOBinding: title="Save On Close", message=message, default=tkMessageBox.YES, - master=self.text) + parent=self.text) if confirm: reply = "yes" self.save(None) @@ -381,7 +380,7 @@ class IOBinding: return True except OSError as msg: tkMessageBox.showerror("I/O Error", str(msg), - master=self.text) + parent=self.text) return False def encode(self, chars): @@ -418,7 +417,7 @@ class IOBinding: tkMessageBox.showerror( "I/O Error", "%s.\nSaving as UTF-8" % failed, - master = self.text) + parent = self.text) # Fallback: save as UTF-8, with BOM - ignoring the incorrect # declared encoding return BOM_UTF8 + chars.encode("utf-8") @@ -433,7 +432,7 @@ class IOBinding: title="Print", message="Print to Default Printer", default=tkMessageBox.OK, - master=self.text) + parent=self.text) if not confirm: self.text.focus_set() return "break" @@ -470,10 +469,10 @@ class IOBinding: status + output if output: output = "Printing command: %s\n" % repr(command) + output - tkMessageBox.showerror("Print status", output, master=self.text) + tkMessageBox.showerror("Print status", output, parent=self.text) else: #no printing for this platform message = "Printing is not enabled for this platform: %s" % platform - tkMessageBox.showinfo("Print status", message, master=self.text) + tkMessageBox.showinfo("Print status", message, parent=self.text) if tempfilename: os.unlink(tempfilename) return "break" @@ -492,7 +491,7 @@ class IOBinding: def askopenfile(self): dir, base = self.defaultfilename("open") if not self.opendialog: - self.opendialog = tkFileDialog.Open(master=self.text, + self.opendialog = tkFileDialog.Open(parent=self.text, filetypes=self.filetypes) filename = self.opendialog.show(initialdir=dir, initialfile=base) return filename @@ -513,7 +512,7 @@ class IOBinding: dir, base = self.defaultfilename("save") if not self.savedialog: self.savedialog = tkFileDialog.SaveAs( - master=self.text, + parent=self.text, filetypes=self.filetypes, defaultextension=self.defaultextension) filename = self.savedialog.show(initialdir=dir, initialfile=base) @@ -525,7 +524,10 @@ class IOBinding: self.editwin.update_recent_files_list(filename) def 
_io_binding(parent): # htest # - root = Tk() + from tkinter import Toplevel, Text + from idlelib.configHandler import idleConf + + root = Toplevel(parent) root.title("Test IOBinding") width, height, x, y = list(map(int, re.split('[x+]', parent.geometry()))) root.geometry("+%d+%d"%(x, y + 150)) diff --git a/Lib/idlelib/MultiStatusBar.py b/Lib/idlelib/MultiStatusBar.py index f44b6a86..e82ba9ab 100644 --- a/Lib/idlelib/MultiStatusBar.py +++ b/Lib/idlelib/MultiStatusBar.py @@ -8,13 +8,15 @@ class MultiStatusBar(Frame): Frame.__init__(self, master, **kw) self.labels = {} - def set_label(self, name, text='', side=LEFT): + def set_label(self, name, text='', side=LEFT, width=0): if name not in self.labels: - label = Label(self, bd=1, relief=SUNKEN, anchor=W) - label.pack(side=side) + label = Label(self, borderwidth=0, anchor=W) + label.pack(side=side, pady=0, padx=4) self.labels[name] = label else: label = self.labels[name] + if width != 0: + label.config(width=width) label.config(text=text) def _multistatus_bar(parent): diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt index 2d8ce54e..0eb939b3 100644 --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -1,6 +1,118 @@ +What's New in IDLE 3.5.1? +========================= +*Release date: 2015-12-06* + +- Issue 15348: Stop the debugger engine (normally in a user process) + before closing the debugger window (running in the IDLE process). + This prevents the RuntimeErrors that were being caught and ignored. + +- Issue #24455: Prevent IDLE from hanging when a) closing the shell while the + debugger is active (15347); b) closing the debugger with the [X] button + (15348); and c) activating the debugger when already active (24455). + The patch by Mark Roseman does this by making two changes. + 1. Suspend and resume the gui.interaction method with the tcl vwait + mechanism intended for this purpose (instead of root.mainloop & .quit). + 2. In gui.run, allow any existing interaction to terminate first. + +- Change 'The program' to 'Your program' in an IDLE 'kill program?' message + to make it clearer that the program referred to is the currently running + user program, not IDLE itself. + +- Issue #24750: Improve the appearance of the IDLE editor window status bar. + Patch by Mark Roseman. + +- Issue #25313: Change the handling of new built-in text color themes to better + address the compatibility problem introduced by the addition of IDLE Dark. + Consistently use the revised idleConf.CurrentTheme everywhere in idlelib. + +- Issue #24782: Extension configuration is now a tab in the IDLE Preferences + dialog rather than a separate dialog. The former tabs are now a sorted + list. Patch by Mark Roseman. + +- Issue #22726: Re-activate the config dialog help button with some content + about the other buttons and the new IDLE Dark theme. + +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + To use it with IDLEs released before November 2015, hit the + 'Save as New Custom Theme' button and enter a new name, + such as 'Custom Dark'. The custom theme will work with any IDLE + release, and can be modified. + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc chapter. + 'IDLE' now means 'Integrated Development and Learning Environment'. 
+ +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + +- Issue #24972: Inactive selection background now matches active selection + background, as configured by users, on all systems. Found items are now + always highlighted on Windows. Initial patch by Mark Roseman. + +- Issue #24570: Idle: make calltip and completion boxes appear on Macs + affected by a tk regression. Initial patch by Mark Roseman. + +- Issue #24988: Idle ScrolledList context menus (used in debugger) + now work on Mac Aqua. Patch by Mark Roseman. + +- Issue #24801: Make right-click for context menu work on Mac Aqua. + Patch by Mark Roseman. + +- Issue #25173: Associate tkinter messageboxes with a specific widget. + For Mac OSX, make them a 'sheet'. Patch by Mark Roseman. + +- Issue #25198: Enhance the initial html viewer now used for Idle Help. + * Properly indent fixed-pitch text (patch by Mark Roseman). + * Give code snippet a very Sphinx-like light blueish-gray background. + * Re-use initial width and height set by users for shell and editor. + * When the Table of Contents (TOC) menu is used, put the section header + at the top of the screen. + +- Issue #25225: Condense and rewrite Idle doc section on text colors. + +- Issue #21995: Explain some differences between IDLE and console Python. + +- Issue #22820: Explain need for *print* when running file from Idle editor. + +- Issue #25224: Doc: augment Idle feature list and no-subprocess section. + +- Issue #25219: Update doc for Idle command line options. + Some were missing and notes were not correct. + +- Issue #24861: Most of idlelib is private and subject to change. + Use idleib.idle.* to start Idle. See idlelib.__init__.__doc__. + +- Issue #25199: Idle: add synchronization comments for future maintainers. + +- Issue #16893: Replace help.txt with help.html for Idle doc display. + The new idlelib/help.html is rstripped Doc/build/html/library/idle.html. + It looks better than help.txt and will better document Idle as released. + The tkinter html viewer that works for this file was written by Mark Roseman. + The now unused EditorWindow.HelpDialog class and helt.txt file are deprecated. + +- Issue #24199: Deprecate unused idlelib.idlever with possible removal in 3.6. + +- Issue #24790: Remove extraneous code (which also create 2 & 3 conflicts). + + What's New in IDLE 3.5.0? ========================= -*Release date: 2015-09-13* ?? +*Release date: 2015-09-13* + +- Issue #23672: Allow Idle to edit and run files with astral chars in name. + Patch by Mohd Sanad Zaki Rizvi. + +- Issue 24745: Idle editor default font. Switch from Courier to + platform-sensitive TkFixedFont. This should not affect current customized + font selections. If there is a problem, edit $HOME/.idlerc/config-main.cfg + and remove 'fontxxx' entries from [Editor Window]. Patch by Mark Roseman. + +- Issue #21192: Idle editor. When a file is run, put its name in the restart bar. + Do not print false prompts. Original patch by Adnan Umer. + +- Issue #13884: Idle menus. Remove tearoff lines. Patch by Roger Serwy. - Issue #23184: remove unused names and imports in idlelib. Initial patch by Al Sweigart. @@ -29,7 +141,7 @@ What's New in IDLE 3.5.0? - Issue #21986: Code objects are not normally pickled by the pickle module. To match this, they are no longer pickled when running under Idle. - + - Issue #23180: Rename IDLE "Windows" menu item to "Window". Patch by Al Sweigart. @@ -167,11 +279,6 @@ What's New in IDLE 3.1b1? 
- Use of 'filter' in keybindingDialog.py was causing custom key assignment to fail. Patch 5707 amaury.forgeotdarc. - -What's New in IDLE 3.1a1? -========================= -*Release date: 07-Mar-09* - - Issue #4815: Offer conversion to UTF-8 if source files have no encoding declaration and are not encoded in UTF-8. @@ -185,6 +292,7 @@ What's New in IDLE 3.1a1? - Issue #2665: On Windows, an IDLE installation upgraded from an old version would not start if a custom theme was defined. + What's New in IDLE 2.7? (UNRELEASED, but merged into 3.1 releases above.) ======================= *Release date: XX-XXX-2010* @@ -210,11 +318,6 @@ What's New in IDLE 2.7? (UNRELEASED, but merged into 3.1 releases above.) - Issue #3549: On MacOS the preferences menu was not present -What's New in IDLE 3.0 final? -============================= - -*Release date: 03-Dec-2008* - - IDLE would print a "Unhandled server exception!" message when internal debugging is enabled. @@ -224,12 +327,6 @@ What's New in IDLE 3.0 final? - Issue #4383: When IDLE cannot make the connection to its subprocess, it would fail to properly display the error message. - -What's New in IDLE 3.0a3? -========================= - -*Release date: 29-Feb-2008* - - help() was not paging to the shell. Issue1650. - CodeContext was not importing. @@ -241,21 +338,9 @@ What's New in IDLE 3.0a3? - Issue #1585: IDLE uses non-existent xrange() function. - -What's New in IDLE 3.0a2? -========================= - -*Release date: 06-Dec-2007* - - Windows EOL sequence not converted correctly, encoding error. Caused file save to fail. Bug 1130. - -What's New in IDLE 3.0a1? -========================= - -*Release date: 31-Aug-2007* - - IDLE converted to Python 3000 syntax. - Strings became Unicode. @@ -269,9 +354,8 @@ What's New in IDLE 3.0a1? be cleared before IDLE exits. -What's New in IDLE 2.6 final? -============================= - +What's New in IDLE 2.6 +====================== *Release date: 01-Oct-2008*, merged into 3.0 releases detailed above (3.0rc2) - Issue #2665: On Windows, an IDLE installation upgraded from an old version @@ -356,15 +440,8 @@ What's New in IDLE 2.6 final? What's New in IDLE 1.2? ======================= - *Release date: 19-SEP-2006* - -What's New in IDLE 1.2c1? -========================= - -*Release date: 17-AUG-2006* - - File menu hotkeys: there were three 'p' assignments. Reassign the 'Save Copy As' and 'Print' hotkeys to 'y' and 't'. Change the Shell hotkey from 's' to 'l'. @@ -385,11 +462,6 @@ What's New in IDLE 1.2c1? - When used w/o subprocess, all exceptions were preceded by an error message claiming they were IDLE internal errors (since 1.2a1). -What's New in IDLE 1.2b3? -========================= - -*Release date: 03-AUG-2006* - - Bug #1525817: Don't truncate short lines in IDLE's tool tips. - Bug #1517990: IDLE keybindings on MacOS X now work correctly @@ -413,26 +485,6 @@ What's New in IDLE 1.2b3? 'as' keyword in comment directly following import command. Closes 1325071. Patch 1479219 Tal Einat -What's New in IDLE 1.2b2? -========================= - -*Release date: 11-JUL-2006* - -What's New in IDLE 1.2b1? -========================= - -*Release date: 20-JUN-2006* - -What's New in IDLE 1.2a2? -========================= - -*Release date: 27-APR-2006* - -What's New in IDLE 1.2a1? -========================= - -*Release date: 05-APR-2006* - - Patch #1162825: Support non-ASCII characters in IDLE window titles. - Source file f.flush() after writing; trying to avoid lossage if user @@ -512,19 +564,14 @@ What's New in IDLE 1.2a1? 
- The remote procedure call module rpc.py can now access data attributes of remote registered objects. Changes to these attributes are local, however. + What's New in IDLE 1.1? ======================= - *Release date: 30-NOV-2004* - On OpenBSD, terminating IDLE with ctrl-c from the command line caused a stuck subprocess MainThread because only the SocketThread was exiting. -What's New in IDLE 1.1b3/rc1? -============================= - -*Release date: 18-NOV-2004* - - Saving a Keyset w/o making changes (by using the "Save as New Custom Key Set" button) caused IDLE to fail on restart (no new keyset was created in config-keys.cfg). Also true for Theme/highlights. Python Bug 1064535. @@ -532,28 +579,12 @@ What's New in IDLE 1.1b3/rc1? - A change to the linecache.py API caused IDLE to exit when an exception was raised while running without the subprocess (-n switch). Python Bug 1063840. -What's New in IDLE 1.1b2? -========================= - -*Release date: 03-NOV-2004* - - When paragraph reformat width was made configurable, a bug was introduced that caused reformatting of comment blocks to ignore how far the block was indented, effectively adding the indentation width to the reformat width. This has been repaired, and the reformat width is again a bound on the total width of reformatted lines. -What's New in IDLE 1.1b1? -========================= - -*Release date: 15-OCT-2004* - - -What's New in IDLE 1.1a3? -========================= - -*Release date: 02-SEP-2004* - - Improve keyboard focus binding, especially in Windows menu. Improve window raising, especially in the Windows menu and in the debugger. IDLEfork 763524. @@ -561,24 +592,12 @@ What's New in IDLE 1.1a3? - If user passes a non-existent filename on the commandline, just open a new file, don't raise a dialog. IDLEfork 854928. - -What's New in IDLE 1.1a2? -========================= - -*Release date: 05-AUG-2004* - - EditorWindow.py was not finding the .chm help file on Windows. Typo at Rev 1.54. Python Bug 990954 - checking sys.platform for substring 'win' was breaking IDLE docs on Mac (darwin). Also, Mac Safari browser requires full file:// URIs. SF 900580. - -What's New in IDLE 1.1a1? -========================= - -*Release date: 08-JUL-2004* - - Redirect the warning stream to the shell during the ScriptBinding check of user code and format the warning similarly to an exception for both that check and for runtime warnings raised in the subprocess. @@ -641,26 +660,13 @@ What's New in IDLE 1.1a1? What's New in IDLE 1.0? ======================= - *Release date: 29-Jul-2003* - Added a banner to the shell discussing warnings possibly raised by personal firewall software. Added same comment to README.txt. - -What's New in IDLE 1.0 release candidate 2? -=========================================== - -*Release date: 24-Jul-2003* - - Calltip error when docstring was None Python Bug 775541 - -What's New in IDLE 1.0 release candidate 1? -=========================================== - -*Release date: 18-Jul-2003* - - Updated extend.txt, help.txt, and config-extensions.def to correctly reflect the current status of the configuration system. Python Bug 768469 @@ -676,12 +682,6 @@ What's New in IDLE 1.0 release candidate 1? sys.std{in|out|err}.encoding, for both the local and the subprocess case. SF IDLEfork patch 682347. - -What's New in IDLE 1.0b2? -========================= - -*Release date: 29-Jun-2003* - - Extend AboutDialog.ViewFile() to support file encodings. Make the CREDITS file Latin-1. @@ -720,7 +720,6 @@ What's New in IDLE 1.0b2? 
What's New in IDLEfork 0.9b1? ============================= - *Release date: 02-Jun-2003* - The current working directory of the execution environment (and shell @@ -822,10 +821,8 @@ What's New in IDLEfork 0.9b1? exception formatting to the subprocess. - What's New in IDLEfork 0.9 Alpha 2? =================================== - *Release date: 27-Jan-2003* - Updated INSTALL.txt to claify use of the python2 rpm. @@ -929,7 +926,6 @@ What's New in IDLEfork 0.9 Alpha 2? What's New in IDLEfork 0.9 Alpha 1? =================================== - *Release date: 31-Dec-2002* - First release of major new functionality. For further details refer to diff --git a/Lib/idlelib/OutputWindow.py b/Lib/idlelib/OutputWindow.py index 9dacc492..e614f9b2 100644 --- a/Lib/idlelib/OutputWindow.py +++ b/Lib/idlelib/OutputWindow.py @@ -91,7 +91,7 @@ class OutputWindow(EditorWindow): "No special line", "The line you point at doesn't look like " "a valid file name followed by a line number.", - master=self.text) + parent=self.text) return filename, lineno = result edit = self.flist.open(filename) diff --git a/Lib/idlelib/PyShell.py b/Lib/idlelib/PyShell.py index 5854cf91..1bcc9b68 100755 --- a/Lib/idlelib/PyShell.py +++ b/Lib/idlelib/PyShell.py @@ -152,7 +152,7 @@ class PyShellEditorWindow(EditorWindow): # possible due to update in restore_file_breaks return if color: - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() cfg = idleConf.GetHighlight(theme, "break") else: cfg = {'foreground': '', 'background': ''} @@ -338,7 +338,7 @@ class ModifiedColorDelegator(ColorDelegator): def LoadTagDefs(self): ColorDelegator.LoadTagDefs(self) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() self.tagdefs.update({ "stdin": {'background':None,'foreground':None}, "stdout": idleConf.GetHighlight(theme, "stdout"), @@ -621,7 +621,7 @@ class ModifiedInterpreter(InteractiveInterpreter): item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid) from idlelib.TreeWidget import ScrolledCanvas, TreeNode top = Toplevel(self.tkconsole.root) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0) sc.frame.pack(expand=1, fill="both") @@ -774,7 +774,7 @@ class ModifiedInterpreter(InteractiveInterpreter): "Exit?", "Do you want to exit altogether?", default="yes", - master=self.tkconsole.text): + parent=self.tkconsole.text): raise else: self.showtraceback() @@ -812,7 +812,7 @@ class ModifiedInterpreter(InteractiveInterpreter): "Run IDLE with the -n command line switch to start without a " "subprocess and refer to Help/IDLE Help 'Running without a " "subprocess' for further details.", - master=self.tkconsole.text) + parent=self.tkconsole.text) def display_no_subprocess_error(self): tkMessageBox.showerror( @@ -820,14 +820,14 @@ class ModifiedInterpreter(InteractiveInterpreter): "IDLE's subprocess didn't make connection. 
Either IDLE can't " "start a subprocess or personal firewall software is blocking " "the connection.", - master=self.tkconsole.text) + parent=self.tkconsole.text) def display_executing_dialog(self): tkMessageBox.showerror( "Already executing", "The Python Shell window is already executing a command; " "please wait until it is finished.", - master=self.tkconsole.text) + parent=self.tkconsole.text) class PyShell(OutputWindow): @@ -931,7 +931,7 @@ class PyShell(OutputWindow): if self.executing: tkMessageBox.showerror("Don't debug now", "You can only toggle the debugger when idle", - master=self.text) + parent=self.text) self.set_debugger_indicator() return "break" else: @@ -989,7 +989,7 @@ class PyShell(OutputWindow): if self.executing: response = tkMessageBox.askokcancel( "Kill?", - "The program is still running!\n Do you want to kill it?", + "Your program is still running!\n Do you want to kill it?", default="ok", parent=self.text) if response is False: @@ -1043,6 +1043,7 @@ class PyShell(OutputWindow): self.write("Python %s on %s\n%s\n%s" % (sys.version, sys.platform, self.COPYRIGHT, nosub)) + self.text.focus_force() self.showprompt() import tkinter tkinter._default_root = None # 03Jan04 KBK What's this? @@ -1238,7 +1239,7 @@ class PyShell(OutputWindow): tkMessageBox.showerror("No stack trace", "There is no stack trace yet.\n" "(sys.last_traceback is not defined)", - master=self.text) + parent=self.text) return from idlelib.StackViewer import StackBrowser StackBrowser(self.root, self.flist) @@ -1547,6 +1548,14 @@ def main(): flist = PyShellFileList(root) macosxSupport.setupApp(root, flist) + if macosxSupport.isAquaTk(): + # There are some screwed up <2> class bindings for text + # widgets defined in Tk which we need to do away with. + # See issue #24801. + root.unbind_class('Text', '') + root.unbind_class('Text', '') + root.unbind_class('Text', '<>') + if enable_edit: if not (cmd or script): for filename in args[:]: diff --git a/Lib/idlelib/README.txt b/Lib/idlelib/README.txt index 7f4a66dd..b5663c25 100644 --- a/Lib/idlelib/README.txt +++ b/Lib/idlelib/README.txt @@ -1,60 +1,229 @@ -IDLE is Python's Tkinter-based Integrated DeveLopment Environment. - -IDLE emphasizes a lightweight, clean design with a simple user interface. -Although it is suitable for beginners, even advanced users will find that -IDLE has everything they really need to develop pure Python code. - -IDLE features a multi-window text editor with multiple undo, Python colorizing, -and many other capabilities, e.g. smart indent, call tips, and autocompletion. - -The editor has comprehensive search functions, including searching through -multiple files. Class browsers and path browsers provide fast access to -code objects from a top level viewpoint without dealing with code folding. - -There is a Python Shell window which features colorizing and command recall. - -IDLE executes Python code in a separate process, which is restarted for each -Run (F5) initiated from an editor window. The environment can also be -restarted from the Shell window without restarting IDLE. - -This enhancement has often been requested, and is now finally available. The -magic "reload/import *" incantations are no longer required when editing and -testing a module two or three steps down the import chain. - -(Personal firewall software may warn about the connection IDLE makes to its -subprocess using this computer's internal loopback interface. 
This connection -is not visible on any external interface and no data is sent to or received -from the Internet.) - -It is possible to interrupt tightly looping user code, even on Windows. - -Applications which cannot support subprocesses and/or sockets can still run -IDLE in a single process. - -IDLE has an integrated debugger with stepping, persistent breakpoints, and call -stack visibility. - -There is a GUI configuration manager which makes it easy to select fonts, -colors, keybindings, and startup options. This facility includes a feature -which allows the user to specify additional help sources, either locally or on -the web. - -IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl) -and is cross-platform, working on Unix, Mac, and Windows. - -IDLE accepts command line arguments. Try idle -h to see the options. - - -If you find bugs or have suggestions or patches, let us know about -them by using the Python issue tracker: - -http://bugs.python.org - -For further details and links, read the Help files and check the IDLE home -page at - -http://www.python.org/idle/ - -There is a mail list for IDLE: idle-dev@python.org. You can join at - -http://mail.python.org/mailman/listinfo/idle-dev +README.txt: an index to idlelib files and the IDLE menu. + +IDLE is Python’s Integrated Development and Learning +Environment. The user documentation is part of the Library Reference and +is available in IDLE by selecting Help => IDLE Help. This README documents +idlelib for IDLE developers and curious users. + +IDLELIB FILES lists files alphabetically by category, +with a short description of each. + +IDLE MENU show the menu tree, annotated with the module +or module object that implements the corresponding function. + +This file is descriptive, not prescriptive, and may have errors +and omissions and lag behind changes in idlelib. + + +IDLELIB FILES +Implemetation files not in IDLE MENU are marked (nim). +Deprecated files and objects are listed separately as the end. + +Startup +------- +__init__.py # import, does nothing +__main__.py # -m, starts IDLE +idle.bat +idle.py +idle.pyw + +Implementation +-------------- +AutoComplete.py # Complete attribute names or filenames. +AutoCompleteWindow.py # Display completions. +AutoExpand.py # Expand word with previous word in file. +Bindings.py # Define most of IDLE menu. +CallTipWindow.py # Display calltip. +CallTips.py # Create calltip text. +ClassBrowser.py # Create module browser window. +CodeContext.py # Show compound statement headers otherwise not visible. +ColorDelegator.py # Colorize text (nim). +Debugger.py # Debug code run from editor; show window. +Delegator.py # Define base class for delegators (nim). +EditorWindow.py # Define most of editor and utility functions. +FileList.py # Open files and manage list of open windows (nim). +FormatParagraph.py# Re-wrap multiline strings and comments. +GrepDialog.py # Find all occurrences of pattern in multiple files. +HyperParser.py # Parse code around a given index. +IOBinding.py # Open, read, and write files +IdleHistory.py # Get previous or next user input in shell (nim) +MultiCall.py # Wrap tk widget to allow multiple calls per event (nim). +MultiStatusBar.py # Define status bar for windows (nim). +ObjectBrowser.py # Define class used in StackViewer (nim). +OutputWindow.py # Create window for grep output. +ParenMatch.py # Match fenceposts: (), [], and {}. +PathBrowser.py # Create path browser window. +Percolator.py # Manage delegator stack (nim). 
+PyParse.py # Give information on code indentation +PyShell.py # Start IDLE, manage shell, complete editor window +RemoteDebugger.py # Debug code run in remote process. +RemoteObjectBrowser.py # Communicate objects between processes with rpc (nim). +ReplaceDialog.py # Search and replace pattern in text. +RstripExtension.py# Strip trailing whitespace +ScriptBinding.py # Check and run user code. +ScrolledList.py # Define ScrolledList widget for IDLE (nim). +SearchDialog.py # Search for pattern in text. +SearchDialogBase.py # Define base for search, replace, and grep dialogs. +SearchEngine.py # Define engine for all 3 search dialogs. +StackViewer.py # View stack after exception. +TreeWidget.py # Define tree widger, used in browsers (nim). +UndoDelegator.py # Manage undo stack. +WidgetRedirector.py # Intercept widget subcommands (for percolator) (nim). +WindowList.py # Manage window list and define listed top level. +ZoomHeight.py # Zoom window to full height of screen. +aboutDialog.py # Display About IDLE dialog. +configDialog.py # Display user configuration dialogs. +configHandler.py # Load, fetch, and save configuration (nim). +configHelpSourceEdit.py # Specify help source. +configSectionNameDialog.py # Spefify user config section name +dynOptionMenuWidget.py # define mutable OptionMenu widget (nim). +help.py # Display IDLE's html doc. +keybindingDialog.py # Change keybindings. +macosxSupport.py # Help IDLE run on Macs (nim). +rpc.py # Commuicate between idle and user processes (nim). +run.py # Manage user code execution subprocess. +tabbedpages.py # Define tabbed pages widget (nim). +textView.py # Define read-only text widget (nim). + +Configuration +------------- +config-extensions.def # Defaults for extensions +config-highlight.def # Defaults for colorizing +config-keys.def # Defaults for key bindings +config-main.def # Defai;ts fpr font and geneal + +Text +---- +CREDITS.txt # not maintained, displayed by About IDLE +HISTORY.txt # NEWS up to July 2001 +NEWS.txt # commits, displayed by About IDLE +README.txt # this file, displeyed by About IDLE +TODO.txt # needs review +extend.txt # about writing extensions +help.html # copy of idle.html in docs, displayed by IDLE Help + +Subdirectories +-------------- +Icons # small image files +idle_test # files for human test and automated unit tests + +Unused and Deprecated files and objects (nim) +--------------------------------------------- +EditorWindow.py: Helpdialog and helpDialog +ToolTip.py: unused. +help.txt +idlever.py + + +IDLE MENUS +Top level items and most submenu items are defined in Bindings. +Extenstions add submenu items when active. The names given are +found, quoted, in one of these modules, paired with a '<>'. +Each pseudoevent is bound to an event handler. Some event handlers +call another function that does the actual work. The annotations below +are intended to at least give the module where the actual work is done. + +File # IOBindig except as noted + New File + Open... # IOBinding.open + Open Module + Recent Files + Class Browser # Class Browser + Path Browser # Path Browser + --- + Save # IDBinding.save + Save As... # IOBinding.save_as + Save Copy As... # IOBindling.save_a_copy + --- + Print Window # IOBinding.print_window + --- + Close + Exit + +Edit + Undo # undoDelegator + Redo # undoDelegator + --- + Cut + Copy + Paste + Select All + --- # Next 5 items use SearchEngine; dialogs use SearchDialogBase + Find # Search Dialog + Find Again + Find Selection + Find in Files... # GrepDialog + Replace... 
# ReplaceDialog + Go to Line + Show Completions # AutoComplete extension and AutoCompleteWidow (&HP) + Expand Word # AutoExpand extension + Show call tip # Calltips extension and CalltipWindow (& Hyperparser) + Show surrounding parens # ParenMatch (& Hyperparser) + +Shell # PyShell + View Last Restart # PyShell.? + Restart Shell # PyShell.? + +Debug (Shell only) + Go to File/Line + Debugger # Debugger, RemoteDebugger + Stack Viewer # StackViewer + Auto-open Stack Viewer # StackViewer + +Format (Editor only) + Indent Region + Dedent Region + Comment Out Region + Uncomment Region + Tabify Region + Untabify Region + Toggle Tabs + New Indent Width + Format Paragraph # FormatParagraph extension + --- + Strip tailing whitespace # RstripExtension extension + +Run (Editor only) + Python Shell # PyShell + --- + Check Module # ScriptBinding + Run Module # ScriptBinding + +Options + Configure IDLE # configDialog + (tabs in the dialog) + Font tab # onfig-main.def + Highlight tab # configSectionNameDialog, config-highlight.def + Keys tab # keybindingDialog, configSectionNameDialog, onfig-keus.def + General tab # configHelpSourceEdit, config-main.def + Configure Extensions # configDialog + Xyz tab # xyz.py, config-extensions.def + --- + Code Context (editor only) # CodeContext extension + +Window + Zoomheight # ZoomHeight extension + --- + # WindowList + +Help + About IDLE # aboutDialog + --- + IDLE Help # help + Python Doc + Turtle Demo + --- + + + (right click) +Defined in EditorWindow, PyShell, Output + Cut + Copy + Paste + --- + Go to file/line (shell and output only) + Set Breakpoint (editor only) + Clear Breakpoint (editor only) + Defined in Debugger + Go to source line + Show stack frame diff --git a/Lib/idlelib/ReplaceDialog.py b/Lib/idlelib/ReplaceDialog.py index fc8b80f1..2665a1c6 100644 --- a/Lib/idlelib/ReplaceDialog.py +++ b/Lib/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ class ReplaceDialog(SearchDialogBase): def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/Lib/idlelib/ScriptBinding.py b/Lib/idlelib/ScriptBinding.py index e8cb2fc6..5cb818d8 100644 --- a/Lib/idlelib/ScriptBinding.py +++ b/Lib/idlelib/ScriptBinding.py @@ -69,7 +69,7 @@ class ScriptBinding: try: tabnanny.process_tokens(tokenize.generate_tokens(f.readline)) except tokenize.TokenError as msg: - msgtxt, (lineno, start) = msg + msgtxt, (lineno, start) = msg.args self.editwin.gotoline(lineno) self.errorbox("Tabnanny Tokenizing Error", "Token Error: %s" % msgtxt) @@ -197,10 +197,10 @@ class ScriptBinding: confirm = tkMessageBox.askokcancel(title="Save Before Run or Check", message=msg, default=tkMessageBox.OK, - master=self.editwin.text) + parent=self.editwin.text) return confirm def errorbox(self, title, message): # XXX This should really be a function of EditorWindow... 
- tkMessageBox.showerror(title, message, master=self.editwin.text) + tkMessageBox.showerror(title, message, parent=self.editwin.text) self.editwin.text.focus_set() diff --git a/Lib/idlelib/ScrolledList.py b/Lib/idlelib/ScrolledList.py index 71ec5470..53576b5f 100644 --- a/Lib/idlelib/ScrolledList.py +++ b/Lib/idlelib/ScrolledList.py @@ -1,4 +1,5 @@ from tkinter import * +from idlelib import macosxSupport class ScrolledList: @@ -22,7 +23,11 @@ class ScrolledList: # Bind events to the list box listbox.bind("", self.click_event) listbox.bind("", self.double_click_event) - listbox.bind("", self.popup_event) + if macosxSupport.isAquaTk(): + listbox.bind("", self.popup_event) + listbox.bind("", self.popup_event) + else: + listbox.bind("", self.popup_event) listbox.bind("", self.up_event) listbox.bind("", self.down_event) # Mark as empty diff --git a/Lib/idlelib/StackViewer.py b/Lib/idlelib/StackViewer.py index b1e5e267..ccc755ce 100644 --- a/Lib/idlelib/StackViewer.py +++ b/Lib/idlelib/StackViewer.py @@ -10,8 +10,7 @@ from idlelib.PyShell import PyShellFileList def StackBrowser(root, flist=None, tb=None, top=None): if top is None: - from tkinter import Toplevel - top = Toplevel(root) + top = tk.Toplevel(root) sc = ScrolledCanvas(top, bg="white", highlightthickness=0) sc.frame.pack(expand=1, fill="both") item = StackTreeItem(flist, tb) @@ -108,12 +107,9 @@ class VariablesTreeItem(ObjectTreeItem): def IsExpandable(self): return len(self.object) > 0 - def keys(self): - return list(self.object.keys()) - def GetSubList(self): sublist = [] - for key in self.keys(): + for key in self.object.keys(): try: value = self.object[key] except KeyError: @@ -124,6 +120,9 @@ class VariablesTreeItem(ObjectTreeItem): sublist.append(item) return sublist + def keys(self): # unused, left for possible 3rd party use + return list(self.object.keys()) + def _stack_viewer(parent): root = tk.Tk() root.title("Test StackViewer") diff --git a/Lib/idlelib/TreeWidget.py b/Lib/idlelib/TreeWidget.py index 4844a695..a19578fd 100644 --- a/Lib/idlelib/TreeWidget.py +++ b/Lib/idlelib/TreeWidget.py @@ -249,7 +249,7 @@ class TreeNode: except AttributeError: # padding carefully selected (on Windows) to match Entry widget: self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() if self.selected: self.label.configure(idleConf.GetHighlight(theme, 'hilite')) else: diff --git a/Lib/idlelib/__init__.py b/Lib/idlelib/__init__.py index 7a83ddea..711f61bb 100644 --- a/Lib/idlelib/__init__.py +++ b/Lib/idlelib/__init__.py @@ -1 +1,8 @@ -# Dummy file to make this a package. +"""The idlelib package implements the Idle application. + +Idle includes an interactive shell and editor. +Use the files named idle.* to start Idle. + +The other files are private implementations. Their details are subject to +change. See PEP 434 for more. Import them at your own risk. 
+""" diff --git a/Lib/idlelib/config-highlight.def b/Lib/idlelib/config-highlight.def index 7d20f782..4146e28c 100644 --- a/Lib/idlelib/config-highlight.def +++ b/Lib/idlelib/config-highlight.def @@ -62,3 +62,32 @@ stderr-foreground= red stderr-background= #ffffff console-foreground= #770000 console-background= #ffffff + +[IDLE Dark] +comment-foreground = #dd0000 +console-foreground = #ff4d4d +error-foreground = #FFFFFF +hilite-background = #7e7e7e +string-foreground = #02ff02 +stderr-background = #002240 +stderr-foreground = #ffb3b3 +console-background = #002240 +hit-background = #fbfbfb +string-background = #002240 +normal-background = #002240 +hilite-foreground = #FFFFFF +keyword-foreground = #ff8000 +error-background = #c86464 +keyword-background = #002240 +builtin-background = #002240 +break-background = #808000 +builtin-foreground = #ff00ff +definition-foreground = #5e5eff +stdout-foreground = #c2d1fa +definition-background = #002240 +normal-foreground = #FFFFFF +cursor-foreground = #ffffff +stdout-background = #002240 +hit-foreground = #002240 +comment-background = #002240 +break-foreground = #FFFFFF diff --git a/Lib/idlelib/config-main.def b/Lib/idlelib/config-main.def index 3622cb2a..8ebbc1b4 100644 --- a/Lib/idlelib/config-main.def +++ b/Lib/idlelib/config-main.def @@ -65,6 +65,8 @@ num-spaces= 4 [Theme] default= 1 name= IDLE Classic +name2= +# name2 set in user config-main.cfg for themes added after 2015 Oct 1 [Keys] default= 1 diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py index 9ed63369..9b16459d 100644 --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -20,7 +20,9 @@ from idlelib.keybindingDialog import GetKeysDialog from idlelib.configSectionNameDialog import GetCfgSectionNameDialog from idlelib.configHelpSourceEdit import GetHelpSourceDialog from idlelib.tabbedpages import TabbedPageSet +from idlelib.textView import view_text from idlelib import macosxSupport + class ConfigDialog(Toplevel): def __init__(self, parent, title='', _htest=False, _utest=False): @@ -43,19 +45,20 @@ class ConfigDialog(Toplevel): #The first value of the tuple is the sample area tag name. #The second value is the display name list sort index. 
self.themeElements={ - 'Normal Text':('normal', '00'), - 'Python Keywords':('keyword', '01'), - 'Python Definitions':('definition', '02'), - 'Python Builtins':('builtin', '03'), - 'Python Comments':('comment', '04'), - 'Python Strings':('string', '05'), - 'Selected Text':('hilite', '06'), - 'Found Text':('hit', '07'), - 'Cursor':('cursor', '08'), - 'Error Text':('error', '09'), - 'Shell Normal Text':('console', '10'), - 'Shell Stdout Text':('stdout', '11'), - 'Shell Stderr Text':('stderr', '12'), + 'Normal Text': ('normal', '00'), + 'Python Keywords': ('keyword', '01'), + 'Python Definitions': ('definition', '02'), + 'Python Builtins': ('builtin', '03'), + 'Python Comments': ('comment', '04'), + 'Python Strings': ('string', '05'), + 'Selected Text': ('hilite', '06'), + 'Found Text': ('hit', '07'), + 'Cursor': ('cursor', '08'), + 'Editor Breakpoint': ('break', '09'), + 'Shell Normal Text': ('console', '10'), + 'Shell Error Text': ('error', '11'), + 'Shell Stdout Text': ('stdout', '12'), + 'Shell Stderr Text': ('stderr', '13'), } self.ResetChangedItems() #load initial values in changed items dict self.CreateWidgets() @@ -77,13 +80,16 @@ class ConfigDialog(Toplevel): def CreateWidgets(self): self.tabPages = TabbedPageSet(self, - page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General']) + page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General', + 'Extensions']) self.tabPages.pack(side=TOP, expand=TRUE, fill=BOTH) self.CreatePageFontTab() self.CreatePageHighlight() self.CreatePageKeys() self.CreatePageGeneral() + self.CreatePageExtensions() self.create_action_buttons().pack(side=BOTTOM) + def create_action_buttons(self): if macosxSupport.isAquaTk(): # Changing the default padding on OSX results in unreadable @@ -93,28 +99,18 @@ class ConfigDialog(Toplevel): paddingArgs = {'padx':6, 'pady':3} outer = Frame(self, pady=2) buttons = Frame(outer, pady=2) - self.buttonOk = Button( - buttons, text='Ok', command=self.Ok, - takefocus=FALSE, **paddingArgs) - self.buttonApply = Button( - buttons, text='Apply', command=self.Apply, - takefocus=FALSE, **paddingArgs) - self.buttonCancel = Button( - buttons, text='Cancel', command=self.Cancel, - takefocus=FALSE, **paddingArgs) - self.buttonOk.pack(side=LEFT, padx=5) - self.buttonApply.pack(side=LEFT, padx=5) - self.buttonCancel.pack(side=LEFT, padx=5) -# Comment out Help button creation and packing until implement self.Help -## self.buttonHelp = Button( -## buttons, text='Help', command=self.Help, -## takefocus=FALSE, **paddingArgs) -## self.buttonHelp.pack(side=RIGHT, padx=5) - + for txt, cmd in ( + ('Ok', self.Ok), + ('Apply', self.Apply), + ('Cancel', self.Cancel), + ('Help', self.Help)): + Button(buttons, text=txt, command=cmd, takefocus=FALSE, + **paddingArgs).pack(side=LEFT, padx=5) # add space above buttons Frame(outer, height=2, borderwidth=0).pack(side=TOP) buttons.pack(side=BOTTOM) return outer + def CreatePageFontTab(self): parent = self.parent self.fontSize = StringVar(parent) @@ -219,7 +215,8 @@ class ConfigDialog(Toplevel): ("'selected'", 'hilite'), ('\n var2 = ', 'normal'), ("'found'", 'hit'), ('\n var3 = ', 'normal'), ('list', 'builtin'), ('(', 'normal'), - ('None', 'keyword'), (')\n\n', 'normal'), + ('None', 'keyword'), (')\n', 'normal'), + (' breakpoint("line")', 'break'), ('\n\n', 'normal'), (' error ', 'error'), (' ', 'normal'), ('cursor |', 'cursor'), ('\n ', 'normal'), ('shell', 'console'), (' ', 'normal'), @@ -266,6 +263,7 @@ class ConfigDialog(Toplevel): self.buttonDeleteCustomTheme=Button( frameTheme, text='Delete Custom 
Theme', command=self.DeleteCustomTheme) + self.new_custom_theme = Label(frameTheme, bd=2) ##widget packing #body @@ -289,6 +287,7 @@ class ConfigDialog(Toplevel): self.optMenuThemeBuiltin.pack(side=TOP, fill=X, padx=5, pady=5) self.optMenuThemeCustom.pack(side=TOP, fill=X, anchor=W, padx=5, pady=5) self.buttonDeleteCustomTheme.pack(side=TOP, fill=X, padx=5, pady=5) + self.new_custom_theme.pack(side=TOP, fill=X, pady=5) return frame def CreatePageKeys(self): @@ -505,7 +504,16 @@ class ConfigDialog(Toplevel): def VarChanged_builtinTheme(self, *params): value = self.builtinTheme.get() - self.AddChangedItem('main', 'Theme', 'name', value) + if value == 'IDLE Dark': + if idleConf.GetOption('main', 'Theme', 'name') != 'IDLE New': + self.AddChangedItem('main', 'Theme', 'name', 'IDLE Classic') + self.AddChangedItem('main', 'Theme', 'name2', value) + self.new_custom_theme.config(text='New theme, see Help', + fg='#500000') + else: + self.AddChangedItem('main', 'Theme', 'name', value) + self.AddChangedItem('main', 'Theme', 'name2', '') + self.new_custom_theme.config(text='', fg='black') self.PaintThemeSample() def VarChanged_customTheme(self, *params): @@ -1083,6 +1091,7 @@ class ConfigDialog(Toplevel): self.LoadKeyCfg() ### general page self.LoadGeneralCfg() + # note: extension page handled separately def SaveNewKeySet(self, keySetName, keySet): """ @@ -1136,6 +1145,7 @@ class ConfigDialog(Toplevel): # save these even if unchanged! idleConf.userCfg[configType].Save() self.ResetChangedItems() #clear the changed items dict + self.save_all_changed_extensions() # uses a different mechanism def DeactivateCurrentConfig(self): #Before a config is saved, some cleanup of current @@ -1167,114 +1177,62 @@ class ConfigDialog(Toplevel): self.ActivateConfigChanges() def Help(self): - pass - -class VerticalScrolledFrame(Frame): - """A pure Tkinter vertically scrollable frame. 
- - * Use the 'interior' attribute to place widgets inside the scrollable frame - * Construct and pack/place/grid normally - * This frame only allows vertical scrolling - """ - def __init__(self, parent, *args, **kw): - Frame.__init__(self, parent, *args, **kw) - - # create a canvas object and a vertical scrollbar for scrolling it - vscrollbar = Scrollbar(self, orient=VERTICAL) - vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE) - canvas = Canvas(self, bd=0, highlightthickness=0, - yscrollcommand=vscrollbar.set) - canvas.pack(side=LEFT, fill=BOTH, expand=TRUE) - vscrollbar.config(command=canvas.yview) - - # reset the view - canvas.xview_moveto(0) - canvas.yview_moveto(0) - - # create a frame inside the canvas which will be scrolled with it - self.interior = interior = Frame(canvas) - interior_id = canvas.create_window(0, 0, window=interior, anchor=NW) - - # track changes to the canvas and frame width and sync them, - # also updating the scrollbar - def _configure_interior(event): - # update the scrollbars to match the size of the inner frame - size = (interior.winfo_reqwidth(), interior.winfo_reqheight()) - canvas.config(scrollregion="0 0 %s %s" % size) - if interior.winfo_reqwidth() != canvas.winfo_width(): - # update the canvas's width to fit the inner frame - canvas.config(width=interior.winfo_reqwidth()) - interior.bind('', _configure_interior) + page = self.tabPages._current_page + view_text(self, title='Help for IDLE preferences', + text=help_common+help_pages.get(page, '')) - def _configure_canvas(event): - if interior.winfo_reqwidth() != canvas.winfo_width(): - # update the inner frame's width to fill the canvas - canvas.itemconfigure(interior_id, width=canvas.winfo_width()) - canvas.bind('', _configure_canvas) + def CreatePageExtensions(self): + """Part of the config dialog used for configuring IDLE extensions. - return + This code is generic - it works for any and all IDLE extensions. -def is_int(s): - "Return 's is blank or represents an int'" - if not s: - return True - try: - int(s) - return True - except ValueError: - return False - -# TODO: -# * Revert to default(s)? Per option or per extension? -# * List options in their original order (possible??) -class ConfigExtensionsDialog(Toplevel): - """A dialog for configuring IDLE extensions. - - This dialog is generic - it works for any and all IDLE extensions. - - IDLE extensions save their configuration options using idleConf. - ConfigExtensionsDialog reads the current configuration using idleConf, - supplies a GUI interface to change the configuration values, and saves the - changes using idleConf. + IDLE extensions save their configuration options using idleConf. + This code reads the current configuration using idleConf, supplies a + GUI interface to change the configuration values, and saves the + changes using idleConf. - Not all changes take effect immediately - some may require restarting IDLE. - This depends on each extension's implementation. - - All values are treated as text, and it is up to the user to supply - reasonable values. The only exception to this are the 'enable*' options, - which are boolean, and can be toggled with an True/False button. - """ - def __init__(self, parent, title=None, _htest=False): - Toplevel.__init__(self, parent) - self.wm_withdraw() + Not all changes take effect immediately - some may require restarting IDLE. + This depends on each extension's implementation. 
- self.configure(borderwidth=5) - self.geometry( - "+%d+%d" % (parent.winfo_rootx() + 20, - parent.winfo_rooty() + (30 if not _htest else 150))) - self.wm_title(title or 'IDLE Extensions Configuration') - - self.defaultCfg = idleConf.defaultCfg['extensions'] - self.userCfg = idleConf.userCfg['extensions'] + All values are treated as text, and it is up to the user to supply + reasonable values. The only exception to this are the 'enable*' options, + which are boolean, and can be toggled with an True/False button. + """ + parent = self.parent + frame = self.tabPages.pages['Extensions'].frame + self.ext_defaultCfg = idleConf.defaultCfg['extensions'] + self.ext_userCfg = idleConf.userCfg['extensions'] self.is_int = self.register(is_int) self.load_extensions() - self.create_widgets() - - self.resizable(height=FALSE, width=FALSE) # don't allow resizing yet - self.transient(parent) - self.protocol("WM_DELETE_WINDOW", self.Cancel) - self.tabbed_page_set.focus_set() - # wait for window to be generated - self.update() - # set current width as the minimum width - self.wm_minsize(self.winfo_width(), 1) - # now allow resizing - self.resizable(height=TRUE, width=TRUE) - - self.wm_deiconify() - if not _htest: - self.grab_set() - self.wait_window() + # create widgets - a listbox shows all available extensions, with the + # controls for the extension selected in the listbox to the right + self.extension_names = StringVar(self) + frame.rowconfigure(0, weight=1) + frame.columnconfigure(2, weight=1) + self.extension_list = Listbox(frame, listvariable=self.extension_names, + selectmode='browse') + self.extension_list.bind('<>', self.extension_selected) + scroll = Scrollbar(frame, command=self.extension_list.yview) + self.extension_list.yscrollcommand=scroll.set + self.details_frame = LabelFrame(frame, width=250, height=250) + self.extension_list.grid(column=0, row=0, sticky='nws') + scroll.grid(column=1, row=0, sticky='ns') + self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) + frame.configure(padx=10, pady=10) + self.config_frame = {} + self.current_extension = None + + self.outerframe = self # TEMPORARY + self.tabbed_page_set = self.extension_list # TEMPORARY + + # create the frame holding controls for each extension + ext_names = '' + for ext_name in sorted(self.extensions): + self.create_extension_frame(ext_name) + ext_names = ext_names + '{' + ext_name + '} ' + self.extension_names.set(ext_names) + self.extension_list.selection_set(0) + self.extension_selected(None) def load_extensions(self): "Fill self.extensions with data from the default and user configs." 
@@ -1283,7 +1241,7 @@ class ConfigExtensionsDialog(Toplevel): self.extensions[ext_name] = [] for ext_name in self.extensions: - opt_list = sorted(self.defaultCfg.GetOptionList(ext_name)) + opt_list = sorted(self.ext_defaultCfg.GetOptionList(ext_name)) # bring 'enable' options to the beginning of the list enables = [opt_name for opt_name in opt_list @@ -1293,7 +1251,7 @@ class ConfigExtensionsDialog(Toplevel): opt_list = enables + opt_list for opt_name in opt_list: - def_str = self.defaultCfg.Get( + def_str = self.ext_defaultCfg.Get( ext_name, opt_name, raw=True) try: def_obj = {'True':True, 'False':False}[def_str] @@ -1306,7 +1264,7 @@ class ConfigExtensionsDialog(Toplevel): def_obj = def_str opt_type = None try: - value = self.userCfg.Get( + value = self.ext_userCfg.Get( ext_name, opt_name, type=opt_type, raw=True, default=def_obj) except ValueError: # Need this until .Get fixed @@ -1321,40 +1279,25 @@ class ConfigExtensionsDialog(Toplevel): 'var': var, }) - def create_widgets(self): - """Create the dialog's widgets.""" - self.rowconfigure(0, weight=1) - self.rowconfigure(1, weight=0) - self.columnconfigure(0, weight=1) - - # create the tabbed pages - self.tabbed_page_set = TabbedPageSet( - self, page_names=self.extensions.keys(), - n_rows=None, max_tabs_per_row=5, - page_class=TabbedPageSet.PageRemove) - self.tabbed_page_set.grid(row=0, column=0, sticky=NSEW) - for ext_name in self.extensions: - self.create_tab_page(ext_name) - - self.create_action_buttons().grid(row=1) - - create_action_buttons = ConfigDialog.create_action_buttons - - def create_tab_page(self, ext_name): - """Create the page for an extension.""" - - page = LabelFrame(self.tabbed_page_set.pages[ext_name].frame, - border=2, padx=2, relief=GROOVE, - text=' %s ' % ext_name) - page.pack(fill=BOTH, expand=True, padx=12, pady=2) - - # create the scrollable frame which will contain the entries - scrolled_frame = VerticalScrolledFrame(page, pady=2, height=250) - scrolled_frame.pack(side=BOTTOM, fill=BOTH, expand=TRUE) - entry_area = scrolled_frame.interior - entry_area.columnconfigure(0, weight=0) - entry_area.columnconfigure(1, weight=1) - + def extension_selected(self, event): + newsel = self.extension_list.curselection() + if newsel: + newsel = self.extension_list.get(newsel) + if newsel is None or newsel != self.current_extension: + if self.current_extension: + self.details_frame.config(text='') + self.config_frame[self.current_extension].grid_forget() + self.current_extension = None + if newsel: + self.details_frame.config(text=newsel) + self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') + self.current_extension = newsel + + def create_extension_frame(self, ext_name): + """Create a frame holding the widgets to configure one extension""" + f = VerticalScrolledFrame(self.details_frame, height=250, width=250) + self.config_frame[ext_name] = f + entry_area = f.interior # create an entry for each configuration option for row, opt in enumerate(self.extensions[ext_name]): # create a row with a label and entry/checkbutton @@ -1365,30 +1308,18 @@ class ConfigExtensionsDialog(Toplevel): Checkbutton(entry_area, textvariable=var, variable=var, onvalue='True', offvalue='False', indicatoron=FALSE, selectcolor='', width=8 - ).grid(row=row, column=1, sticky=W, padx=7) + ).grid(row=row, column=1, sticky=W, padx=7) elif opt['type'] == 'int': Entry(entry_area, textvariable=var, validate='key', - validatecommand=(self.is_int, '%P') - ).grid(row=row, column=1, sticky=NSEW, padx=7) + validatecommand=(self.is_int, '%P') + 
).grid(row=row, column=1, sticky=NSEW, padx=7) else: Entry(entry_area, textvariable=var - ).grid(row=row, column=1, sticky=NSEW, padx=7) + ).grid(row=row, column=1, sticky=NSEW, padx=7) return - - Ok = ConfigDialog.Ok - - def Apply(self): - self.save_all_changed_configs() - pass - - Cancel = ConfigDialog.Cancel - - def Help(self): - pass - - def set_user_value(self, section, opt): + def set_extension_value(self, section, opt): name = opt['name'] default = opt['default'] value = opt['var'].get().strip() or default @@ -1396,20 +1327,92 @@ class ConfigExtensionsDialog(Toplevel): # if self.defaultCfg.has_section(section): # Currently, always true; if not, indent to return if (value == default): - return self.userCfg.RemoveOption(section, name) + return self.ext_userCfg.RemoveOption(section, name) # set the option - return self.userCfg.SetOption(section, name, value) + return self.ext_userCfg.SetOption(section, name, value) - def save_all_changed_configs(self): + def save_all_changed_extensions(self): """Save configuration changes to the user config file.""" has_changes = False for ext_name in self.extensions: options = self.extensions[ext_name] for opt in options: - if self.set_user_value(ext_name, opt): + if self.set_extension_value(ext_name, opt): has_changes = True if has_changes: - self.userCfg.Save() + self.ext_userCfg.Save() + + +help_common = '''\ +When you click either the Apply or Ok buttons, settings in this +dialog that are different from IDLE's default are saved in +a .idlerc directory in your home directory. Except as noted, +these changes apply to all versions of IDLE installed on this +machine. Some do not take affect until IDLE is restarted. +[Cancel] only cancels changes made since the last save. +''' +help_pages = { + 'Highlighting':''' +Highlighting: +The IDLE Dark color theme is new in October 2015. It can only +be used with older IDLE releases if it is saved as a custom +theme, with a different name. +''' +} + + +def is_int(s): + "Return 's is blank or represents an int'" + if not s: + return True + try: + int(s) + return True + except ValueError: + return False + + +class VerticalScrolledFrame(Frame): + """A pure Tkinter vertically scrollable frame. 
+ + * Use the 'interior' attribute to place widgets inside the scrollable frame + * Construct and pack/place/grid normally + * This frame only allows vertical scrolling + """ + def __init__(self, parent, *args, **kw): + Frame.__init__(self, parent, *args, **kw) + + # create a canvas object and a vertical scrollbar for scrolling it + vscrollbar = Scrollbar(self, orient=VERTICAL) + vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE) + canvas = Canvas(self, bd=0, highlightthickness=0, + yscrollcommand=vscrollbar.set, width=240) + canvas.pack(side=LEFT, fill=BOTH, expand=TRUE) + vscrollbar.config(command=canvas.yview) + + # reset the view + canvas.xview_moveto(0) + canvas.yview_moveto(0) + + # create a frame inside the canvas which will be scrolled with it + self.interior = interior = Frame(canvas) + interior_id = canvas.create_window(0, 0, window=interior, anchor=NW) + + # track changes to the canvas and frame width and sync them, + # also updating the scrollbar + def _configure_interior(event): + # update the scrollbars to match the size of the inner frame + size = (interior.winfo_reqwidth(), interior.winfo_reqheight()) + canvas.config(scrollregion="0 0 %s %s" % size) + interior.bind('', _configure_interior) + + def _configure_canvas(event): + if interior.winfo_reqwidth() != canvas.winfo_width(): + # update the inner frame's width to fill the canvas + canvas.itemconfigure(interior_id, width=canvas.winfo_width()) + canvas.bind('', _configure_canvas) + + return if __name__ == '__main__': @@ -1417,4 +1420,4 @@ if __name__ == '__main__': unittest.main('idlelib.idle_test.test_configdialog', verbosity=2, exit=False) from idlelib.idle_test.htest import run - run(ConfigDialog, ConfigExtensionsDialog) + run(ConfigDialog) diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py index 83abad78..531efb4b 100644 --- a/Lib/idlelib/configHandler.py +++ b/Lib/idlelib/configHandler.py @@ -372,8 +372,32 @@ class IdleConf: return theme def CurrentTheme(self): - "Return the name of the currently active theme." - return self.GetOption('main', 'Theme', 'name', default='') + """Return the name of the currently active text color theme. + + idlelib.config-main.def includes this section + [Theme] + default= 1 + name= IDLE Classic + name2= + # name2 set in user config-main.cfg for themes added after 2015 Oct 1 + + Item name2 is needed because setting name to a new builtin + causes older IDLEs to display multiple error messages or quit. + See https://bugs.python.org/issue25313. + When default = True, name2 takes precedence over name, + while older IDLEs will just use name. + """ + default = self.GetOption('main', 'Theme', 'default', + type='bool', default=True) + if default: + theme = self.GetOption('main', 'Theme', 'name2', default='') + if default and not theme or not default: + theme = self.GetOption('main', 'Theme', 'name', default='') + source = self.defaultCfg if default else self.userCfg + if source['highlight'].has_section(theme): + return theme + else: + return "IDLE Classic" def CurrentKeys(self): "Return the name of the currently active key set." diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html new file mode 100644 index 00000000..2189fd4c --- /dev/null +++ b/Lib/idlelib/help.html @@ -0,0 +1,709 @@ + + + + + + + + 25.5. IDLE — Python 3.4.3 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + +
25.5. IDLE

IDLE is Python’s Integrated Development and Learning Environment.

IDLE has the following features:

- coded in 100% pure Python, using the tkinter GUI toolkit
- cross-platform: works mostly the same on Windows, Unix, and Mac OS X
- Python shell window (interactive interpreter) with colorizing of code input, output, and error messages
- multi-window text editor with multiple undo, Python colorizing, smart indent, call tips, auto completion, and other features
- search within any window, replace within editor windows, and search through multiple files (grep)
- debugger with persistent breakpoints, stepping, and viewing of global and local namespaces
- configuration, browsers, and other dialogs
25.5.2. Editing and navigation

In this section, ‘C’ refers to the Control key on Windows and Unix and the Command key on Mac OSX.

- Backspace deletes to the left; Del deletes to the right
- C-Backspace deletes the word to the left; C-Del deletes the word to the right
- Arrow keys and Page Up/Page Down move around
- C-LeftArrow and C-RightArrow move by words
- Home/End go to begin/end of line
- C-Home/C-End go to begin/end of file
- Some useful Emacs bindings are inherited from Tcl/Tk:
  - C-a beginning of line
  - C-e end of line
  - C-k kill line (but doesn’t put it in clipboard)
  - C-l center window around the insertion point
  - C-b go backwards one character without deleting (usually you can also use the cursor key for this)
  - C-f go forward one character without deleting (usually you can also use the cursor key for this)
  - C-p go up one line (usually you can also use the cursor key for this)
  - C-d delete next character

Standard keybindings (like C-c to copy and C-v to paste) may work. Keybindings are selected in the Configure IDLE dialog.
25.5.2.1. Automatic indentation

After a block-opening statement, the next line is indented by 4 spaces (in the Python Shell window by one tab). After certain keywords (break, return, etc.) the next line is dedented. In leading indentation, Backspace deletes up to 4 spaces if they are there. Tab inserts spaces (in the Python Shell window, one tab); the number depends on the Indent width setting. Currently tabs are restricted to four spaces due to Tcl/Tk limitations.

See also the indent/dedent region commands in the edit menu.
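A purely illustrative sketch of the behavior described above; this is ordinary Python as typed into an IDLE editor window, not any IDLE-specific API:

    def first_true(items):        # block opener: IDLE indents the next line by 4 spaces
        for item in items:        # nested opener: indentation grows to 8 spaces
            if item:
                return item       # after 'return', IDLE dedents the next line
        return None               # Backspace in leading whitespace removes up to 4 spaces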

25.5.2.2. Completions

Completions are supplied for functions, classes, and attributes of classes, both built-in and user-defined. Completions are also provided for filenames.

The AutoCompleteWindow (ACW) will open after a predefined delay (default is two seconds) after a ‘.’ or (in a string) an os.sep is typed. If, after one of those characters (plus zero or more other characters), a tab is typed, the ACW will open immediately if a possible continuation is found.

If there is only one possible completion for the characters entered, a Tab will supply that completion without opening the ACW.

‘Show Completions’ will force open a completions window; by default, C-space will open a completions window. In an empty string, this will contain the files in the current directory. On a blank line, it will contain the built-in and user-defined functions and classes in the current namespaces, plus any modules imported. If some characters have been entered, the ACW will attempt to be more specific.

If a string of characters is typed, the ACW selection will jump to the entry most closely matching those characters. Entering a tab will cause the longest non-ambiguous match to be entered in the Editor window or Shell. Two tabs in a row will supply the current ACW selection, as will return or a double click. Cursor keys, Page Up/Down, mouse selection, and the scroll wheel all operate on the ACW.

“Hidden” attributes can be accessed by typing the beginning of a hidden name after a ‘.’, e.g. ‘_’. This allows access to modules with __all__ set, or to class-private attributes.

Completions and the ‘Expand Word’ facility can save a lot of typing!

Completions are currently limited to those in the namespaces. Names in an Editor window which are not reachable via __main__ and sys.modules will not be found. Run the module once with your imports to correct this situation. Note that IDLE itself places quite a few modules in sys.modules, so much can be found by default, e.g. the re module.

If you don’t like the ACW popping up unbidden, simply make the delay longer or disable the extension.

25.5.2.3. Calltips¶

A calltip is shown when one types ( after the name of an accessible function. A name expression may include dots and subscripts. A calltip remains until it is clicked, the cursor is moved out of the argument area, or ) is typed. When the cursor is in the argument part of a definition, the menu or shortcut displays a calltip.

A calltip consists of the function signature and the first line of the docstring. For builtins without an accessible signature, the calltip consists of all lines up to the fifth line or the first blank line. These details may change.

The set of accessible functions depends on what modules have been imported into the user process, including those imported by Idle itself, and what definitions have been run, all since the last restart.

For example, restart the Shell and enter itertools.count(. A calltip appears because Idle imports itertools into the user process for its own use. (This could change.) Enter turtle.write( and nothing appears. Idle does not import turtle. The menu or shortcut do nothing either. Enter import turtle and then turtle.write( will work.

In an editor, import statements have no effect until one runs the file. One might want to run a file after writing the import statements at the top, or immediately run an existing file before editing.
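The following is a minimal, illustrative sketch of the idea described above, assembling a signature plus the first docstring line with the standard inspect module. It is not IDLE's actual CallTips code, and the helper name calltip_text is made up for this example.

    import inspect
    import itertools

    def calltip_text(func):
        """Return a rough calltip-style string for func (illustrative only)."""
        try:
            signature = str(inspect.signature(func))
        except (ValueError, TypeError):
            signature = '(...)'        # some builtins expose no signature
        doc = (func.__doc__ or '').strip()
        first_line = doc.splitlines()[0] if doc else ''
        return '{}{}\n{}'.format(func.__name__, signature, first_line)

    # For example, itertools.count has both a docstring and (usually) a signature:
    print(calltip_text(itertools.count))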

25.5.2.4. Python Shell window¶

  • C-c interrupts executing command
  • C-d sends end-of-file; closes window if typed at a >>> prompt
  • Alt-/ (Expand word) is also useful to reduce typing

    Command history

    • Alt-p retrieves previous command matching what you have typed. On OS X use C-p.
    • Alt-n retrieves next. On OS X use C-n.
    • Return while on any previous command retrieves that command

25.5.2.5. Text colors¶

Idle defaults to black on white text, but colors text with special meanings. For the shell, these are shell output, shell error, user output, and user error. For Python code, at the shell prompt or in an editor, these are keywords, builtin class and function names, names following class and def, strings, and comments. For any text window, these are the cursor (when present), found text (when possible), and selected text.

Text coloring is done in the background, so uncolorized text is occasionally visible. To change the color scheme, use the Configure IDLE dialog Highlighting tab. The marking of debugger breakpoint lines in the editor and text in popups and dialogs is not user-configurable.

25.5.3. Startup and code execution¶

Upon startup with the -s option, IDLE will execute the file referenced by the environment variables IDLESTARTUP or PYTHONSTARTUP. IDLE first checks for IDLESTARTUP; if IDLESTARTUP is present, the file referenced is run. If IDLESTARTUP is not present, IDLE checks for PYTHONSTARTUP. Files referenced by these environment variables are convenient places to store functions that are used frequently from the IDLE shell, or for executing import statements to import common modules.
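As an illustration, a startup file could look like the sketch below. The path ~/.idlestartup.py and the helper name hexdump are made up for this example; point IDLESTARTUP or PYTHONSTARTUP at whatever file you actually use before launching IDLE with -s.

    # Hypothetical contents of ~/.idlestartup.py: pre-import modules and
    # helpers that are handy in the IDLE shell session.
    import os
    import re
    import sys
    from pprint import pprint

    def hexdump(data, width=16):
        """Small helper kept handy in the shell (illustrative only)."""
        for i in range(0, len(data), width):
            chunk = data[i:i + width]
            print('{:08x}  {}'.format(i, ' '.join('{:02x}'.format(b) for b in chunk)))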

In addition, Tk also loads a startup file if it is present. Note that the Tk file is loaded unconditionally. This additional file is .Idle.py and is looked for in the user’s home directory. Statements in this file will be executed in the Tk namespace, so this file is not useful for importing functions to be used from IDLE’s Python shell.


25.5.3.1. Command line usage¶

idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...

-c command  run command in the shell window
-d          enable debugger and open shell window
-e          open editor window
-h          print help message with legal combinations and exit
-i          open shell window
-r file     run file in shell window
-s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
-t title    set title of shell window
-           run stdin in shell (- must be last option before args)

If there are arguments:

  • If -, -c, or -r is used, all arguments are placed in sys.argv[1:...] and sys.argv[0] is set to '', '-c', or '-r'. No editor window is opened, even if that is the default set in the Options dialog. A sketch of the resulting sys.argv is shown after this list.
  • Otherwise, arguments are files opened for editing and sys.argv reflects the arguments passed to IDLE itself.
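The following is a small sketch of the sys.argv handling just described; it is not IDLE's own code, and build_sys_argv is a made-up helper used only to illustrate the mapping.

    def build_sys_argv(option, args):
        """Return the sys.argv user code would see for -, -c or -r (sketch)."""
        argv0 = {'-': '', '-c': '-c', '-r': '-r'}[option]
        return [argv0] + list(args)

    print(build_sys_argv('-c', ['spam', 'eggs']))   # ['-c', 'spam', 'eggs']
    print(build_sys_argv('-', ['spam']))            # ['', 'spam']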

25.5.3.2. IDLE-console differences¶

As much as possible, the result of executing Python code with IDLE is the same as executing the same code in a console window. However, the different interface and operation occasionally affect results.

For instance, IDLE normally executes user code in a separate process from the IDLE GUI itself. The IDLE versions of sys.stdin, .stdout, and .stderr in the execution process get input from and send output to the GUI process, which keeps control of the keyboard and screen. This is normally transparent, but code that accesses these objects will see different attribute values. Also, functions that directly access the keyboard and screen will not work.
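One way to observe this is a check like the sketch below, which simply prints what the current standard streams look like; the exact types and attribute values seen under IDLE are an implementation detail and are not guaranteed.

    import sys

    # Under a plain console these are ordinary text streams; under IDLE they
    # are replacement objects wired to the GUI process, so the printed type
    # (and possibly the encoding) differs.
    for name in ('stdin', 'stdout', 'stderr'):
        stream = getattr(sys, name)
        print(name, type(stream).__name__, getattr(stream, 'encoding', None))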

With IDLE’s Shell, one enters, edits, and recalls complete statements. Some consoles only work with a single physical line at a time.

25.5.3.3. Running without a subprocess¶

By default, IDLE executes user code in a separate subprocess via a socket, which uses the internal loopback interface. This connection is not externally visible and no data is sent to or received from the Internet. If firewall software complains anyway, you can ignore it.

If the attempt to make the socket connection fails, Idle will notify you. Such failures are sometimes transient, but if persistent, the problem may be either a firewall blocking the connection or misconfiguration of a particular system. Until the problem is fixed, one can run Idle with the -n command line switch.

If IDLE is started with the -n command line switch it will run in a single process and will not create the subprocess which runs the RPC Python execution server. This can be useful if Python cannot create the subprocess or the RPC socket interface on your platform. However, in this mode user code is not isolated from IDLE itself. Also, the environment is not restarted when Run/Run Module (F5) is selected. If your code has been modified, you must reload() the affected modules and re-import any specific items (e.g. from foo import baz) if the changes are to take effect (a sketch is shown below). For these reasons, it is preferable to run IDLE with the default subprocess if at all possible.
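Here is a minimal, self-contained sketch of that reload-and-reimport step. A throwaway module named foo is written to the current directory purely so the example runs; it stands in for a real module you have edited.

    import importlib
    import pathlib
    import sys

    sys.dont_write_bytecode = True                      # keep the sketch self-contained
    pathlib.Path('foo.py').write_text('baz = "old"\n')  # stand-in for your edited module
    sys.path.insert(0, '.')
    import foo
    from foo import baz

    pathlib.Path('foo.py').write_text('baz = "new"\n')  # simulate editing the file
    foo = importlib.reload(foo)                         # pick up the changes
    from foo import baz                                 # re-import the specific name
    print(baz)                                          # -> new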


Deprecated since version 3.4.


25.5.4. Help and preferences¶


25.5.4.1. Additional help sources¶

IDLE includes a help menu entry called “Python Docs” that will open the extensive sources of help, including tutorials, available at docs.python.org. Selected URLs can be added or removed from the help menu at any time using the Configure IDLE dialog. See the IDLE help option in the help menu of IDLE for more information.


25.5.4.2. Setting preferences¶

The font preferences, highlighting, keys, and general preferences can be changed via Configure IDLE on the Option menu. Keys can be user defined; IDLE ships with four built-in key sets. In addition, a user can create a custom key set in the Configure IDLE dialog under the keys tab.


25.5.4.3. Extensions¶

IDLE contains an extension facility. Preferences for extensions can be changed with Configure Extensions. See the beginning of config-extensions.def in the idlelib directory for further information. The default extensions are currently:

  • FormatParagraph
  • AutoExpand
  • ZoomHeight
  • ScriptBinding
  • CallTips
  • ParenMatch
  • AutoComplete
  • CodeContext
  • RstripExtension
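As a rough illustration, the extension sections declared in config-extensions.def (the file mentioned above) can be listed by reading it directly. The suffix filtering below is an assumption about that file's layout, where key-binding sections reuse the extension name; it is not IDLE's own loading code.

    import configparser
    import os
    import idlelib

    path = os.path.join(os.path.dirname(idlelib.__file__), 'config-extensions.def')
    cfg = configparser.ConfigParser()
    cfg.read(path)
    extensions = [s for s in cfg.sections()
                  if not s.endswith(('_cfgBindings', '_bindings'))]
    print(extensions)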
+ + + + + diff --git a/Lib/idlelib/help.py b/Lib/idlelib/help.py new file mode 100644 index 00000000..b31596c2 --- /dev/null +++ b/Lib/idlelib/help.py @@ -0,0 +1,249 @@ +""" help.py: Implement the Idle help menu. +Contents are subject to revision at any time, without notice. + + +Help => About IDLE: diplay About Idle dialog + + + + +Help => IDLE Help: Display help.html with proper formatting. +Doc/library/idle.rst (Sphinx)=> Doc/build/html/library/idle.html +(help.copy_strip)=> Lib/idlelib/help.html + +HelpParser - Parse help.html and and render to tk Text. + +HelpText - Display formatted help.html. + +HelpFrame - Contain text, scrollbar, and table-of-contents. +(This will be needed for display in a future tabbed window.) + +HelpWindow - Display HelpFrame in a standalone window. + +copy_strip - Copy idle.html to help.html, rstripping each line. + +show_idlehelp - Create HelpWindow. Called in EditorWindow.help_dialog. +""" +from html.parser import HTMLParser +from os.path import abspath, dirname, isdir, isfile, join +from tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton +from tkinter import font as tkfont +from idlelib.configHandler import idleConf + +use_ttk = False # until available to import +if use_ttk: + from tkinter.ttk import Menubutton + +## About IDLE ## + + +## IDLE Help ## + +class HelpParser(HTMLParser): + """Render help.html into a text widget. + + The overridden handle_xyz methods handle a subset of html tags. + The supplied text should have the needed tag configurations. + The behavior for unsupported tags, such as table, is undefined. + """ + def __init__(self, text): + HTMLParser.__init__(self, convert_charrefs=True) + self.text = text # text widget we're rendering into + self.tags = '' # current block level text tags to apply + self.chartags = '' # current character level text tags + self.show = False # used so we exclude page navigation + self.hdrlink = False # used so we don't show header links + self.level = 0 # indentation level + self.pre = False # displaying preformatted text + self.hprefix = '' # prefix such as '25.5' to strip from headings + self.nested_dl = False # if we're in a nested
+ self.simplelist = False # simple list (no double spacing) + self.toc = [] # pair headers with text indexes for toc + self.header = '' # text within header tags for toc + + def indent(self, amt=1): + self.level += amt + self.tags = '' if self.level == 0 else 'l'+str(self.level) + + def handle_starttag(self, tag, attrs): + "Handle starttags in help.html." + class_ = '' + for a, v in attrs: + if a == 'class': + class_ = v + s = '' + if tag == 'div' and class_ == 'section': + self.show = True # start of main content + elif tag == 'div' and class_ == 'sphinxsidebar': + self.show = False # end of main content + elif tag == 'p' and class_ != 'first': + s = '\n\n' + elif tag == 'span' and class_ == 'pre': + self.chartags = 'pre' + elif tag == 'span' and class_ == 'versionmodified': + self.chartags = 'em' + elif tag == 'em': + self.chartags = 'em' + elif tag in ['ul', 'ol']: + if class_.find('simple') != -1: + s = '\n' + self.simplelist = True + else: + self.simplelist = False + self.indent() + elif tag == 'dl': + if self.level > 0: + self.nested_dl = True + elif tag == 'li': + s = '\n* ' if self.simplelist else '\n\n* ' + elif tag == 'dt': + s = '\n\n' if not self.nested_dl else '\n' # avoid extra line + self.nested_dl = False + elif tag == 'dd': + self.indent() + s = '\n' + elif tag == 'pre': + self.pre = True + if self.show: + self.text.insert('end', '\n\n') + self.tags = 'preblock' + elif tag == 'a' and class_ == 'headerlink': + self.hdrlink = True + elif tag == 'h1': + self.tags = tag + elif tag in ['h2', 'h3']: + if self.show: + self.header = '' + self.text.insert('end', '\n\n') + self.tags = tag + if self.show: + self.text.insert('end', s, (self.tags, self.chartags)) + + def handle_endtag(self, tag): + "Handle endtags in help.html." + if tag in ['h1', 'h2', 'h3']: + self.indent(0) # clear tag, reset indent + if self.show: + self.toc.append((self.header, self.text.index('insert'))) + elif tag in ['span', 'em']: + self.chartags = '' + elif tag == 'a': + self.hdrlink = False + elif tag == 'pre': + self.pre = False + self.tags = '' + elif tag in ['ul', 'dd', 'ol']: + self.indent(amt=-1) + + def handle_data(self, data): + "Handle date segments in help.html." + if self.show and not self.hdrlink: + d = data if self.pre else data.replace('\n', ' ') + if self.tags == 'h1': + self.hprefix = d[0:d.index(' ')] + if self.tags in ['h1', 'h2', 'h3'] and self.hprefix != '': + if d[0:len(self.hprefix)] == self.hprefix: + d = d[len(self.hprefix):].strip() + self.header += d + self.text.insert('end', d, (self.tags, self.chartags)) + + +class HelpText(Text): + "Display help.html." + def __init__(self, parent, filename): + "Configure tags and feed file to parser." 
+ uwide = idleConf.GetOption('main', 'EditorWindow', 'width', type='int') + uhigh = idleConf.GetOption('main', 'EditorWindow', 'height', type='int') + uhigh = 3 * uhigh // 4 # lines average 4/3 of editor line height + Text.__init__(self, parent, wrap='word', highlightthickness=0, + padx=5, borderwidth=0, width=uwide, height=uhigh) + + normalfont = self.findfont(['TkDefaultFont', 'arial', 'helvetica']) + fixedfont = self.findfont(['TkFixedFont', 'monaco', 'courier']) + self['font'] = (normalfont, 12) + self.tag_configure('em', font=(normalfont, 12, 'italic')) + self.tag_configure('h1', font=(normalfont, 20, 'bold')) + self.tag_configure('h2', font=(normalfont, 18, 'bold')) + self.tag_configure('h3', font=(normalfont, 15, 'bold')) + self.tag_configure('pre', font=(fixedfont, 12), background='#f6f6ff') + self.tag_configure('preblock', font=(fixedfont, 10), lmargin1=25, + borderwidth=1, relief='solid', background='#eeffcc') + self.tag_configure('l1', lmargin1=25, lmargin2=25) + self.tag_configure('l2', lmargin1=50, lmargin2=50) + self.tag_configure('l3', lmargin1=75, lmargin2=75) + self.tag_configure('l4', lmargin1=100, lmargin2=100) + + self.parser = HelpParser(self) + with open(filename, encoding='utf-8') as f: + contents = f.read() + self.parser.feed(contents) + self['state'] = 'disabled' + + def findfont(self, names): + "Return name of first font family derived from names." + for name in names: + if name.lower() in (x.lower() for x in tkfont.names(root=self)): + font = tkfont.Font(name=name, exists=True, root=self) + return font.actual()['family'] + elif name.lower() in (x.lower() + for x in tkfont.families(root=self)): + return name + + +class HelpFrame(Frame): + "Display html text, scrollbar, and toc." + def __init__(self, parent, filename): + Frame.__init__(self, parent) + text = HelpText(self, filename) + self['background'] = text['background'] + scroll = Scrollbar(self, command=text.yview) + text['yscrollcommand'] = scroll.set + self.rowconfigure(0, weight=1) + self.columnconfigure(1, weight=1) # text + self.toc_menu(text).grid(column=0, row=0, sticky='nw') + text.grid(column=1, row=0, sticky='nsew') + scroll.grid(column=2, row=0, sticky='ns') + + def toc_menu(self, text): + "Create table of contents as drop-down menu." + toc = Menubutton(self, text='TOC') + drop = Menu(toc, tearoff=False) + for lbl, dex in text.parser.toc: + drop.add_command(label=lbl, command=lambda dex=dex:text.yview(dex)) + toc['menu'] = drop + return toc + + +class HelpWindow(Toplevel): + "Display frame with rendered html." + def __init__(self, parent, filename, title): + Toplevel.__init__(self, parent) + self.wm_title(title) + self.protocol("WM_DELETE_WINDOW", self.destroy) + HelpFrame(self, filename).grid(column=0, row=0, sticky='nsew') + self.grid_columnconfigure(0, weight=1) + self.grid_rowconfigure(0, weight=1) + + +def copy_strip(): + "Copy idle.html to idlelib/help.html, stripping trailing whitespace." + src = join(abspath(dirname(dirname(dirname(__file__)))), + 'Doc', 'build', 'html', 'library', 'idle.html') + dst = join(abspath(dirname(__file__)), 'help.html') + with open(src, 'rb') as inn,\ + open(dst, 'wb') as out: + for line in inn: + out.write(line.rstrip() + b'\n') + print('idle.html copied to help.html') + +def show_idlehelp(parent): + "Create HelpWindow; called from Idle Help event handler." 
+ filename = join(abspath(dirname(__file__)), 'help.html') + if not isfile(filename): + # try copy_strip, present message + return + HelpWindow(parent, filename, 'IDLE Help') + +if __name__ == '__main__': + from idlelib.idle_test.htest import run + run(show_idlehelp) diff --git a/Lib/idlelib/help.txt b/Lib/idlelib/help.txt index 3f7bb23b..89fbe0b4 100644 --- a/Lib/idlelib/help.txt +++ b/Lib/idlelib/help.txt @@ -1,3 +1,7 @@ +This file, idlelib/help.txt is out-of-date and no longer used by Idle. +It is deprecated and will be removed in the future, possibly in 3.6 +---------------------------------------------------------------------- + [See the end of this file for ** TIPS ** on using IDLE !!] IDLE is the Python IDE built with the tkinter GUI toolkit. diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py index aa7f2e8f..3e24518a 100644 --- a/Lib/idlelib/idle_test/htest.py +++ b/Lib/idlelib/idle_test/htest.py @@ -93,15 +93,6 @@ _class_browser_spec = { "Double clicking on items prints a traceback for an exception " "that is ignored." } -ConfigExtensionsDialog_spec = { - 'file': 'configDialog', - 'kwds': {'title': 'Test Extension Configuration', - '_htest': True,}, - 'msg': "IDLE extensions dialog.\n" - "\n[Ok] to close the dialog.[Apply] to apply the settings and " - "and [Cancel] to revert all changes.\nRe-run the test to ensure " - "changes made have persisted." - } _color_delegator_spec = { 'file': 'ColorDelegator', @@ -121,7 +112,8 @@ ConfigDialog_spec = { "font face of the text in the area below it.\nIn the " "'Highlighting' tab, try different color schemes. Clicking " "items in the sample program should update the choices above it." - "\nIn the 'Keys' and 'General' tab, test settings of interest." + "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings" + "of interest." "\n[Ok] to close the dialog.[Apply] to apply the settings and " "and [Cancel] to revert all changes.\nRe-run the test to ensure " "changes made have persisted." @@ -194,19 +186,14 @@ _grep_dialog_spec = { "should open that file \nin a new EditorWindow." } -_help_dialog_spec = { - 'file': 'EditorWindow', - 'kwds': {}, - 'msg': "If the help text displays, this works.\n" - "Text is selectable. Window is scrollable." - } - _io_binding_spec = { 'file': 'IOBinding', 'kwds': {}, - 'msg': "Test the following bindings\n" - " to display open window from file dialog.\n" - " to save the file\n" + 'msg': "Test the following bindings.\n" + " to open file from dialog.\n" + "Edit the file.\n" + " to save the file.\n" + "Check that changes were saved by opening the file elsewhere." } _multi_call_spec = { @@ -279,6 +266,13 @@ _scrolled_list_spec = { "Right clicking an item will display a popup." } +show_idlehelp_spec = { + 'file': 'help', + 'kwds': {}, + 'msg': "If the help text displays, this works.\n" + "Text is selectable. Window is scrollable." 
+ } + _stack_viewer_spec = { 'file': 'StackViewer', 'kwds': {}, diff --git a/Lib/idlelib/idle_test/test_warning.py b/Lib/idlelib/idle_test/test_warning.py index 18627ddd..54ac993e 100644 --- a/Lib/idlelib/idle_test/test_warning.py +++ b/Lib/idlelib/idle_test/test_warning.py @@ -68,6 +68,15 @@ class ShellWarnTest(unittest.TestCase): 'Test', UserWarning, 'test_warning.py', 99, f, 'Line of code') self.assertEqual(shellmsg.splitlines(), f.getvalue().splitlines()) +class ImportWarnTest(unittest.TestCase): + def test_idlever(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + import idlelib.idlever + self.assertEqual(len(w), 1) + self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) + self.assertIn("version", str(w[-1].message)) + if __name__ == '__main__': unittest.main(verbosity=2, exit=False) diff --git a/Lib/idlelib/idlever.py b/Lib/idlelib/idlever.py index 563d933f..3e9f69a3 100644 --- a/Lib/idlelib/idlever.py +++ b/Lib/idlelib/idlever.py @@ -1,4 +1,12 @@ -"""Unused by Idle: there is no separate Idle version anymore. -Kept only for possible existing extension use.""" +""" +The separate Idle version was eliminated years ago; +idlelib.idlever is no longer used by Idle +and will be removed in 3.6 or later. Use + from sys import version + IDLE_VERSION = version[:version.index(' ')] +""" +# Kept for now only for possible existing extension use +import warnings as w +w.warn(__doc__, DeprecationWarning, stacklevel=2) from sys import version IDLE_VERSION = version[:version.index(' ')] diff --git a/Lib/idlelib/macosxSupport.py b/Lib/idlelib/macosxSupport.py index 77330cf9..b96bae1d 100644 --- a/Lib/idlelib/macosxSupport.py +++ b/Lib/idlelib/macosxSupport.py @@ -159,10 +159,14 @@ def overrideRootMenu(root, flist): WindowList.register_callback(postwindowsmenu) def about_dialog(event=None): + "Handle Help 'About IDLE' event." + # Synchronize with EditorWindow.EditorWindow.about_dialog. from idlelib import aboutDialog aboutDialog.AboutDialog(root, 'About IDLE') def config_dialog(event=None): + "Handle Options 'Configure IDLE' event." + # Synchronize with EditorWindow.EditorWindow.config_dialog. from idlelib import configDialog # Ensure that the root object has an instance_dict attribute, @@ -170,13 +174,13 @@ def overrideRootMenu(root, flist): # on an EditorWindow instance that is then passed as the first # argument to ConfigDialog) root.instance_dict = flist.inversedict - root.instance_dict = flist.inversedict configDialog.ConfigDialog(root, 'Settings') def help_dialog(event=None): - from idlelib import textView - fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt') - textView.view_file(root, 'Help', fn) + "Handle Help 'IDLE Help' event." + # Synchronize with EditorWindow.EditorWindow.help_dialog. 
+ from idlelib import help + help.show_idlehelp(root) root.bind('<>', about_dialog) root.bind('<>', config_dialog) diff --git a/Lib/idlelib/run.py b/Lib/idlelib/run.py index 228875c5..595e7bc3 100644 --- a/Lib/idlelib/run.py +++ b/Lib/idlelib/run.py @@ -174,7 +174,7 @@ def show_socket_error(err, address): tkMessageBox.showerror("IDLE Subprocess Error", msg, parent=root) else: tkMessageBox.showerror("IDLE Subprocess Error", - "Socket Error: %s" % err.args[1]) + "Socket Error: %s" % err.args[1], parent=root) root.destroy() def print_exception(): diff --git a/Lib/idlelib/textView.py b/Lib/idlelib/textView.py index 4257eeab..01b2d8f4 100644 --- a/Lib/idlelib/textView.py +++ b/Lib/idlelib/textView.py @@ -21,7 +21,7 @@ class TextViewer(Toplevel): Toplevel.__init__(self, parent) self.configure(borderwidth=5) # place dialog below parent if running htest - self.geometry("=%dx%d+%d+%d" % (625, 500, + self.geometry("=%dx%d+%d+%d" % (750, 500, parent.winfo_rootx() + 10, parent.winfo_rooty() + (10 if not _htest else 100))) #elguavas - config placeholders til config stuff completed diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py index 3508ce96..616b17f8 100644 --- a/Lib/importlib/_bootstrap_external.py +++ b/Lib/importlib/_bootstrap_external.py @@ -437,15 +437,15 @@ def _validate_bytecode_header(data, source_stats=None, name=None, path=None): raw_size = data[8:12] if magic != MAGIC_NUMBER: message = 'bad magic number in {!r}: {!r}'.format(name, magic) - _verbose_message(message) + _verbose_message('{}', message) raise ImportError(message, **exc_details) elif len(raw_timestamp) != 4: message = 'reached EOF while reading timestamp in {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise EOFError(message) elif len(raw_size) != 4: message = 'reached EOF while reading size of source in {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise EOFError(message) if source_stats is not None: try: @@ -455,7 +455,7 @@ def _validate_bytecode_header(data, source_stats=None, name=None, path=None): else: if _r_long(raw_timestamp) != source_mtime: message = 'bytecode is stale for {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise ImportError(message, **exc_details) try: source_size = source_stats['size'] & 0xFFFFFFFF diff --git a/Lib/inspect.py b/Lib/inspect.py index bf4f87d5..b65bec7a 100644 --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -527,17 +527,18 @@ def _finddoc(obj): cls = self else: cls = self.__class__ - elif ismethoddescriptor(obj) or isdatadescriptor(obj): - name = obj.__name__ - cls = obj.__objclass__ - if getattr(cls, name) is not obj: - return None + # Should be tested before isdatadescriptor(). elif isinstance(obj, property): - func = f.fget + func = obj.fget name = func.__name__ cls = _findclass(func) if cls is None or getattr(cls, name) is not obj: return None + elif ismethoddescriptor(obj) or isdatadescriptor(obj): + name = obj.__name__ + cls = obj.__objclass__ + if getattr(cls, name) is not obj: + return None else: return None diff --git a/Lib/io.py b/Lib/io.py index 8d68f1e4..e03db97e 100644 --- a/Lib/io.py +++ b/Lib/io.py @@ -19,7 +19,7 @@ streams. BytesIO is a simple stream of in-memory bytes. Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. 
+is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/Lib/lib2to3/fixes/fix_input.py b/Lib/lib2to3/fixes/fix_input.py index 126da1b9..9cf9a48c 100644 --- a/Lib/lib2to3/fixes/fix_input.py +++ b/Lib/lib2to3/fixes/fix_input.py @@ -17,7 +17,7 @@ class FixInput(fixer_base.BaseFix): """ def transform(self, node, results): - # If we're already wrapped in a eval() call, we're done. + # If we're already wrapped in an eval() call, we're done. if context.match(node.parent.parent): return diff --git a/Lib/lib2to3/fixes/fix_metaclass.py b/Lib/lib2to3/fixes/fix_metaclass.py index d4f99a38..f3f87085 100644 --- a/Lib/lib2to3/fixes/fix_metaclass.py +++ b/Lib/lib2to3/fixes/fix_metaclass.py @@ -114,7 +114,7 @@ def find_metas(cls_node): left_node = expr_node.children[0] if isinstance(left_node, Leaf) and \ left_node.value == '__metaclass__': - # We found a assignment to __metaclass__. + # We found an assignment to __metaclass__. fixup_simple_stmt(node, i, simple_node) remove_trailing_newline(simple_node) yield (node, i, simple_node) diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py index 02a5fc12..b810fa9c 100644 --- a/Lib/logging/handlers.py +++ b/Lib/logging/handlers.py @@ -965,24 +965,26 @@ class SMTPHandler(logging.Handler): """ try: import smtplib - from email.utils import formatdate + from email.message import EmailMessage + import email.utils + port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) - msg = self.format(record) - msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( - self.fromaddr, - ",".join(self.toaddrs), - self.getSubject(record), - formatdate(), msg) + msg = EmailMessage() + msg['From'] = self.fromaddr + msg['To'] = ','.join(self.toaddrs) + msg['Subject'] = self.getSubject(record) + msg['Date'] = email.utils.localtime() + msg.set_content(self.format(record)) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) - smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.send_message(msg) smtp.quit() except Exception: self.handleError(record) diff --git a/Lib/msilib/schema.py b/Lib/msilib/schema.py index a9e16791..70fe138b 100644 --- a/Lib/msilib/schema.py +++ b/Lib/msilib/schema.py @@ -733,7 +733,7 @@ _Validation_records = [ ('CustomAction','Source','Y',None, None, None, None, 'CustomSource',None, 'The table reference of the source of the code.',), ('CustomAction','Target','Y',None, None, None, None, 'Formatted',None, 'Excecution parameter, depends on the type of custom action',), ('DrLocator','Signature_','N',None, None, None, None, 'Identifier',None, 'The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',), -('DrLocator','Path','Y',None, None, None, None, 'AnyPath',None, 'The path on the user system. This is a either a subpath below the value of the Parent or a full path. The path may contain properties enclosed within [ ] that will be expanded.',), +('DrLocator','Path','Y',None, None, None, None, 'AnyPath',None, 'The path on the user system. This is either a subpath below the value of the Parent or a full path. The path may contain properties enclosed within [ ] that will be expanded.',), ('DrLocator','Depth','Y',0,32767,None, None, None, None, 'The depth below the path to which the Signature_ is recursively searched. 
If absent, the depth is assumed to be 0.',), ('DrLocator','Parent','Y',None, None, None, None, 'Identifier',None, 'The parent file signature. It is also a foreign key in the Signature table. If null and the Path column does not expand to a full path, then all the fixed drives of the user system are searched using the Path.',), ('DuplicateFile','File_','N',None, None, 'File',1,'Identifier',None, 'Foreign key referencing the source file to be duplicated.',), diff --git a/Lib/nntplib.py b/Lib/nntplib.py index 3413610a..a75faade 100644 --- a/Lib/nntplib.py +++ b/Lib/nntplib.py @@ -201,7 +201,7 @@ def _parse_overview_fmt(lines): return fmt def _parse_overview(lines, fmt, data_process_func=None): - """Parse the response to a OVER or XOVER command according to the + """Parse the response to an OVER or XOVER command according to the overview format `fmt`.""" n_defaults = len(_DEFAULT_OVERVIEW_FMT) overview = [] diff --git a/Lib/nturl2path.py b/Lib/nturl2path.py index 5a6d44a6..36dc7658 100644 --- a/Lib/nturl2path.py +++ b/Lib/nturl2path.py @@ -4,9 +4,11 @@ def url2pathname(url): """OS-specific conversion from a relative URL of the 'file' scheme to a file system path; not recommended for general use.""" # e.g. - # ///C|/foo/bar/spam.foo - # becomes - # C:\foo\bar\spam.foo + # ///C|/foo/bar/spam.foo + # and + # ///C:/foo/bar/spam.foo + # become + # C:\foo\bar\spam.foo import string, urllib.parse # Windows itself uses ":" even in URLs. url = url.replace(':', '|') @@ -39,9 +41,9 @@ def pathname2url(p): """OS-specific conversion from a file system path to a relative URL of the 'file' scheme; not recommended for general use.""" # e.g. - # C:\foo\bar\spam.foo + # C:\foo\bar\spam.foo # becomes - # ///C|/foo/bar/spam.foo + # ///C:/foo/bar/spam.foo import urllib.parse if not ':' in p: # No drive specifier, just convert slashes and quote the name diff --git a/Lib/os.py b/Lib/os.py index 3d2c6d3d..2b89b93d 100644 --- a/Lib/os.py +++ b/Lib/os.py @@ -230,7 +230,7 @@ def makedirs(name, mode=0o777, exist_ok=False): try: makedirs(head, mode, exist_ok) except FileExistsError: - # be happy if someone already created the path + # Defeats race condition when another thread created the path pass cdir = curdir if isinstance(tail, bytes): @@ -239,8 +239,10 @@ def makedirs(name, mode=0o777, exist_ok=False): return try: mkdir(name, mode) - except OSError as e: - if not exist_ok or e.errno != errno.EEXIST or not path.isdir(name): + except OSError: + # Cannot rely on checking for EEXIST, since the operating system + # could give priority to other errors like EACCES or EROFS + if not exist_ok or not path.isdir(name): raise def removedirs(name): diff --git a/Lib/pathlib.py b/Lib/pathlib.py index 01e66a09..1b5d2a90 100644 --- a/Lib/pathlib.py +++ b/Lib/pathlib.py @@ -1017,8 +1017,8 @@ class Path(PurePath): return cls(cls()._flavour.gethomedir(None)) def samefile(self, other_path): - """Return whether `other_file` is the same or not as this file. - (as returned by os.path.samefile(file, other_file)). + """Return whether other_path is the same or not as this file + (as returned by os.path.samefile()). """ st = self.stat() try: diff --git a/Lib/pdb.py b/Lib/pdb.py index cf2edbf8..4cba8a0c 100755 --- a/Lib/pdb.py +++ b/Lib/pdb.py @@ -1669,6 +1669,9 @@ def main(): # In most cases SystemExit does not warrant a post-mortem session. print("The program exited via sys.exit(). 
Exit status:", end=' ') print(sys.exc_info()[1]) + except SyntaxError: + traceback.print_exc() + sys.exit(1) except: traceback.print_exc() print("Uncaught exception. Entering post mortem debugging") diff --git a/Lib/pickle.py b/Lib/pickle.py index 6c26c5e0..45fd383e 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -358,7 +358,7 @@ class _Pickler: The *file* argument must have a write() method that accepts a single bytes argument. It can thus be a file object opened for - binary writing, a io.BytesIO instance, or any other custom + binary writing, an io.BytesIO instance, or any other custom object that meets this interface. If *fix_imports* is True and *protocol* is less than 3, pickle @@ -984,7 +984,7 @@ class _Unpickler: The argument *file* must have two methods, a read() method that takes an integer argument, and a readline() method that requires no arguments. Both methods should return bytes. Thus *file* - can be a binary file object opened for reading, a io.BytesIO + can be a binary file object opened for reading, an io.BytesIO object, or any other custom object that meets this interface. The file-like object must have two methods, a read() method @@ -1205,6 +1205,14 @@ class _Unpickler: self.append(str(self.read(len), 'utf-8', 'surrogatepass')) dispatch[BINUNICODE8[0]] = load_binunicode8 + def load_binbytes8(self): + len, = unpack(' maxsize: + raise UnpicklingError("BINBYTES8 exceeds system's maximum size " + "of %d bytes" % maxsize) + self.append(self.read(len)) + dispatch[BINBYTES8[0]] = load_binbytes8 + def load_short_binstring(self): len = self.read(1)[0] data = self.read(len) diff --git a/Lib/pickletools.py b/Lib/pickletools.py index 6b86723a..155bd5b4 100644 --- a/Lib/pickletools.py +++ b/Lib/pickletools.py @@ -1898,7 +1898,7 @@ opcodes = [ arg=None, stack_before=[pyunicode, pyunicode], stack_after=[anyobject], - proto=0, + proto=4, doc="""Push a global object (module.attr) on the stack. """), @@ -2793,7 +2793,7 @@ def _test(): return doctest.testmod() if __name__ == "__main__": - import sys, argparse + import argparse parser = argparse.ArgumentParser( description='disassemble one or more pickle files') parser.add_argument( diff --git a/Lib/platform.py b/Lib/platform.py index 9096696a..f3d2c423 100755 --- a/Lib/platform.py +++ b/Lib/platform.py @@ -26,12 +26,14 @@ # Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg # Andruk, Just van Rossum, Thomas Heller, Mark R. 
Levinson, Mark # Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support), -# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter +# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve +# Dower # # History: # # # +# 1.0.8 - changed Windows support to read version from kernel32.dll # 1.0.7 - added DEV_NULL # 1.0.6 - added linux_distribution() # 1.0.5 - fixed Java support to allow running the module on Jython @@ -469,189 +471,142 @@ def _syscmd_ver(system='', release='', version='', version = _norm_version(version) return system, release, version -def _win32_getvalue(key, name, default=''): +_WIN32_CLIENT_RELEASES = { + (5, 0): "2000", + (5, 1): "XP", + # Strictly, 5.2 client is XP 64-bit, but platform.py historically + # has always called it 2003 Server + (5, 2): "2003Server", + (5, None): "post2003", + + (6, 0): "Vista", + (6, 1): "7", + (6, 2): "8", + (6, 3): "8.1", + (6, None): "post8.1", + + (10, 0): "10", + (10, None): "post10", +} + +# Server release name lookup will default to client names if necessary +_WIN32_SERVER_RELEASES = { + (5, 2): "2003Server", - """ Read a value for name from the registry key. + (6, 0): "2008Server", + (6, 1): "2008ServerR2", + (6, 2): "2012Server", + (6, 3): "2012ServerR2", + (6, None): "post2012ServerR2", +} - In case this fails, default is returned. +def _get_real_winver(maj, min, build): + if maj < 6 or (maj == 6 and min < 2): + return maj, min, build + + from ctypes import (c_buffer, POINTER, byref, create_unicode_buffer, + Structure, WinDLL) + from ctypes.wintypes import DWORD, HANDLE + + class VS_FIXEDFILEINFO(Structure): + _fields_ = [ + ("dwSignature", DWORD), + ("dwStrucVersion", DWORD), + ("dwFileVersionMS", DWORD), + ("dwFileVersionLS", DWORD), + ("dwProductVersionMS", DWORD), + ("dwProductVersionLS", DWORD), + ("dwFileFlagsMask", DWORD), + ("dwFileFlags", DWORD), + ("dwFileOS", DWORD), + ("dwFileType", DWORD), + ("dwFileSubtype", DWORD), + ("dwFileDateMS", DWORD), + ("dwFileDateLS", DWORD), + ] + + kernel32 = WinDLL('kernel32') + version = WinDLL('version') + + # We will immediately double the length up to MAX_PATH, but the + # path may be longer, so we retry until the returned string is + # shorter than our buffer. 
+ name_len = actual_len = 130 + while actual_len == name_len: + name_len *= 2 + name = create_unicode_buffer(name_len) + actual_len = kernel32.GetModuleFileNameW(HANDLE(kernel32._handle), + name, len(name)) + if not actual_len: + return maj, min, build + + size = version.GetFileVersionInfoSizeW(name, None) + if not size: + return maj, min, build + + ver_block = c_buffer(size) + if (not version.GetFileVersionInfoW(name, None, size, ver_block) or + not ver_block): + return maj, min, build + + pvi = POINTER(VS_FIXEDFILEINFO)() + if not version.VerQueryValueW(ver_block, "", byref(pvi), byref(DWORD())): + return maj, min, build + + maj = pvi.contents.dwProductVersionMS >> 16 + min = pvi.contents.dwProductVersionMS & 0xFFFF + build = pvi.contents.dwProductVersionLS >> 16 + + return maj, min, build - """ +def win32_ver(release='', version='', csd='', ptype=''): try: - # Use win32api if available - from win32api import RegQueryValueEx + from sys import getwindowsversion except ImportError: - # On Python 2.0 and later, emulate using winreg - import winreg - RegQueryValueEx = winreg.QueryValueEx + return release, version, csd, ptype try: - return RegQueryValueEx(key, name) - except: - return default - -def win32_ver(release='', version='', csd='', ptype=''): - - """ Get additional version information from the Windows Registry - and return a tuple (version, csd, ptype) referring to version - number, CSD level (service pack), and OS type (multi/single - processor). - - As a hint: ptype returns 'Uniprocessor Free' on single - processor NT machines and 'Multiprocessor Free' on multi - processor machines. The 'Free' refers to the OS version being - free of debugging code. It could also state 'Checked' which - means the OS version uses debugging code, i.e. code that - checks arguments, ranges, etc. (Thomas Heller). + from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE + except ImportError: + from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE - Note: this function works best with Mark Hammond's win32 - package installed, but also on Python 2.3 and later. It - obviously only runs on Win32 compatible platforms. + winver = getwindowsversion() + maj, min, build = _get_real_winver(*winver[:3]) + version = '{0}.{1}.{2}'.format(maj, min, build) - """ - # XXX Is there any way to find out the processor type on WinXX ? - # XXX Is win32 available on Windows CE ? - # - # Adapted from code posted by Karl Putland to comp.lang.python. - # - # The mappings between reg. values and release names can be found - # here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp + release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or + _WIN32_CLIENT_RELEASES.get((maj, None)) or + release) - # Import the needed APIs - try: - from win32api import RegQueryValueEx, RegOpenKeyEx, \ - RegCloseKey, GetVersionEx - from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \ - VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION - except ImportError: - # Emulate the win32api module using Python APIs + # getwindowsversion() reflect the compatibility mode Python is + # running under, and so the service pack value is only going to be + # valid if the versions match. + if winver[:2] == (maj, min): try: - sys.getwindowsversion + csd = 'SP{}'.format(winver.service_pack_major) except AttributeError: - # No emulation possible, so return the defaults... 
- return release, version, csd, ptype - else: - # Emulation using winreg (added in Python 2.0) and - # sys.getwindowsversion() (added in Python 2.3) - import winreg - GetVersionEx = sys.getwindowsversion - RegQueryValueEx = winreg.QueryValueEx - RegOpenKeyEx = winreg.OpenKeyEx - RegCloseKey = winreg.CloseKey - HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE - VER_PLATFORM_WIN32_WINDOWS = 1 - VER_PLATFORM_WIN32_NT = 2 - VER_NT_WORKSTATION = 1 - VER_NT_SERVER = 3 - REG_SZ = 1 - - # Find out the registry key and some general version infos - winver = GetVersionEx() - maj, min, buildno, plat, csd = winver - version = '%i.%i.%i' % (maj, min, buildno & 0xFFFF) - if hasattr(winver, "service_pack"): - if winver.service_pack != "": - csd = 'SP%s' % winver.service_pack_major - else: - if csd[:13] == 'Service Pack ': - csd = 'SP' + csd[13:] - - if plat == VER_PLATFORM_WIN32_WINDOWS: - regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion' - # Try to guess the release name - if maj == 4: - if min == 0: - release = '95' - elif min == 10: - release = '98' - elif min == 90: - release = 'Me' - else: - release = 'postMe' - elif maj == 5: - release = '2000' - - elif plat == VER_PLATFORM_WIN32_NT: - regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion' - if maj <= 4: - release = 'NT' - elif maj == 5: - if min == 0: - release = '2000' - elif min == 1: - release = 'XP' - elif min == 2: - release = '2003Server' - else: - release = 'post2003' - elif maj == 6: - if hasattr(winver, "product_type"): - product_type = winver.product_type - else: - product_type = VER_NT_WORKSTATION - # Without an OSVERSIONINFOEX capable sys.getwindowsversion(), - # or help from the registry, we cannot properly identify - # non-workstation versions. - try: - key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey) - name, type = RegQueryValueEx(key, "ProductName") - # Discard any type that isn't REG_SZ - if type == REG_SZ and name.find("Server") != -1: - product_type = VER_NT_SERVER - except OSError: - # Use default of VER_NT_WORKSTATION - pass - - if min == 0: - if product_type == VER_NT_WORKSTATION: - release = 'Vista' - else: - release = '2008Server' - elif min == 1: - if product_type == VER_NT_WORKSTATION: - release = '7' - else: - release = '2008ServerR2' - elif min == 2: - if product_type == VER_NT_WORKSTATION: - release = '8' - else: - release = '2012Server' - else: - release = 'post2012Server' + if csd[:13] == 'Service Pack ': + csd = 'SP' + csd[13:] - else: - if not release: - # E.g. Win3.1 with win32s - release = '%i.%i' % (maj, min) - return release, version, csd, ptype + # VER_NT_SERVER = 3 + if getattr(winver, 'product_type', None) == 3: + release = (_WIN32_SERVER_RELEASES.get((maj, min)) or + _WIN32_SERVER_RELEASES.get((maj, None)) or + release) - # Open the registry key + key = None try: - keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey) - # Get a value to make sure the key exists... - RegQueryValueEx(keyCurVer, 'SystemRoot') + key = OpenKeyEx(HKEY_LOCAL_MACHINE, + r'SOFTWARE\Microsoft\Windows NT\CurrentVersion') + ptype = QueryValueEx(key, 'CurrentType')[0] except: - return release, version, csd, ptype + pass + finally: + if key: + CloseKey(key) - # Parse values - #subversion = _win32_getvalue(keyCurVer, - # 'SubVersionNumber', - # ('',1))[0] - #if subversion: - # release = release + subversion # 95a, 95b, etc. 
- build = _win32_getvalue(keyCurVer, - 'CurrentBuildNumber', - ('', 1))[0] - ptype = _win32_getvalue(keyCurVer, - 'CurrentType', - (ptype, 1))[0] - - # Normalize version - version = _norm_version(version, build) - - # Close key - RegCloseKey(keyCurVer) return release, version, csd, ptype + def _mac_ver_xml(): fn = '/System/Library/CoreServices/SystemVersion.plist' if not os.path.exists(fn): diff --git a/Lib/pydoc.py b/Lib/pydoc.py index ee558bfe..a9c04f07 100755 --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -2359,7 +2359,9 @@ def _url_handler(url, content_type="text/html"): with warnings.catch_warnings(): warnings.filterwarnings('ignore') # ignore problems during import - ModuleScanner().run(callback, key) + def onerror(modname): + pass + ModuleScanner().run(callback, key, onerror=onerror) # format page def bltinlink(name): diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py index f66dd623..250bfd33 100644 --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,12881 +1,79 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sat Sep 12 17:22:24 2015 -topics = {'assert': '\n' - 'The "assert" statement\n' - '**********************\n' - '\n' - 'Assert statements are a convenient way to insert debugging ' - 'assertions\n' - 'into a program:\n' - '\n' - ' assert_stmt ::= "assert" expression ["," expression]\n' - '\n' - 'The simple form, "assert expression", is equivalent to\n' - '\n' - ' if __debug__:\n' - ' if not expression: raise AssertionError\n' - '\n' - 'The extended form, "assert expression1, expression2", is ' - 'equivalent to\n' - '\n' - ' if __debug__:\n' - ' if not expression1: raise AssertionError(expression2)\n' - '\n' - 'These equivalences assume that "__debug__" and "AssertionError" ' - 'refer\n' - 'to the built-in variables with those names. In the current\n' - 'implementation, the built-in variable "__debug__" is "True" ' - 'under\n' - 'normal circumstances, "False" when optimization is requested ' - '(command\n' - 'line option -O). The current code generator emits no code for ' - 'an\n' - 'assert statement when optimization is requested at compile ' - 'time. Note\n' - 'that it is unnecessary to include the source code for the ' - 'expression\n' - 'that failed in the error message; it will be displayed as part ' - 'of the\n' - 'stack trace.\n' - '\n' - 'Assignments to "__debug__" are illegal. The value for the ' - 'built-in\n' - 'variable is determined when the interpreter starts.\n', - 'assignment': '\n' - 'Assignment statements\n' - '*********************\n' - '\n' - 'Assignment statements are used to (re)bind names to values ' - 'and to\n' - 'modify attributes or items of mutable objects:\n' - '\n' - ' assignment_stmt ::= (target_list "=")+ (expression_list | ' - 'yield_expression)\n' - ' target_list ::= target ("," target)* [","]\n' - ' target ::= identifier\n' - ' | "(" target_list ")"\n' - ' | "[" target_list "]"\n' - ' | attributeref\n' - ' | subscription\n' - ' | slicing\n' - ' | "*" target\n' - '\n' - '(See section *Primaries* for the syntax definitions for\n' - '*attributeref*, *subscription*, and *slicing*.)\n' - '\n' - 'An assignment statement evaluates the expression list ' - '(remember that\n' - 'this can be a single expression or a comma-separated list, ' - 'the latter\n' - 'yielding a tuple) and assigns the single resulting object to ' - 'each of\n' - 'the target lists, from left to right.\n' - '\n' - 'Assignment is defined recursively depending on the form of ' - 'the target\n' - '(list). 
When a target is part of a mutable object (an ' - 'attribute\n' - 'reference, subscription or slicing), the mutable object ' - 'must\n' - 'ultimately perform the assignment and decide about its ' - 'validity, and\n' - 'may raise an exception if the assignment is unacceptable. ' - 'The rules\n' - 'observed by various types and the exceptions raised are ' - 'given with the\n' - 'definition of the object types (see section *The standard ' - 'type\n' - 'hierarchy*).\n' - '\n' - 'Assignment of an object to a target list, optionally ' - 'enclosed in\n' - 'parentheses or square brackets, is recursively defined as ' - 'follows.\n' - '\n' - '* If the target list is a single target: The object is ' - 'assigned to\n' - ' that target.\n' - '\n' - '* If the target list is a comma-separated list of targets: ' - 'The\n' - ' object must be an iterable with the same number of items ' - 'as there\n' - ' are targets in the target list, and the items are ' - 'assigned, from\n' - ' left to right, to the corresponding targets.\n' - '\n' - ' * If the target list contains one target prefixed with an\n' - ' asterisk, called a "starred" target: The object must be ' - 'a sequence\n' - ' with at least as many items as there are targets in the ' - 'target\n' - ' list, minus one. The first items of the sequence are ' - 'assigned,\n' - ' from left to right, to the targets before the starred ' - 'target. The\n' - ' final items of the sequence are assigned to the targets ' - 'after the\n' - ' starred target. A list of the remaining items in the ' - 'sequence is\n' - ' then assigned to the starred target (the list can be ' - 'empty).\n' - '\n' - ' * Else: The object must be a sequence with the same number ' - 'of\n' - ' items as there are targets in the target list, and the ' - 'items are\n' - ' assigned, from left to right, to the corresponding ' - 'targets.\n' - '\n' - 'Assignment of an object to a single target is recursively ' - 'defined as\n' - 'follows.\n' - '\n' - '* If the target is an identifier (name):\n' - '\n' - ' * If the name does not occur in a "global" or "nonlocal" ' - 'statement\n' - ' in the current code block: the name is bound to the ' - 'object in the\n' - ' current local namespace.\n' - '\n' - ' * Otherwise: the name is bound to the object in the ' - 'global\n' - ' namespace or the outer namespace determined by ' - '"nonlocal",\n' - ' respectively.\n' - '\n' - ' The name is rebound if it was already bound. This may ' - 'cause the\n' - ' reference count for the object previously bound to the ' - 'name to reach\n' - ' zero, causing the object to be deallocated and its ' - 'destructor (if it\n' - ' has one) to be called.\n' - '\n' - '* If the target is a target list enclosed in parentheses or ' - 'in\n' - ' square brackets: The object must be an iterable with the ' - 'same number\n' - ' of items as there are targets in the target list, and its ' - 'items are\n' - ' assigned, from left to right, to the corresponding ' - 'targets.\n' - '\n' - '* If the target is an attribute reference: The primary ' - 'expression in\n' - ' the reference is evaluated. It should yield an object ' - 'with\n' - ' assignable attributes; if this is not the case, ' - '"TypeError" is\n' - ' raised. 
That object is then asked to assign the assigned ' - 'object to\n' - ' the given attribute; if it cannot perform the assignment, ' - 'it raises\n' - ' an exception (usually but not necessarily ' - '"AttributeError").\n' - '\n' - ' Note: If the object is a class instance and the attribute ' - 'reference\n' - ' occurs on both sides of the assignment operator, the RHS ' - 'expression,\n' - ' "a.x" can access either an instance attribute or (if no ' - 'instance\n' - ' attribute exists) a class attribute. The LHS target "a.x" ' - 'is always\n' - ' set as an instance attribute, creating it if necessary. ' - 'Thus, the\n' - ' two occurrences of "a.x" do not necessarily refer to the ' - 'same\n' - ' attribute: if the RHS expression refers to a class ' - 'attribute, the\n' - ' LHS creates a new instance attribute as the target of the\n' - ' assignment:\n' - '\n' - ' class Cls:\n' - ' x = 3 # class variable\n' - ' inst = Cls()\n' - ' inst.x = inst.x + 1 # writes inst.x as 4 leaving ' - 'Cls.x as 3\n' - '\n' - ' This description does not necessarily apply to descriptor\n' - ' attributes, such as properties created with "property()".\n' - '\n' - '* If the target is a subscription: The primary expression in ' - 'the\n' - ' reference is evaluated. It should yield either a mutable ' - 'sequence\n' - ' object (such as a list) or a mapping object (such as a ' - 'dictionary).\n' - ' Next, the subscript expression is evaluated.\n' - '\n' - ' If the primary is a mutable sequence object (such as a ' - 'list), the\n' - ' subscript must yield an integer. If it is negative, the ' - "sequence's\n" - ' length is added to it. The resulting value must be a ' - 'nonnegative\n' - " integer less than the sequence's length, and the sequence " - 'is asked\n' - ' to assign the assigned object to its item with that ' - 'index. If the\n' - ' index is out of range, "IndexError" is raised (assignment ' - 'to a\n' - ' subscripted sequence cannot add new items to a list).\n' - '\n' - ' If the primary is a mapping object (such as a dictionary), ' - 'the\n' - " subscript must have a type compatible with the mapping's " - 'key type,\n' - ' and the mapping is then asked to create a key/datum pair ' - 'which maps\n' - ' the subscript to the assigned object. This can either ' - 'replace an\n' - ' existing key/value pair with the same key value, or insert ' - 'a new\n' - ' key/value pair (if no key with the same value existed).\n' - '\n' - ' For user-defined objects, the "__setitem__()" method is ' - 'called with\n' - ' appropriate arguments.\n' - '\n' - '* If the target is a slicing: The primary expression in the\n' - ' reference is evaluated. It should yield a mutable ' - 'sequence object\n' - ' (such as a list). The assigned object should be a ' - 'sequence object\n' - ' of the same type. Next, the lower and upper bound ' - 'expressions are\n' - ' evaluated, insofar they are present; defaults are zero and ' - 'the\n' - " sequence's length. The bounds should evaluate to " - 'integers. If\n' - " either bound is negative, the sequence's length is added " - 'to it. The\n' - ' resulting bounds are clipped to lie between zero and the ' - "sequence's\n" - ' length, inclusive. Finally, the sequence object is asked ' - 'to replace\n' - ' the slice with the items of the assigned sequence. 
The ' - 'length of\n' - ' the slice may be different from the length of the assigned ' - 'sequence,\n' - ' thus changing the length of the target sequence, if the ' - 'target\n' - ' sequence allows it.\n' - '\n' - '**CPython implementation detail:** In the current ' - 'implementation, the\n' - 'syntax for targets is taken to be the same as for ' - 'expressions, and\n' - 'invalid syntax is rejected during the code generation phase, ' - 'causing\n' - 'less detailed error messages.\n' - '\n' - 'Although the definition of assignment implies that overlaps ' - 'between\n' - 'the left-hand side and the right-hand side are ' - "'simultanenous' (for\n" - 'example "a, b = b, a" swaps two variables), overlaps ' - '*within* the\n' - 'collection of assigned-to variables occur left-to-right, ' - 'sometimes\n' - 'resulting in confusion. For instance, the following program ' - 'prints\n' - '"[0, 2]":\n' - '\n' - ' x = [0, 1]\n' - ' i = 0\n' - ' i, x[i] = 1, 2 # i is updated, then x[i] is ' - 'updated\n' - ' print(x)\n' - '\n' - 'See also: **PEP 3132** - Extended Iterable Unpacking\n' - '\n' - ' The specification for the "*target" feature.\n' - '\n' - '\n' - 'Augmented assignment statements\n' - '===============================\n' - '\n' - 'Augmented assignment is the combination, in a single ' - 'statement, of a\n' - 'binary operation and an assignment statement:\n' - '\n' - ' augmented_assignment_stmt ::= augtarget augop ' - '(expression_list | yield_expression)\n' - ' augtarget ::= identifier | attributeref | ' - 'subscription | slicing\n' - ' augop ::= "+=" | "-=" | "*=" | "@=" | ' - '"/=" | "//=" | "%=" | "**="\n' - ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' - '\n' - '(See section *Primaries* for the syntax definitions of the ' - 'last three\n' - 'symbols.)\n' - '\n' - 'An augmented assignment evaluates the target (which, unlike ' - 'normal\n' - 'assignment statements, cannot be an unpacking) and the ' - 'expression\n' - 'list, performs the binary operation specific to the type of ' - 'assignment\n' - 'on the two operands, and assigns the result to the original ' - 'target.\n' - 'The target is only evaluated once.\n' - '\n' - 'An augmented assignment expression like "x += 1" can be ' - 'rewritten as\n' - '"x = x + 1" to achieve a similar, but not exactly equal ' - 'effect. In the\n' - 'augmented version, "x" is only evaluated once. Also, when ' - 'possible,\n' - 'the actual operation is performed *in-place*, meaning that ' - 'rather than\n' - 'creating a new object and assigning that to the target, the ' - 'old object\n' - 'is modified instead.\n' - '\n' - 'Unlike normal assignments, augmented assignments evaluate ' - 'the left-\n' - 'hand side *before* evaluating the right-hand side. For ' - 'example, "a[i]\n' - '+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and ' - 'performs\n' - 'the addition, and lastly, it writes the result back to ' - '"a[i]".\n' - '\n' - 'With the exception of assigning to tuples and multiple ' - 'targets in a\n' - 'single statement, the assignment done by augmented ' - 'assignment\n' - 'statements is handled the same way as normal assignments. 
' - 'Similarly,\n' - 'with the exception of the possible *in-place* behavior, the ' - 'binary\n' - 'operation performed by augmented assignment is the same as ' - 'the normal\n' - 'binary operations.\n' - '\n' - 'For targets which are attribute references, the same *caveat ' - 'about\n' - 'class and instance attributes* applies as for regular ' - 'assignments.\n', - 'atom-identifiers': '\n' - 'Identifiers (Names)\n' - '*******************\n' - '\n' - 'An identifier occurring as an atom is a name. See ' - 'section\n' - '*Identifiers and keywords* for lexical definition and ' - 'section *Naming\n' - 'and binding* for documentation of naming and binding.\n' - '\n' - 'When the name is bound to an object, evaluation of the ' - 'atom yields\n' - 'that object. When a name is not bound, an attempt to ' - 'evaluate it\n' - 'raises a "NameError" exception.\n' - '\n' - '**Private name mangling:** When an identifier that ' - 'textually occurs in\n' - 'a class definition begins with two or more underscore ' - 'characters and\n' - 'does not end in two or more underscores, it is ' - 'considered a *private\n' - 'name* of that class. Private names are transformed to ' - 'a longer form\n' - 'before code is generated for them. The transformation ' - 'inserts the\n' - 'class name, with leading underscores removed and a ' - 'single underscore\n' - 'inserted, in front of the name. For example, the ' - 'identifier "__spam"\n' - 'occurring in a class named "Ham" will be transformed ' - 'to "_Ham__spam".\n' - 'This transformation is independent of the syntactical ' - 'context in which\n' - 'the identifier is used. If the transformed name is ' - 'extremely long\n' - '(longer than 255 characters), implementation defined ' - 'truncation may\n' - 'happen. If the class name consists only of ' - 'underscores, no\n' - 'transformation is done.\n', - 'atom-literals': '\n' - 'Literals\n' - '********\n' - '\n' - 'Python supports string and bytes literals and various ' - 'numeric\n' - 'literals:\n' - '\n' - ' literal ::= stringliteral | bytesliteral\n' - ' | integer | floatnumber | imagnumber\n' - '\n' - 'Evaluation of a literal yields an object of the given ' - 'type (string,\n' - 'bytes, integer, floating point number, complex number) ' - 'with the given\n' - 'value. The value may be approximated in the case of ' - 'floating point\n' - 'and imaginary (complex) literals. See section *Literals* ' - 'for details.\n' - '\n' - 'All literals correspond to immutable data types, and ' - 'hence the\n' - "object's identity is less important than its value. " - 'Multiple\n' - 'evaluations of literals with the same value (either the ' - 'same\n' - 'occurrence in the program text or a different occurrence) ' - 'may obtain\n' - 'the same object or a different object with the same ' - 'value.\n', - 'attribute-access': '\n' - 'Customizing attribute access\n' - '****************************\n' - '\n' - 'The following methods can be defined to customize the ' - 'meaning of\n' - 'attribute access (use of, assignment to, or deletion ' - 'of "x.name") for\n' - 'class instances.\n' - '\n' - 'object.__getattr__(self, name)\n' - '\n' - ' Called when an attribute lookup has not found the ' - 'attribute in the\n' - ' usual places (i.e. it is not an instance attribute ' - 'nor is it found\n' - ' in the class tree for "self"). "name" is the ' - 'attribute name. 
This\n' - ' method should return the (computed) attribute value ' - 'or raise an\n' - ' "AttributeError" exception.\n' - '\n' - ' Note that if the attribute is found through the ' - 'normal mechanism,\n' - ' "__getattr__()" is not called. (This is an ' - 'intentional asymmetry\n' - ' between "__getattr__()" and "__setattr__()".) This ' - 'is done both for\n' - ' efficiency reasons and because otherwise ' - '"__getattr__()" would have\n' - ' no way to access other attributes of the instance. ' - 'Note that at\n' - ' least for instance variables, you can fake total ' - 'control by not\n' - ' inserting any values in the instance attribute ' - 'dictionary (but\n' - ' instead inserting them in another object). See ' - 'the\n' - ' "__getattribute__()" method below for a way to ' - 'actually get total\n' - ' control over attribute access.\n' - '\n' - 'object.__getattribute__(self, name)\n' - '\n' - ' Called unconditionally to implement attribute ' - 'accesses for\n' - ' instances of the class. If the class also defines ' - '"__getattr__()",\n' - ' the latter will not be called unless ' - '"__getattribute__()" either\n' - ' calls it explicitly or raises an "AttributeError". ' - 'This method\n' - ' should return the (computed) attribute value or ' - 'raise an\n' - ' "AttributeError" exception. In order to avoid ' - 'infinite recursion in\n' - ' this method, its implementation should always call ' - 'the base class\n' - ' method with the same name to access any attributes ' - 'it needs, for\n' - ' example, "object.__getattribute__(self, name)".\n' - '\n' - ' Note: This method may still be bypassed when ' - 'looking up special\n' - ' methods as the result of implicit invocation via ' - 'language syntax\n' - ' or built-in functions. See *Special method ' - 'lookup*.\n' - '\n' - 'object.__setattr__(self, name, value)\n' - '\n' - ' Called when an attribute assignment is attempted. ' - 'This is called\n' - ' instead of the normal mechanism (i.e. store the ' - 'value in the\n' - ' instance dictionary). *name* is the attribute name, ' - '*value* is the\n' - ' value to be assigned to it.\n' - '\n' - ' If "__setattr__()" wants to assign to an instance ' - 'attribute, it\n' - ' should call the base class method with the same ' - 'name, for example,\n' - ' "object.__setattr__(self, name, value)".\n' - '\n' - 'object.__delattr__(self, name)\n' - '\n' - ' Like "__setattr__()" but for attribute deletion ' - 'instead of\n' - ' assignment. This should only be implemented if ' - '"del obj.name" is\n' - ' meaningful for the object.\n' - '\n' - 'object.__dir__(self)\n' - '\n' - ' Called when "dir()" is called on the object. A ' - 'sequence must be\n' - ' returned. "dir()" converts the returned sequence to ' - 'a list and\n' - ' sorts it.\n' - '\n' - '\n' - 'Implementing Descriptors\n' - '========================\n' - '\n' - 'The following methods only apply when an instance of ' - 'the class\n' - 'containing the method (a so-called *descriptor* class) ' - 'appears in an\n' - '*owner* class (the descriptor must be in either the ' - "owner's class\n" - 'dictionary or in the class dictionary for one of its ' - 'parents). In the\n' - 'examples below, "the attribute" refers to the ' - 'attribute whose name is\n' - "the key of the property in the owner class' " - '"__dict__".\n' - '\n' - 'object.__get__(self, instance, owner)\n' - '\n' - ' Called to get the attribute of the owner class ' - '(class attribute\n' - ' access) or of an instance of that class (instance ' - 'attribute\n' - ' access). 
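[Editor's aside] A minimal sketch of the "__getattr__()"/"__setattr__()" hooks described above; the class and attribute names are invented for illustration:

   class Record:
       def __init__(self):
           # bypass our own __setattr__ while initialising
           object.__setattr__(self, '_data', {})

       def __getattr__(self, name):
           # only called when normal attribute lookup fails
           try:
               return self._data[name]
           except KeyError:
               raise AttributeError(name)

       def __setattr__(self, name, value):
           # called for every assignment; delegate storage to _data
           self._data[name] = value

   r = Record()
   r.x = 1
   print(r.x)      # 1, served by __getattr__
   # r.missing     # would raise AttributeError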
*owner* is always the owner class, while ' - '*instance* is the\n' - ' instance that the attribute was accessed through, ' - 'or "None" when\n' - ' the attribute is accessed through the *owner*. ' - 'This method should\n' - ' return the (computed) attribute value or raise an ' - '"AttributeError"\n' - ' exception.\n' - '\n' - 'object.__set__(self, instance, value)\n' - '\n' - ' Called to set the attribute on an instance ' - '*instance* of the owner\n' - ' class to a new value, *value*.\n' - '\n' - 'object.__delete__(self, instance)\n' - '\n' - ' Called to delete the attribute on an instance ' - '*instance* of the\n' - ' owner class.\n' - '\n' - 'The attribute "__objclass__" is interpreted by the ' - '"inspect" module as\n' - 'specifying the class where this object was defined ' - '(setting this\n' - 'appropriately can assist in runtime introspection of ' - 'dynamic class\n' - 'attributes). For callables, it may indicate that an ' - 'instance of the\n' - 'given type (or a subclass) is expected or required as ' - 'the first\n' - 'positional argument (for example, CPython sets this ' - 'attribute for\n' - 'unbound methods that are implemented in C).\n' - '\n' - '\n' - 'Invoking Descriptors\n' - '====================\n' - '\n' - 'In general, a descriptor is an object attribute with ' - '"binding\n' - 'behavior", one whose attribute access has been ' - 'overridden by methods\n' - 'in the descriptor protocol: "__get__()", "__set__()", ' - 'and\n' - '"__delete__()". If any of those methods are defined ' - 'for an object, it\n' - 'is said to be a descriptor.\n' - '\n' - 'The default behavior for attribute access is to get, ' - 'set, or delete\n' - "the attribute from an object's dictionary. For " - 'instance, "a.x" has a\n' - 'lookup chain starting with "a.__dict__[\'x\']", then\n' - '"type(a).__dict__[\'x\']", and continuing through the ' - 'base classes of\n' - '"type(a)" excluding metaclasses.\n' - '\n' - 'However, if the looked-up value is an object defining ' - 'one of the\n' - 'descriptor methods, then Python may override the ' - 'default behavior and\n' - 'invoke the descriptor method instead. Where this ' - 'occurs in the\n' - 'precedence chain depends on which descriptor methods ' - 'were defined and\n' - 'how they were called.\n' - '\n' - 'The starting point for descriptor invocation is a ' - 'binding, "a.x". How\n' - 'the arguments are assembled depends on "a":\n' - '\n' - 'Direct Call\n' - ' The simplest and least common call is when user ' - 'code directly\n' - ' invokes a descriptor method: "x.__get__(a)".\n' - '\n' - 'Instance Binding\n' - ' If binding to an object instance, "a.x" is ' - 'transformed into the\n' - ' call: "type(a).__dict__[\'x\'].__get__(a, ' - 'type(a))".\n' - '\n' - 'Class Binding\n' - ' If binding to a class, "A.x" is transformed into ' - 'the call:\n' - ' "A.__dict__[\'x\'].__get__(None, A)".\n' - '\n' - 'Super Binding\n' - ' If "a" is an instance of "super", then the binding ' - '"super(B,\n' - ' obj).m()" searches "obj.__class__.__mro__" for the ' - 'base class "A"\n' - ' immediately preceding "B" and then invokes the ' - 'descriptor with the\n' - ' call: "A.__dict__[\'m\'].__get__(obj, ' - 'obj.__class__)".\n' - '\n' - 'For instance bindings, the precedence of descriptor ' - 'invocation depends\n' - 'on the which descriptor methods are defined. A ' - 'descriptor can define\n' - 'any combination of "__get__()", "__set__()" and ' - '"__delete__()". 
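[Editor's aside] To make the descriptor protocol concrete, a small sketch of a data descriptor; the "Typed" and "Point" names are invented:

   class Typed:
       def __init__(self, name, kind):
           self.name, self.kind = name, kind

       def __get__(self, instance, owner):
           if instance is None:              # accessed on the class itself
               return self
           return instance.__dict__[self.name]

       def __set__(self, instance, value):
           if not isinstance(value, self.kind):
               raise TypeError('expected %s' % self.kind.__name__)
           instance.__dict__[self.name] = value

   class Point:
       x = Typed('x', int)
       y = Typed('y', int)

   p = Point()
   p.x = 3          # routed through Typed.__set__ (data descriptor)
   print(p.x)       # 3, via Typed.__get__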
If it\n' - 'does not define "__get__()", then accessing the ' - 'attribute will return\n' - 'the descriptor object itself unless there is a value ' - "in the object's\n" - 'instance dictionary. If the descriptor defines ' - '"__set__()" and/or\n' - '"__delete__()", it is a data descriptor; if it defines ' - 'neither, it is\n' - 'a non-data descriptor. Normally, data descriptors ' - 'define both\n' - '"__get__()" and "__set__()", while non-data ' - 'descriptors have just the\n' - '"__get__()" method. Data descriptors with "__set__()" ' - 'and "__get__()"\n' - 'defined always override a redefinition in an instance ' - 'dictionary. In\n' - 'contrast, non-data descriptors can be overridden by ' - 'instances.\n' - '\n' - 'Python methods (including "staticmethod()" and ' - '"classmethod()") are\n' - 'implemented as non-data descriptors. Accordingly, ' - 'instances can\n' - 'redefine and override methods. This allows individual ' - 'instances to\n' - 'acquire behaviors that differ from other instances of ' - 'the same class.\n' - '\n' - 'The "property()" function is implemented as a data ' - 'descriptor.\n' - 'Accordingly, instances cannot override the behavior of ' - 'a property.\n' - '\n' - '\n' - '__slots__\n' - '=========\n' - '\n' - 'By default, instances of classes have a dictionary for ' - 'attribute\n' - 'storage. This wastes space for objects having very ' - 'few instance\n' - 'variables. The space consumption can become acute ' - 'when creating large\n' - 'numbers of instances.\n' - '\n' - 'The default can be overridden by defining *__slots__* ' - 'in a class\n' - 'definition. The *__slots__* declaration takes a ' - 'sequence of instance\n' - 'variables and reserves just enough space in each ' - 'instance to hold a\n' - 'value for each variable. Space is saved because ' - '*__dict__* is not\n' - 'created for each instance.\n' - '\n' - 'object.__slots__\n' - '\n' - ' This class variable can be assigned a string, ' - 'iterable, or sequence\n' - ' of strings with variable names used by instances. ' - '*__slots__*\n' - ' reserves space for the declared variables and ' - 'prevents the\n' - ' automatic creation of *__dict__* and *__weakref__* ' - 'for each\n' - ' instance.\n' - '\n' - '\n' - 'Notes on using *__slots__*\n' - '--------------------------\n' - '\n' - '* When inheriting from a class without *__slots__*, ' - 'the *__dict__*\n' - ' attribute of that class will always be accessible, ' - 'so a *__slots__*\n' - ' definition in the subclass is meaningless.\n' - '\n' - '* Without a *__dict__* variable, instances cannot be ' - 'assigned new\n' - ' variables not listed in the *__slots__* definition. ' - 'Attempts to\n' - ' assign to an unlisted variable name raises ' - '"AttributeError". If\n' - ' dynamic assignment of new variables is desired, then ' - 'add\n' - ' "\'__dict__\'" to the sequence of strings in the ' - '*__slots__*\n' - ' declaration.\n' - '\n' - '* Without a *__weakref__* variable for each instance, ' - 'classes\n' - ' defining *__slots__* do not support weak references ' - 'to its\n' - ' instances. If weak reference support is needed, then ' - 'add\n' - ' "\'__weakref__\'" to the sequence of strings in the ' - '*__slots__*\n' - ' declaration.\n' - '\n' - '* *__slots__* are implemented at the class level by ' - 'creating\n' - ' descriptors (*Implementing Descriptors*) for each ' - 'variable name. 
As\n' - ' a result, class attributes cannot be used to set ' - 'default values for\n' - ' instance variables defined by *__slots__*; ' - 'otherwise, the class\n' - ' attribute would overwrite the descriptor ' - 'assignment.\n' - '\n' - '* The action of a *__slots__* declaration is limited ' - 'to the class\n' - ' where it is defined. As a result, subclasses will ' - 'have a *__dict__*\n' - ' unless they also define *__slots__* (which must only ' - 'contain names\n' - ' of any *additional* slots).\n' - '\n' - '* If a class defines a slot also defined in a base ' - 'class, the\n' - ' instance variable defined by the base class slot is ' - 'inaccessible\n' - ' (except by retrieving its descriptor directly from ' - 'the base class).\n' - ' This renders the meaning of the program undefined. ' - 'In the future, a\n' - ' check may be added to prevent this.\n' - '\n' - '* Nonempty *__slots__* does not work for classes ' - 'derived from\n' - ' "variable-length" built-in types such as "int", ' - '"bytes" and "tuple".\n' - '\n' - '* Any non-string iterable may be assigned to ' - '*__slots__*. Mappings\n' - ' may also be used; however, in the future, special ' - 'meaning may be\n' - ' assigned to the values corresponding to each key.\n' - '\n' - '* *__class__* assignment works only if both classes ' - 'have the same\n' - ' *__slots__*.\n', - 'attribute-references': '\n' - 'Attribute references\n' - '********************\n' - '\n' - 'An attribute reference is a primary followed by a ' - 'period and a name:\n' - '\n' - ' attributeref ::= primary "." identifier\n' - '\n' - 'The primary must evaluate to an object of a type ' - 'that supports\n' - 'attribute references, which most objects do. This ' - 'object is then\n' - 'asked to produce the attribute whose name is the ' - 'identifier. This\n' - 'production can be customized by overriding the ' - '"__getattr__()" method.\n' - 'If this attribute is not available, the exception ' - '"AttributeError" is\n' - 'raised. Otherwise, the type and value of the ' - 'object produced is\n' - 'determined by the object. Multiple evaluations of ' - 'the same attribute\n' - 'reference may yield different objects.\n', - 'augassign': '\n' - 'Augmented assignment statements\n' - '*******************************\n' - '\n' - 'Augmented assignment is the combination, in a single ' - 'statement, of a\n' - 'binary operation and an assignment statement:\n' - '\n' - ' augmented_assignment_stmt ::= augtarget augop ' - '(expression_list | yield_expression)\n' - ' augtarget ::= identifier | attributeref | ' - 'subscription | slicing\n' - ' augop ::= "+=" | "-=" | "*=" | "@=" | ' - '"/=" | "//=" | "%=" | "**="\n' - ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' - '\n' - '(See section *Primaries* for the syntax definitions of the ' - 'last three\n' - 'symbols.)\n' - '\n' - 'An augmented assignment evaluates the target (which, unlike ' - 'normal\n' - 'assignment statements, cannot be an unpacking) and the ' - 'expression\n' - 'list, performs the binary operation specific to the type of ' - 'assignment\n' - 'on the two operands, and assigns the result to the original ' - 'target.\n' - 'The target is only evaluated once.\n' - '\n' - 'An augmented assignment expression like "x += 1" can be ' - 'rewritten as\n' - '"x = x + 1" to achieve a similar, but not exactly equal ' - 'effect. In the\n' - 'augmented version, "x" is only evaluated once. 
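[Editor's aside] Returning to the *__slots__* notes a little earlier, a short sketch under illustrative names:

   class Point:
       __slots__ = ('x', 'y')      # no per-instance __dict__ is created

       def __init__(self, x, y):
           self.x, self.y = x, y

   p = Point(1, 2)
   p.x = 10                        # fine: 'x' is a declared slot
   # p.z = 3                       # would raise AttributeError: 'z' is not in __slots__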
Also, when ' - 'possible,\n' - 'the actual operation is performed *in-place*, meaning that ' - 'rather than\n' - 'creating a new object and assigning that to the target, the ' - 'old object\n' - 'is modified instead.\n' - '\n' - 'Unlike normal assignments, augmented assignments evaluate the ' - 'left-\n' - 'hand side *before* evaluating the right-hand side. For ' - 'example, "a[i]\n' - '+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and ' - 'performs\n' - 'the addition, and lastly, it writes the result back to ' - '"a[i]".\n' - '\n' - 'With the exception of assigning to tuples and multiple ' - 'targets in a\n' - 'single statement, the assignment done by augmented ' - 'assignment\n' - 'statements is handled the same way as normal assignments. ' - 'Similarly,\n' - 'with the exception of the possible *in-place* behavior, the ' - 'binary\n' - 'operation performed by augmented assignment is the same as ' - 'the normal\n' - 'binary operations.\n' - '\n' - 'For targets which are attribute references, the same *caveat ' - 'about\n' - 'class and instance attributes* applies as for regular ' - 'assignments.\n', - 'binary': '\n' - 'Binary arithmetic operations\n' - '****************************\n' - '\n' - 'The binary arithmetic operations have the conventional priority\n' - 'levels. Note that some of these operations also apply to ' - 'certain non-\n' - 'numeric types. Apart from the power operator, there are only ' - 'two\n' - 'levels, one for multiplicative operators and one for additive\n' - 'operators:\n' - '\n' - ' m_expr ::= u_expr | m_expr "*" u_expr | m_expr "@" m_expr |\n' - ' m_expr "//" u_expr| m_expr "/" u_expr |\n' - ' m_expr "%" u_expr\n' - ' a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n' - '\n' - 'The "*" (multiplication) operator yields the product of its ' - 'arguments.\n' - 'The arguments must either both be numbers, or one argument must ' - 'be an\n' - 'integer and the other must be a sequence. In the former case, ' - 'the\n' - 'numbers are converted to a common type and then multiplied ' - 'together.\n' - 'In the latter case, sequence repetition is performed; a ' - 'negative\n' - 'repetition factor yields an empty sequence.\n' - '\n' - 'The "@" (at) operator is intended to be used for matrix\n' - 'multiplication. No builtin Python types implement this ' - 'operator.\n' - '\n' - 'New in version 3.5.\n' - '\n' - 'The "/" (division) and "//" (floor division) operators yield ' - 'the\n' - 'quotient of their arguments. The numeric arguments are first\n' - 'converted to a common type. Division of integers yields a float, ' - 'while\n' - 'floor division of integers results in an integer; the result is ' - 'that\n' - "of mathematical division with the 'floor' function applied to " - 'the\n' - 'result. Division by zero raises the "ZeroDivisionError" ' - 'exception.\n' - '\n' - 'The "%" (modulo) operator yields the remainder from the division ' - 'of\n' - 'the first argument by the second. The numeric arguments are ' - 'first\n' - 'converted to a common type. A zero right argument raises the\n' - '"ZeroDivisionError" exception. The arguments may be floating ' - 'point\n' - 'numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals ' - '"4*0.7 +\n' - '0.34".) 
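[Editor's aside] A short sketch of the *in-place* behaviour of augmented assignment described above:

   items = [1, 2]
   alias = items
   items += [3]            # list.__iadd__ extends the existing list in place
   print(alias)            # [1, 2, 3] -- alias still refers to the same object

   items = items + [4]     # ordinary addition builds a new list and rebinds the name
   print(alias)            # [1, 2, 3] -- alias is unaffected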
The modulo operator always yields a result with the ' - 'same sign\n' - 'as its second operand (or zero); the absolute value of the ' - 'result is\n' - 'strictly smaller than the absolute value of the second operand ' - '[1].\n' - '\n' - 'The floor division and modulo operators are connected by the ' - 'following\n' - 'identity: "x == (x//y)*y + (x%y)". Floor division and modulo ' - 'are also\n' - 'connected with the built-in function "divmod()": "divmod(x, y) ' - '==\n' - '(x//y, x%y)". [2].\n' - '\n' - 'In addition to performing the modulo operation on numbers, the ' - '"%"\n' - 'operator is also overloaded by string objects to perform ' - 'old-style\n' - 'string formatting (also known as interpolation). The syntax ' - 'for\n' - 'string formatting is described in the Python Library Reference,\n' - 'section *printf-style String Formatting*.\n' - '\n' - 'The floor division operator, the modulo operator, and the ' - '"divmod()"\n' - 'function are not defined for complex numbers. Instead, convert ' - 'to a\n' - 'floating point number using the "abs()" function if ' - 'appropriate.\n' - '\n' - 'The "+" (addition) operator yields the sum of its arguments. ' - 'The\n' - 'arguments must either both be numbers or both be sequences of ' - 'the same\n' - 'type. In the former case, the numbers are converted to a common ' - 'type\n' - 'and then added together. In the latter case, the sequences are\n' - 'concatenated.\n' - '\n' - 'The "-" (subtraction) operator yields the difference of its ' - 'arguments.\n' - 'The numeric arguments are first converted to a common type.\n', - 'bitwise': '\n' - 'Binary bitwise operations\n' - '*************************\n' - '\n' - 'Each of the three bitwise operations has a different priority ' - 'level:\n' - '\n' - ' and_expr ::= shift_expr | and_expr "&" shift_expr\n' - ' xor_expr ::= and_expr | xor_expr "^" and_expr\n' - ' or_expr ::= xor_expr | or_expr "|" xor_expr\n' - '\n' - 'The "&" operator yields the bitwise AND of its arguments, which ' - 'must\n' - 'be integers.\n' - '\n' - 'The "^" operator yields the bitwise XOR (exclusive OR) of its\n' - 'arguments, which must be integers.\n' - '\n' - 'The "|" operator yields the bitwise (inclusive) OR of its ' - 'arguments,\n' - 'which must be integers.\n', - 'bltin-code-objects': '\n' - 'Code Objects\n' - '************\n' - '\n' - 'Code objects are used by the implementation to ' - 'represent "pseudo-\n' - 'compiled" executable Python code such as a function ' - 'body. They differ\n' - "from function objects because they don't contain a " - 'reference to their\n' - 'global execution environment. Code objects are ' - 'returned by the built-\n' - 'in "compile()" function and can be extracted from ' - 'function objects\n' - 'through their "__code__" attribute. See also the ' - '"code" module.\n' - '\n' - 'A code object can be executed or evaluated by ' - 'passing it (instead of a\n' - 'source string) to the "exec()" or "eval()" built-in ' - 'functions.\n' - '\n' - 'See *The standard type hierarchy* for more ' - 'information.\n', - 'bltin-ellipsis-object': '\n' - 'The Ellipsis Object\n' - '*******************\n' - '\n' - 'This object is commonly used by slicing (see ' - '*Slicings*). It supports\n' - 'no special operations. There is exactly one ' - 'ellipsis object, named\n' - '"Ellipsis" (a built-in name). 
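[Editor's aside] A tiny sketch of the floor division / modulo identity and the sign rule described above:

   x, y = 7, -3
   print(x // y, x % y)                    # -3 -2  (remainder takes the sign of y)
   assert x == (x // y) * y + (x % y)      # the identity holds
   assert divmod(x, y) == (x // y, x % y)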
"type(Ellipsis)()" ' - 'produces the\n' - '"Ellipsis" singleton.\n' - '\n' - 'It is written as "Ellipsis" or "...".\n', - 'bltin-null-object': '\n' - 'The Null Object\n' - '***************\n' - '\n' - "This object is returned by functions that don't " - 'explicitly return a\n' - 'value. It supports no special operations. There is ' - 'exactly one null\n' - 'object, named "None" (a built-in name). ' - '"type(None)()" produces the\n' - 'same singleton.\n' - '\n' - 'It is written as "None".\n', - 'bltin-type-objects': '\n' - 'Type Objects\n' - '************\n' - '\n' - 'Type objects represent the various object types. An ' - "object's type is\n" - 'accessed by the built-in function "type()". There ' - 'are no special\n' - 'operations on types. The standard module "types" ' - 'defines names for\n' - 'all standard built-in types.\n' - '\n' - 'Types are written like this: "".\n', - 'booleans': '\n' - 'Boolean operations\n' - '******************\n' - '\n' - ' or_test ::= and_test | or_test "or" and_test\n' - ' and_test ::= not_test | and_test "and" not_test\n' - ' not_test ::= comparison | "not" not_test\n' - '\n' - 'In the context of Boolean operations, and also when ' - 'expressions are\n' - 'used by control flow statements, the following values are ' - 'interpreted\n' - 'as false: "False", "None", numeric zero of all types, and ' - 'empty\n' - 'strings and containers (including strings, tuples, lists,\n' - 'dictionaries, sets and frozensets). All other values are ' - 'interpreted\n' - 'as true. User-defined objects can customize their truth value ' - 'by\n' - 'providing a "__bool__()" method.\n' - '\n' - 'The operator "not" yields "True" if its argument is false, ' - '"False"\n' - 'otherwise.\n' - '\n' - 'The expression "x and y" first evaluates *x*; if *x* is false, ' - 'its\n' - 'value is returned; otherwise, *y* is evaluated and the ' - 'resulting value\n' - 'is returned.\n' - '\n' - 'The expression "x or y" first evaluates *x*; if *x* is true, ' - 'its value\n' - 'is returned; otherwise, *y* is evaluated and the resulting ' - 'value is\n' - 'returned.\n' - '\n' - '(Note that neither "and" nor "or" restrict the value and type ' - 'they\n' - 'return to "False" and "True", but rather return the last ' - 'evaluated\n' - 'argument. This is sometimes useful, e.g., if "s" is a string ' - 'that\n' - 'should be replaced by a default value if it is empty, the ' - 'expression\n' - '"s or \'foo\'" yields the desired value. 
Because "not" has to ' - 'create a\n' - 'new value, it returns a boolean value regardless of the type ' - 'of its\n' - 'argument (for example, "not \'foo\'" produces "False" rather ' - 'than "\'\'".)\n', - 'break': '\n' - 'The "break" statement\n' - '*********************\n' - '\n' - ' break_stmt ::= "break"\n' - '\n' - '"break" may only occur syntactically nested in a "for" or ' - '"while"\n' - 'loop, but not nested in a function or class definition within ' - 'that\n' - 'loop.\n' - '\n' - 'It terminates the nearest enclosing loop, skipping the optional ' - '"else"\n' - 'clause if the loop has one.\n' - '\n' - 'If a "for" loop is terminated by "break", the loop control ' - 'target\n' - 'keeps its current value.\n' - '\n' - 'When "break" passes control out of a "try" statement with a ' - '"finally"\n' - 'clause, that "finally" clause is executed before really leaving ' - 'the\n' - 'loop.\n', - 'callable-types': '\n' - 'Emulating callable objects\n' - '**************************\n' - '\n' - 'object.__call__(self[, args...])\n' - '\n' - ' Called when the instance is "called" as a function; ' - 'if this method\n' - ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' - ' "x.__call__(arg1, arg2, ...)".\n', - 'calls': '\n' - 'Calls\n' - '*****\n' - '\n' - 'A call calls a callable object (e.g., a *function*) with a ' - 'possibly\n' - 'empty series of *arguments*:\n' - '\n' - ' call ::= primary "(" [argument_list [","] | ' - 'comprehension] ")"\n' - ' argument_list ::= positional_arguments ["," ' - 'keyword_arguments]\n' - ' ["," "*" expression] ["," ' - 'keyword_arguments]\n' - ' ["," "**" expression]\n' - ' | keyword_arguments ["," "*" expression]\n' - ' ["," keyword_arguments] ["," "**" ' - 'expression]\n' - ' | "*" expression ["," keyword_arguments] ' - '["," "**" expression]\n' - ' | "**" expression\n' - ' positional_arguments ::= expression ("," expression)*\n' - ' keyword_arguments ::= keyword_item ("," keyword_item)*\n' - ' keyword_item ::= identifier "=" expression\n' - '\n' - 'An optional trailing comma may be present after the positional ' - 'and\n' - 'keyword arguments but does not affect the semantics.\n' - '\n' - 'The primary must evaluate to a callable object (user-defined\n' - 'functions, built-in functions, methods of built-in objects, ' - 'class\n' - 'objects, methods of class instances, and all objects having a\n' - '"__call__()" method are callable). All argument expressions are\n' - 'evaluated before the call is attempted. Please refer to section\n' - '*Function definitions* for the syntax of formal *parameter* ' - 'lists.\n' - '\n' - 'If keyword arguments are present, they are first converted to\n' - 'positional arguments, as follows. First, a list of unfilled ' - 'slots is\n' - 'created for the formal parameters. If there are N positional\n' - 'arguments, they are placed in the first N slots. Next, for each\n' - 'keyword argument, the identifier is used to determine the\n' - 'corresponding slot (if the identifier is the same as the first ' - 'formal\n' - 'parameter name, the first slot is used, and so on). If the slot ' - 'is\n' - 'already filled, a "TypeError" exception is raised. Otherwise, ' - 'the\n' - 'value of the argument is placed in the slot, filling it (even if ' - 'the\n' - 'expression is "None", it fills the slot). When all arguments ' - 'have\n' - 'been processed, the slots that are still unfilled are filled with ' - 'the\n' - 'corresponding default value from the function definition. 
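[Editor's aside] A sketch of the "__call__()" shorthand described under *Emulating callable objects* above; the "Adder" name is invented:

   class Adder:
       def __init__(self, n):
           self.n = n

       def __call__(self, x):
           return x + self.n

   add3 = Adder(3)
   print(add3(10))          # 13 -- shorthand for add3.__call__(10)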
' - '(Default\n' - 'values are calculated, once, when the function is defined; thus, ' - 'a\n' - 'mutable object such as a list or dictionary used as default value ' - 'will\n' - "be shared by all calls that don't specify an argument value for " - 'the\n' - 'corresponding slot; this should usually be avoided.) If there ' - 'are any\n' - 'unfilled slots for which no default value is specified, a ' - '"TypeError"\n' - 'exception is raised. Otherwise, the list of filled slots is used ' - 'as\n' - 'the argument list for the call.\n' - '\n' - '**CPython implementation detail:** An implementation may provide\n' - 'built-in functions whose positional parameters do not have names, ' - 'even\n' - "if they are 'named' for the purpose of documentation, and which\n" - 'therefore cannot be supplied by keyword. In CPython, this is the ' - 'case\n' - 'for functions implemented in C that use "PyArg_ParseTuple()" to ' - 'parse\n' - 'their arguments.\n' - '\n' - 'If there are more positional arguments than there are formal ' - 'parameter\n' - 'slots, a "TypeError" exception is raised, unless a formal ' - 'parameter\n' - 'using the syntax "*identifier" is present; in this case, that ' - 'formal\n' - 'parameter receives a tuple containing the excess positional ' - 'arguments\n' - '(or an empty tuple if there were no excess positional ' - 'arguments).\n' - '\n' - 'If any keyword argument does not correspond to a formal ' - 'parameter\n' - 'name, a "TypeError" exception is raised, unless a formal ' - 'parameter\n' - 'using the syntax "**identifier" is present; in this case, that ' - 'formal\n' - 'parameter receives a dictionary containing the excess keyword\n' - 'arguments (using the keywords as keys and the argument values as\n' - 'corresponding values), or a (new) empty dictionary if there were ' - 'no\n' - 'excess keyword arguments.\n' - '\n' - 'If the syntax "*expression" appears in the function call, ' - '"expression"\n' - 'must evaluate to an iterable. Elements from this iterable are ' - 'treated\n' - 'as if they were additional positional arguments; if there are\n' - 'positional arguments *x1*, ..., *xN*, and "expression" evaluates ' - 'to a\n' - 'sequence *y1*, ..., *yM*, this is equivalent to a call with M+N\n' - 'positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n' - '\n' - 'A consequence of this is that although the "*expression" syntax ' - 'may\n' - 'appear *after* some keyword arguments, it is processed *before* ' - 'the\n' - 'keyword arguments (and the "**expression" argument, if any -- ' - 'see\n' - 'below). So:\n' - '\n' - ' >>> def f(a, b):\n' - ' ... print(a, b)\n' - ' ...\n' - ' >>> f(b=1, *(2,))\n' - ' 2 1\n' - ' >>> f(a=1, *(2,))\n' - ' Traceback (most recent call last):\n' - ' File "", line 1, in ?\n' - " TypeError: f() got multiple values for keyword argument 'a'\n" - ' >>> f(1, *(2,))\n' - ' 1 2\n' - '\n' - 'It is unusual for both keyword arguments and the "*expression" ' - 'syntax\n' - 'to be used in the same call, so in practice this confusion does ' - 'not\n' - 'arise.\n' - '\n' - 'If the syntax "**expression" appears in the function call,\n' - '"expression" must evaluate to a mapping, the contents of which ' - 'are\n' - 'treated as additional keyword arguments. 
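[Editor's aside] A quick sketch of how excess positional and keyword arguments are collected under the rules above; the function name is illustrative:

   def report(a, *args, **kwargs):
       print(a, args, kwargs)

   report(1, 2, 3, x=4)           # 1 (2, 3) {'x': 4}
   report(*[1, 2], **{'y': 5})    # 1 (2,) {'y': 5}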
In the case of a ' - 'keyword\n' - 'appearing in both "expression" and as an explicit keyword ' - 'argument, a\n' - '"TypeError" exception is raised.\n' - '\n' - 'Formal parameters using the syntax "*identifier" or ' - '"**identifier"\n' - 'cannot be used as positional argument slots or as keyword ' - 'argument\n' - 'names.\n' - '\n' - 'A call always returns some value, possibly "None", unless it ' - 'raises an\n' - 'exception. How this value is computed depends on the type of ' - 'the\n' - 'callable object.\n' - '\n' - 'If it is---\n' - '\n' - 'a user-defined function:\n' - ' The code block for the function is executed, passing it the\n' - ' argument list. The first thing the code block will do is bind ' - 'the\n' - ' formal parameters to the arguments; this is described in ' - 'section\n' - ' *Function definitions*. When the code block executes a ' - '"return"\n' - ' statement, this specifies the return value of the function ' - 'call.\n' - '\n' - 'a built-in function or method:\n' - ' The result is up to the interpreter; see *Built-in Functions* ' - 'for\n' - ' the descriptions of built-in functions and methods.\n' - '\n' - 'a class object:\n' - ' A new instance of that class is returned.\n' - '\n' - 'a class instance method:\n' - ' The corresponding user-defined function is called, with an ' - 'argument\n' - ' list that is one longer than the argument list of the call: ' - 'the\n' - ' instance becomes the first argument.\n' - '\n' - 'a class instance:\n' - ' The class must define a "__call__()" method; the effect is ' - 'then the\n' - ' same as if that method was called.\n', - 'class': '\n' - 'Class definitions\n' - '*****************\n' - '\n' - 'A class definition defines a class object (see section *The ' - 'standard\n' - 'type hierarchy*):\n' - '\n' - ' classdef ::= [decorators] "class" classname [inheritance] ' - '":" suite\n' - ' inheritance ::= "(" [parameter_list] ")"\n' - ' classname ::= identifier\n' - '\n' - 'A class definition is an executable statement. The inheritance ' - 'list\n' - 'usually gives a list of base classes (see *Customizing class ' - 'creation*\n' - 'for more advanced uses), so each item in the list should evaluate ' - 'to a\n' - 'class object which allows subclassing. Classes without an ' - 'inheritance\n' - 'list inherit, by default, from the base class "object"; hence,\n' - '\n' - ' class Foo:\n' - ' pass\n' - '\n' - 'is equivalent to\n' - '\n' - ' class Foo(object):\n' - ' pass\n' - '\n' - "The class's suite is then executed in a new execution frame (see\n" - '*Naming and binding*), using a newly created local namespace and ' - 'the\n' - 'original global namespace. (Usually, the suite contains mostly\n' - "function definitions.) When the class's suite finishes " - 'execution, its\n' - 'execution frame is discarded but its local namespace is saved. ' - '[4] A\n' - 'class object is then created using the inheritance list for the ' - 'base\n' - 'classes and the saved local namespace for the attribute ' - 'dictionary.\n' - 'The class name is bound to this class object in the original ' - 'local\n' - 'namespace.\n' - '\n' - 'Class creation can be customized heavily using *metaclasses*.\n' - '\n' - 'Classes can also be decorated: just like when decorating ' - 'functions,\n' - '\n' - ' @f1(arg)\n' - ' @f2\n' - ' class Foo: pass\n' - '\n' - 'is equivalent to\n' - '\n' - ' class Foo: pass\n' - ' Foo = f1(arg)(f2(Foo))\n' - '\n' - 'The evaluation rules for the decorator expressions are the same ' - 'as for\n' - 'function decorators. 
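[Editor's aside] A minimal sketch of class decoration as described above; the "register" decorator is invented for illustration:

   def register(cls):
       print('registering', cls.__name__)
       return cls

   @register
   class Foo:
       pass

   # equivalent to:
   #     class Foo:
   #         pass
   #     Foo = register(Foo)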
The result must be a class object, which is ' - 'then\n' - 'bound to the class name.\n' - '\n' - "**Programmer's note:** Variables defined in the class definition " - 'are\n' - 'class attributes; they are shared by instances. Instance ' - 'attributes\n' - 'can be set in a method with "self.name = value". Both class and\n' - 'instance attributes are accessible through the notation ' - '""self.name"",\n' - 'and an instance attribute hides a class attribute with the same ' - 'name\n' - 'when accessed in this way. Class attributes can be used as ' - 'defaults\n' - 'for instance attributes, but using mutable values there can lead ' - 'to\n' - 'unexpected results. *Descriptors* can be used to create ' - 'instance\n' - 'variables with different implementation details.\n' - '\n' - 'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n' - ' Class Decorators\n', - 'comparisons': '\n' - 'Comparisons\n' - '***********\n' - '\n' - 'Unlike C, all comparison operations in Python have the same ' - 'priority,\n' - 'which is lower than that of any arithmetic, shifting or ' - 'bitwise\n' - 'operation. Also unlike C, expressions like "a < b < c" ' - 'have the\n' - 'interpretation that is conventional in mathematics:\n' - '\n' - ' comparison ::= or_expr ( comp_operator or_expr )*\n' - ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n' - ' | "is" ["not"] | ["not"] "in"\n' - '\n' - 'Comparisons yield boolean values: "True" or "False".\n' - '\n' - 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" ' - 'is\n' - 'equivalent to "x < y and y <= z", except that "y" is ' - 'evaluated only\n' - 'once (but in both cases "z" is not evaluated at all when "x ' - '< y" is\n' - 'found to be false).\n' - '\n' - 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions ' - 'and *op1*,\n' - '*op2*, ..., *opN* are comparison operators, then "a op1 b ' - 'op2 c ... y\n' - 'opN z" is equivalent to "a op1 b and b op2 c and ... y opN ' - 'z", except\n' - 'that each expression is evaluated at most once.\n' - '\n' - 'Note that "a op1 b op2 c" doesn\'t imply any kind of ' - 'comparison between\n' - '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal ' - '(though\n' - 'perhaps not pretty).\n' - '\n' - 'The operators "<", ">", "==", ">=", "<=", and "!=" compare ' - 'the values\n' - 'of two objects. The objects need not have the same type. ' - 'If both are\n' - 'numbers, they are converted to a common type. Otherwise, ' - 'the "==" and\n' - '"!=" operators *always* consider objects of different types ' - 'to be\n' - 'unequal, while the "<", ">", ">=" and "<=" operators raise ' - 'a\n' - '"TypeError" when comparing objects of different types that ' - 'do not\n' - 'implement these operators for the given pair of types. You ' - 'can\n' - 'control comparison behavior of objects of non-built-in ' - 'types by\n' - 'defining rich comparison methods like "__gt__()", described ' - 'in section\n' - '*Basic customization*.\n' - '\n' - 'Comparison of objects of the same type depends on the ' - 'type:\n' - '\n' - '* Numbers are compared arithmetically.\n' - '\n' - '* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are ' - 'special. They\n' - ' are identical to themselves, "x is x" but are not equal ' - 'to\n' - ' themselves, "x != x". Additionally, comparing any value ' - 'to a\n' - ' not-a-number value will return "False". 
For example, ' - 'both "3 <\n' - ' float(\'NaN\')" and "float(\'NaN\') < 3" will return ' - '"False".\n' - '\n' - '* Bytes objects are compared lexicographically using the ' - 'numeric\n' - ' values of their elements.\n' - '\n' - '* Strings are compared lexicographically using the numeric\n' - ' equivalents (the result of the built-in function "ord()") ' - 'of their\n' - " characters. [3] String and bytes object can't be " - 'compared!\n' - '\n' - '* Tuples and lists are compared lexicographically using ' - 'comparison\n' - ' of corresponding elements. This means that to compare ' - 'equal, each\n' - ' element must compare equal and the two sequences must be ' - 'of the same\n' - ' type and have the same length.\n' - '\n' - ' If not equal, the sequences are ordered the same as their ' - 'first\n' - ' differing elements. For example, "[1,2,x] <= [1,2,y]" ' - 'has the same\n' - ' value as "x <= y". If the corresponding element does not ' - 'exist, the\n' - ' shorter sequence is ordered first (for example, "[1,2] < ' - '[1,2,3]").\n' - '\n' - '* Mappings (dictionaries) compare equal if and only if they ' - 'have the\n' - ' same "(key, value)" pairs. Order comparisons "(\'<\', ' - "'<=', '>=',\n" - ' \'>\')" raise "TypeError".\n' - '\n' - '* Sets and frozensets define comparison operators to mean ' - 'subset and\n' - ' superset tests. Those relations do not define total ' - 'orderings (the\n' - ' two sets "{1,2}" and "{2,3}" are not equal, nor subsets ' - 'of one\n' - ' another, nor supersets of one another). Accordingly, ' - 'sets are not\n' - ' appropriate arguments for functions which depend on total ' - 'ordering.\n' - ' For example, "min()", "max()", and "sorted()" produce ' - 'undefined\n' - ' results given a list of sets as inputs.\n' - '\n' - '* Most other objects of built-in types compare unequal ' - 'unless they\n' - ' are the same object; the choice whether one object is ' - 'considered\n' - ' smaller or larger than another one is made arbitrarily ' - 'but\n' - ' consistently within one execution of a program.\n' - '\n' - 'Comparison of objects of differing types depends on whether ' - 'either of\n' - 'the types provide explicit support for the comparison. ' - 'Most numeric\n' - 'types can be compared with one another. When cross-type ' - 'comparison is\n' - 'not supported, the comparison method returns ' - '"NotImplemented".\n' - '\n' - 'The operators "in" and "not in" test for membership. "x in ' - 's"\n' - 'evaluates to true if *x* is a member of *s*, and false ' - 'otherwise. "x\n' - 'not in s" returns the negation of "x in s". All built-in ' - 'sequences\n' - 'and set types support this as well as dictionary, for which ' - '"in" tests\n' - 'whether the dictionary has a given key. For container types ' - 'such as\n' - 'list, tuple, set, frozenset, dict, or collections.deque, ' - 'the\n' - 'expression "x in y" is equivalent to "any(x is e or x == e ' - 'for e in\n' - 'y)".\n' - '\n' - 'For the string and bytes types, "x in y" is true if and ' - 'only if *x* is\n' - 'a substring of *y*. An equivalent test is "y.find(x) != ' - '-1". 
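[Editor's aside] A brief sketch of chained comparisons and membership tests as described above:

   x, y, z = 1, 2, 3
   print(x < y <= z)              # True -- like (x < y) and (y <= z), y evaluated once
   print('bc' in 'abcd')          # True -- substring test for strings
   print(2 in {1: 'a', 2: 'b'})   # True -- "in" tests a dictionary's keys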
Empty\n' - 'strings are always considered to be a substring of any ' - 'other string,\n' - 'so """ in "abc"" will return "True".\n' - '\n' - 'For user-defined classes which define the "__contains__()" ' - 'method, "x\n' - 'in y" is true if and only if "y.__contains__(x)" is true.\n' - '\n' - 'For user-defined classes which do not define ' - '"__contains__()" but do\n' - 'define "__iter__()", "x in y" is true if some value "z" ' - 'with "x == z"\n' - 'is produced while iterating over "y". If an exception is ' - 'raised\n' - 'during the iteration, it is as if "in" raised that ' - 'exception.\n' - '\n' - 'Lastly, the old-style iteration protocol is tried: if a ' - 'class defines\n' - '"__getitem__()", "x in y" is true if and only if there is a ' - 'non-\n' - 'negative integer index *i* such that "x == y[i]", and all ' - 'lower\n' - 'integer indices do not raise "IndexError" exception. (If ' - 'any other\n' - 'exception is raised, it is as if "in" raised that ' - 'exception).\n' - '\n' - 'The operator "not in" is defined to have the inverse true ' - 'value of\n' - '"in".\n' - '\n' - 'The operators "is" and "is not" test for object identity: ' - '"x is y" is\n' - 'true if and only if *x* and *y* are the same object. "x is ' - 'not y"\n' - 'yields the inverse truth value. [4]\n', - 'compound': '\n' - 'Compound statements\n' - '*******************\n' - '\n' - 'Compound statements contain (groups of) other statements; they ' - 'affect\n' - 'or control the execution of those other statements in some ' - 'way. In\n' - 'general, compound statements span multiple lines, although in ' - 'simple\n' - 'incarnations a whole compound statement may be contained in ' - 'one line.\n' - '\n' - 'The "if", "while" and "for" statements implement traditional ' - 'control\n' - 'flow constructs. "try" specifies exception handlers and/or ' - 'cleanup\n' - 'code for a group of statements, while the "with" statement ' - 'allows the\n' - 'execution of initialization and finalization code around a ' - 'block of\n' - 'code. Function and class definitions are also syntactically ' - 'compound\n' - 'statements.\n' - '\n' - "A compound statement consists of one or more 'clauses.' A " - 'clause\n' - "consists of a header and a 'suite.' The clause headers of a\n" - 'particular compound statement are all at the same indentation ' - 'level.\n' - 'Each clause header begins with a uniquely identifying keyword ' - 'and ends\n' - 'with a colon. A suite is a group of statements controlled by ' - 'a\n' - 'clause. A suite can be one or more semicolon-separated ' - 'simple\n' - 'statements on the same line as the header, following the ' - "header's\n" - 'colon, or it can be one or more indented statements on ' - 'subsequent\n' - 'lines. 
Only the latter form of a suite can contain nested ' - 'compound\n' - 'statements; the following is illegal, mostly because it ' - "wouldn't be\n" - 'clear to which "if" clause a following "else" clause would ' - 'belong:\n' - '\n' - ' if test1: if test2: print(x)\n' - '\n' - 'Also note that the semicolon binds tighter than the colon in ' - 'this\n' - 'context, so that in the following example, either all or none ' - 'of the\n' - '"print()" calls are executed:\n' - '\n' - ' if x < y < z: print(x); print(y); print(z)\n' - '\n' - 'Summarizing:\n' - '\n' - ' compound_stmt ::= if_stmt\n' - ' | while_stmt\n' - ' | for_stmt\n' - ' | try_stmt\n' - ' | with_stmt\n' - ' | funcdef\n' - ' | classdef\n' - ' | async_with_stmt\n' - ' | async_for_stmt\n' - ' | async_funcdef\n' - ' suite ::= stmt_list NEWLINE | NEWLINE INDENT ' - 'statement+ DEDENT\n' - ' statement ::= stmt_list NEWLINE | compound_stmt\n' - ' stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n' - '\n' - 'Note that statements always end in a "NEWLINE" possibly ' - 'followed by a\n' - '"DEDENT". Also note that optional continuation clauses always ' - 'begin\n' - 'with a keyword that cannot start a statement, thus there are ' - 'no\n' - 'ambiguities (the \'dangling "else"\' problem is solved in ' - 'Python by\n' - 'requiring nested "if" statements to be indented).\n' - '\n' - 'The formatting of the grammar rules in the following sections ' - 'places\n' - 'each clause on a separate line for clarity.\n' - '\n' - '\n' - 'The "if" statement\n' - '==================\n' - '\n' - 'The "if" statement is used for conditional execution:\n' - '\n' - ' if_stmt ::= "if" expression ":" suite\n' - ' ( "elif" expression ":" suite )*\n' - ' ["else" ":" suite]\n' - '\n' - 'It selects exactly one of the suites by evaluating the ' - 'expressions one\n' - 'by one until one is found to be true (see section *Boolean ' - 'operations*\n' - 'for the definition of true and false); then that suite is ' - 'executed\n' - '(and no other part of the "if" statement is executed or ' - 'evaluated).\n' - 'If all expressions are false, the suite of the "else" clause, ' - 'if\n' - 'present, is executed.\n' - '\n' - '\n' - 'The "while" statement\n' - '=====================\n' - '\n' - 'The "while" statement is used for repeated execution as long ' - 'as an\n' - 'expression is true:\n' - '\n' - ' while_stmt ::= "while" expression ":" suite\n' - ' ["else" ":" suite]\n' - '\n' - 'This repeatedly tests the expression and, if it is true, ' - 'executes the\n' - 'first suite; if the expression is false (which may be the ' - 'first time\n' - 'it is tested) the suite of the "else" clause, if present, is ' - 'executed\n' - 'and the loop terminates.\n' - '\n' - 'A "break" statement executed in the first suite terminates the ' - 'loop\n' - 'without executing the "else" clause\'s suite. A "continue" ' - 'statement\n' - 'executed in the first suite skips the rest of the suite and ' - 'goes back\n' - 'to testing the expression.\n' - '\n' - '\n' - 'The "for" statement\n' - '===================\n' - '\n' - 'The "for" statement is used to iterate over the elements of a ' - 'sequence\n' - '(such as a string, tuple or list) or other iterable object:\n' - '\n' - ' for_stmt ::= "for" target_list "in" expression_list ":" ' - 'suite\n' - ' ["else" ":" suite]\n' - '\n' - 'The expression list is evaluated once; it should yield an ' - 'iterable\n' - 'object. An iterator is created for the result of the\n' - '"expression_list". 
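[Editor's aside] A sketch of the "while" statement's "else" clause described above:

   n = 3
   while n:
       n -= 1
   else:
       print('condition became false; no break occurred')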
The suite is then executed once for each ' - 'item\n' - 'provided by the iterator, in the order returned by the ' - 'iterator. Each\n' - 'item in turn is assigned to the target list using the standard ' - 'rules\n' - 'for assignments (see *Assignment statements*), and then the ' - 'suite is\n' - 'executed. When the items are exhausted (which is immediately ' - 'when the\n' - 'sequence is empty or an iterator raises a "StopIteration" ' - 'exception),\n' - 'the suite in the "else" clause, if present, is executed, and ' - 'the loop\n' - 'terminates.\n' - '\n' - 'A "break" statement executed in the first suite terminates the ' - 'loop\n' - 'without executing the "else" clause\'s suite. A "continue" ' - 'statement\n' - 'executed in the first suite skips the rest of the suite and ' - 'continues\n' - 'with the next item, or with the "else" clause if there is no ' - 'next\n' - 'item.\n' - '\n' - 'The for-loop makes assignments to the variables(s) in the ' - 'target list.\n' - 'This overwrites all previous assignments to those variables ' - 'including\n' - 'those made in the suite of the for-loop:\n' - '\n' - ' for i in range(10):\n' - ' print(i)\n' - ' i = 5 # this will not affect the for-loop\n' - ' # because i will be overwritten with ' - 'the next\n' - ' # index in the range\n' - '\n' - 'Names in the target list are not deleted when the loop is ' - 'finished,\n' - 'but if the sequence is empty, they will not have been assigned ' - 'to at\n' - 'all by the loop. Hint: the built-in function "range()" ' - 'returns an\n' - 'iterator of integers suitable to emulate the effect of ' - 'Pascal\'s "for i\n' - ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, ' - '2]".\n' - '\n' - 'Note: There is a subtlety when the sequence is being modified ' - 'by the\n' - ' loop (this can only occur for mutable sequences, i.e. ' - 'lists). An\n' - ' internal counter is used to keep track of which item is used ' - 'next,\n' - ' and this is incremented on each iteration. When this ' - 'counter has\n' - ' reached the length of the sequence the loop terminates. ' - 'This means\n' - ' that if the suite deletes the current (or a previous) item ' - 'from the\n' - ' sequence, the next item will be skipped (since it gets the ' - 'index of\n' - ' the current item which has already been treated). Likewise, ' - 'if the\n' - ' suite inserts an item in the sequence before the current ' - 'item, the\n' - ' current item will be treated again the next time through the ' - 'loop.\n' - ' This can lead to nasty bugs that can be avoided by making a\n' - ' temporary copy using a slice of the whole sequence, e.g.,\n' - '\n' - ' for x in a[:]:\n' - ' if x < 0: a.remove(x)\n' - '\n' - '\n' - 'The "try" statement\n' - '===================\n' - '\n' - 'The "try" statement specifies exception handlers and/or ' - 'cleanup code\n' - 'for a group of statements:\n' - '\n' - ' try_stmt ::= try1_stmt | try2_stmt\n' - ' try1_stmt ::= "try" ":" suite\n' - ' ("except" [expression ["as" identifier]] ":" ' - 'suite)+\n' - ' ["else" ":" suite]\n' - ' ["finally" ":" suite]\n' - ' try2_stmt ::= "try" ":" suite\n' - ' "finally" ":" suite\n' - '\n' - 'The "except" clause(s) specify one or more exception handlers. ' - 'When no\n' - 'exception occurs in the "try" clause, no exception handler is\n' - 'executed. When an exception occurs in the "try" suite, a ' - 'search for an\n' - 'exception handler is started. This search inspects the except ' - 'clauses\n' - 'in turn until one is found that matches the exception. 
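[Editor's aside] A short sketch of how "break" interacts with a "for" loop's "else" clause, as described above:

   for x in [1, 3, 5]:
       if x % 2 == 0:
           print('found an even number:', x)
           break
   else:
       print('no even number found')   # runs only when the loop was not broken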
An ' - 'expression-\n' - 'less except clause, if present, must be last; it matches any\n' - 'exception. For an except clause with an expression, that ' - 'expression\n' - 'is evaluated, and the clause matches the exception if the ' - 'resulting\n' - 'object is "compatible" with the exception. An object is ' - 'compatible\n' - 'with an exception if it is the class or a base class of the ' - 'exception\n' - 'object or a tuple containing an item compatible with the ' - 'exception.\n' - '\n' - 'If no except clause matches the exception, the search for an ' - 'exception\n' - 'handler continues in the surrounding code and on the ' - 'invocation stack.\n' - '[1]\n' - '\n' - 'If the evaluation of an expression in the header of an except ' - 'clause\n' - 'raises an exception, the original search for a handler is ' - 'canceled and\n' - 'a search starts for the new exception in the surrounding code ' - 'and on\n' - 'the call stack (it is treated as if the entire "try" statement ' - 'raised\n' - 'the exception).\n' - '\n' - 'When a matching except clause is found, the exception is ' - 'assigned to\n' - 'the target specified after the "as" keyword in that except ' - 'clause, if\n' - "present, and the except clause's suite is executed. All " - 'except\n' - 'clauses must have an executable block. When the end of this ' - 'block is\n' - 'reached, execution continues normally after the entire try ' - 'statement.\n' - '(This means that if two nested handlers exist for the same ' - 'exception,\n' - 'and the exception occurs in the try clause of the inner ' - 'handler, the\n' - 'outer handler will not handle the exception.)\n' - '\n' - 'When an exception has been assigned using "as target", it is ' - 'cleared\n' - 'at the end of the except clause. This is as if\n' - '\n' - ' except E as N:\n' - ' foo\n' - '\n' - 'was translated to\n' - '\n' - ' except E as N:\n' - ' try:\n' - ' foo\n' - ' finally:\n' - ' del N\n' - '\n' - 'This means the exception must be assigned to a different name ' - 'to be\n' - 'able to refer to it after the except clause. Exceptions are ' - 'cleared\n' - 'because with the traceback attached to them, they form a ' - 'reference\n' - 'cycle with the stack frame, keeping all locals in that frame ' - 'alive\n' - 'until the next garbage collection occurs.\n' - '\n' - "Before an except clause's suite is executed, details about " - 'the\n' - 'exception are stored in the "sys" module and can be accessed ' - 'via\n' - '"sys.exc_info()". "sys.exc_info()" returns a 3-tuple ' - 'consisting of the\n' - 'exception class, the exception instance and a traceback object ' - '(see\n' - 'section *The standard type hierarchy*) identifying the point ' - 'in the\n' - 'program where the exception occurred. "sys.exc_info()" values ' - 'are\n' - 'restored to their previous values (before the call) when ' - 'returning\n' - 'from a function that handled an exception.\n' - '\n' - 'The optional "else" clause is executed if and when control ' - 'flows off\n' - 'the end of the "try" clause. [2] Exceptions in the "else" ' - 'clause are\n' - 'not handled by the preceding "except" clauses.\n' - '\n' - 'If "finally" is present, it specifies a \'cleanup\' handler. ' - 'The "try"\n' - 'clause is executed, including any "except" and "else" ' - 'clauses. If an\n' - 'exception occurs in any of the clauses and is not handled, ' - 'the\n' - 'exception is temporarily saved. The "finally" clause is ' - 'executed. If\n' - 'there is a saved exception it is re-raised at the end of the ' - '"finally"\n' - 'clause. 
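[Editor's aside] A sketch of the "try" clause ordering described above; the "parse" function is illustrative:

   def parse(text):
       try:
           value = int(text)
       except ValueError as exc:
           print('not a number:', exc)
       else:
           print('parsed', value)       # runs only when no exception occurred
       finally:
           print('cleanup always runs')

   parse('42')
   parse('spam')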
If the "finally" clause raises another exception, the ' - 'saved\n' - 'exception is set as the context of the new exception. If the ' - '"finally"\n' - 'clause executes a "return" or "break" statement, the saved ' - 'exception\n' - 'is discarded:\n' - '\n' - ' >>> def f():\n' - ' ... try:\n' - ' ... 1/0\n' - ' ... finally:\n' - ' ... return 42\n' - ' ...\n' - ' >>> f()\n' - ' 42\n' - '\n' - 'The exception information is not available to the program ' - 'during\n' - 'execution of the "finally" clause.\n' - '\n' - 'When a "return", "break" or "continue" statement is executed ' - 'in the\n' - '"try" suite of a "try"..."finally" statement, the "finally" ' - 'clause is\n' - 'also executed \'on the way out.\' A "continue" statement is ' - 'illegal in\n' - 'the "finally" clause. (The reason is a problem with the ' - 'current\n' - 'implementation --- this restriction may be lifted in the ' - 'future).\n' - '\n' - 'The return value of a function is determined by the last ' - '"return"\n' - 'statement executed. Since the "finally" clause always ' - 'executes, a\n' - '"return" statement executed in the "finally" clause will ' - 'always be the\n' - 'last one executed:\n' - '\n' - ' >>> def foo():\n' - ' ... try:\n' - " ... return 'try'\n" - ' ... finally:\n' - " ... return 'finally'\n" - ' ...\n' - ' >>> foo()\n' - " 'finally'\n" - '\n' - 'Additional information on exceptions can be found in section\n' - '*Exceptions*, and information on using the "raise" statement ' - 'to\n' - 'generate exceptions may be found in section *The raise ' - 'statement*.\n' - '\n' - '\n' - 'The "with" statement\n' - '====================\n' - '\n' - 'The "with" statement is used to wrap the execution of a block ' - 'with\n' - 'methods defined by a context manager (see section *With ' - 'Statement\n' - 'Context Managers*). This allows common ' - '"try"..."except"..."finally"\n' - 'usage patterns to be encapsulated for convenient reuse.\n' - '\n' - ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n' - ' with_item ::= expression ["as" target]\n' - '\n' - 'The execution of the "with" statement with one "item" proceeds ' - 'as\n' - 'follows:\n' - '\n' - '1. The context expression (the expression given in the ' - '"with_item")\n' - ' is evaluated to obtain a context manager.\n' - '\n' - '2. The context manager\'s "__exit__()" is loaded for later ' - 'use.\n' - '\n' - '3. The context manager\'s "__enter__()" method is invoked.\n' - '\n' - '4. If a target was included in the "with" statement, the ' - 'return\n' - ' value from "__enter__()" is assigned to it.\n' - '\n' - ' Note: The "with" statement guarantees that if the ' - '"__enter__()"\n' - ' method returns without an error, then "__exit__()" will ' - 'always be\n' - ' called. Thus, if an error occurs during the assignment to ' - 'the\n' - ' target list, it will be treated the same as an error ' - 'occurring\n' - ' within the suite would be. See step 6 below.\n' - '\n' - '5. The suite is executed.\n' - '\n' - '6. The context manager\'s "__exit__()" method is invoked. If ' - 'an\n' - ' exception caused the suite to be exited, its type, value, ' - 'and\n' - ' traceback are passed as arguments to "__exit__()". 
' - 'Otherwise, three\n' - ' "None" arguments are supplied.\n' - '\n' - ' If the suite was exited due to an exception, and the return ' - 'value\n' - ' from the "__exit__()" method was false, the exception is ' - 'reraised.\n' - ' If the return value was true, the exception is suppressed, ' - 'and\n' - ' execution continues with the statement following the ' - '"with"\n' - ' statement.\n' - '\n' - ' If the suite was exited for any reason other than an ' - 'exception, the\n' - ' return value from "__exit__()" is ignored, and execution ' - 'proceeds\n' - ' at the normal location for the kind of exit that was ' - 'taken.\n' - '\n' - 'With more than one item, the context managers are processed as ' - 'if\n' - 'multiple "with" statements were nested:\n' - '\n' - ' with A() as a, B() as b:\n' - ' suite\n' - '\n' - 'is equivalent to\n' - '\n' - ' with A() as a:\n' - ' with B() as b:\n' - ' suite\n' - '\n' - 'Changed in version 3.1: Support for multiple context ' - 'expressions.\n' - '\n' - 'See also: **PEP 0343** - The "with" statement\n' - '\n' - ' The specification, background, and examples for the ' - 'Python "with"\n' - ' statement.\n' - '\n' - '\n' - 'Function definitions\n' - '====================\n' - '\n' - 'A function definition defines a user-defined function object ' - '(see\n' - 'section *The standard type hierarchy*):\n' - '\n' - ' funcdef ::= [decorators] "def" funcname "(" ' - '[parameter_list] ")" ["->" expression] ":" suite\n' - ' decorators ::= decorator+\n' - ' decorator ::= "@" dotted_name ["(" [parameter_list ' - '[","]] ")"] NEWLINE\n' - ' dotted_name ::= identifier ("." identifier)*\n' - ' parameter_list ::= (defparameter ",")*\n' - ' | "*" [parameter] ("," defparameter)* ' - '["," "**" parameter]\n' - ' | "**" parameter\n' - ' | defparameter [","] )\n' - ' parameter ::= identifier [":" expression]\n' - ' defparameter ::= parameter ["=" expression]\n' - ' funcname ::= identifier\n' - '\n' - 'A function definition is an executable statement. Its ' - 'execution binds\n' - 'the function name in the current local namespace to a function ' - 'object\n' - '(a wrapper around the executable code for the function). ' - 'This\n' - 'function object contains a reference to the current global ' - 'namespace\n' - 'as the global namespace to be used when the function is ' - 'called.\n' - '\n' - 'The function definition does not execute the function body; ' - 'this gets\n' - 'executed only when the function is called. [3]\n' - '\n' - 'A function definition may be wrapped by one or more ' - '*decorator*\n' - 'expressions. Decorator expressions are evaluated when the ' - 'function is\n' - 'defined, in the scope that contains the function definition. ' - 'The\n' - 'result must be a callable, which is invoked with the function ' - 'object\n' - 'as the only argument. The returned value is bound to the ' - 'function name\n' - 'instead of the function object. Multiple decorators are ' - 'applied in\n' - 'nested fashion. For example, the following code\n' - '\n' - ' @f1(arg)\n' - ' @f2\n' - ' def func(): pass\n' - '\n' - 'is equivalent to\n' - '\n' - ' def func(): pass\n' - ' func = f1(arg)(f2(func))\n' - '\n' - 'When one or more *parameters* have the form *parameter* "="\n' - '*expression*, the function is said to have "default parameter ' - 'values."\n' - 'For a parameter with a default value, the corresponding ' - '*argument* may\n' - "be omitted from a call, in which case the parameter's default " - 'value is\n' - 'substituted. 
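For instance (a small sketch; the function and parameter names are illustrative only):

    def greet(name, greeting="Hello"):
        return "%s, %s!" % (greeting, name)

    greet("Ada")              # -> 'Hello, Ada!'  (default value substituted)
    greet("Ada", "Welcome")   # -> 'Welcome, Ada!'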
If a parameter has a default value, all ' - 'following\n' - 'parameters up until the ""*"" must also have a default value ' - '--- this\n' - 'is a syntactic restriction that is not expressed by the ' - 'grammar.\n' - '\n' - '**Default parameter values are evaluated from left to right ' - 'when the\n' - 'function definition is executed.** This means that the ' - 'expression is\n' - 'evaluated once, when the function is defined, and that the ' - 'same "pre-\n' - 'computed" value is used for each call. This is especially ' - 'important\n' - 'to understand when a default parameter is a mutable object, ' - 'such as a\n' - 'list or a dictionary: if the function modifies the object ' - '(e.g. by\n' - 'appending an item to a list), the default value is in effect ' - 'modified.\n' - 'This is generally not what was intended. A way around this is ' - 'to use\n' - '"None" as the default, and explicitly test for it in the body ' - 'of the\n' - 'function, e.g.:\n' - '\n' - ' def whats_on_the_telly(penguin=None):\n' - ' if penguin is None:\n' - ' penguin = []\n' - ' penguin.append("property of the zoo")\n' - ' return penguin\n' - '\n' - 'Function call semantics are described in more detail in ' - 'section\n' - '*Calls*. A function call always assigns values to all ' - 'parameters\n' - 'mentioned in the parameter list, either from position ' - 'arguments, from\n' - 'keyword arguments, or from default values. If the form\n' - '""*identifier"" is present, it is initialized to a tuple ' - 'receiving any\n' - 'excess positional parameters, defaulting to the empty tuple. ' - 'If the\n' - 'form ""**identifier"" is present, it is initialized to a new\n' - 'dictionary receiving any excess keyword arguments, defaulting ' - 'to a new\n' - 'empty dictionary. Parameters after ""*"" or ""*identifier"" ' - 'are\n' - 'keyword-only parameters and may only be passed used keyword ' - 'arguments.\n' - '\n' - 'Parameters may have annotations of the form "": expression"" ' - 'following\n' - 'the parameter name. Any parameter may have an annotation even ' - 'those\n' - 'of the form "*identifier" or "**identifier". Functions may ' - 'have\n' - '"return" annotation of the form ""-> expression"" after the ' - 'parameter\n' - 'list. These annotations can be any valid Python expression ' - 'and are\n' - 'evaluated when the function definition is executed. ' - 'Annotations may\n' - 'be evaluated in a different order than they appear in the ' - 'source code.\n' - 'The presence of annotations does not change the semantics of ' - 'a\n' - 'function. The annotation values are available as values of a\n' - "dictionary keyed by the parameters' names in the " - '"__annotations__"\n' - 'attribute of the function object.\n' - '\n' - 'It is also possible to create anonymous functions (functions ' - 'not bound\n' - 'to a name), for immediate use in expressions. This uses ' - 'lambda\n' - 'expressions, described in section *Lambdas*. Note that the ' - 'lambda\n' - 'expression is merely a shorthand for a simplified function ' - 'definition;\n' - 'a function defined in a ""def"" statement can be passed around ' - 'or\n' - 'assigned to another name just like a function defined by a ' - 'lambda\n' - 'expression. The ""def"" form is actually more powerful since ' - 'it\n' - 'allows the execution of multiple statements and annotations.\n' - '\n' - "**Programmer's note:** Functions are first-class objects. A " - '""def""\n' - 'statement executed inside a function definition defines a ' - 'local\n' - 'function that can be returned or passed around. 
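A minimal sketch of such a local function being returned (make_adder is a hypothetical name used only for illustration):

    def make_adder(n):
        def add(x):
            return x + n      # n is a free variable of add
        return add

    add_three = make_adder(3)
    add_three(4)              # -> 7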
Free ' - 'variables used\n' - 'in the nested function can access the local variables of the ' - 'function\n' - 'containing the def. See section *Naming and binding* for ' - 'details.\n' - '\n' - 'See also: **PEP 3107** - Function Annotations\n' - '\n' - ' The original specification for function annotations.\n' - '\n' - '\n' - 'Class definitions\n' - '=================\n' - '\n' - 'A class definition defines a class object (see section *The ' - 'standard\n' - 'type hierarchy*):\n' - '\n' - ' classdef ::= [decorators] "class" classname ' - '[inheritance] ":" suite\n' - ' inheritance ::= "(" [parameter_list] ")"\n' - ' classname ::= identifier\n' - '\n' - 'A class definition is an executable statement. The ' - 'inheritance list\n' - 'usually gives a list of base classes (see *Customizing class ' - 'creation*\n' - 'for more advanced uses), so each item in the list should ' - 'evaluate to a\n' - 'class object which allows subclassing. Classes without an ' - 'inheritance\n' - 'list inherit, by default, from the base class "object"; ' - 'hence,\n' - '\n' - ' class Foo:\n' - ' pass\n' - '\n' - 'is equivalent to\n' - '\n' - ' class Foo(object):\n' - ' pass\n' - '\n' - "The class's suite is then executed in a new execution frame " - '(see\n' - '*Naming and binding*), using a newly created local namespace ' - 'and the\n' - 'original global namespace. (Usually, the suite contains ' - 'mostly\n' - "function definitions.) When the class's suite finishes " - 'execution, its\n' - 'execution frame is discarded but its local namespace is saved. ' - '[4] A\n' - 'class object is then created using the inheritance list for ' - 'the base\n' - 'classes and the saved local namespace for the attribute ' - 'dictionary.\n' - 'The class name is bound to this class object in the original ' - 'local\n' - 'namespace.\n' - '\n' - 'Class creation can be customized heavily using *metaclasses*.\n' - '\n' - 'Classes can also be decorated: just like when decorating ' - 'functions,\n' - '\n' - ' @f1(arg)\n' - ' @f2\n' - ' class Foo: pass\n' - '\n' - 'is equivalent to\n' - '\n' - ' class Foo: pass\n' - ' Foo = f1(arg)(f2(Foo))\n' - '\n' - 'The evaluation rules for the decorator expressions are the ' - 'same as for\n' - 'function decorators. The result must be a class object, which ' - 'is then\n' - 'bound to the class name.\n' - '\n' - "**Programmer's note:** Variables defined in the class " - 'definition are\n' - 'class attributes; they are shared by instances. Instance ' - 'attributes\n' - 'can be set in a method with "self.name = value". Both class ' - 'and\n' - 'instance attributes are accessible through the notation ' - '""self.name"",\n' - 'and an instance attribute hides a class attribute with the ' - 'same name\n' - 'when accessed in this way. Class attributes can be used as ' - 'defaults\n' - 'for instance attributes, but using mutable values there can ' - 'lead to\n' - 'unexpected results. *Descriptors* can be used to create ' - 'instance\n' - 'variables with different implementation details.\n' - '\n' - 'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** ' - '-\n' - ' Class Decorators\n' - '\n' - '\n' - 'Coroutines\n' - '==========\n' - '\n' - 'New in version 3.5.\n' - '\n' - '\n' - 'Coroutine function definition\n' - '-----------------------------\n' - '\n' - ' async_funcdef ::= [decorators] "async" "def" funcname "(" ' - '[parameter_list] ")" ["->" expression] ":" suite\n' - '\n' - 'Execution of Python coroutines can be suspended and resumed at ' - 'many\n' - 'points (see *coroutine*). 
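A brief sketch of driving such a coroutine with the "asyncio" event loop (the use of asyncio and the names below are illustrative assumptions; they are not implied by the definition above):

    import asyncio

    async def delayed_value(value, delay):
        await asyncio.sleep(delay)    # execution is suspended here and
        return value                  # resumed when the sleep completes

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(delayed_value(42, 0.1)))   # -> 42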
In the body of a coroutine, any ' - '"await" and\n' - '"async" identifiers become reserved keywords; "await" ' - 'expressions,\n' - '"async for" and "async with" can only be used in coroutine ' - 'bodies.\n' - '\n' - 'Functions defined with "async def" syntax are always ' - 'coroutine\n' - 'functions, even if they do not contain "await" or "async" ' - 'keywords.\n' - '\n' - 'It is a "SyntaxError" to use "yield" expressions in "async ' - 'def"\n' - 'coroutines.\n' - '\n' - 'An example of a coroutine function:\n' - '\n' - ' async def func(param1, param2):\n' - ' do_stuff()\n' - ' await some_coroutine()\n' - '\n' - '\n' - 'The "async for" statement\n' - '-------------------------\n' - '\n' - ' async_for_stmt ::= "async" for_stmt\n' - '\n' - 'An *asynchronous iterable* is able to call asynchronous code ' - 'in its\n' - '*iter* implementation, and *asynchronous iterator* can call\n' - 'asynchronous code in its *next* method.\n' - '\n' - 'The "async for" statement allows convenient iteration over\n' - 'asynchronous iterators.\n' - '\n' - 'The following code:\n' - '\n' - ' async for TARGET in ITER:\n' - ' BLOCK\n' - ' else:\n' - ' BLOCK2\n' - '\n' - 'Is semantically equivalent to:\n' - '\n' - ' iter = (ITER)\n' - ' iter = await type(iter).__aiter__(iter)\n' - ' running = True\n' - ' while running:\n' - ' try:\n' - ' TARGET = await type(iter).__anext__(iter)\n' - ' except StopAsyncIteration:\n' - ' running = False\n' - ' else:\n' - ' BLOCK\n' - ' else:\n' - ' BLOCK2\n' - '\n' - 'See also "__aiter__()" and "__anext__()" for details.\n' - '\n' - 'It is a "SyntaxError" to use "async for" statement outside of ' - 'an\n' - '"async def" function.\n' - '\n' - '\n' - 'The "async with" statement\n' - '--------------------------\n' - '\n' - ' async_with_stmt ::= "async" with_stmt\n' - '\n' - 'An *asynchronous context manager* is a *context manager* that ' - 'is able\n' - 'to suspend execution in its *enter* and *exit* methods.\n' - '\n' - 'The following code:\n' - '\n' - ' async with EXPR as VAR:\n' - ' BLOCK\n' - '\n' - 'Is semantically equivalent to:\n' - '\n' - ' mgr = (EXPR)\n' - ' aexit = type(mgr).__aexit__\n' - ' aenter = type(mgr).__aenter__(mgr)\n' - ' exc = True\n' - '\n' - ' VAR = await aenter\n' - ' try:\n' - ' BLOCK\n' - ' except:\n' - ' if not await aexit(mgr, *sys.exc_info()):\n' - ' raise\n' - ' else:\n' - ' await aexit(mgr, None, None, None)\n' - '\n' - 'See also "__aenter__()" and "__aexit__()" for details.\n' - '\n' - 'It is a "SyntaxError" to use "async with" statement outside of ' - 'an\n' - '"async def" function.\n' - '\n' - 'See also: **PEP 492** - Coroutines with async and await ' - 'syntax\n' - '\n' - '-[ Footnotes ]-\n' - '\n' - '[1] The exception is propagated to the invocation stack ' - 'unless\n' - ' there is a "finally" clause which happens to raise ' - 'another\n' - ' exception. 
That new exception causes the old one to be ' - 'lost.\n' - '\n' - '[2] Currently, control "flows off the end" except in the case ' - 'of\n' - ' an exception or the execution of a "return", "continue", ' - 'or\n' - ' "break" statement.\n' - '\n' - '[3] A string literal appearing as the first statement in the\n' - " function body is transformed into the function's " - '"__doc__"\n' - " attribute and therefore the function's *docstring*.\n" - '\n' - '[4] A string literal appearing as the first statement in the ' - 'class\n' - ' body is transformed into the namespace\'s "__doc__" item ' - 'and\n' - " therefore the class's *docstring*.\n", - 'context-managers': '\n' - 'With Statement Context Managers\n' - '*******************************\n' - '\n' - 'A *context manager* is an object that defines the ' - 'runtime context to\n' - 'be established when executing a "with" statement. The ' - 'context manager\n' - 'handles the entry into, and the exit from, the desired ' - 'runtime context\n' - 'for the execution of the block of code. Context ' - 'managers are normally\n' - 'invoked using the "with" statement (described in ' - 'section *The with\n' - 'statement*), but can also be used by directly invoking ' - 'their methods.\n' - '\n' - 'Typical uses of context managers include saving and ' - 'restoring various\n' - 'kinds of global state, locking and unlocking ' - 'resources, closing opened\n' - 'files, etc.\n' - '\n' - 'For more information on context managers, see *Context ' - 'Manager Types*.\n' - '\n' - 'object.__enter__(self)\n' - '\n' - ' Enter the runtime context related to this object. ' - 'The "with"\n' - " statement will bind this method's return value to " - 'the target(s)\n' - ' specified in the "as" clause of the statement, if ' - 'any.\n' - '\n' - 'object.__exit__(self, exc_type, exc_value, traceback)\n' - '\n' - ' Exit the runtime context related to this object. ' - 'The parameters\n' - ' describe the exception that caused the context to ' - 'be exited. If the\n' - ' context was exited without an exception, all three ' - 'arguments will\n' - ' be "None".\n' - '\n' - ' If an exception is supplied, and the method wishes ' - 'to suppress the\n' - ' exception (i.e., prevent it from being propagated), ' - 'it should\n' - ' return a true value. Otherwise, the exception will ' - 'be processed\n' - ' normally upon exit from this method.\n' - '\n' - ' Note that "__exit__()" methods should not reraise ' - 'the passed-in\n' - " exception; this is the caller's responsibility.\n" - '\n' - 'See also: **PEP 0343** - The "with" statement\n' - '\n' - ' The specification, background, and examples for ' - 'the Python "with"\n' - ' statement.\n', - 'continue': '\n' - 'The "continue" statement\n' - '************************\n' - '\n' - ' continue_stmt ::= "continue"\n' - '\n' - '"continue" may only occur syntactically nested in a "for" or ' - '"while"\n' - 'loop, but not nested in a function or class definition or ' - '"finally"\n' - 'clause within that loop. 
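For example (an illustrative sketch only):

    for n in range(6):
        if n % 2:
            continue      # skip odd numbers
        print(n)          # prints 0, 2, 4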
It continues with the next cycle of ' - 'the\n' - 'nearest enclosing loop.\n' - '\n' - 'When "continue" passes control out of a "try" statement with ' - 'a\n' - '"finally" clause, that "finally" clause is executed before ' - 'really\n' - 'starting the next loop cycle.\n', - 'conversions': '\n' - 'Arithmetic conversions\n' - '**********************\n' - '\n' - 'When a description of an arithmetic operator below uses the ' - 'phrase\n' - '"the numeric arguments are converted to a common type," ' - 'this means\n' - 'that the operator implementation for built-in types works ' - 'as follows:\n' - '\n' - '* If either argument is a complex number, the other is ' - 'converted to\n' - ' complex;\n' - '\n' - '* otherwise, if either argument is a floating point number, ' - 'the\n' - ' other is converted to floating point;\n' - '\n' - '* otherwise, both must be integers and no conversion is ' - 'necessary.\n' - '\n' - 'Some additional rules apply for certain operators (e.g., a ' - 'string as a\n' - "left argument to the '%' operator). Extensions must define " - 'their own\n' - 'conversion behavior.\n', - 'customization': '\n' - 'Basic customization\n' - '*******************\n' - '\n' - 'object.__new__(cls[, ...])\n' - '\n' - ' Called to create a new instance of class *cls*. ' - '"__new__()" is a\n' - ' static method (special-cased so you need not declare ' - 'it as such)\n' - ' that takes the class of which an instance was ' - 'requested as its\n' - ' first argument. The remaining arguments are those ' - 'passed to the\n' - ' object constructor expression (the call to the ' - 'class). The return\n' - ' value of "__new__()" should be the new object instance ' - '(usually an\n' - ' instance of *cls*).\n' - '\n' - ' Typical implementations create a new instance of the ' - 'class by\n' - ' invoking the superclass\'s "__new__()" method using\n' - ' "super(currentclass, cls).__new__(cls[, ...])" with ' - 'appropriate\n' - ' arguments and then modifying the newly-created ' - 'instance as\n' - ' necessary before returning it.\n' - '\n' - ' If "__new__()" returns an instance of *cls*, then the ' - 'new\n' - ' instance\'s "__init__()" method will be invoked like\n' - ' "__init__(self[, ...])", where *self* is the new ' - 'instance and the\n' - ' remaining arguments are the same as were passed to ' - '"__new__()".\n' - '\n' - ' If "__new__()" does not return an instance of *cls*, ' - 'then the new\n' - ' instance\'s "__init__()" method will not be invoked.\n' - '\n' - ' "__new__()" is intended mainly to allow subclasses of ' - 'immutable\n' - ' types (like int, str, or tuple) to customize instance ' - 'creation. It\n' - ' is also commonly overridden in custom metaclasses in ' - 'order to\n' - ' customize class creation.\n' - '\n' - 'object.__init__(self[, ...])\n' - '\n' - ' Called after the instance has been created (by ' - '"__new__()"), but\n' - ' before it is returned to the caller. The arguments ' - 'are those\n' - ' passed to the class constructor expression. 
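Returning briefly to "__new__()", a hedged sketch of customising creation of an immutable subclass (the class name UpperStr is an illustrative assumption):

    class UpperStr(str):
        def __new__(cls, value):
            # the immutable value must be chosen in __new__(), since it
            # cannot be changed later in __init__()
            return super().__new__(cls, value.upper())

    UpperStr("spam")      # -> 'SPAM'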
If a base ' - 'class has an\n' - ' "__init__()" method, the derived class\'s "__init__()" ' - 'method, if\n' - ' any, must explicitly call it to ensure proper ' - 'initialization of the\n' - ' base class part of the instance; for example:\n' - ' "BaseClass.__init__(self, [args...])".\n' - '\n' - ' Because "__new__()" and "__init__()" work together in ' - 'constructing\n' - ' objects ("__new__()" to create it, and "__init__()" to ' - 'customise\n' - ' it), no non-"None" value may be returned by ' - '"__init__()"; doing so\n' - ' will cause a "TypeError" to be raised at runtime.\n' - '\n' - 'object.__del__(self)\n' - '\n' - ' Called when the instance is about to be destroyed. ' - 'This is also\n' - ' called a destructor. If a base class has a ' - '"__del__()" method, the\n' - ' derived class\'s "__del__()" method, if any, must ' - 'explicitly call it\n' - ' to ensure proper deletion of the base class part of ' - 'the instance.\n' - ' Note that it is possible (though not recommended!) for ' - 'the\n' - ' "__del__()" method to postpone destruction of the ' - 'instance by\n' - ' creating a new reference to it. It may then be called ' - 'at a later\n' - ' time when this new reference is deleted. It is not ' - 'guaranteed that\n' - ' "__del__()" methods are called for objects that still ' - 'exist when\n' - ' the interpreter exits.\n' - '\n' - ' Note: "del x" doesn\'t directly call "x.__del__()" --- ' - 'the former\n' - ' decrements the reference count for "x" by one, and ' - 'the latter is\n' - ' only called when "x"\'s reference count reaches ' - 'zero. Some common\n' - ' situations that may prevent the reference count of ' - 'an object from\n' - ' going to zero include: circular references between ' - 'objects (e.g.,\n' - ' a doubly-linked list or a tree data structure with ' - 'parent and\n' - ' child pointers); a reference to the object on the ' - 'stack frame of\n' - ' a function that caught an exception (the traceback ' - 'stored in\n' - ' "sys.exc_info()[2]" keeps the stack frame alive); or ' - 'a reference\n' - ' to the object on the stack frame that raised an ' - 'unhandled\n' - ' exception in interactive mode (the traceback stored ' - 'in\n' - ' "sys.last_traceback" keeps the stack frame alive). ' - 'The first\n' - ' situation can only be remedied by explicitly ' - 'breaking the cycles;\n' - ' the second can be resolved by freeing the reference ' - 'to the\n' - ' traceback object when it is no longer useful, and ' - 'the third can\n' - ' be resolved by storing "None" in ' - '"sys.last_traceback". Circular\n' - ' references which are garbage are detected and ' - 'cleaned up when the\n' - " cyclic garbage collector is enabled (it's on by " - 'default). Refer\n' - ' to the documentation for the "gc" module for more ' - 'information\n' - ' about this topic.\n' - '\n' - ' Warning: Due to the precarious circumstances under ' - 'which\n' - ' "__del__()" methods are invoked, exceptions that ' - 'occur during\n' - ' their execution are ignored, and a warning is ' - 'printed to\n' - ' "sys.stderr" instead. Also, when "__del__()" is ' - 'invoked in\n' - ' response to a module being deleted (e.g., when ' - 'execution of the\n' - ' program is done), other globals referenced by the ' - '"__del__()"\n' - ' method may already have been deleted or in the ' - 'process of being\n' - ' torn down (e.g. the import machinery shutting ' - 'down). For this\n' - ' reason, "__del__()" methods should do the absolute ' - 'minimum needed\n' - ' to maintain external invariants. 
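A minimal sketch of such a conservative "__del__()" (the class and attribute names are illustrative assumptions):

    class TempResource:
        def __init__(self, handle):
            self._handle = handle

        def __del__(self):
            # do the bare minimum; never assume other globals still exist
            try:
                self._handle.close()
            except Exception:
                pass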
Starting with ' - 'version 1.5,\n' - ' Python guarantees that globals whose name begins ' - 'with a single\n' - ' underscore are deleted from their module before ' - 'other globals are\n' - ' deleted; if no other references to such globals ' - 'exist, this may\n' - ' help in assuring that imported modules are still ' - 'available at the\n' - ' time when the "__del__()" method is called.\n' - '\n' - 'object.__repr__(self)\n' - '\n' - ' Called by the "repr()" built-in function to compute ' - 'the "official"\n' - ' string representation of an object. If at all ' - 'possible, this\n' - ' should look like a valid Python expression that could ' - 'be used to\n' - ' recreate an object with the same value (given an ' - 'appropriate\n' - ' environment). If this is not possible, a string of ' - 'the form\n' - ' "<...some useful description...>" should be returned. ' - 'The return\n' - ' value must be a string object. If a class defines ' - '"__repr__()" but\n' - ' not "__str__()", then "__repr__()" is also used when ' - 'an "informal"\n' - ' string representation of instances of that class is ' - 'required.\n' - '\n' - ' This is typically used for debugging, so it is ' - 'important that the\n' - ' representation is information-rich and unambiguous.\n' - '\n' - 'object.__str__(self)\n' - '\n' - ' Called by "str(object)" and the built-in functions ' - '"format()" and\n' - ' "print()" to compute the "informal" or nicely ' - 'printable string\n' - ' representation of an object. The return value must be ' - 'a *string*\n' - ' object.\n' - '\n' - ' This method differs from "object.__repr__()" in that ' - 'there is no\n' - ' expectation that "__str__()" return a valid Python ' - 'expression: a\n' - ' more convenient or concise representation can be ' - 'used.\n' - '\n' - ' The default implementation defined by the built-in ' - 'type "object"\n' - ' calls "object.__repr__()".\n' - '\n' - 'object.__bytes__(self)\n' - '\n' - ' Called by "bytes()" to compute a byte-string ' - 'representation of an\n' - ' object. This should return a "bytes" object.\n' - '\n' - 'object.__format__(self, format_spec)\n' - '\n' - ' Called by the "format()" built-in function (and by ' - 'extension, the\n' - ' "str.format()" method of class "str") to produce a ' - '"formatted"\n' - ' string representation of an object. The "format_spec" ' - 'argument is a\n' - ' string that contains a description of the formatting ' - 'options\n' - ' desired. The interpretation of the "format_spec" ' - 'argument is up to\n' - ' the type implementing "__format__()", however most ' - 'classes will\n' - ' either delegate formatting to one of the built-in ' - 'types, or use a\n' - ' similar formatting option syntax.\n' - '\n' - ' See *Format Specification Mini-Language* for a ' - 'description of the\n' - ' standard formatting syntax.\n' - '\n' - ' The return value must be a string object.\n' - '\n' - ' Changed in version 3.4: The __format__ method of ' - '"object" itself\n' - ' raises a "TypeError" if passed any non-empty string.\n' - '\n' - 'object.__lt__(self, other)\n' - 'object.__le__(self, other)\n' - 'object.__eq__(self, other)\n' - 'object.__ne__(self, other)\n' - 'object.__gt__(self, other)\n' - 'object.__ge__(self, other)\n' - '\n' - ' These are the so-called "rich comparison" methods. 
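A hedged sketch of a class supplying two of these methods (the class name Version is an illustrative assumption; the comments follow the standard pairing "<" -> "__lt__()", "<=" -> "__le__()", "==" -> "__eq__()", "!=" -> "__ne__()", ">" -> "__gt__()", ">=" -> "__ge__()"):

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, major, minor):
            self.key = (major, minor)

        def __eq__(self, other):          # called for ==
            if not isinstance(other, Version):
                return NotImplemented
            return self.key == other.key

        def __lt__(self, other):          # called for <
            if not isinstance(other, Version):
                return NotImplemented
            return self.key < other.key

    Version(3, 5) < Version(3, 6)         # -> True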
' - 'The\n' - ' correspondence between operator symbols and method ' - 'names is as\n' - ' follows: "xy" calls\n' - ' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n' - '\n' - ' A rich comparison method may return the singleton ' - '"NotImplemented"\n' - ' if it does not implement the operation for a given ' - 'pair of\n' - ' arguments. By convention, "False" and "True" are ' - 'returned for a\n' - ' successful comparison. However, these methods can ' - 'return any value,\n' - ' so if the comparison operator is used in a Boolean ' - 'context (e.g.,\n' - ' in the condition of an "if" statement), Python will ' - 'call "bool()"\n' - ' on the value to determine if the result is true or ' - 'false.\n' - '\n' - ' By default, "__ne__()" delegates to "__eq__()" and ' - 'inverts the\n' - ' result unless it is "NotImplemented". There are no ' - 'other implied\n' - ' relationships among the comparison operators, for ' - 'example, the\n' - ' truth of "(x.__hash__".\n' - '\n' - ' If a class that does not override "__eq__()" wishes to ' - 'suppress\n' - ' hash support, it should include "__hash__ = None" in ' - 'the class\n' - ' definition. A class which defines its own "__hash__()" ' - 'that\n' - ' explicitly raises a "TypeError" would be incorrectly ' - 'identified as\n' - ' hashable by an "isinstance(obj, collections.Hashable)" ' - 'call.\n' - '\n' - ' Note: By default, the "__hash__()" values of str, ' - 'bytes and\n' - ' datetime objects are "salted" with an unpredictable ' - 'random value.\n' - ' Although they remain constant within an individual ' - 'Python\n' - ' process, they are not predictable between repeated ' - 'invocations of\n' - ' Python.This is intended to provide protection ' - 'against a denial-\n' - ' of-service caused by carefully-chosen inputs that ' - 'exploit the\n' - ' worst case performance of a dict insertion, O(n^2) ' - 'complexity.\n' - ' See ' - 'http://www.ocert.org/advisories/ocert-2011-003.html for\n' - ' details.Changing hash values affects the iteration ' - 'order of\n' - ' dicts, sets and other mappings. Python has never ' - 'made guarantees\n' - ' about this ordering (and it typically varies between ' - '32-bit and\n' - ' 64-bit builds).See also "PYTHONHASHSEED".\n' - '\n' - ' Changed in version 3.3: Hash randomization is enabled ' - 'by default.\n' - '\n' - 'object.__bool__(self)\n' - '\n' - ' Called to implement truth value testing and the ' - 'built-in operation\n' - ' "bool()"; should return "False" or "True". When this ' - 'method is not\n' - ' defined, "__len__()" is called, if it is defined, and ' - 'the object is\n' - ' considered true if its result is nonzero. If a class ' - 'defines\n' - ' neither "__len__()" nor "__bool__()", all its ' - 'instances are\n' - ' considered true.\n', - 'debugger': '\n' - '"pdb" --- The Python Debugger\n' - '*****************************\n' - '\n' - '**Source code:** Lib/pdb.py\n' - '\n' - '======================================================================\n' - '\n' - 'The module "pdb" defines an interactive source code debugger ' - 'for\n' - 'Python programs. It supports setting (conditional) ' - 'breakpoints and\n' - 'single stepping at the source line level, inspection of stack ' - 'frames,\n' - 'source code listing, and evaluation of arbitrary Python code ' - 'in the\n' - 'context of any stack frame. It also supports post-mortem ' - 'debugging\n' - 'and can be called under program control.\n' - '\n' - 'The debugger is extensible -- it is actually defined as the ' - 'class\n' - '"Pdb". 
This is currently undocumented but easily understood by ' - 'reading\n' - 'the source. The extension interface uses the modules "bdb" ' - 'and "cmd".\n' - '\n' - 'The debugger\'s prompt is "(Pdb)". Typical usage to run a ' - 'program under\n' - 'control of the debugger is:\n' - '\n' - ' >>> import pdb\n' - ' >>> import mymodule\n' - " >>> pdb.run('mymodule.test()')\n" - ' > (0)?()\n' - ' (Pdb) continue\n' - ' > (1)?()\n' - ' (Pdb) continue\n' - " NameError: 'spam'\n" - ' > (1)?()\n' - ' (Pdb)\n' - '\n' - 'Changed in version 3.3: Tab-completion via the "readline" ' - 'module is\n' - 'available for commands and command arguments, e.g. the current ' - 'global\n' - 'and local names are offered as arguments of the "p" command.\n' - '\n' - '"pdb.py" can also be invoked as a script to debug other ' - 'scripts. For\n' - 'example:\n' - '\n' - ' python3 -m pdb myscript.py\n' - '\n' - 'When invoked as a script, pdb will automatically enter ' - 'post-mortem\n' - 'debugging if the program being debugged exits abnormally. ' - 'After post-\n' - 'mortem debugging (or after normal exit of the program), pdb ' - 'will\n' - "restart the program. Automatic restarting preserves pdb's " - 'state (such\n' - 'as breakpoints) and in most cases is more useful than quitting ' - 'the\n' - "debugger upon program's exit.\n" - '\n' - 'New in version 3.2: "pdb.py" now accepts a "-c" option that ' - 'executes\n' - 'commands as if given in a ".pdbrc" file, see *Debugger ' - 'Commands*.\n' - '\n' - 'The typical usage to break into the debugger from a running ' - 'program is\n' - 'to insert\n' - '\n' - ' import pdb; pdb.set_trace()\n' - '\n' - 'at the location you want to break into the debugger. You can ' - 'then\n' - 'step through the code following this statement, and continue ' - 'running\n' - 'without the debugger using the "continue" command.\n' - '\n' - 'The typical usage to inspect a crashed program is:\n' - '\n' - ' >>> import pdb\n' - ' >>> import mymodule\n' - ' >>> mymodule.test()\n' - ' Traceback (most recent call last):\n' - ' File "", line 1, in ?\n' - ' File "./mymodule.py", line 4, in test\n' - ' test2()\n' - ' File "./mymodule.py", line 3, in test2\n' - ' print(spam)\n' - ' NameError: spam\n' - ' >>> pdb.pm()\n' - ' > ./mymodule.py(3)test2()\n' - ' -> print(spam)\n' - ' (Pdb)\n' - '\n' - 'The module defines the following functions; each enters the ' - 'debugger\n' - 'in a slightly different way:\n' - '\n' - 'pdb.run(statement, globals=None, locals=None)\n' - '\n' - ' Execute the *statement* (given as a string or a code ' - 'object) under\n' - ' debugger control. The debugger prompt appears before any ' - 'code is\n' - ' executed; you can set breakpoints and type "continue", or ' - 'you can\n' - ' step through the statement using "step" or "next" (all ' - 'these\n' - ' commands are explained below). The optional *globals* and ' - '*locals*\n' - ' arguments specify the environment in which the code is ' - 'executed; by\n' - ' default the dictionary of the module "__main__" is used. ' - '(See the\n' - ' explanation of the built-in "exec()" or "eval()" ' - 'functions.)\n' - '\n' - 'pdb.runeval(expression, globals=None, locals=None)\n' - '\n' - ' Evaluate the *expression* (given as a string or a code ' - 'object)\n' - ' under debugger control. When "runeval()" returns, it ' - 'returns the\n' - ' value of the expression. 
Otherwise this function is ' - 'similar to\n' - ' "run()".\n' - '\n' - 'pdb.runcall(function, *args, **kwds)\n' - '\n' - ' Call the *function* (a function or method object, not a ' - 'string)\n' - ' with the given arguments. When "runcall()" returns, it ' - 'returns\n' - ' whatever the function call returned. The debugger prompt ' - 'appears\n' - ' as soon as the function is entered.\n' - '\n' - 'pdb.set_trace()\n' - '\n' - ' Enter the debugger at the calling stack frame. This is ' - 'useful to\n' - ' hard-code a breakpoint at a given point in a program, even ' - 'if the\n' - ' code is not otherwise being debugged (e.g. when an ' - 'assertion\n' - ' fails).\n' - '\n' - 'pdb.post_mortem(traceback=None)\n' - '\n' - ' Enter post-mortem debugging of the given *traceback* ' - 'object. If no\n' - ' *traceback* is given, it uses the one of the exception that ' - 'is\n' - ' currently being handled (an exception must be being handled ' - 'if the\n' - ' default is to be used).\n' - '\n' - 'pdb.pm()\n' - '\n' - ' Enter post-mortem debugging of the traceback found in\n' - ' "sys.last_traceback".\n' - '\n' - 'The "run*" functions and "set_trace()" are aliases for ' - 'instantiating\n' - 'the "Pdb" class and calling the method of the same name. If ' - 'you want\n' - 'to access further features, you have to do this yourself:\n' - '\n' - "class class pdb.Pdb(completekey='tab', stdin=None, " - 'stdout=None, skip=None, nosigint=False)\n' - '\n' - ' "Pdb" is the debugger class.\n' - '\n' - ' The *completekey*, *stdin* and *stdout* arguments are ' - 'passed to the\n' - ' underlying "cmd.Cmd" class; see the description there.\n' - '\n' - ' The *skip* argument, if given, must be an iterable of ' - 'glob-style\n' - ' module name patterns. The debugger will not step into ' - 'frames that\n' - ' originate in a module that matches one of these patterns. ' - '[1]\n' - '\n' - ' By default, Pdb sets a handler for the SIGINT signal (which ' - 'is sent\n' - ' when the user presses Ctrl-C on the console) when you give ' - 'a\n' - ' "continue" command. This allows you to break into the ' - 'debugger\n' - ' again by pressing Ctrl-C. If you want Pdb not to touch the ' - 'SIGINT\n' - ' handler, set *nosigint* tot true.\n' - '\n' - ' Example call to enable tracing with *skip*:\n' - '\n' - " import pdb; pdb.Pdb(skip=['django.*']).set_trace()\n" - '\n' - ' New in version 3.1: The *skip* argument.\n' - '\n' - ' New in version 3.2: The *nosigint* argument. Previously, a ' - 'SIGINT\n' - ' handler was never set by Pdb.\n' - '\n' - ' run(statement, globals=None, locals=None)\n' - ' runeval(expression, globals=None, locals=None)\n' - ' runcall(function, *args, **kwds)\n' - ' set_trace()\n' - '\n' - ' See the documentation for the functions explained ' - 'above.\n' - '\n' - '\n' - 'Debugger Commands\n' - '=================\n' - '\n' - 'The commands recognized by the debugger are listed below. ' - 'Most\n' - 'commands can be abbreviated to one or two letters as ' - 'indicated; e.g.\n' - '"h(elp)" means that either "h" or "help" can be used to enter ' - 'the help\n' - 'command (but not "he" or "hel", nor "H" or "Help" or "HELP").\n' - 'Arguments to commands must be separated by whitespace (spaces ' - 'or\n' - 'tabs). Optional arguments are enclosed in square brackets ' - '("[]") in\n' - 'the command syntax; the square brackets must not be typed.\n' - 'Alternatives in the command syntax are separated by a vertical ' - 'bar\n' - '("|").\n' - '\n' - 'Entering a blank line repeats the last command entered. 
' - 'Exception: if\n' - 'the last command was a "list" command, the next 11 lines are ' - 'listed.\n' - '\n' - "Commands that the debugger doesn't recognize are assumed to be " - 'Python\n' - 'statements and are executed in the context of the program ' - 'being\n' - 'debugged. Python statements can also be prefixed with an ' - 'exclamation\n' - 'point ("!"). This is a powerful way to inspect the program ' - 'being\n' - 'debugged; it is even possible to change a variable or call a ' - 'function.\n' - 'When an exception occurs in such a statement, the exception ' - 'name is\n' - "printed but the debugger's state is not changed.\n" - '\n' - 'The debugger supports *aliases*. Aliases can have parameters ' - 'which\n' - 'allows one a certain level of adaptability to the context ' - 'under\n' - 'examination.\n' - '\n' - 'Multiple commands may be entered on a single line, separated ' - 'by ";;".\n' - '(A single ";" is not used as it is the separator for multiple ' - 'commands\n' - 'in a line that is passed to the Python parser.) No ' - 'intelligence is\n' - 'applied to separating the commands; the input is split at the ' - 'first\n' - '";;" pair, even if it is in the middle of a quoted string.\n' - '\n' - 'If a file ".pdbrc" exists in the user\'s home directory or in ' - 'the\n' - 'current directory, it is read in and executed as if it had ' - 'been typed\n' - 'at the debugger prompt. This is particularly useful for ' - 'aliases. If\n' - 'both files exist, the one in the home directory is read first ' - 'and\n' - 'aliases defined there can be overridden by the local file.\n' - '\n' - 'Changed in version 3.2: ".pdbrc" can now contain commands ' - 'that\n' - 'continue debugging, such as "continue" or "next". Previously, ' - 'these\n' - 'commands had no effect.\n' - '\n' - 'h(elp) [command]\n' - '\n' - ' Without argument, print the list of available commands. ' - 'With a\n' - ' *command* as argument, print help about that command. ' - '"help pdb"\n' - ' displays the full documentation (the docstring of the ' - '"pdb"\n' - ' module). Since the *command* argument must be an ' - 'identifier, "help\n' - ' exec" must be entered to get help on the "!" command.\n' - '\n' - 'w(here)\n' - '\n' - ' Print a stack trace, with the most recent frame at the ' - 'bottom. An\n' - ' arrow indicates the current frame, which determines the ' - 'context of\n' - ' most commands.\n' - '\n' - 'd(own) [count]\n' - '\n' - ' Move the current frame *count* (default one) levels down in ' - 'the\n' - ' stack trace (to a newer frame).\n' - '\n' - 'u(p) [count]\n' - '\n' - ' Move the current frame *count* (default one) levels up in ' - 'the stack\n' - ' trace (to an older frame).\n' - '\n' - 'b(reak) [([filename:]lineno | function) [, condition]]\n' - '\n' - ' With a *lineno* argument, set a break there in the current ' - 'file.\n' - ' With a *function* argument, set a break at the first ' - 'executable\n' - ' statement within that function. The line number may be ' - 'prefixed\n' - ' with a filename and a colon, to specify a breakpoint in ' - 'another\n' - " file (probably one that hasn't been loaded yet). The file " - 'is\n' - ' searched on "sys.path". 
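For instance (an abbreviated, illustrative session; the file name, line numbers and printed paths are hypothetical):

    (Pdb) break mymodule.py:4
    Breakpoint 1 at /home/user/mymodule.py:4
    (Pdb) break 8, count > 10
    Breakpoint 2 at /home/user/mymodule.py:8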
Note that each breakpoint is ' - 'assigned a\n' - ' number to which all the other breakpoint commands refer.\n' - '\n' - ' If a second argument is present, it is an expression which ' - 'must\n' - ' evaluate to true before the breakpoint is honored.\n' - '\n' - ' Without argument, list all breaks, including for each ' - 'breakpoint,\n' - ' the number of times that breakpoint has been hit, the ' - 'current\n' - ' ignore count, and the associated condition if any.\n' - '\n' - 'tbreak [([filename:]lineno | function) [, condition]]\n' - '\n' - ' Temporary breakpoint, which is removed automatically when ' - 'it is\n' - ' first hit. The arguments are the same as for "break".\n' - '\n' - 'cl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n' - '\n' - ' With a *filename:lineno* argument, clear all the ' - 'breakpoints at\n' - ' this line. With a space separated list of breakpoint ' - 'numbers, clear\n' - ' those breakpoints. Without argument, clear all breaks (but ' - 'first\n' - ' ask confirmation).\n' - '\n' - 'disable [bpnumber [bpnumber ...]]\n' - '\n' - ' Disable the breakpoints given as a space separated list of\n' - ' breakpoint numbers. Disabling a breakpoint means it cannot ' - 'cause\n' - ' the program to stop execution, but unlike clearing a ' - 'breakpoint, it\n' - ' remains in the list of breakpoints and can be ' - '(re-)enabled.\n' - '\n' - 'enable [bpnumber [bpnumber ...]]\n' - '\n' - ' Enable the breakpoints specified.\n' - '\n' - 'ignore bpnumber [count]\n' - '\n' - ' Set the ignore count for the given breakpoint number. If ' - 'count is\n' - ' omitted, the ignore count is set to 0. A breakpoint ' - 'becomes active\n' - ' when the ignore count is zero. When non-zero, the count ' - 'is\n' - ' decremented each time the breakpoint is reached and the ' - 'breakpoint\n' - ' is not disabled and any associated condition evaluates to ' - 'true.\n' - '\n' - 'condition bpnumber [condition]\n' - '\n' - ' Set a new *condition* for the breakpoint, an expression ' - 'which must\n' - ' evaluate to true before the breakpoint is honored. If ' - '*condition*\n' - ' is absent, any existing condition is removed; i.e., the ' - 'breakpoint\n' - ' is made unconditional.\n' - '\n' - 'commands [bpnumber]\n' - '\n' - ' Specify a list of commands for breakpoint number ' - '*bpnumber*. The\n' - ' commands themselves appear on the following lines. Type a ' - 'line\n' - ' containing just "end" to terminate the commands. An ' - 'example:\n' - '\n' - ' (Pdb) commands 1\n' - ' (com) p some_variable\n' - ' (com) end\n' - ' (Pdb)\n' - '\n' - ' To remove all commands from a breakpoint, type commands and ' - 'follow\n' - ' it immediately with "end"; that is, give no commands.\n' - '\n' - ' With no *bpnumber* argument, commands refers to the last ' - 'breakpoint\n' - ' set.\n' - '\n' - ' You can use breakpoint commands to start your program up ' - 'again.\n' - ' Simply use the continue command, or step, or any other ' - 'command that\n' - ' resumes execution.\n' - '\n' - ' Specifying any command resuming execution (currently ' - 'continue,\n' - ' step, next, return, jump, quit and their abbreviations) ' - 'terminates\n' - ' the command list (as if that command was immediately ' - 'followed by\n' - ' end). 
This is because any time you resume execution (even ' - 'with a\n' - ' simple next or step), you may encounter another ' - 'breakpoint--which\n' - ' could have its own command list, leading to ambiguities ' - 'about which\n' - ' list to execute.\n' - '\n' - " If you use the 'silent' command in the command list, the " - 'usual\n' - ' message about stopping at a breakpoint is not printed. ' - 'This may be\n' - ' desirable for breakpoints that are to print a specific ' - 'message and\n' - ' then continue. If none of the other commands print ' - 'anything, you\n' - ' see no sign that the breakpoint was reached.\n' - '\n' - 's(tep)\n' - '\n' - ' Execute the current line, stop at the first possible ' - 'occasion\n' - ' (either in a function that is called or on the next line in ' - 'the\n' - ' current function).\n' - '\n' - 'n(ext)\n' - '\n' - ' Continue execution until the next line in the current ' - 'function is\n' - ' reached or it returns. (The difference between "next" and ' - '"step"\n' - ' is that "step" stops inside a called function, while ' - '"next"\n' - ' executes called functions at (nearly) full speed, only ' - 'stopping at\n' - ' the next line in the current function.)\n' - '\n' - 'unt(il) [lineno]\n' - '\n' - ' Without argument, continue execution until the line with a ' - 'number\n' - ' greater than the current one is reached.\n' - '\n' - ' With a line number, continue execution until a line with a ' - 'number\n' - ' greater or equal to that is reached. In both cases, also ' - 'stop when\n' - ' the current frame returns.\n' - '\n' - ' Changed in version 3.2: Allow giving an explicit line ' - 'number.\n' - '\n' - 'r(eturn)\n' - '\n' - ' Continue execution until the current function returns.\n' - '\n' - 'c(ont(inue))\n' - '\n' - ' Continue execution, only stop when a breakpoint is ' - 'encountered.\n' - '\n' - 'j(ump) lineno\n' - '\n' - ' Set the next line that will be executed. Only available in ' - 'the\n' - ' bottom-most frame. This lets you jump back and execute ' - 'code again,\n' - " or jump forward to skip code that you don't want to run.\n" - '\n' - ' It should be noted that not all jumps are allowed -- for ' - 'instance\n' - ' it is not possible to jump into the middle of a "for" loop ' - 'or out\n' - ' of a "finally" clause.\n' - '\n' - 'l(ist) [first[, last]]\n' - '\n' - ' List source code for the current file. Without arguments, ' - 'list 11\n' - ' lines around the current line or continue the previous ' - 'listing.\n' - ' With "." as argument, list 11 lines around the current ' - 'line. With\n' - ' one argument, list 11 lines around at that line. With two\n' - ' arguments, list the given range; if the second argument is ' - 'less\n' - ' than the first, it is interpreted as a count.\n' - '\n' - ' The current line in the current frame is indicated by ' - '"->". 
If an\n' - ' exception is being debugged, the line where the exception ' - 'was\n' - ' originally raised or propagated is indicated by ">>", if it ' - 'differs\n' - ' from the current line.\n' - '\n' - ' New in version 3.2: The ">>" marker.\n' - '\n' - 'll | longlist\n' - '\n' - ' List all source code for the current function or frame.\n' - ' Interesting lines are marked as for "list".\n' - '\n' - ' New in version 3.2.\n' - '\n' - 'a(rgs)\n' - '\n' - ' Print the argument list of the current function.\n' - '\n' - 'p expression\n' - '\n' - ' Evaluate the *expression* in the current context and print ' - 'its\n' - ' value.\n' - '\n' - ' Note: "print()" can also be used, but is not a debugger ' - 'command\n' - ' --- this executes the Python "print()" function.\n' - '\n' - 'pp expression\n' - '\n' - ' Like the "p" command, except the value of the expression is ' - 'pretty-\n' - ' printed using the "pprint" module.\n' - '\n' - 'whatis expression\n' - '\n' - ' Print the type of the *expression*.\n' - '\n' - 'source expression\n' - '\n' - ' Try to get source code for the given object and display ' - 'it.\n' - '\n' - ' New in version 3.2.\n' - '\n' - 'display [expression]\n' - '\n' - ' Display the value of the expression if it changed, each ' - 'time\n' - ' execution stops in the current frame.\n' - '\n' - ' Without expression, list all display expressions for the ' - 'current\n' - ' frame.\n' - '\n' - ' New in version 3.2.\n' - '\n' - 'undisplay [expression]\n' - '\n' - ' Do not display the expression any more in the current ' - 'frame.\n' - ' Without expression, clear all display expressions for the ' - 'current\n' - ' frame.\n' - '\n' - ' New in version 3.2.\n' - '\n' - 'interact\n' - '\n' - ' Start an interative interpreter (using the "code" module) ' - 'whose\n' - ' global namespace contains all the (global and local) names ' - 'found in\n' - ' the current scope.\n' - '\n' - ' New in version 3.2.\n' - '\n' - 'alias [name [command]]\n' - '\n' - ' Create an alias called *name* that executes *command*. The ' - 'command\n' - ' must *not* be enclosed in quotes. Replaceable parameters ' - 'can be\n' - ' indicated by "%1", "%2", and so on, while "%*" is replaced ' - 'by all\n' - ' the parameters. If no command is given, the current alias ' - 'for\n' - ' *name* is shown. If no arguments are given, all aliases are ' - 'listed.\n' - '\n' - ' Aliases may be nested and can contain anything that can be ' - 'legally\n' - ' typed at the pdb prompt. Note that internal pdb commands ' - '*can* be\n' - ' overridden by aliases. Such a command is then hidden until ' - 'the\n' - ' alias is removed. Aliasing is recursively applied to the ' - 'first\n' - ' word of the command line; all other words in the line are ' - 'left\n' - ' alone.\n' - '\n' - ' As an example, here are two useful aliases (especially when ' - 'placed\n' - ' in the ".pdbrc" file):\n' - '\n' - ' # Print instance variables (usage "pi classInst")\n' - ' alias pi for k in %1.__dict__.keys(): ' - 'print("%1.",k,"=",%1.__dict__[k])\n' - ' # Print instance variables in self\n' - ' alias ps pi self\n' - '\n' - 'unalias name\n' - '\n' - ' Delete the specified alias.\n' - '\n' - '! statement\n' - '\n' - ' Execute the (one-line) *statement* in the context of the ' - 'current\n' - ' stack frame. The exclamation point can be omitted unless ' - 'the first\n' - ' word of the statement resembles a debugger command. 
To set ' - 'a\n' - ' global variable, you can prefix the assignment command with ' - 'a\n' - ' "global" statement on the same line, e.g.:\n' - '\n' - " (Pdb) global list_options; list_options = ['-l']\n" - ' (Pdb)\n' - '\n' - 'run [args ...]\n' - 'restart [args ...]\n' - '\n' - ' Restart the debugged Python program. If an argument is ' - 'supplied,\n' - ' it is split with "shlex" and the result is used as the new\n' - ' "sys.argv". History, breakpoints, actions and debugger ' - 'options are\n' - ' preserved. "restart" is an alias for "run".\n' - '\n' - 'q(uit)\n' - '\n' - ' Quit from the debugger. The program being executed is ' - 'aborted.\n' - '\n' - '-[ Footnotes ]-\n' - '\n' - '[1] Whether a frame is considered to originate in a certain ' - 'module\n' - ' is determined by the "__name__" in the frame globals.\n', - 'del': '\n' - 'The "del" statement\n' - '*******************\n' - '\n' - ' del_stmt ::= "del" target_list\n' - '\n' - 'Deletion is recursively defined very similar to the way assignment ' - 'is\n' - 'defined. Rather than spelling it out in full details, here are ' - 'some\n' - 'hints.\n' - '\n' - 'Deletion of a target list recursively deletes each target, from ' - 'left\n' - 'to right.\n' - '\n' - 'Deletion of a name removes the binding of that name from the local ' - 'or\n' - 'global namespace, depending on whether the name occurs in a ' - '"global"\n' - 'statement in the same code block. If the name is unbound, a\n' - '"NameError" exception will be raised.\n' - '\n' - 'Deletion of attribute references, subscriptions and slicings is ' - 'passed\n' - 'to the primary object involved; deletion of a slicing is in ' - 'general\n' - 'equivalent to assignment of an empty slice of the right type (but ' - 'even\n' - 'this is determined by the sliced object).\n' - '\n' - 'Changed in version 3.2: Previously it was illegal to delete a name\n' - 'from the local namespace if it occurs as a free variable in a ' - 'nested\n' - 'block.\n', - 'dict': '\n' - 'Dictionary displays\n' - '*******************\n' - '\n' - 'A dictionary display is a possibly empty series of key/datum ' - 'pairs\n' - 'enclosed in curly braces:\n' - '\n' - ' dict_display ::= "{" [key_datum_list | ' - 'dict_comprehension] "}"\n' - ' key_datum_list ::= key_datum ("," key_datum)* [","]\n' - ' key_datum ::= expression ":" expression\n' - ' dict_comprehension ::= expression ":" expression comp_for\n' - '\n' - 'A dictionary display yields a new dictionary object.\n' - '\n' - 'If a comma-separated sequence of key/datum pairs is given, they ' - 'are\n' - 'evaluated from left to right to define the entries of the ' - 'dictionary:\n' - 'each key object is used as a key into the dictionary to store the\n' - 'corresponding datum. This means that you can specify the same ' - 'key\n' - "multiple times in the key/datum list, and the final dictionary's " - 'value\n' - 'for that key will be the last one given.\n' - '\n' - 'A dict comprehension, in contrast to list and set comprehensions,\n' - 'needs two expressions separated with a colon followed by the ' - 'usual\n' - '"for" and "if" clauses. When the comprehension is run, the ' - 'resulting\n' - 'key and value elements are inserted in the new dictionary in the ' - 'order\n' - 'they are produced.\n' - '\n' - 'Restrictions on the types of the key values are listed earlier in\n' - 'section *The standard type hierarchy*. (To summarize, the key ' - 'type\n' - 'should be *hashable*, which excludes all mutable objects.) 
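For example (illustrative values only):

    {'a': 1, 'b': 2}                 # simple key/datum list
    {n: n ** 2 for n in range(4)}    # -> {0: 0, 1: 1, 2: 4, 3: 9}
    {'x': 1, 'x': 2}                 # -> {'x': 2}; the rightmost datum wins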
' - 'Clashes\n' - 'between duplicate keys are not detected; the last datum ' - '(textually\n' - 'rightmost in the display) stored for a given key value prevails.\n', - 'dynamic-features': '\n' - 'Interaction with dynamic features\n' - '*********************************\n' - '\n' - 'Name resolution of free variables occurs at runtime, ' - 'not at compile\n' - 'time. This means that the following code will print ' - '42:\n' - '\n' - ' i = 10\n' - ' def f():\n' - ' print(i)\n' - ' i = 42\n' - ' f()\n' - '\n' - 'There are several cases where Python statements are ' - 'illegal when used\n' - 'in conjunction with nested scopes that contain free ' - 'variables.\n' - '\n' - 'If a variable is referenced in an enclosing scope, it ' - 'is illegal to\n' - 'delete the name. An error will be reported at compile ' - 'time.\n' - '\n' - 'The "eval()" and "exec()" functions do not have access ' - 'to the full\n' - 'environment for resolving names. Names may be ' - 'resolved in the local\n' - 'and global namespaces of the caller. Free variables ' - 'are not resolved\n' - 'in the nearest enclosing namespace, but in the global ' - 'namespace. [1]\n' - 'The "exec()" and "eval()" functions have optional ' - 'arguments to\n' - 'override the global and local namespace. If only one ' - 'namespace is\n' - 'specified, it is used for both.\n', - 'else': '\n' - 'The "if" statement\n' - '******************\n' - '\n' - 'The "if" statement is used for conditional execution:\n' - '\n' - ' if_stmt ::= "if" expression ":" suite\n' - ' ( "elif" expression ":" suite )*\n' - ' ["else" ":" suite]\n' - '\n' - 'It selects exactly one of the suites by evaluating the expressions ' - 'one\n' - 'by one until one is found to be true (see section *Boolean ' - 'operations*\n' - 'for the definition of true and false); then that suite is ' - 'executed\n' - '(and no other part of the "if" statement is executed or ' - 'evaluated).\n' - 'If all expressions are false, the suite of the "else" clause, if\n' - 'present, is executed.\n', - 'exceptions': '\n' - 'Exceptions\n' - '**********\n' - '\n' - 'Exceptions are a means of breaking out of the normal flow of ' - 'control\n' - 'of a code block in order to handle errors or other ' - 'exceptional\n' - 'conditions. An exception is *raised* at the point where the ' - 'error is\n' - 'detected; it may be *handled* by the surrounding code block ' - 'or by any\n' - 'code block that directly or indirectly invoked the code ' - 'block where\n' - 'the error occurred.\n' - '\n' - 'The Python interpreter raises an exception when it detects a ' - 'run-time\n' - 'error (such as division by zero). A Python program can ' - 'also\n' - 'explicitly raise an exception with the "raise" statement. ' - 'Exception\n' - 'handlers are specified with the "try" ... "except" ' - 'statement. The\n' - '"finally" clause of such a statement can be used to specify ' - 'cleanup\n' - 'code which does not handle the exception, but is executed ' - 'whether an\n' - 'exception occurred or not in the preceding code.\n' - '\n' - 'Python uses the "termination" model of error handling: an ' - 'exception\n' - 'handler can find out what happened and continue execution at ' - 'an outer\n' - 'level, but it cannot repair the cause of the error and retry ' - 'the\n' - 'failing operation (except by re-entering the offending piece ' - 'of code\n' - 'from the top).\n' - '\n' - 'When an exception is not handled at all, the interpreter ' - 'terminates\n' - 'execution of the program, or returns to its interactive main ' - 'loop. 
In\n' - 'either case, it prints a stack backtrace, except when the ' - 'exception is\n' - '"SystemExit".\n' - '\n' - 'Exceptions are identified by class instances. The "except" ' - 'clause is\n' - 'selected depending on the class of the instance: it must ' - 'reference the\n' - 'class of the instance or a base class thereof. The instance ' - 'can be\n' - 'received by the handler and can carry additional information ' - 'about the\n' - 'exceptional condition.\n' - '\n' - 'Note: Exception messages are not part of the Python API. ' - 'Their\n' - ' contents may change from one version of Python to the next ' - 'without\n' - ' warning and should not be relied on by code which will run ' - 'under\n' - ' multiple versions of the interpreter.\n' - '\n' - 'See also the description of the "try" statement in section ' - '*The try\n' - 'statement* and "raise" statement in section *The raise ' - 'statement*.\n' - '\n' - '-[ Footnotes ]-\n' - '\n' - '[1] This limitation occurs because the code that is executed ' - 'by\n' - ' these operations is not available at the time the module ' - 'is\n' - ' compiled.\n', - 'execmodel': '\n' - 'Execution model\n' - '***************\n' - '\n' - '\n' - 'Structure of a programm\n' - '=======================\n' - '\n' - 'A Python program is constructed from code blocks. A *block* ' - 'is a piece\n' - 'of Python program text that is executed as a unit. The ' - 'following are\n' - 'blocks: a module, a function body, and a class definition. ' - 'Each\n' - 'command typed interactively is a block. A script file (a ' - 'file given\n' - 'as standard input to the interpreter or specified as a ' - 'command line\n' - 'argument to the interpreter) is a code block. A script ' - 'command (a\n' - 'command specified on the interpreter command line with the ' - "'**-c**'\n" - 'option) is a code block. The string argument passed to the ' - 'built-in\n' - 'functions "eval()" and "exec()" is a code block.\n' - '\n' - 'A code block is executed in an *execution frame*. A frame ' - 'contains\n' - 'some administrative information (used for debugging) and ' - 'determines\n' - "where and how execution continues after the code block's " - 'execution has\n' - 'completed.\n' - '\n' - '\n' - 'Naming and binding\n' - '==================\n' - '\n' - '\n' - 'Binding of names\n' - '----------------\n' - '\n' - '*Names* refer to objects. Names are introduced by name ' - 'binding\n' - 'operations.\n' - '\n' - 'The following constructs bind names: formal parameters to ' - 'functions,\n' - '"import" statements, class and function definitions (these ' - 'bind the\n' - 'class or function name in the defining block), and targets ' - 'that are\n' - 'identifiers if occurring in an assignment, "for" loop header, ' - 'or after\n' - '"as" in a "with" statement or "except" clause. The "import" ' - 'statement\n' - 'of the form "from ... import *" binds all names defined in ' - 'the\n' - 'imported module, except those beginning with an underscore. ' - 'This form\n' - 'may only be used at the module level.\n' - '\n' - 'A target occurring in a "del" statement is also considered ' - 'bound for\n' - 'this purpose (though the actual semantics are to unbind the ' - 'name).\n' - '\n' - 'Each assignment or import statement occurs within a block ' - 'defined by a\n' - 'class or function definition or at the module level (the ' - 'top-level\n' - 'code block).\n' - '\n' - 'If a name is bound in a block, it is a local variable of that ' - 'block,\n' - 'unless declared as "nonlocal" or "global". 
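A small sketch of the effect of such a declaration (the names count, bump and broken are illustrative assumptions):

    count = 0

    def bump():
        global count      # "count" refers to the module-level binding
        count = count + 1

    def broken():
        print(count)      # raises UnboundLocalError when called: the
        count = 1         # assignment below makes "count" local to broken()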
If a name is ' - 'bound at the\n' - 'module level, it is a global variable. (The variables of the ' - 'module\n' - 'code block are local and global.) If a variable is used in a ' - 'code\n' - 'block but not defined there, it is a *free variable*.\n' - '\n' - 'Each occurrence of a name in the program text refers to the ' - '*binding*\n' - 'of that name established by the following name resolution ' - 'rules.\n' - '\n' - '\n' - 'Resolution of names\n' - '-------------------\n' - '\n' - 'A *scope* defines the visibility of a name within a block. ' - 'If a local\n' - 'variable is defined in a block, its scope includes that ' - 'block. If the\n' - 'definition occurs in a function block, the scope extends to ' - 'any blocks\n' - 'contained within the defining one, unless a contained block ' - 'introduces\n' - 'a different binding for the name.\n' - '\n' - 'When a name is used in a code block, it is resolved using the ' - 'nearest\n' - 'enclosing scope. The set of all such scopes visible to a ' - 'code block\n' - "is called the block's *environment*.\n" - '\n' - 'When a name is not found at all, a "NameError" exception is ' - 'raised. If\n' - 'the current scope is a function scope, and the name refers to ' - 'a local\n' - 'variable that has not yet been bound to a value at the point ' - 'where the\n' - 'name is used, an "UnboundLocalError" exception is raised.\n' - '"UnboundLocalError" is a subclass of "NameError".\n' - '\n' - 'If a name binding operation occurs anywhere within a code ' - 'block, all\n' - 'uses of the name within the block are treated as references ' - 'to the\n' - 'current block. This can lead to errors when a name is used ' - 'within a\n' - 'block before it is bound. This rule is subtle. Python ' - 'lacks\n' - 'declarations and allows name binding operations to occur ' - 'anywhere\n' - 'within a code block. The local variables of a code block can ' - 'be\n' - 'determined by scanning the entire text of the block for name ' - 'binding\n' - 'operations.\n' - '\n' - 'If the "global" statement occurs within a block, all uses of ' - 'the name\n' - 'specified in the statement refer to the binding of that name ' - 'in the\n' - 'top-level namespace. Names are resolved in the top-level ' - 'namespace by\n' - 'searching the global namespace, i.e. the namespace of the ' - 'module\n' - 'containing the code block, and the builtins namespace, the ' - 'namespace\n' - 'of the module "builtins". The global namespace is searched ' - 'first. If\n' - 'the name is not found there, the builtins namespace is ' - 'searched. The\n' - '"global" statement must precede all uses of the name.\n' - '\n' - 'The "global" statement has the same scope as a name binding ' - 'operation\n' - 'in the same block. If the nearest enclosing scope for a free ' - 'variable\n' - 'contains a global statement, the free variable is treated as ' - 'a global.\n' - '\n' - 'The "nonlocal" statement causes corresponding names to refer ' - 'to\n' - 'previously bound variables in the nearest enclosing function ' - 'scope.\n' - '"SyntaxError" is raised at compile time if the given name ' - 'does not\n' - 'exist in any enclosing function scope.\n' - '\n' - 'The namespace for a module is automatically created the first ' - 'time a\n' - 'module is imported. The main module for a script is always ' - 'called\n' - '"__main__".\n' - '\n' - 'Class definition blocks and arguments to "exec()" and ' - '"eval()" are\n' - 'special in the context of name resolution. 
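The following sketch (names invented for illustration) shows these resolution rules in action: "nonlocal" rebinds a name in the nearest enclosing function scope, while using a local name before it is bound raises "UnboundLocalError".

   def counter():
       count = 0
       def bump():
           nonlocal count       # refers to the binding in the enclosing function scope
           count += 1
           return count
       return bump

   bump = counter()
   print(bump(), bump())        # 1 2

   def broken():
       print(n)                 # "n" is bound later in this block, so calling broken()
       n = 1                    # raises UnboundLocalError instead of finding a global "n"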
A class definition ' - 'is an\n' - 'executable statement that may use and define names. These ' - 'references\n' - 'follow the normal rules for name resolution with an exception ' - 'that\n' - 'unbound local variables are looked up in the global ' - 'namespace. The\n' - 'namespace of the class definition becomes the attribute ' - 'dictionary of\n' - 'the class. The scope of names defined in a class block is ' - 'limited to\n' - 'the class block; it does not extend to the code blocks of ' - 'methods --\n' - 'this includes comprehensions and generator expressions since ' - 'they are\n' - 'implemented using a function scope. This means that the ' - 'following\n' - 'will fail:\n' - '\n' - ' class A:\n' - ' a = 42\n' - ' b = list(a + i for i in range(10))\n' - '\n' - '\n' - 'Builtins and restricted execution\n' - '---------------------------------\n' - '\n' - 'The builtins namespace associated with the execution of a ' - 'code block\n' - 'is actually found by looking up the name "__builtins__" in ' - 'its global\n' - 'namespace; this should be a dictionary or a module (in the ' - 'latter case\n' - "the module's dictionary is used). By default, when in the " - '"__main__"\n' - 'module, "__builtins__" is the built-in module "builtins"; ' - 'when in any\n' - 'other module, "__builtins__" is an alias for the dictionary ' - 'of the\n' - '"builtins" module itself. "__builtins__" can be set to a ' - 'user-created\n' - 'dictionary to create a weak form of restricted execution.\n' - '\n' - '**CPython implementation detail:** Users should not touch\n' - '"__builtins__"; it is strictly an implementation detail. ' - 'Users\n' - 'wanting to override values in the builtins namespace should ' - '"import"\n' - 'the "builtins" module and modify its attributes ' - 'appropriately.\n' - '\n' - '\n' - 'Interaction with dynamic features\n' - '---------------------------------\n' - '\n' - 'Name resolution of free variables occurs at runtime, not at ' - 'compile\n' - 'time. This means that the following code will print 42:\n' - '\n' - ' i = 10\n' - ' def f():\n' - ' print(i)\n' - ' i = 42\n' - ' f()\n' - '\n' - 'There are several cases where Python statements are illegal ' - 'when used\n' - 'in conjunction with nested scopes that contain free ' - 'variables.\n' - '\n' - 'If a variable is referenced in an enclosing scope, it is ' - 'illegal to\n' - 'delete the name. An error will be reported at compile time.\n' - '\n' - 'The "eval()" and "exec()" functions do not have access to the ' - 'full\n' - 'environment for resolving names. Names may be resolved in ' - 'the local\n' - 'and global namespaces of the caller. Free variables are not ' - 'resolved\n' - 'in the nearest enclosing namespace, but in the global ' - 'namespace. [1]\n' - 'The "exec()" and "eval()" functions have optional arguments ' - 'to\n' - 'override the global and local namespace. If only one ' - 'namespace is\n' - 'specified, it is used for both.\n' - '\n' - '\n' - 'Exceptions\n' - '==========\n' - '\n' - 'Exceptions are a means of breaking out of the normal flow of ' - 'control\n' - 'of a code block in order to handle errors or other ' - 'exceptional\n' - 'conditions. An exception is *raised* at the point where the ' - 'error is\n' - 'detected; it may be *handled* by the surrounding code block ' - 'or by any\n' - 'code block that directly or indirectly invoked the code block ' - 'where\n' - 'the error occurred.\n' - '\n' - 'The Python interpreter raises an exception when it detects a ' - 'run-time\n' - 'error (such as division by zero). 
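A small illustration of this handling model (function and values invented for the example): the "except" clause is selected by the class of the raised instance, and the "finally" clause runs whether or not an exception occurred.

   def divide(a, b):
       try:
           return a / b
       except ZeroDivisionError as exc:     # chosen because the instance is of this class
           print('handled:', exc)
           return float('inf')
       finally:
           print('cleanup runs either way')

   print(divide(1, 2))   # prints the cleanup message, then 0.5
   print(divide(1, 0))   # prints "handled: division by zero", the cleanup message, then inf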
A Python program can also\n' - 'explicitly raise an exception with the "raise" statement. ' - 'Exception\n' - 'handlers are specified with the "try" ... "except" ' - 'statement. The\n' - '"finally" clause of such a statement can be used to specify ' - 'cleanup\n' - 'code which does not handle the exception, but is executed ' - 'whether an\n' - 'exception occurred or not in the preceding code.\n' - '\n' - 'Python uses the "termination" model of error handling: an ' - 'exception\n' - 'handler can find out what happened and continue execution at ' - 'an outer\n' - 'level, but it cannot repair the cause of the error and retry ' - 'the\n' - 'failing operation (except by re-entering the offending piece ' - 'of code\n' - 'from the top).\n' - '\n' - 'When an exception is not handled at all, the interpreter ' - 'terminates\n' - 'execution of the program, or returns to its interactive main ' - 'loop. In\n' - 'either case, it prints a stack backtrace, except when the ' - 'exception is\n' - '"SystemExit".\n' - '\n' - 'Exceptions are identified by class instances. The "except" ' - 'clause is\n' - 'selected depending on the class of the instance: it must ' - 'reference the\n' - 'class of the instance or a base class thereof. The instance ' - 'can be\n' - 'received by the handler and can carry additional information ' - 'about the\n' - 'exceptional condition.\n' - '\n' - 'Note: Exception messages are not part of the Python API. ' - 'Their\n' - ' contents may change from one version of Python to the next ' - 'without\n' - ' warning and should not be relied on by code which will run ' - 'under\n' - ' multiple versions of the interpreter.\n' - '\n' - 'See also the description of the "try" statement in section ' - '*The try\n' - 'statement* and "raise" statement in section *The raise ' - 'statement*.\n' - '\n' - '-[ Footnotes ]-\n' - '\n' - '[1] This limitation occurs because the code that is executed ' - 'by\n' - ' these operations is not available at the time the module ' - 'is\n' - ' compiled.\n', - 'exprlists': '\n' - 'Expression lists\n' - '****************\n' - '\n' - ' expression_list ::= expression ( "," expression )* [","]\n' - '\n' - 'An expression list containing at least one comma yields a ' - 'tuple. The\n' - 'length of the tuple is the number of expressions in the ' - 'list. The\n' - 'expressions are evaluated from left to right.\n' - '\n' - 'The trailing comma is required only to create a single tuple ' - '(a.k.a. a\n' - '*singleton*); it is optional in all other cases. A single ' - 'expression\n' - "without a trailing comma doesn't create a tuple, but rather " - 'yields the\n' - 'value of that expression. (To create an empty tuple, use an ' - 'empty pair\n' - 'of parentheses: "()".)\n', - 'floating': '\n' - 'Floating point literals\n' - '***********************\n' - '\n' - 'Floating point literals are described by the following ' - 'lexical\n' - 'definitions:\n' - '\n' - ' floatnumber ::= pointfloat | exponentfloat\n' - ' pointfloat ::= [intpart] fraction | intpart "."\n' - ' exponentfloat ::= (intpart | pointfloat) exponent\n' - ' intpart ::= digit+\n' - ' fraction ::= "." digit+\n' - ' exponent ::= ("e" | "E") ["+" | "-"] digit+\n' - '\n' - 'Note that the integer and exponent parts are always ' - 'interpreted using\n' - 'radix 10. For example, "077e010" is legal, and denotes the ' - 'same number\n' - 'as "77e10". The allowed range of floating point literals is\n' - 'implementation-dependent. Some examples of floating point ' - 'literals:\n' - '\n' - ' 3.14 10. 
.001 1e100 3.14e-10 0e0\n' - '\n' - 'Note that numeric literals do not include a sign; a phrase ' - 'like "-1"\n' - 'is actually an expression composed of the unary operator "-" ' - 'and the\n' - 'literal "1".\n', - 'for': '\n' - 'The "for" statement\n' - '*******************\n' - '\n' - 'The "for" statement is used to iterate over the elements of a ' - 'sequence\n' - '(such as a string, tuple or list) or other iterable object:\n' - '\n' - ' for_stmt ::= "for" target_list "in" expression_list ":" suite\n' - ' ["else" ":" suite]\n' - '\n' - 'The expression list is evaluated once; it should yield an iterable\n' - 'object. An iterator is created for the result of the\n' - '"expression_list". The suite is then executed once for each item\n' - 'provided by the iterator, in the order returned by the iterator. ' - 'Each\n' - 'item in turn is assigned to the target list using the standard ' - 'rules\n' - 'for assignments (see *Assignment statements*), and then the suite ' - 'is\n' - 'executed. When the items are exhausted (which is immediately when ' - 'the\n' - 'sequence is empty or an iterator raises a "StopIteration" ' - 'exception),\n' - 'the suite in the "else" clause, if present, is executed, and the ' - 'loop\n' - 'terminates.\n' - '\n' - 'A "break" statement executed in the first suite terminates the ' - 'loop\n' - 'without executing the "else" clause\'s suite. A "continue" ' - 'statement\n' - 'executed in the first suite skips the rest of the suite and ' - 'continues\n' - 'with the next item, or with the "else" clause if there is no next\n' - 'item.\n' - '\n' - 'The for-loop makes assignments to the variables(s) in the target ' - 'list.\n' - 'This overwrites all previous assignments to those variables ' - 'including\n' - 'those made in the suite of the for-loop:\n' - '\n' - ' for i in range(10):\n' - ' print(i)\n' - ' i = 5 # this will not affect the for-loop\n' - ' # because i will be overwritten with the ' - 'next\n' - ' # index in the range\n' - '\n' - 'Names in the target list are not deleted when the loop is ' - 'finished,\n' - 'but if the sequence is empty, they will not have been assigned to ' - 'at\n' - 'all by the loop. Hint: the built-in function "range()" returns an\n' - "iterator of integers suitable to emulate the effect of Pascal's " - '"for i\n' - ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, ' - '2]".\n' - '\n' - 'Note: There is a subtlety when the sequence is being modified by ' - 'the\n' - ' loop (this can only occur for mutable sequences, i.e. lists). ' - 'An\n' - ' internal counter is used to keep track of which item is used ' - 'next,\n' - ' and this is incremented on each iteration. When this counter ' - 'has\n' - ' reached the length of the sequence the loop terminates. This ' - 'means\n' - ' that if the suite deletes the current (or a previous) item from ' - 'the\n' - ' sequence, the next item will be skipped (since it gets the index ' - 'of\n' - ' the current item which has already been treated). 
Likewise, if ' - 'the\n' - ' suite inserts an item in the sequence before the current item, ' - 'the\n' - ' current item will be treated again the next time through the ' - 'loop.\n' - ' This can lead to nasty bugs that can be avoided by making a\n' - ' temporary copy using a slice of the whole sequence, e.g.,\n' - '\n' - ' for x in a[:]:\n' - ' if x < 0: a.remove(x)\n', - 'formatstrings': '\n' - 'Format String Syntax\n' - '********************\n' - '\n' - 'The "str.format()" method and the "Formatter" class share ' - 'the same\n' - 'syntax for format strings (although in the case of ' - '"Formatter",\n' - 'subclasses can define their own format string syntax).\n' - '\n' - 'Format strings contain "replacement fields" surrounded by ' - 'curly braces\n' - '"{}". Anything that is not contained in braces is ' - 'considered literal\n' - 'text, which is copied unchanged to the output. If you ' - 'need to include\n' - 'a brace character in the literal text, it can be escaped ' - 'by doubling:\n' - '"{{" and "}}".\n' - '\n' - 'The grammar for a replacement field is as follows:\n' - '\n' - ' replacement_field ::= "{" [field_name] ["!" ' - 'conversion] [":" format_spec] "}"\n' - ' field_name ::= arg_name ("." attribute_name ' - '| "[" element_index "]")*\n' - ' arg_name ::= [identifier | integer]\n' - ' attribute_name ::= identifier\n' - ' element_index ::= integer | index_string\n' - ' index_string ::= +\n' - ' conversion ::= "r" | "s" | "a"\n' - ' format_spec ::= \n' - '\n' - 'In less formal terms, the replacement field can start ' - 'with a\n' - '*field_name* that specifies the object whose value is to ' - 'be formatted\n' - 'and inserted into the output instead of the replacement ' - 'field. The\n' - '*field_name* is optionally followed by a *conversion* ' - 'field, which is\n' - 'preceded by an exclamation point "\'!\'", and a ' - '*format_spec*, which is\n' - 'preceded by a colon "\':\'". These specify a non-default ' - 'format for the\n' - 'replacement value.\n' - '\n' - 'See also the *Format Specification Mini-Language* ' - 'section.\n' - '\n' - 'The *field_name* itself begins with an *arg_name* that is ' - 'either a\n' - "number or a keyword. If it's a number, it refers to a " - 'positional\n' - "argument, and if it's a keyword, it refers to a named " - 'keyword\n' - 'argument. If the numerical arg_names in a format string ' - 'are 0, 1, 2,\n' - '... in sequence, they can all be omitted (not just some) ' - 'and the\n' - 'numbers 0, 1, 2, ... will be automatically inserted in ' - 'that order.\n' - 'Because *arg_name* is not quote-delimited, it is not ' - 'possible to\n' - 'specify arbitrary dictionary keys (e.g., the strings ' - '"\'10\'" or\n' - '"\':-]\'") within a format string. The *arg_name* can be ' - 'followed by any\n' - 'number of index or attribute expressions. 
An expression ' - 'of the form\n' - '"\'.name\'" selects the named attribute using ' - '"getattr()", while an\n' - 'expression of the form "\'[index]\'" does an index lookup ' - 'using\n' - '"__getitem__()".\n' - '\n' - 'Changed in version 3.1: The positional argument ' - 'specifiers can be\n' - 'omitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n' - '\n' - 'Some simple format string examples:\n' - '\n' - ' "First, thou shalt count to {0}" # References first ' - 'positional argument\n' - ' "Bring me a {}" # Implicitly ' - 'references the first positional argument\n' - ' "From {} to {}" # Same as "From {0} ' - 'to {1}"\n' - ' "My quest is {name}" # References keyword ' - "argument 'name'\n" - ' "Weight in tons {0.weight}" # \'weight\' ' - 'attribute of first positional arg\n' - ' "Units destroyed: {players[0]}" # First element of ' - "keyword argument 'players'.\n" - '\n' - 'The *conversion* field causes a type coercion before ' - 'formatting.\n' - 'Normally, the job of formatting a value is done by the ' - '"__format__()"\n' - 'method of the value itself. However, in some cases it is ' - 'desirable to\n' - 'force a type to be formatted as a string, overriding its ' - 'own\n' - 'definition of formatting. By converting the value to a ' - 'string before\n' - 'calling "__format__()", the normal formatting logic is ' - 'bypassed.\n' - '\n' - 'Three conversion flags are currently supported: "\'!s\'" ' - 'which calls\n' - '"str()" on the value, "\'!r\'" which calls "repr()" and ' - '"\'!a\'" which\n' - 'calls "ascii()".\n' - '\n' - 'Some examples:\n' - '\n' - ' "Harold\'s a clever {0!s}" # Calls str() on the ' - 'argument first\n' - ' "Bring out the holy {name!r}" # Calls repr() on the ' - 'argument first\n' - ' "More {!a}" # Calls ascii() on ' - 'the argument first\n' - '\n' - 'The *format_spec* field contains a specification of how ' - 'the value\n' - 'should be presented, including such details as field ' - 'width, alignment,\n' - 'padding, decimal precision and so on. Each value type ' - 'can define its\n' - 'own "formatting mini-language" or interpretation of the ' - '*format_spec*.\n' - '\n' - 'Most built-in types support a common formatting ' - 'mini-language, which\n' - 'is described in the next section.\n' - '\n' - 'A *format_spec* field can also include nested replacement ' - 'fields\n' - 'within it. These nested replacement fields can contain ' - 'only a field\n' - 'name; conversion flags and format specifications are not ' - 'allowed. The\n' - 'replacement fields within the format_spec are substituted ' - 'before the\n' - '*format_spec* string is interpreted. This allows the ' - 'formatting of a\n' - 'value to be dynamically specified.\n' - '\n' - 'See the *Format examples* section for some examples.\n' - '\n' - '\n' - 'Format Specification Mini-Language\n' - '==================================\n' - '\n' - '"Format specifications" are used within replacement ' - 'fields contained\n' - 'within a format string to define how individual values ' - 'are presented\n' - '(see *Format String Syntax*). They can also be passed ' - 'directly to the\n' - 'built-in "format()" function. 
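For instance (values chosen arbitrarily), a standard format specifier can be handed straight to "format()", giving the same result as the corresponding "str.format()" replacement field; the individual options are described below.

   >>> format(255, '#010x')            # same result as '{:#010x}'.format(255)
   '0x000000ff'
   >>> format(1234.5678, '>12,.2f')
   '    1,234.57'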
Each formattable type may ' - 'define how\n' - 'the format specification is to be interpreted.\n' - '\n' - 'Most built-in types implement the following options for ' - 'format\n' - 'specifications, although some of the formatting options ' - 'are only\n' - 'supported by the numeric types.\n' - '\n' - 'A general convention is that an empty format string ' - '("""") produces\n' - 'the same result as if you had called "str()" on the ' - 'value. A non-empty\n' - 'format string typically modifies the result.\n' - '\n' - 'The general form of a *standard format specifier* is:\n' - '\n' - ' format_spec ::= ' - '[[fill]align][sign][#][0][width][,][.precision][type]\n' - ' fill ::= \n' - ' align ::= "<" | ">" | "=" | "^"\n' - ' sign ::= "+" | "-" | " "\n' - ' width ::= integer\n' - ' precision ::= integer\n' - ' type ::= "b" | "c" | "d" | "e" | "E" | "f" | ' - '"F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n' - '\n' - 'If a valid *align* value is specified, it can be preceded ' - 'by a *fill*\n' - 'character that can be any character and defaults to a ' - 'space if\n' - 'omitted. Note that it is not possible to use "{" and "}" ' - 'as *fill*\n' - 'char while using the "str.format()" method; this ' - 'limitation however\n' - 'doesn\'t affect the "format()" function.\n' - '\n' - 'The meaning of the various alignment options is as ' - 'follows:\n' - '\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | Option | ' - 'Meaning ' - '|\n' - ' ' - '+===========+============================================================+\n' - ' | "\'<\'" | Forces the field to be left-aligned ' - 'within the available |\n' - ' | | space (this is the default for most ' - 'objects). |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'>\'" | Forces the field to be right-aligned ' - 'within the available |\n' - ' | | space (this is the default for ' - 'numbers). |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'=\'" | Forces the padding to be placed after ' - 'the sign (if any) |\n' - ' | | but before the digits. This is used for ' - 'printing fields |\n' - " | | in the form '+000000120'. This alignment " - 'option is only |\n' - ' | | valid for numeric ' - 'types. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'^\'" | Forces the field to be centered within ' - 'the available |\n' - ' | | ' - 'space. ' - '|\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - '\n' - 'Note that unless a minimum field width is defined, the ' - 'field width\n' - 'will always be the same size as the data to fill it, so ' - 'that the\n' - 'alignment option has no meaning in this case.\n' - '\n' - 'The *sign* option is only valid for number types, and can ' - 'be one of\n' - 'the following:\n' - '\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | Option | ' - 'Meaning ' - '|\n' - ' ' - '+===========+============================================================+\n' - ' | "\'+\'" | indicates that a sign should be used ' - 'for both positive as |\n' - ' | | well as negative ' - 'numbers. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'-\'" | indicates that a sign should be used ' - 'only for negative |\n' - ' | | numbers (this is the default ' - 'behavior). 
|\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | space | indicates that a leading space should be ' - 'used on positive |\n' - ' | | numbers, and a minus sign on negative ' - 'numbers. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - '\n' - 'The "\'#\'" option causes the "alternate form" to be used ' - 'for the\n' - 'conversion. The alternate form is defined differently ' - 'for different\n' - 'types. This option is only valid for integer, float, ' - 'complex and\n' - 'Decimal types. For integers, when binary, octal, or ' - 'hexadecimal output\n' - 'is used, this option adds the prefix respective "\'0b\'", ' - '"\'0o\'", or\n' - '"\'0x\'" to the output value. For floats, complex and ' - 'Decimal the\n' - 'alternate form causes the result of the conversion to ' - 'always contain a\n' - 'decimal-point character, even if no digits follow it. ' - 'Normally, a\n' - 'decimal-point character appears in the result of these ' - 'conversions\n' - 'only if a digit follows it. In addition, for "\'g\'" and ' - '"\'G\'"\n' - 'conversions, trailing zeros are not removed from the ' - 'result.\n' - '\n' - 'The "\',\'" option signals the use of a comma for a ' - 'thousands separator.\n' - 'For a locale aware separator, use the "\'n\'" integer ' - 'presentation type\n' - 'instead.\n' - '\n' - 'Changed in version 3.1: Added the "\',\'" option (see ' - 'also **PEP 378**).\n' - '\n' - '*width* is a decimal integer defining the minimum field ' - 'width. If not\n' - 'specified, then the field width will be determined by the ' - 'content.\n' - '\n' - 'Preceding the *width* field by a zero ("\'0\'") character ' - 'enables sign-\n' - 'aware zero-padding for numeric types. This is equivalent ' - 'to a *fill*\n' - 'character of "\'0\'" with an *alignment* type of ' - '"\'=\'".\n' - '\n' - 'The *precision* is a decimal number indicating how many ' - 'digits should\n' - 'be displayed after the decimal point for a floating point ' - 'value\n' - 'formatted with "\'f\'" and "\'F\'", or before and after ' - 'the decimal point\n' - 'for a floating point value formatted with "\'g\'" or ' - '"\'G\'". For non-\n' - 'number types the field indicates the maximum field size - ' - 'in other\n' - 'words, how many characters will be used from the field ' - 'content. The\n' - '*precision* is not allowed for integer values.\n' - '\n' - 'Finally, the *type* determines how the data should be ' - 'presented.\n' - '\n' - 'The available string presentation types are:\n' - '\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | Type | ' - 'Meaning ' - '|\n' - ' ' - '+===========+============================================================+\n' - ' | "\'s\'" | String format. This is the default ' - 'type for strings and |\n' - ' | | may be ' - 'omitted. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | None | The same as ' - '"\'s\'". |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - '\n' - 'The available integer presentation types are:\n' - '\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | Type | ' - 'Meaning ' - '|\n' - ' ' - '+===========+============================================================+\n' - ' | "\'b\'" | Binary format. Outputs the number in ' - 'base 2. 
|\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'c\'" | Character. Converts the integer to the ' - 'corresponding |\n' - ' | | unicode character before ' - 'printing. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'d\'" | Decimal Integer. Outputs the number in ' - 'base 10. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'o\'" | Octal format. Outputs the number in ' - 'base 8. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'x\'" | Hex format. Outputs the number in base ' - '16, using lower- |\n' - ' | | case letters for the digits above ' - '9. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'X\'" | Hex format. Outputs the number in base ' - '16, using upper- |\n' - ' | | case letters for the digits above ' - '9. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'n\'" | Number. This is the same as "\'d\'", ' - 'except that it uses the |\n' - ' | | current locale setting to insert the ' - 'appropriate number |\n' - ' | | separator ' - 'characters. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | None | The same as ' - '"\'d\'". |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - '\n' - 'In addition to the above presentation types, integers can ' - 'be formatted\n' - 'with the floating point presentation types listed below ' - '(except "\'n\'"\n' - 'and None). When doing so, "float()" is used to convert ' - 'the integer to\n' - 'a floating point number before formatting.\n' - '\n' - 'The available presentation types for floating point and ' - 'decimal values\n' - 'are:\n' - '\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | Type | ' - 'Meaning ' - '|\n' - ' ' - '+===========+============================================================+\n' - ' | "\'e\'" | Exponent notation. Prints the number ' - 'in scientific |\n' - " | | notation using the letter 'e' to " - 'indicate the exponent. |\n' - ' | | The default precision is ' - '"6". |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'E\'" | Exponent notation. Same as "\'e\'" ' - 'except it uses an upper |\n' - " | | case 'E' as the separator " - 'character. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'f\'" | Fixed point. Displays the number as a ' - 'fixed-point number. |\n' - ' | | The default precision is ' - '"6". |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'F\'" | Fixed point. Same as "\'f\'", but ' - 'converts "nan" to "NAN" |\n' - ' | | and "inf" to ' - '"INF". |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'g\'" | General format. For a given precision ' - '"p >= 1", this |\n' - ' | | rounds the number to "p" significant ' - 'digits and then |\n' - ' | | formats the result in either fixed-point ' - 'format or in |\n' - ' | | scientific notation, depending on its ' - 'magnitude. The |\n' - ' | | precise rules are as follows: suppose ' - 'that the result |\n' - ' | | formatted with presentation type "\'e\'" ' - 'and precision "p-1" |\n' - ' | | would have exponent "exp". 
Then if "-4 ' - '<= exp < p", the |\n' - ' | | number is formatted with presentation ' - 'type "\'f\'" and |\n' - ' | | precision "p-1-exp". Otherwise, the ' - 'number is formatted |\n' - ' | | with presentation type "\'e\'" and ' - 'precision "p-1". In both |\n' - ' | | cases insignificant trailing zeros are ' - 'removed from the |\n' - ' | | significand, and the decimal point is ' - 'also removed if |\n' - ' | | there are no remaining digits following ' - 'it. Positive and |\n' - ' | | negative infinity, positive and negative ' - 'zero, and nans, |\n' - ' | | are formatted as "inf", "-inf", "0", ' - '"-0" and "nan" |\n' - ' | | respectively, regardless of the ' - 'precision. A precision of |\n' - ' | | "0" is treated as equivalent to a ' - 'precision of "1". The |\n' - ' | | default precision is ' - '"6". |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'G\'" | General format. Same as "\'g\'" except ' - 'switches to "\'E\'" if |\n' - ' | | the number gets too large. The ' - 'representations of infinity |\n' - ' | | and NaN are uppercased, ' - 'too. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'n\'" | Number. This is the same as "\'g\'", ' - 'except that it uses the |\n' - ' | | current locale setting to insert the ' - 'appropriate number |\n' - ' | | separator ' - 'characters. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | "\'%\'" | Percentage. Multiplies the number by ' - '100 and displays in |\n' - ' | | fixed ("\'f\'") format, followed by a ' - 'percent sign. |\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - ' | None | Similar to "\'g\'", except that ' - 'fixed-point notation, when |\n' - ' | | used, has at least one digit past the ' - 'decimal point. The |\n' - ' | | default precision is as high as needed ' - 'to represent the |\n' - ' | | particular value. The overall effect is ' - 'to match the |\n' - ' | | output of "str()" as altered by the ' - 'other format |\n' - ' | | ' - 'modifiers. ' - '|\n' - ' ' - '+-----------+------------------------------------------------------------+\n' - '\n' - '\n' - 'Format examples\n' - '===============\n' - '\n' - 'This section contains examples of the new format syntax ' - 'and comparison\n' - 'with the old "%"-formatting.\n' - '\n' - 'In most of the cases the syntax is similar to the old ' - '"%"-formatting,\n' - 'with the addition of the "{}" and with ":" used instead ' - 'of "%". 
For\n' - 'example, "\'%03.2f\'" can be translated to ' - '"\'{:03.2f}\'".\n' - '\n' - 'The new format syntax also supports new and different ' - 'options, shown\n' - 'in the follow examples.\n' - '\n' - 'Accessing arguments by position:\n' - '\n' - " >>> '{0}, {1}, {2}'.format('a', 'b', 'c')\n" - " 'a, b, c'\n" - " >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only\n" - " 'a, b, c'\n" - " >>> '{2}, {1}, {0}'.format('a', 'b', 'c')\n" - " 'c, b, a'\n" - " >>> '{2}, {1}, {0}'.format(*'abc') # unpacking " - 'argument sequence\n' - " 'c, b, a'\n" - " >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' " - 'indices can be repeated\n' - " 'abracadabra'\n" - '\n' - 'Accessing arguments by name:\n' - '\n' - " >>> 'Coordinates: {latitude}, " - "{longitude}'.format(latitude='37.24N', " - "longitude='-115.81W')\n" - " 'Coordinates: 37.24N, -115.81W'\n" - " >>> coord = {'latitude': '37.24N', 'longitude': " - "'-115.81W'}\n" - " >>> 'Coordinates: {latitude}, " - "{longitude}'.format(**coord)\n" - " 'Coordinates: 37.24N, -115.81W'\n" - '\n' - "Accessing arguments' attributes:\n" - '\n' - ' >>> c = 3-5j\n' - " >>> ('The complex number {0} is formed from the real " - "part {0.real} '\n" - " ... 'and the imaginary part {0.imag}.').format(c)\n" - " 'The complex number (3-5j) is formed from the real " - "part 3.0 and the imaginary part -5.0.'\n" - ' >>> class Point:\n' - ' ... def __init__(self, x, y):\n' - ' ... self.x, self.y = x, y\n' - ' ... def __str__(self):\n' - " ... return 'Point({self.x}, " - "{self.y})'.format(self=self)\n" - ' ...\n' - ' >>> str(Point(4, 2))\n' - " 'Point(4, 2)'\n" - '\n' - "Accessing arguments' items:\n" - '\n' - ' >>> coord = (3, 5)\n' - " >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n" - " 'X: 3; Y: 5'\n" - '\n' - 'Replacing "%s" and "%r":\n' - '\n' - ' >>> "repr() shows quotes: {!r}; str() doesn\'t: ' - '{!s}".format(\'test1\', \'test2\')\n' - ' "repr() shows quotes: \'test1\'; str() doesn\'t: ' - 'test2"\n' - '\n' - 'Aligning the text and specifying a width:\n' - '\n' - " >>> '{:<30}'.format('left aligned')\n" - " 'left aligned '\n" - " >>> '{:>30}'.format('right aligned')\n" - " ' right aligned'\n" - " >>> '{:^30}'.format('centered')\n" - " ' centered '\n" - " >>> '{:*^30}'.format('centered') # use '*' as a fill " - 'char\n' - " '***********centered***********'\n" - '\n' - 'Replacing "%+f", "%-f", and "% f" and specifying a sign:\n' - '\n' - " >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it " - 'always\n' - " '+3.140000; -3.140000'\n" - " >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space " - 'for positive numbers\n' - " ' 3.140000; -3.140000'\n" - " >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only " - "the minus -- same as '{:f}; {:f}'\n" - " '3.140000; -3.140000'\n" - '\n' - 'Replacing "%x" and "%o" and converting the value to ' - 'different bases:\n' - '\n' - ' >>> # format also supports binary numbers\n' - ' >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: ' - '{0:b}".format(42)\n' - " 'int: 42; hex: 2a; oct: 52; bin: 101010'\n" - ' >>> # with 0x, 0o, or 0b as prefix:\n' - ' >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: ' - '{0:#b}".format(42)\n' - " 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010'\n" - '\n' - 'Using the comma as a thousands separator:\n' - '\n' - " >>> '{:,}'.format(1234567890)\n" - " '1,234,567,890'\n" - '\n' - 'Expressing a percentage:\n' - '\n' - ' >>> points = 19\n' - ' >>> total = 22\n' - " >>> 'Correct answers: {:.2%}'.format(points/total)\n" - " 'Correct answers: 86.36%'\n" - '\n' - 'Using type-specific formatting:\n' - '\n' - ' >>> import 
datetime\n' - ' >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n' - " >>> '{:%Y-%m-%d %H:%M:%S}'.format(d)\n" - " '2010-07-04 12:15:58'\n" - '\n' - 'Nesting arguments and more complex examples:\n' - '\n' - " >>> for align, text in zip('<^>', ['left', 'center', " - "'right']):\n" - " ... '{0:{fill}{align}16}'.format(text, fill=align, " - 'align=align)\n' - ' ...\n' - " 'left<<<<<<<<<<<<'\n" - " '^^^^^center^^^^^'\n" - " '>>>>>>>>>>>right'\n" - ' >>>\n' - ' >>> octets = [192, 168, 0, 1]\n' - " >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets)\n" - " 'C0A80001'\n" - ' >>> int(_, 16)\n' - ' 3232235521\n' - ' >>>\n' - ' >>> width = 5\n' - ' >>> for num in range(5,12): #doctest: ' - '+NORMALIZE_WHITESPACE\n' - " ... for base in 'dXob':\n" - " ... print('{0:{width}{base}}'.format(num, " - "base=base, width=width), end=' ')\n" - ' ... print()\n' - ' ...\n' - ' 5 5 5 101\n' - ' 6 6 6 110\n' - ' 7 7 7 111\n' - ' 8 8 10 1000\n' - ' 9 9 11 1001\n' - ' 10 A 12 1010\n' - ' 11 B 13 1011\n', - 'function': '\n' - 'Function definitions\n' - '********************\n' - '\n' - 'A function definition defines a user-defined function object ' - '(see\n' - 'section *The standard type hierarchy*):\n' - '\n' - ' funcdef ::= [decorators] "def" funcname "(" ' - '[parameter_list] ")" ["->" expression] ":" suite\n' - ' decorators ::= decorator+\n' - ' decorator ::= "@" dotted_name ["(" [parameter_list ' - '[","]] ")"] NEWLINE\n' - ' dotted_name ::= identifier ("." identifier)*\n' - ' parameter_list ::= (defparameter ",")*\n' - ' | "*" [parameter] ("," defparameter)* ' - '["," "**" parameter]\n' - ' | "**" parameter\n' - ' | defparameter [","] )\n' - ' parameter ::= identifier [":" expression]\n' - ' defparameter ::= parameter ["=" expression]\n' - ' funcname ::= identifier\n' - '\n' - 'A function definition is an executable statement. Its ' - 'execution binds\n' - 'the function name in the current local namespace to a function ' - 'object\n' - '(a wrapper around the executable code for the function). ' - 'This\n' - 'function object contains a reference to the current global ' - 'namespace\n' - 'as the global namespace to be used when the function is ' - 'called.\n' - '\n' - 'The function definition does not execute the function body; ' - 'this gets\n' - 'executed only when the function is called. [3]\n' - '\n' - 'A function definition may be wrapped by one or more ' - '*decorator*\n' - 'expressions. Decorator expressions are evaluated when the ' - 'function is\n' - 'defined, in the scope that contains the function definition. ' - 'The\n' - 'result must be a callable, which is invoked with the function ' - 'object\n' - 'as the only argument. The returned value is bound to the ' - 'function name\n' - 'instead of the function object. Multiple decorators are ' - 'applied in\n' - 'nested fashion. For example, the following code\n' - '\n' - ' @f1(arg)\n' - ' @f2\n' - ' def func(): pass\n' - '\n' - 'is equivalent to\n' - '\n' - ' def func(): pass\n' - ' func = f1(arg)(f2(func))\n' - '\n' - 'When one or more *parameters* have the form *parameter* "="\n' - '*expression*, the function is said to have "default parameter ' - 'values."\n' - 'For a parameter with a default value, the corresponding ' - '*argument* may\n' - "be omitted from a call, in which case the parameter's default " - 'value is\n' - 'substituted. 
If a parameter has a default value, all ' - 'following\n' - 'parameters up until the ""*"" must also have a default value ' - '--- this\n' - 'is a syntactic restriction that is not expressed by the ' - 'grammar.\n' - '\n' - '**Default parameter values are evaluated from left to right ' - 'when the\n' - 'function definition is executed.** This means that the ' - 'expression is\n' - 'evaluated once, when the function is defined, and that the ' - 'same "pre-\n' - 'computed" value is used for each call. This is especially ' - 'important\n' - 'to understand when a default parameter is a mutable object, ' - 'such as a\n' - 'list or a dictionary: if the function modifies the object ' - '(e.g. by\n' - 'appending an item to a list), the default value is in effect ' - 'modified.\n' - 'This is generally not what was intended. A way around this is ' - 'to use\n' - '"None" as the default, and explicitly test for it in the body ' - 'of the\n' - 'function, e.g.:\n' - '\n' - ' def whats_on_the_telly(penguin=None):\n' - ' if penguin is None:\n' - ' penguin = []\n' - ' penguin.append("property of the zoo")\n' - ' return penguin\n' - '\n' - 'Function call semantics are described in more detail in ' - 'section\n' - '*Calls*. A function call always assigns values to all ' - 'parameters\n' - 'mentioned in the parameter list, either from position ' - 'arguments, from\n' - 'keyword arguments, or from default values. If the form\n' - '""*identifier"" is present, it is initialized to a tuple ' - 'receiving any\n' - 'excess positional parameters, defaulting to the empty tuple. ' - 'If the\n' - 'form ""**identifier"" is present, it is initialized to a new\n' - 'dictionary receiving any excess keyword arguments, defaulting ' - 'to a new\n' - 'empty dictionary. Parameters after ""*"" or ""*identifier"" ' - 'are\n' - 'keyword-only parameters and may only be passed used keyword ' - 'arguments.\n' - '\n' - 'Parameters may have annotations of the form "": expression"" ' - 'following\n' - 'the parameter name. Any parameter may have an annotation even ' - 'those\n' - 'of the form "*identifier" or "**identifier". Functions may ' - 'have\n' - '"return" annotation of the form ""-> expression"" after the ' - 'parameter\n' - 'list. These annotations can be any valid Python expression ' - 'and are\n' - 'evaluated when the function definition is executed. ' - 'Annotations may\n' - 'be evaluated in a different order than they appear in the ' - 'source code.\n' - 'The presence of annotations does not change the semantics of ' - 'a\n' - 'function. The annotation values are available as values of a\n' - "dictionary keyed by the parameters' names in the " - '"__annotations__"\n' - 'attribute of the function object.\n' - '\n' - 'It is also possible to create anonymous functions (functions ' - 'not bound\n' - 'to a name), for immediate use in expressions. This uses ' - 'lambda\n' - 'expressions, described in section *Lambdas*. Note that the ' - 'lambda\n' - 'expression is merely a shorthand for a simplified function ' - 'definition;\n' - 'a function defined in a ""def"" statement can be passed around ' - 'or\n' - 'assigned to another name just like a function defined by a ' - 'lambda\n' - 'expression. The ""def"" form is actually more powerful since ' - 'it\n' - 'allows the execution of multiple statements and annotations.\n' - '\n' - "**Programmer's note:** Functions are first-class objects. A " - '""def""\n' - 'statement executed inside a function definition defines a ' - 'local\n' - 'function that can be returned or passed around. 
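A short sketch (the function is invented for illustration) combining several of the features described above: a keyword-only parameter introduced after "*", annotations, and a default value.

   >>> def scale(values: list, *, factor: float = 2.0) -> list:
   ...     return [v * factor for v in values]
   ...
   >>> scale([1, 2, 3])
   [2.0, 4.0, 6.0]
   >>> scale([1, 2, 3], factor=10)
   [10, 20, 30]
   >>> sorted(scale.__annotations__)
   ['factor', 'return', 'values']

Calling "scale([1, 2, 3], 10)" raises "TypeError", since "factor" can only be supplied as a keyword argument.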
Free ' - 'variables used\n' - 'in the nested function can access the local variables of the ' - 'function\n' - 'containing the def. See section *Naming and binding* for ' - 'details.\n' - '\n' - 'See also: **PEP 3107** - Function Annotations\n' - '\n' - ' The original specification for function annotations.\n', - 'global': '\n' - 'The "global" statement\n' - '**********************\n' - '\n' - ' global_stmt ::= "global" identifier ("," identifier)*\n' - '\n' - 'The "global" statement is a declaration which holds for the ' - 'entire\n' - 'current code block. It means that the listed identifiers are to ' - 'be\n' - 'interpreted as globals. It would be impossible to assign to a ' - 'global\n' - 'variable without "global", although free variables may refer to\n' - 'globals without being declared global.\n' - '\n' - 'Names listed in a "global" statement must not be used in the ' - 'same code\n' - 'block textually preceding that "global" statement.\n' - '\n' - 'Names listed in a "global" statement must not be defined as ' - 'formal\n' - 'parameters or in a "for" loop control target, "class" ' - 'definition,\n' - 'function definition, or "import" statement.\n' - '\n' - '**CPython implementation detail:** The current implementation ' - 'does not\n' - 'enforce the two restrictions, but programs should not abuse ' - 'this\n' - 'freedom, as future implementations may enforce them or silently ' - 'change\n' - 'the meaning of the program.\n' - '\n' - '**Programmer\'s note:** the "global" is a directive to the ' - 'parser. It\n' - 'applies only to code parsed at the same time as the "global"\n' - 'statement. In particular, a "global" statement contained in a ' - 'string\n' - 'or code object supplied to the built-in "exec()" function does ' - 'not\n' - 'affect the code block *containing* the function call, and code\n' - 'contained in such a string is unaffected by "global" statements ' - 'in the\n' - 'code containing the function call. The same applies to the ' - '"eval()"\n' - 'and "compile()" functions.\n', - 'id-classes': '\n' - 'Reserved classes of identifiers\n' - '*******************************\n' - '\n' - 'Certain classes of identifiers (besides keywords) have ' - 'special\n' - 'meanings. These classes are identified by the patterns of ' - 'leading and\n' - 'trailing underscore characters:\n' - '\n' - '"_*"\n' - ' Not imported by "from module import *". The special ' - 'identifier "_"\n' - ' is used in the interactive interpreter to store the ' - 'result of the\n' - ' last evaluation; it is stored in the "builtins" module. ' - 'When not\n' - ' in interactive mode, "_" has no special meaning and is ' - 'not defined.\n' - ' See section *The import statement*.\n' - '\n' - ' Note: The name "_" is often used in conjunction with\n' - ' internationalization; refer to the documentation for ' - 'the\n' - ' "gettext" module for more information on this ' - 'convention.\n' - '\n' - '"__*__"\n' - ' System-defined names. These names are defined by the ' - 'interpreter\n' - ' and its implementation (including the standard library). ' - 'Current\n' - ' system names are discussed in the *Special method names* ' - 'section\n' - ' and elsewhere. More will likely be defined in future ' - 'versions of\n' - ' Python. *Any* use of "__*__" names, in any context, that ' - 'does not\n' - ' follow explicitly documented use, is subject to breakage ' - 'without\n' - ' warning.\n' - '\n' - '"__*"\n' - ' Class-private names. 
Names in this category, when used ' - 'within the\n' - ' context of a class definition, are re-written to use a ' - 'mangled form\n' - ' to help avoid name clashes between "private" attributes ' - 'of base and\n' - ' derived classes. See section *Identifiers (Names)*.\n', - 'identifiers': '\n' - 'Identifiers and keywords\n' - '************************\n' - '\n' - 'Identifiers (also referred to as *names*) are described by ' - 'the\n' - 'following lexical definitions.\n' - '\n' - 'The syntax of identifiers in Python is based on the Unicode ' - 'standard\n' - 'annex UAX-31, with elaboration and changes as defined ' - 'below; see also\n' - '**PEP 3131** for further details.\n' - '\n' - 'Within the ASCII range (U+0001..U+007F), the valid ' - 'characters for\n' - 'identifiers are the same as in Python 2.x: the uppercase ' - 'and lowercase\n' - 'letters "A" through "Z", the underscore "_" and, except for ' - 'the first\n' - 'character, the digits "0" through "9".\n' - '\n' - 'Python 3.0 introduces additional characters from outside ' - 'the ASCII\n' - 'range (see **PEP 3131**). For these characters, the ' - 'classification\n' - 'uses the version of the Unicode Character Database as ' - 'included in the\n' - '"unicodedata" module.\n' - '\n' - 'Identifiers are unlimited in length. Case is significant.\n' - '\n' - ' identifier ::= xid_start xid_continue*\n' - ' id_start ::= \n' - ' id_continue ::= \n' - ' xid_start ::= \n' - ' xid_continue ::= \n' - '\n' - 'The Unicode category codes mentioned above stand for:\n' - '\n' - '* *Lu* - uppercase letters\n' - '\n' - '* *Ll* - lowercase letters\n' - '\n' - '* *Lt* - titlecase letters\n' - '\n' - '* *Lm* - modifier letters\n' - '\n' - '* *Lo* - other letters\n' - '\n' - '* *Nl* - letter numbers\n' - '\n' - '* *Mn* - nonspacing marks\n' - '\n' - '* *Mc* - spacing combining marks\n' - '\n' - '* *Nd* - decimal numbers\n' - '\n' - '* *Pc* - connector punctuations\n' - '\n' - '* *Other_ID_Start* - explicit list of characters in ' - 'PropList.txt to\n' - ' support backwards compatibility\n' - '\n' - '* *Other_ID_Continue* - likewise\n' - '\n' - 'All identifiers are converted into the normal form NFKC ' - 'while parsing;\n' - 'comparison of identifiers is based on NFKC.\n' - '\n' - 'A non-normative HTML file listing all valid identifier ' - 'characters for\n' - 'Unicode 4.1 can be found at http://www.dcl.hpi.uni-\n' - 'potsdam.de/home/loewis/table-3131.html.\n' - '\n' - '\n' - 'Keywords\n' - '========\n' - '\n' - 'The following identifiers are used as reserved words, or ' - '*keywords* of\n' - 'the language, and cannot be used as ordinary identifiers. ' - 'They must\n' - 'be spelled exactly as written here:\n' - '\n' - ' False class finally is return\n' - ' None continue for lambda try\n' - ' True def from nonlocal while\n' - ' and del global not with\n' - ' as elif if or yield\n' - ' assert else import pass\n' - ' break except in raise\n' - '\n' - '\n' - 'Reserved classes of identifiers\n' - '===============================\n' - '\n' - 'Certain classes of identifiers (besides keywords) have ' - 'special\n' - 'meanings. These classes are identified by the patterns of ' - 'leading and\n' - 'trailing underscore characters:\n' - '\n' - '"_*"\n' - ' Not imported by "from module import *". The special ' - 'identifier "_"\n' - ' is used in the interactive interpreter to store the ' - 'result of the\n' - ' last evaluation; it is stored in the "builtins" module. 
' - 'When not\n' - ' in interactive mode, "_" has no special meaning and is ' - 'not defined.\n' - ' See section *The import statement*.\n' - '\n' - ' Note: The name "_" is often used in conjunction with\n' - ' internationalization; refer to the documentation for ' - 'the\n' - ' "gettext" module for more information on this ' - 'convention.\n' - '\n' - '"__*__"\n' - ' System-defined names. These names are defined by the ' - 'interpreter\n' - ' and its implementation (including the standard ' - 'library). Current\n' - ' system names are discussed in the *Special method names* ' - 'section\n' - ' and elsewhere. More will likely be defined in future ' - 'versions of\n' - ' Python. *Any* use of "__*__" names, in any context, ' - 'that does not\n' - ' follow explicitly documented use, is subject to breakage ' - 'without\n' - ' warning.\n' - '\n' - '"__*"\n' - ' Class-private names. Names in this category, when used ' - 'within the\n' - ' context of a class definition, are re-written to use a ' - 'mangled form\n' - ' to help avoid name clashes between "private" attributes ' - 'of base and\n' - ' derived classes. See section *Identifiers (Names)*.\n', - 'if': '\n' - 'The "if" statement\n' - '******************\n' - '\n' - 'The "if" statement is used for conditional execution:\n' - '\n' - ' if_stmt ::= "if" expression ":" suite\n' - ' ( "elif" expression ":" suite )*\n' - ' ["else" ":" suite]\n' - '\n' - 'It selects exactly one of the suites by evaluating the expressions ' - 'one\n' - 'by one until one is found to be true (see section *Boolean ' - 'operations*\n' - 'for the definition of true and false); then that suite is executed\n' - '(and no other part of the "if" statement is executed or evaluated).\n' - 'If all expressions are false, the suite of the "else" clause, if\n' - 'present, is executed.\n', - 'imaginary': '\n' - 'Imaginary literals\n' - '******************\n' - '\n' - 'Imaginary literals are described by the following lexical ' - 'definitions:\n' - '\n' - ' imagnumber ::= (floatnumber | intpart) ("j" | "J")\n' - '\n' - 'An imaginary literal yields a complex number with a real part ' - 'of 0.0.\n' - 'Complex numbers are represented as a pair of floating point ' - 'numbers\n' - 'and have the same restrictions on their range. To create a ' - 'complex\n' - 'number with a nonzero real part, add a floating point number ' - 'to it,\n' - 'e.g., "(3+4j)". Some examples of imaginary literals:\n' - '\n' - ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', - 'import': '\n' - 'The "import" statement\n' - '**********************\n' - '\n' - ' import_stmt ::= "import" module ["as" name] ( "," module ' - '["as" name] )*\n' - ' | "from" relative_module "import" identifier ' - '["as" name]\n' - ' ( "," identifier ["as" name] )*\n' - ' | "from" relative_module "import" "(" ' - 'identifier ["as" name]\n' - ' ( "," identifier ["as" name] )* [","] ")"\n' - ' | "from" module "import" "*"\n' - ' module ::= (identifier ".")* identifier\n' - ' relative_module ::= "."* module | "."+\n' - ' name ::= identifier\n' - '\n' - 'The basic import statement (no "from" clause) is executed in ' - 'two\n' - 'steps:\n' - '\n' - '1. find a module, loading and initializing it if necessary\n' - '\n' - '2. 
define a name or names in the local namespace for the scope\n' - ' where the "import" statement occurs.\n' - '\n' - 'When the statement contains multiple clauses (separated by ' - 'commas) the\n' - 'two steps are carried out separately for each clause, just as ' - 'though\n' - 'the clauses had been separated out into individiual import ' - 'statements.\n' - '\n' - 'The details of the first step, finding and loading modules are\n' - 'described in greater detail in the section on the *import ' - 'system*,\n' - 'which also describes the various types of packages and modules ' - 'that\n' - 'can be imported, as well as all the hooks that can be used to\n' - 'customize the import system. Note that failures in this step ' - 'may\n' - 'indicate either that the module could not be located, *or* that ' - 'an\n' - 'error occurred while initializing the module, which includes ' - 'execution\n' - "of the module's code.\n" - '\n' - 'If the requested module is retrieved successfully, it will be ' - 'made\n' - 'available in the local namespace in one of three ways:\n' - '\n' - '* If the module name is followed by "as", then the name ' - 'following\n' - ' "as" is bound directly to the imported module.\n' - '\n' - '* If no other name is specified, and the module being imported ' - 'is a\n' - " top level module, the module's name is bound in the local " - 'namespace\n' - ' as a reference to the imported module\n' - '\n' - '* If the module being imported is *not* a top level module, then ' - 'the\n' - ' name of the top level package that contains the module is ' - 'bound in\n' - ' the local namespace as a reference to the top level package. ' - 'The\n' - ' imported module must be accessed using its full qualified ' - 'name\n' - ' rather than directly\n' - '\n' - 'The "from" form uses a slightly more complex process:\n' - '\n' - '1. find the module specified in the "from" clause, loading and\n' - ' initializing it if necessary;\n' - '\n' - '2. for each of the identifiers specified in the "import" ' - 'clauses:\n' - '\n' - ' 1. check if the imported module has an attribute by that ' - 'name\n' - '\n' - ' 2. if not, attempt to import a submodule with that name and ' - 'then\n' - ' check the imported module again for that attribute\n' - '\n' - ' 3. if the attribute is not found, "ImportError" is raised.\n' - '\n' - ' 4. otherwise, a reference to that value is stored in the ' - 'local\n' - ' namespace, using the name in the "as" clause if it is ' - 'present,\n' - ' otherwise using the attribute name\n' - '\n' - 'Examples:\n' - '\n' - ' import foo # foo imported and bound locally\n' - ' import foo.bar.baz # foo.bar.baz imported, foo bound ' - 'locally\n' - ' import foo.bar.baz as fbb # foo.bar.baz imported and bound ' - 'as fbb\n' - ' from foo.bar import baz # foo.bar.baz imported and bound ' - 'as baz\n' - ' from foo import attr # foo imported and foo.attr bound ' - 'as attr\n' - '\n' - 'If the list of identifiers is replaced by a star ("\'*\'"), all ' - 'public\n' - 'names defined in the module are bound in the local namespace for ' - 'the\n' - 'scope where the "import" statement occurs.\n' - '\n' - 'The *public names* defined by a module are determined by ' - 'checking the\n' - 'module\'s namespace for a variable named "__all__"; if defined, ' - 'it must\n' - 'be a sequence of strings which are names defined or imported by ' - 'that\n' - 'module. The names given in "__all__" are all considered public ' - 'and\n' - 'are required to exist. 
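As a sketch (the module and names are hypothetical), a module controls what a star import exposes by defining "__all__":

   # helpers.py -- hypothetical module
   __all__ = ['useful']          # only this name is bound by "from helpers import *"

   def useful():
       return 42

   def _internal():              # underscore-prefixed names are never bound by a star
       return -1                 # import anyway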
If "__all__" is not defined, the set of ' - 'public\n' - "names includes all names found in the module's namespace which " - 'do not\n' - 'begin with an underscore character ("\'_\'"). "__all__" should ' - 'contain\n' - 'the entire public API. It is intended to avoid accidentally ' - 'exporting\n' - 'items that are not part of the API (such as library modules ' - 'which were\n' - 'imported and used within the module).\n' - '\n' - 'The wild card form of import --- "from module import *" --- is ' - 'only\n' - 'allowed at the module level. Attempting to use it in class or\n' - 'function definitions will raise a "SyntaxError".\n' - '\n' - 'When specifying what module to import you do not have to specify ' - 'the\n' - 'absolute name of the module. When a module or package is ' - 'contained\n' - 'within another package it is possible to make a relative import ' - 'within\n' - 'the same top package without having to mention the package name. ' - 'By\n' - 'using leading dots in the specified module or package after ' - '"from" you\n' - 'can specify how high to traverse up the current package ' - 'hierarchy\n' - 'without specifying exact names. One leading dot means the ' - 'current\n' - 'package where the module making the import exists. Two dots ' - 'means up\n' - 'one package level. Three dots is up two levels, etc. So if you ' - 'execute\n' - '"from . import mod" from a module in the "pkg" package then you ' - 'will\n' - 'end up importing "pkg.mod". If you execute "from ..subpkg2 ' - 'import mod"\n' - 'from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". ' - 'The\n' - 'specification for relative imports is contained within **PEP ' - '328**.\n' - '\n' - '"importlib.import_module()" is provided to support applications ' - 'that\n' - 'determine dynamically the modules to be loaded.\n' - '\n' - '\n' - 'Future statements\n' - '=================\n' - '\n' - 'A *future statement* is a directive to the compiler that a ' - 'particular\n' - 'module should be compiled using syntax or semantics that will ' - 'be\n' - 'available in a specified future release of Python where the ' - 'feature\n' - 'becomes standard.\n' - '\n' - 'The future statement is intended to ease migration to future ' - 'versions\n' - 'of Python that introduce incompatible changes to the language. ' - 'It\n' - 'allows use of the new features on a per-module basis before the\n' - 'release in which the feature becomes standard.\n' - '\n' - ' future_statement ::= "from" "__future__" "import" feature ' - '["as" name]\n' - ' ("," feature ["as" name])*\n' - ' | "from" "__future__" "import" "(" ' - 'feature ["as" name]\n' - ' ("," feature ["as" name])* [","] ")"\n' - ' feature ::= identifier\n' - ' name ::= identifier\n' - '\n' - 'A future statement must appear near the top of the module. The ' - 'only\n' - 'lines that can appear before a future statement are:\n' - '\n' - '* the module docstring (if any),\n' - '\n' - '* comments,\n' - '\n' - '* blank lines, and\n' - '\n' - '* other future statements.\n' - '\n' - 'The features recognized by Python 3.0 are "absolute_import",\n' - '"division", "generators", "unicode_literals", "print_function",\n' - '"nested_scopes" and "with_statement". They are all redundant ' - 'because\n' - 'they are always enabled, and only kept for backwards ' - 'compatibility.\n' - '\n' - 'A future statement is recognized and treated specially at ' - 'compile\n' - 'time: Changes to the semantics of core constructs are often\n' - 'implemented by generating different code. 
It may even be the ' - 'case\n' - 'that a new feature introduces new incompatible syntax (such as a ' - 'new\n' - 'reserved word), in which case the compiler may need to parse ' - 'the\n' - 'module differently. Such decisions cannot be pushed off until\n' - 'runtime.\n' - '\n' - 'For any given release, the compiler knows which feature names ' - 'have\n' - 'been defined, and raises a compile-time error if a future ' - 'statement\n' - 'contains a feature not known to it.\n' - '\n' - 'The direct runtime semantics are the same as for any import ' - 'statement:\n' - 'there is a standard module "__future__", described later, and it ' - 'will\n' - 'be imported in the usual way at the time the future statement ' - 'is\n' - 'executed.\n' - '\n' - 'The interesting runtime semantics depend on the specific ' - 'feature\n' - 'enabled by the future statement.\n' - '\n' - 'Note that there is nothing special about the statement:\n' - '\n' - ' import __future__ [as name]\n' - '\n' - "That is not a future statement; it's an ordinary import " - 'statement with\n' - 'no special semantics or syntax restrictions.\n' - '\n' - 'Code compiled by calls to the built-in functions "exec()" and\n' - '"compile()" that occur in a module "M" containing a future ' - 'statement\n' - 'will, by default, use the new syntax or semantics associated ' - 'with the\n' - 'future statement. This can be controlled by optional arguments ' - 'to\n' - '"compile()" --- see the documentation of that function for ' - 'details.\n' - '\n' - 'A future statement typed at an interactive interpreter prompt ' - 'will\n' - 'take effect for the rest of the interpreter session. If an\n' - 'interpreter is started with the *-i* option, is passed a script ' - 'name\n' - 'to execute, and the script includes a future statement, it will ' - 'be in\n' - 'effect in the interactive session started after the script is\n' - 'executed.\n' - '\n' - 'See also: **PEP 236** - Back to the __future__\n' - '\n' - ' The original proposal for the __future__ mechanism.\n', - 'in': '\n' - 'Comparisons\n' - '***********\n' - '\n' - 'Unlike C, all comparison operations in Python have the same ' - 'priority,\n' - 'which is lower than that of any arithmetic, shifting or bitwise\n' - 'operation. Also unlike C, expressions like "a < b < c" have the\n' - 'interpretation that is conventional in mathematics:\n' - '\n' - ' comparison ::= or_expr ( comp_operator or_expr )*\n' - ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n' - ' | "is" ["not"] | ["not"] "in"\n' - '\n' - 'Comparisons yield boolean values: "True" or "False".\n' - '\n' - 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" is\n' - 'equivalent to "x < y and y <= z", except that "y" is evaluated only\n' - 'once (but in both cases "z" is not evaluated at all when "x < y" is\n' - 'found to be false).\n' - '\n' - 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and ' - '*op1*,\n' - '*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... ' - 'y\n' - 'opN z" is equivalent to "a op1 b and b op2 c and ... y opN z", ' - 'except\n' - 'that each expression is evaluated at most once.\n' - '\n' - 'Note that "a op1 b op2 c" doesn\'t imply any kind of comparison ' - 'between\n' - '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\n' - 'perhaps not pretty).\n' - '\n' - 'The operators "<", ">", "==", ">=", "<=", and "!=" compare the ' - 'values\n' - 'of two objects. The objects need not have the same type. If both ' - 'are\n' - 'numbers, they are converted to a common type. 
Otherwise, the "==" ' - 'and\n' - '"!=" operators *always* consider objects of different types to be\n' - 'unequal, while the "<", ">", ">=" and "<=" operators raise a\n' - '"TypeError" when comparing objects of different types that do not\n' - 'implement these operators for the given pair of types. You can\n' - 'control comparison behavior of objects of non-built-in types by\n' - 'defining rich comparison methods like "__gt__()", described in ' - 'section\n' - '*Basic customization*.\n' - '\n' - 'Comparison of objects of the same type depends on the type:\n' - '\n' - '* Numbers are compared arithmetically.\n' - '\n' - '* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. ' - 'They\n' - ' are identical to themselves, "x is x" but are not equal to\n' - ' themselves, "x != x". Additionally, comparing any value to a\n' - ' not-a-number value will return "False". For example, both "3 <\n' - ' float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n' - '\n' - '* Bytes objects are compared lexicographically using the numeric\n' - ' values of their elements.\n' - '\n' - '* Strings are compared lexicographically using the numeric\n' - ' equivalents (the result of the built-in function "ord()") of ' - 'their\n' - " characters. [3] String and bytes object can't be compared!\n" - '\n' - '* Tuples and lists are compared lexicographically using comparison\n' - ' of corresponding elements. This means that to compare equal, ' - 'each\n' - ' element must compare equal and the two sequences must be of the ' - 'same\n' - ' type and have the same length.\n' - '\n' - ' If not equal, the sequences are ordered the same as their first\n' - ' differing elements. For example, "[1,2,x] <= [1,2,y]" has the ' - 'same\n' - ' value as "x <= y". If the corresponding element does not exist, ' - 'the\n' - ' shorter sequence is ordered first (for example, "[1,2] < ' - '[1,2,3]").\n' - '\n' - '* Mappings (dictionaries) compare equal if and only if they have ' - 'the\n' - ' same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', ' - "'>=',\n" - ' \'>\')" raise "TypeError".\n' - '\n' - '* Sets and frozensets define comparison operators to mean subset ' - 'and\n' - ' superset tests. Those relations do not define total orderings ' - '(the\n' - ' two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one\n' - ' another, nor supersets of one another). Accordingly, sets are ' - 'not\n' - ' appropriate arguments for functions which depend on total ' - 'ordering.\n' - ' For example, "min()", "max()", and "sorted()" produce undefined\n' - ' results given a list of sets as inputs.\n' - '\n' - '* Most other objects of built-in types compare unequal unless they\n' - ' are the same object; the choice whether one object is considered\n' - ' smaller or larger than another one is made arbitrarily but\n' - ' consistently within one execution of a program.\n' - '\n' - 'Comparison of objects of differing types depends on whether either ' - 'of\n' - 'the types provide explicit support for the comparison. Most ' - 'numeric\n' - 'types can be compared with one another. When cross-type comparison ' - 'is\n' - 'not supported, the comparison method returns "NotImplemented".\n' - '\n' - 'The operators "in" and "not in" test for membership. "x in s"\n' - 'evaluates to true if *x* is a member of *s*, and false otherwise. ' - '"x\n' - 'not in s" returns the negation of "x in s". 
All built-in sequences\n' - 'and set types support this as well as dictionary, for which "in" ' - 'tests\n' - 'whether the dictionary has a given key. For container types such as\n' - 'list, tuple, set, frozenset, dict, or collections.deque, the\n' - 'expression "x in y" is equivalent to "any(x is e or x == e for e in\n' - 'y)".\n' - '\n' - 'For the string and bytes types, "x in y" is true if and only if *x* ' - 'is\n' - 'a substring of *y*. An equivalent test is "y.find(x) != -1". ' - 'Empty\n' - 'strings are always considered to be a substring of any other ' - 'string,\n' - 'so """ in "abc"" will return "True".\n' - '\n' - 'For user-defined classes which define the "__contains__()" method, ' - '"x\n' - 'in y" is true if and only if "y.__contains__(x)" is true.\n' - '\n' - 'For user-defined classes which do not define "__contains__()" but ' - 'do\n' - 'define "__iter__()", "x in y" is true if some value "z" with "x == ' - 'z"\n' - 'is produced while iterating over "y". If an exception is raised\n' - 'during the iteration, it is as if "in" raised that exception.\n' - '\n' - 'Lastly, the old-style iteration protocol is tried: if a class ' - 'defines\n' - '"__getitem__()", "x in y" is true if and only if there is a non-\n' - 'negative integer index *i* such that "x == y[i]", and all lower\n' - 'integer indices do not raise "IndexError" exception. (If any other\n' - 'exception is raised, it is as if "in" raised that exception).\n' - '\n' - 'The operator "not in" is defined to have the inverse true value of\n' - '"in".\n' - '\n' - 'The operators "is" and "is not" test for object identity: "x is y" ' - 'is\n' - 'true if and only if *x* and *y* are the same object. "x is not y"\n' - 'yields the inverse truth value. [4]\n', - 'integers': '\n' - 'Integer literals\n' - '****************\n' - '\n' - 'Integer literals are described by the following lexical ' - 'definitions:\n' - '\n' - ' integer ::= decimalinteger | octinteger | hexinteger ' - '| bininteger\n' - ' decimalinteger ::= nonzerodigit digit* | "0"+\n' - ' nonzerodigit ::= "1"..."9"\n' - ' digit ::= "0"..."9"\n' - ' octinteger ::= "0" ("o" | "O") octdigit+\n' - ' hexinteger ::= "0" ("x" | "X") hexdigit+\n' - ' bininteger ::= "0" ("b" | "B") bindigit+\n' - ' octdigit ::= "0"..."7"\n' - ' hexdigit ::= digit | "a"..."f" | "A"..."F"\n' - ' bindigit ::= "0" | "1"\n' - '\n' - 'There is no limit for the length of integer literals apart ' - 'from what\n' - 'can be stored in available memory.\n' - '\n' - 'Note that leading zeros in a non-zero decimal number are not ' - 'allowed.\n' - 'This is for disambiguation with C-style octal literals, which ' - 'Python\n' - 'used before version 3.0.\n' - '\n' - 'Some examples of integer literals:\n' - '\n' - ' 7 2147483647 0o177 ' - '0b100110111\n' - ' 3 79228162514264337593543950336 0o377 ' - '0xdeadbeef\n', - 'lambda': '\n' - 'Lambdas\n' - '*******\n' - '\n' - ' lambda_expr ::= "lambda" [parameter_list]: expression\n' - ' lambda_expr_nocond ::= "lambda" [parameter_list]: ' - 'expression_nocond\n' - '\n' - 'Lambda expressions (sometimes called lambda forms) are used to ' - 'create\n' - 'anonymous functions. The expression "lambda arguments: ' - 'expression"\n' - 'yields a function object. 
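For instance (a minimal sketch; the name "adder" is arbitrary):

    >>> adder = lambda x, y: x + y
    >>> adder(2, 3)
    5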
The unnamed object behaves like a ' - 'function\n' - 'object defined with\n' - '\n' - ' def (arguments):\n' - ' return expression\n' - '\n' - 'See section *Function definitions* for the syntax of parameter ' - 'lists.\n' - 'Note that functions created with lambda expressions cannot ' - 'contain\n' - 'statements or annotations.\n', - 'lists': '\n' - 'List displays\n' - '*************\n' - '\n' - 'A list display is a possibly empty series of expressions enclosed ' - 'in\n' - 'square brackets:\n' - '\n' - ' list_display ::= "[" [expression_list | comprehension] "]"\n' - '\n' - 'A list display yields a new list object, the contents being ' - 'specified\n' - 'by either a list of expressions or a comprehension. When a ' - 'comma-\n' - 'separated list of expressions is supplied, its elements are ' - 'evaluated\n' - 'from left to right and placed into the list object in that ' - 'order.\n' - 'When a comprehension is supplied, the list is constructed from ' - 'the\n' - 'elements resulting from the comprehension.\n', - 'naming': '\n' - 'Naming and binding\n' - '******************\n' - '\n' - '\n' - 'Binding of names\n' - '================\n' - '\n' - '*Names* refer to objects. Names are introduced by name binding\n' - 'operations.\n' - '\n' - 'The following constructs bind names: formal parameters to ' - 'functions,\n' - '"import" statements, class and function definitions (these bind ' - 'the\n' - 'class or function name in the defining block), and targets that ' - 'are\n' - 'identifiers if occurring in an assignment, "for" loop header, or ' - 'after\n' - '"as" in a "with" statement or "except" clause. The "import" ' - 'statement\n' - 'of the form "from ... import *" binds all names defined in the\n' - 'imported module, except those beginning with an underscore. ' - 'This form\n' - 'may only be used at the module level.\n' - '\n' - 'A target occurring in a "del" statement is also considered bound ' - 'for\n' - 'this purpose (though the actual semantics are to unbind the ' - 'name).\n' - '\n' - 'Each assignment or import statement occurs within a block ' - 'defined by a\n' - 'class or function definition or at the module level (the ' - 'top-level\n' - 'code block).\n' - '\n' - 'If a name is bound in a block, it is a local variable of that ' - 'block,\n' - 'unless declared as "nonlocal" or "global". If a name is bound ' - 'at the\n' - 'module level, it is a global variable. (The variables of the ' - 'module\n' - 'code block are local and global.) If a variable is used in a ' - 'code\n' - 'block but not defined there, it is a *free variable*.\n' - '\n' - 'Each occurrence of a name in the program text refers to the ' - '*binding*\n' - 'of that name established by the following name resolution ' - 'rules.\n' - '\n' - '\n' - 'Resolution of names\n' - '===================\n' - '\n' - 'A *scope* defines the visibility of a name within a block. If a ' - 'local\n' - 'variable is defined in a block, its scope includes that block. ' - 'If the\n' - 'definition occurs in a function block, the scope extends to any ' - 'blocks\n' - 'contained within the defining one, unless a contained block ' - 'introduces\n' - 'a different binding for the name.\n' - '\n' - 'When a name is used in a code block, it is resolved using the ' - 'nearest\n' - 'enclosing scope. The set of all such scopes visible to a code ' - 'block\n' - "is called the block's *environment*.\n" - '\n' - 'When a name is not found at all, a "NameError" exception is ' - 'raised. 
If\n' - 'the current scope is a function scope, and the name refers to a ' - 'local\n' - 'variable that has not yet been bound to a value at the point ' - 'where the\n' - 'name is used, an "UnboundLocalError" exception is raised.\n' - '"UnboundLocalError" is a subclass of "NameError".\n' - '\n' - 'If a name binding operation occurs anywhere within a code block, ' - 'all\n' - 'uses of the name within the block are treated as references to ' - 'the\n' - 'current block. This can lead to errors when a name is used ' - 'within a\n' - 'block before it is bound. This rule is subtle. Python lacks\n' - 'declarations and allows name binding operations to occur ' - 'anywhere\n' - 'within a code block. The local variables of a code block can ' - 'be\n' - 'determined by scanning the entire text of the block for name ' - 'binding\n' - 'operations.\n' - '\n' - 'If the "global" statement occurs within a block, all uses of the ' - 'name\n' - 'specified in the statement refer to the binding of that name in ' - 'the\n' - 'top-level namespace. Names are resolved in the top-level ' - 'namespace by\n' - 'searching the global namespace, i.e. the namespace of the ' - 'module\n' - 'containing the code block, and the builtins namespace, the ' - 'namespace\n' - 'of the module "builtins". The global namespace is searched ' - 'first. If\n' - 'the name is not found there, the builtins namespace is ' - 'searched. The\n' - '"global" statement must precede all uses of the name.\n' - '\n' - 'The "global" statement has the same scope as a name binding ' - 'operation\n' - 'in the same block. If the nearest enclosing scope for a free ' - 'variable\n' - 'contains a global statement, the free variable is treated as a ' - 'global.\n' - '\n' - 'The "nonlocal" statement causes corresponding names to refer to\n' - 'previously bound variables in the nearest enclosing function ' - 'scope.\n' - '"SyntaxError" is raised at compile time if the given name does ' - 'not\n' - 'exist in any enclosing function scope.\n' - '\n' - 'The namespace for a module is automatically created the first ' - 'time a\n' - 'module is imported. The main module for a script is always ' - 'called\n' - '"__main__".\n' - '\n' - 'Class definition blocks and arguments to "exec()" and "eval()" ' - 'are\n' - 'special in the context of name resolution. A class definition is ' - 'an\n' - 'executable statement that may use and define names. These ' - 'references\n' - 'follow the normal rules for name resolution with an exception ' - 'that\n' - 'unbound local variables are looked up in the global namespace. ' - 'The\n' - 'namespace of the class definition becomes the attribute ' - 'dictionary of\n' - 'the class. The scope of names defined in a class block is ' - 'limited to\n' - 'the class block; it does not extend to the code blocks of ' - 'methods --\n' - 'this includes comprehensions and generator expressions since ' - 'they are\n' - 'implemented using a function scope. This means that the ' - 'following\n' - 'will fail:\n' - '\n' - ' class A:\n' - ' a = 42\n' - ' b = list(a + i for i in range(10))\n' - '\n' - '\n' - 'Builtins and restricted execution\n' - '=================================\n' - '\n' - 'The builtins namespace associated with the execution of a code ' - 'block\n' - 'is actually found by looking up the name "__builtins__" in its ' - 'global\n' - 'namespace; this should be a dictionary or a module (in the ' - 'latter case\n' - "the module's dictionary is used). 
By default, when in the " - '"__main__"\n' - 'module, "__builtins__" is the built-in module "builtins"; when ' - 'in any\n' - 'other module, "__builtins__" is an alias for the dictionary of ' - 'the\n' - '"builtins" module itself. "__builtins__" can be set to a ' - 'user-created\n' - 'dictionary to create a weak form of restricted execution.\n' - '\n' - '**CPython implementation detail:** Users should not touch\n' - '"__builtins__"; it is strictly an implementation detail. Users\n' - 'wanting to override values in the builtins namespace should ' - '"import"\n' - 'the "builtins" module and modify its attributes appropriately.\n' - '\n' - '\n' - 'Interaction with dynamic features\n' - '=================================\n' - '\n' - 'Name resolution of free variables occurs at runtime, not at ' - 'compile\n' - 'time. This means that the following code will print 42:\n' - '\n' - ' i = 10\n' - ' def f():\n' - ' print(i)\n' - ' i = 42\n' - ' f()\n' - '\n' - 'There are several cases where Python statements are illegal when ' - 'used\n' - 'in conjunction with nested scopes that contain free variables.\n' - '\n' - 'If a variable is referenced in an enclosing scope, it is illegal ' - 'to\n' - 'delete the name. An error will be reported at compile time.\n' - '\n' - 'The "eval()" and "exec()" functions do not have access to the ' - 'full\n' - 'environment for resolving names. Names may be resolved in the ' - 'local\n' - 'and global namespaces of the caller. Free variables are not ' - 'resolved\n' - 'in the nearest enclosing namespace, but in the global ' - 'namespace. [1]\n' - 'The "exec()" and "eval()" functions have optional arguments to\n' - 'override the global and local namespace. If only one namespace ' - 'is\n' - 'specified, it is used for both.\n', - 'nonlocal': '\n' - 'The "nonlocal" statement\n' - '************************\n' - '\n' - ' nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n' - '\n' - 'The "nonlocal" statement causes the listed identifiers to ' - 'refer to\n' - 'previously bound variables in the nearest enclosing scope ' - 'excluding\n' - 'globals. This is important because the default behavior for ' - 'binding is\n' - 'to search the local namespace first. The statement allows\n' - 'encapsulated code to rebind variables outside of the local ' - 'scope\n' - 'besides the global (module) scope.\n' - '\n' - 'Names listed in a "nonlocal" statement, unlike those listed in ' - 'a\n' - '"global" statement, must refer to pre-existing bindings in an\n' - 'enclosing scope (the scope in which a new binding should be ' - 'created\n' - 'cannot be determined unambiguously).\n' - '\n' - 'Names listed in a "nonlocal" statement must not collide with ' - 'pre-\n' - 'existing bindings in the local scope.\n' - '\n' - 'See also: **PEP 3104** - Access to Names in Outer Scopes\n' - '\n' - ' The specification for the "nonlocal" statement.\n', - 'numbers': '\n' - 'Numeric literals\n' - '****************\n' - '\n' - 'There are three types of numeric literals: integers, floating ' - 'point\n' - 'numbers, and imaginary numbers. 
There are no complex literals\n' - '(complex numbers can be formed by adding a real number and an\n' - 'imaginary number).\n' - '\n' - 'Note that numeric literals do not include a sign; a phrase like ' - '"-1"\n' - 'is actually an expression composed of the unary operator ' - '\'"-"\' and the\n' - 'literal "1".\n', - 'numeric-types': '\n' - 'Emulating numeric types\n' - '***********************\n' - '\n' - 'The following methods can be defined to emulate numeric ' - 'objects.\n' - 'Methods corresponding to operations that are not ' - 'supported by the\n' - 'particular kind of number implemented (e.g., bitwise ' - 'operations for\n' - 'non-integral numbers) should be left undefined.\n' - '\n' - 'object.__add__(self, other)\n' - 'object.__sub__(self, other)\n' - 'object.__mul__(self, other)\n' - 'object.__matmul__(self, other)\n' - 'object.__truediv__(self, other)\n' - 'object.__floordiv__(self, other)\n' - 'object.__mod__(self, other)\n' - 'object.__divmod__(self, other)\n' - 'object.__pow__(self, other[, modulo])\n' - 'object.__lshift__(self, other)\n' - 'object.__rshift__(self, other)\n' - 'object.__and__(self, other)\n' - 'object.__xor__(self, other)\n' - 'object.__or__(self, other)\n' - '\n' - ' These methods are called to implement the binary ' - 'arithmetic\n' - ' operations ("+", "-", "*", "@", "/", "//", "%", ' - '"divmod()",\n' - ' "pow()", "**", "<<", ">>", "&", "^", "|"). For ' - 'instance, to\n' - ' evaluate the expression "x + y", where *x* is an ' - 'instance of a\n' - ' class that has an "__add__()" method, "x.__add__(y)" ' - 'is called.\n' - ' The "__divmod__()" method should be the equivalent to ' - 'using\n' - ' "__floordiv__()" and "__mod__()"; it should not be ' - 'related to\n' - ' "__truediv__()". Note that "__pow__()" should be ' - 'defined to accept\n' - ' an optional third argument if the ternary version of ' - 'the built-in\n' - ' "pow()" function is to be supported.\n' - '\n' - ' If one of those methods does not support the operation ' - 'with the\n' - ' supplied arguments, it should return ' - '"NotImplemented".\n' - '\n' - 'object.__radd__(self, other)\n' - 'object.__rsub__(self, other)\n' - 'object.__rmul__(self, other)\n' - 'object.__rmatmul__(self, other)\n' - 'object.__rtruediv__(self, other)\n' - 'object.__rfloordiv__(self, other)\n' - 'object.__rmod__(self, other)\n' - 'object.__rdivmod__(self, other)\n' - 'object.__rpow__(self, other)\n' - 'object.__rlshift__(self, other)\n' - 'object.__rrshift__(self, other)\n' - 'object.__rand__(self, other)\n' - 'object.__rxor__(self, other)\n' - 'object.__ror__(self, other)\n' - '\n' - ' These methods are called to implement the binary ' - 'arithmetic\n' - ' operations ("+", "-", "*", "@", "/", "//", "%", ' - '"divmod()",\n' - ' "pow()", "**", "<<", ">>", "&", "^", "|") with ' - 'reflected (swapped)\n' - ' operands. These functions are only called if the left ' - 'operand does\n' - ' not support the corresponding operation and the ' - 'operands are of\n' - ' different types. 
[2] For instance, to evaluate the ' - 'expression "x -\n' - ' y", where *y* is an instance of a class that has an ' - '"__rsub__()"\n' - ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' - 'returns\n' - ' *NotImplemented*.\n' - '\n' - ' Note that ternary "pow()" will not try calling ' - '"__rpow__()" (the\n' - ' coercion rules would become too complicated).\n' - '\n' - " Note: If the right operand's type is a subclass of the " - 'left\n' - " operand's type and that subclass provides the " - 'reflected method\n' - ' for the operation, this method will be called before ' - 'the left\n' - " operand's non-reflected method. This behavior " - 'allows subclasses\n' - " to override their ancestors' operations.\n" - '\n' - 'object.__iadd__(self, other)\n' - 'object.__isub__(self, other)\n' - 'object.__imul__(self, other)\n' - 'object.__imatmul__(self, other)\n' - 'object.__itruediv__(self, other)\n' - 'object.__ifloordiv__(self, other)\n' - 'object.__imod__(self, other)\n' - 'object.__ipow__(self, other[, modulo])\n' - 'object.__ilshift__(self, other)\n' - 'object.__irshift__(self, other)\n' - 'object.__iand__(self, other)\n' - 'object.__ixor__(self, other)\n' - 'object.__ior__(self, other)\n' - '\n' - ' These methods are called to implement the augmented ' - 'arithmetic\n' - ' assignments ("+=", "-=", "*=", "@=", "/=", "//=", ' - '"%=", "**=",\n' - ' "<<=", ">>=", "&=", "^=", "|="). These methods should ' - 'attempt to\n' - ' do the operation in-place (modifying *self*) and ' - 'return the result\n' - ' (which could be, but does not have to be, *self*). If ' - 'a specific\n' - ' method is not defined, the augmented assignment falls ' - 'back to the\n' - ' normal methods. For instance, if *x* is an instance ' - 'of a class\n' - ' with an "__iadd__()" method, "x += y" is equivalent to ' - '"x =\n' - ' x.__iadd__(y)" . Otherwise, "x.__add__(y)" and ' - '"y.__radd__(x)" are\n' - ' considered, as with the evaluation of "x + y". In ' - 'certain\n' - ' situations, augmented assignment can result in ' - 'unexpected errors\n' - " (see *Why does a_tuple[i] += ['item'] raise an " - 'exception when the\n' - ' addition works?*), but this behavior is in fact part ' - 'of the data\n' - ' model.\n' - '\n' - 'object.__neg__(self)\n' - 'object.__pos__(self)\n' - 'object.__abs__(self)\n' - 'object.__invert__(self)\n' - '\n' - ' Called to implement the unary arithmetic operations ' - '("-", "+",\n' - ' "abs()" and "~").\n' - '\n' - 'object.__complex__(self)\n' - 'object.__int__(self)\n' - 'object.__float__(self)\n' - 'object.__round__(self[, n])\n' - '\n' - ' Called to implement the built-in functions ' - '"complex()", "int()",\n' - ' "float()" and "round()". Should return a value of the ' - 'appropriate\n' - ' type.\n' - '\n' - 'object.__index__(self)\n' - '\n' - ' Called to implement "operator.index()", and whenever ' - 'Python needs\n' - ' to losslessly convert the numeric object to an integer ' - 'object (such\n' - ' as in slicing, or in the built-in "bin()", "hex()" and ' - '"oct()"\n' - ' functions). Presence of this method indicates that the ' - 'numeric\n' - ' object is an integer type. Must return an integer.\n' - '\n' - ' Note: In order to have a coherent integer type class, ' - 'when\n' - ' "__index__()" is defined "__int__()" should also be ' - 'defined, and\n' - ' both should return the same value.\n', - 'objects': '\n' - 'Objects, values and types\n' - '*************************\n' - '\n' - "*Objects* are Python's abstraction for data. 
All data in a " - 'Python\n' - 'program is represented by objects or by relations between ' - 'objects. (In\n' - "a sense, and in conformance to Von Neumann's model of a " - '"stored\n' - 'program computer," code is also represented by objects.)\n' - '\n' - "Every object has an identity, a type and a value. An object's\n" - '*identity* never changes once it has been created; you may ' - 'think of it\n' - 'as the object\'s address in memory. The \'"is"\' operator ' - 'compares the\n' - 'identity of two objects; the "id()" function returns an ' - 'integer\n' - 'representing its identity.\n' - '\n' - '**CPython implementation detail:** For CPython, "id(x)" is the ' - 'memory\n' - 'address where "x" is stored.\n' - '\n' - "An object's type determines the operations that the object " - 'supports\n' - '(e.g., "does it have a length?") and also defines the possible ' - 'values\n' - 'for objects of that type. The "type()" function returns an ' - "object's\n" - 'type (which is an object itself). Like its identity, an ' - "object's\n" - '*type* is also unchangeable. [1]\n' - '\n' - 'The *value* of some objects can change. Objects whose value ' - 'can\n' - 'change are said to be *mutable*; objects whose value is ' - 'unchangeable\n' - 'once they are created are called *immutable*. (The value of an\n' - 'immutable container object that contains a reference to a ' - 'mutable\n' - "object can change when the latter's value is changed; however " - 'the\n' - 'container is still considered immutable, because the collection ' - 'of\n' - 'objects it contains cannot be changed. So, immutability is ' - 'not\n' - 'strictly the same as having an unchangeable value, it is more ' - 'subtle.)\n' - "An object's mutability is determined by its type; for " - 'instance,\n' - 'numbers, strings and tuples are immutable, while dictionaries ' - 'and\n' - 'lists are mutable.\n' - '\n' - 'Objects are never explicitly destroyed; however, when they ' - 'become\n' - 'unreachable they may be garbage-collected. An implementation ' - 'is\n' - 'allowed to postpone garbage collection or omit it altogether ' - '--- it is\n' - 'a matter of implementation quality how garbage collection is\n' - 'implemented, as long as no objects are collected that are ' - 'still\n' - 'reachable.\n' - '\n' - '**CPython implementation detail:** CPython currently uses a ' - 'reference-\n' - 'counting scheme with (optional) delayed detection of cyclically ' - 'linked\n' - 'garbage, which collects most objects as soon as they become\n' - 'unreachable, but is not guaranteed to collect garbage ' - 'containing\n' - 'circular references. See the documentation of the "gc" module ' - 'for\n' - 'information on controlling the collection of cyclic garbage. ' - 'Other\n' - 'implementations act differently and CPython may change. Do not ' - 'depend\n' - 'on immediate finalization of objects when they become ' - 'unreachable (so\n' - 'you should always close files explicitly).\n' - '\n' - "Note that the use of the implementation's tracing or debugging\n" - 'facilities may keep objects alive that would normally be ' - 'collectable.\n' - 'Also note that catching an exception with a ' - '\'"try"..."except"\'\n' - 'statement may keep objects alive.\n' - '\n' - 'Some objects contain references to "external" resources such as ' - 'open\n' - 'files or windows. 
It is understood that these resources are ' - 'freed\n' - 'when the object is garbage-collected, but since garbage ' - 'collection is\n' - 'not guaranteed to happen, such objects also provide an explicit ' - 'way to\n' - 'release the external resource, usually a "close()" method. ' - 'Programs\n' - 'are strongly recommended to explicitly close such objects. ' - 'The\n' - '\'"try"..."finally"\' statement and the \'"with"\' statement ' - 'provide\n' - 'convenient ways to do this.\n' - '\n' - 'Some objects contain references to other objects; these are ' - 'called\n' - '*containers*. Examples of containers are tuples, lists and\n' - "dictionaries. The references are part of a container's value. " - 'In\n' - 'most cases, when we talk about the value of a container, we ' - 'imply the\n' - 'values, not the identities of the contained objects; however, ' - 'when we\n' - 'talk about the mutability of a container, only the identities ' - 'of the\n' - 'immediately contained objects are implied. So, if an ' - 'immutable\n' - 'container (like a tuple) contains a reference to a mutable ' - 'object, its\n' - 'value changes if that mutable object is changed.\n' - '\n' - 'Types affect almost all aspects of object behavior. Even the\n' - 'importance of object identity is affected in some sense: for ' - 'immutable\n' - 'types, operations that compute new values may actually return ' - 'a\n' - 'reference to any existing object with the same type and value, ' - 'while\n' - 'for mutable objects this is not allowed. E.g., after "a = 1; b ' - '= 1",\n' - '"a" and "b" may or may not refer to the same object with the ' - 'value\n' - 'one, depending on the implementation, but after "c = []; d = ' - '[]", "c"\n' - 'and "d" are guaranteed to refer to two different, unique, ' - 'newly\n' - 'created empty lists. (Note that "c = d = []" assigns the same ' - 'object\n' - 'to both "c" and "d".)\n', - 'operator-summary': '\n' - 'Operator precedence\n' - '*******************\n' - '\n' - 'The following table summarizes the operator precedence ' - 'in Python, from\n' - 'lowest precedence (least binding) to highest ' - 'precedence (most\n' - 'binding). Operators in the same box have the same ' - 'precedence. Unless\n' - 'the syntax is explicitly given, operators are binary. 
' - 'Operators in\n' - 'the same box group left to right (except for ' - 'exponentiation, which\n' - 'groups from right to left).\n' - '\n' - 'Note that comparisons, membership tests, and identity ' - 'tests, all have\n' - 'the same precedence and have a left-to-right chaining ' - 'feature as\n' - 'described in the *Comparisons* section.\n' - '\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| Operator | ' - 'Description |\n' - '+=================================================+=======================================+\n' - '| "lambda" | ' - 'Lambda expression |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "if" -- "else" | ' - 'Conditional expression |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "or" | ' - 'Boolean OR |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "and" | ' - 'Boolean AND |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "not" "x" | ' - 'Boolean NOT |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "in", "not in", "is", "is not", "<", "<=", ">", | ' - 'Comparisons, including membership |\n' - '| ">=", "!=", "==" | ' - 'tests and identity tests |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "|" | ' - 'Bitwise OR |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "^" | ' - 'Bitwise XOR |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "&" | ' - 'Bitwise AND |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "<<", ">>" | ' - 'Shifts |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "+", "-" | ' - 'Addition and subtraction |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "*", "@", "/", "//", "%" | ' - 'Multiplication, matrix multiplication |\n' - '| | ' - 'division, remainder [5] |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "+x", "-x", "~x" | ' - 'Positive, negative, bitwise NOT |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "**" | ' - 'Exponentiation [6] |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "await" "x" | ' - 'Await expression |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "x[index]", "x[index:index]", | ' - 'Subscription, slicing, call, |\n' - '| "x(arguments...)", "x.attribute" | ' - 'attribute reference |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '| "(expressions...)", "[expressions...]", "{key: | ' - 'Binding or tuple display, list |\n' - '| value...}", "{expressions...}" | ' - 'display, dictionary display, set |\n' - '| | ' - 'display |\n' - '+-------------------------------------------------+---------------------------------------+\n' - '\n' - '-[ Footnotes ]-\n' - '\n' - '[1] While "abs(x%y) < abs(y)" is true mathematically, ' - 'for floats\n' - ' it may not be true 
numerically due to roundoff. ' - 'For example, and\n' - ' assuming a platform on which a Python float is an ' - 'IEEE 754 double-\n' - ' precision number, in order that "-1e-100 % 1e100" ' - 'have the same\n' - ' sign as "1e100", the computed result is "-1e-100 + ' - '1e100", which\n' - ' is numerically exactly equal to "1e100". The ' - 'function\n' - ' "math.fmod()" returns a result whose sign matches ' - 'the sign of the\n' - ' first argument instead, and so returns "-1e-100" ' - 'in this case.\n' - ' Which approach is more appropriate depends on the ' - 'application.\n' - '\n' - '[2] If x is very close to an exact integer multiple of ' - "y, it's\n" - ' possible for "x//y" to be one larger than ' - '"(x-x%y)//y" due to\n' - ' rounding. In such cases, Python returns the ' - 'latter result, in\n' - ' order to preserve that "divmod(x,y)[0] * y + x % ' - 'y" be very close\n' - ' to "x".\n' - '\n' - '[3] While comparisons between strings make sense at ' - 'the byte\n' - ' level, they may be counter-intuitive to users. ' - 'For example, the\n' - ' strings ""\\u00C7"" and ""\\u0043\\u0327"" compare ' - 'differently, even\n' - ' though they both represent the same unicode ' - 'character (LATIN\n' - ' CAPITAL LETTER C WITH CEDILLA). To compare ' - 'strings in a human\n' - ' recognizable way, compare using ' - '"unicodedata.normalize()".\n' - '\n' - '[4] Due to automatic garbage-collection, free lists, ' - 'and the\n' - ' dynamic nature of descriptors, you may notice ' - 'seemingly unusual\n' - ' behaviour in certain uses of the "is" operator, ' - 'like those\n' - ' involving comparisons between instance methods, or ' - 'constants.\n' - ' Check their documentation for more info.\n' - '\n' - '[5] The "%" operator is also used for string ' - 'formatting; the same\n' - ' precedence applies.\n' - '\n' - '[6] The power operator "**" binds less tightly than an ' - 'arithmetic\n' - ' or bitwise unary operator on its right, that is, ' - '"2**-1" is "0.5".\n', - 'pass': '\n' - 'The "pass" statement\n' - '********************\n' - '\n' - ' pass_stmt ::= "pass"\n' - '\n' - '"pass" is a null operation --- when it is executed, nothing ' - 'happens.\n' - 'It is useful as a placeholder when a statement is required\n' - 'syntactically, but no code needs to be executed, for example:\n' - '\n' - ' def f(arg): pass # a function that does nothing (yet)\n' - '\n' - ' class C: pass # a class with no methods (yet)\n', - 'power': '\n' - 'The power operator\n' - '******************\n' - '\n' - 'The power operator binds more tightly than unary operators on ' - 'its\n' - 'left; it binds less tightly than unary operators on its right. ' - 'The\n' - 'syntax is:\n' - '\n' - ' power ::= await ["**" u_expr]\n' - '\n' - 'Thus, in an unparenthesized sequence of power and unary ' - 'operators, the\n' - 'operators are evaluated from right to left (this does not ' - 'constrain\n' - 'the evaluation order for the operands): "-1**2" results in "-1".\n' - '\n' - 'The power operator has the same semantics as the built-in ' - '"pow()"\n' - 'function, when called with two arguments: it yields its left ' - 'argument\n' - 'raised to the power of its right argument. The numeric arguments ' - 'are\n' - 'first converted to a common type, and the result is of that ' - 'type.\n' - '\n' - 'For int operands, the result has the same type as the operands ' - 'unless\n' - 'the second argument is negative; in that case, all arguments are\n' - 'converted to float and a float result is delivered. 
For example,\n' - '"10**2" returns "100", but "10**-2" returns "0.01".\n' - '\n' - 'Raising "0.0" to a negative power results in a ' - '"ZeroDivisionError".\n' - 'Raising a negative number to a fractional power results in a ' - '"complex"\n' - 'number. (In earlier versions it raised a "ValueError".)\n', - 'raise': '\n' - 'The "raise" statement\n' - '*********************\n' - '\n' - ' raise_stmt ::= "raise" [expression ["from" expression]]\n' - '\n' - 'If no expressions are present, "raise" re-raises the last ' - 'exception\n' - 'that was active in the current scope. If no exception is active ' - 'in\n' - 'the current scope, a "RuntimeError" exception is raised ' - 'indicating\n' - 'that this is an error.\n' - '\n' - 'Otherwise, "raise" evaluates the first expression as the ' - 'exception\n' - 'object. It must be either a subclass or an instance of\n' - '"BaseException". If it is a class, the exception instance will ' - 'be\n' - 'obtained when needed by instantiating the class with no ' - 'arguments.\n' - '\n' - "The *type* of the exception is the exception instance's class, " - 'the\n' - '*value* is the instance itself.\n' - '\n' - 'A traceback object is normally created automatically when an ' - 'exception\n' - 'is raised and attached to it as the "__traceback__" attribute, ' - 'which\n' - 'is writable. You can create an exception and set your own ' - 'traceback in\n' - 'one step using the "with_traceback()" exception method (which ' - 'returns\n' - 'the same exception instance, with its traceback set to its ' - 'argument),\n' - 'like so:\n' - '\n' - ' raise Exception("foo occurred").with_traceback(tracebackobj)\n' - '\n' - 'The "from" clause is used for exception chaining: if given, the ' - 'second\n' - '*expression* must be another exception class or instance, which ' - 'will\n' - 'then be attached to the raised exception as the "__cause__" ' - 'attribute\n' - '(which is writable). If the raised exception is not handled, ' - 'both\n' - 'exceptions will be printed:\n' - '\n' - ' >>> try:\n' - ' ... print(1 / 0)\n' - ' ... except Exception as exc:\n' - ' ... raise RuntimeError("Something bad happened") from exc\n' - ' ...\n' - ' Traceback (most recent call last):\n' - ' File "", line 2, in \n' - ' ZeroDivisionError: int division or modulo by zero\n' - '\n' - ' The above exception was the direct cause of the following ' - 'exception:\n' - '\n' - ' Traceback (most recent call last):\n' - ' File "", line 4, in \n' - ' RuntimeError: Something bad happened\n' - '\n' - 'A similar mechanism works implicitly if an exception is raised ' - 'inside\n' - 'an exception handler or a "finally" clause: the previous ' - 'exception is\n' - 'then attached as the new exception\'s "__context__" attribute:\n' - '\n' - ' >>> try:\n' - ' ... print(1 / 0)\n' - ' ... except:\n' - ' ... 
raise RuntimeError("Something bad happened")\n' - ' ...\n' - ' Traceback (most recent call last):\n' - ' File "", line 2, in \n' - ' ZeroDivisionError: int division or modulo by zero\n' - '\n' - ' During handling of the above exception, another exception ' - 'occurred:\n' - '\n' - ' Traceback (most recent call last):\n' - ' File "", line 4, in \n' - ' RuntimeError: Something bad happened\n' - '\n' - 'Additional information on exceptions can be found in section\n' - '*Exceptions*, and information about handling exceptions is in ' - 'section\n' - '*The try statement*.\n', - 'return': '\n' - 'The "return" statement\n' - '**********************\n' - '\n' - ' return_stmt ::= "return" [expression_list]\n' - '\n' - '"return" may only occur syntactically nested in a function ' - 'definition,\n' - 'not within a nested class definition.\n' - '\n' - 'If an expression list is present, it is evaluated, else "None" ' - 'is\n' - 'substituted.\n' - '\n' - '"return" leaves the current function call with the expression ' - 'list (or\n' - '"None") as return value.\n' - '\n' - 'When "return" passes control out of a "try" statement with a ' - '"finally"\n' - 'clause, that "finally" clause is executed before really leaving ' - 'the\n' - 'function.\n' - '\n' - 'In a generator function, the "return" statement indicates that ' - 'the\n' - 'generator is done and will cause "StopIteration" to be raised. ' - 'The\n' - 'returned value (if any) is used as an argument to construct\n' - '"StopIteration" and becomes the "StopIteration.value" ' - 'attribute.\n', - 'sequence-types': '\n' - 'Emulating container types\n' - '*************************\n' - '\n' - 'The following methods can be defined to implement ' - 'container objects.\n' - 'Containers usually are sequences (such as lists or ' - 'tuples) or mappings\n' - '(like dictionaries), but can represent other containers ' - 'as well. The\n' - 'first set of methods is used either to emulate a ' - 'sequence or to\n' - 'emulate a mapping; the difference is that for a ' - 'sequence, the\n' - 'allowable keys should be the integers *k* for which "0 ' - '<= k < N" where\n' - '*N* is the length of the sequence, or slice objects, ' - 'which define a\n' - 'range of items. It is also recommended that mappings ' - 'provide the\n' - 'methods "keys()", "values()", "items()", "get()", ' - '"clear()",\n' - '"setdefault()", "pop()", "popitem()", "copy()", and ' - '"update()"\n' - "behaving similar to those for Python's standard " - 'dictionary objects.\n' - 'The "collections" module provides a "MutableMapping" ' - 'abstract base\n' - 'class to help create those methods from a base set of ' - '"__getitem__()",\n' - '"__setitem__()", "__delitem__()", and "keys()". Mutable ' - 'sequences\n' - 'should provide methods "append()", "count()", "index()", ' - '"extend()",\n' - '"insert()", "pop()", "remove()", "reverse()" and ' - '"sort()", like Python\n' - 'standard list objects. Finally, sequence types should ' - 'implement\n' - 'addition (meaning concatenation) and multiplication ' - '(meaning\n' - 'repetition) by defining the methods "__add__()", ' - '"__radd__()",\n' - '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" ' - 'described\n' - 'below; they should not define other numerical ' - 'operators. It is\n' - 'recommended that both mappings and sequences implement ' - 'the\n' - '"__contains__()" method to allow efficient use of the ' - '"in" operator;\n' - 'for mappings, "in" should search the mapping\'s keys; ' - 'for sequences, it\n' - 'should search through the values. 
It is further ' - 'recommended that both\n' - 'mappings and sequences implement the "__iter__()" method ' - 'to allow\n' - 'efficient iteration through the container; for mappings, ' - '"__iter__()"\n' - 'should be the same as "keys()"; for sequences, it should ' - 'iterate\n' - 'through the values.\n' - '\n' - 'object.__len__(self)\n' - '\n' - ' Called to implement the built-in function "len()". ' - 'Should return\n' - ' the length of the object, an integer ">=" 0. Also, ' - 'an object that\n' - ' doesn\'t define a "__bool__()" method and whose ' - '"__len__()" method\n' - ' returns zero is considered to be false in a Boolean ' - 'context.\n' - '\n' - 'object.__length_hint__(self)\n' - '\n' - ' Called to implement "operator.length_hint()". Should ' - 'return an\n' - ' estimated length for the object (which may be greater ' - 'or less than\n' - ' the actual length). The length must be an integer ' - '">=" 0. This\n' - ' method is purely an optimization and is never ' - 'required for\n' - ' correctness.\n' - '\n' - ' New in version 3.4.\n' - '\n' - 'Note: Slicing is done exclusively with the following ' - 'three methods.\n' - ' A call like\n' - '\n' - ' a[1:2] = b\n' - '\n' - ' is translated to\n' - '\n' - ' a[slice(1, 2, None)] = b\n' - '\n' - ' and so forth. Missing slice items are always filled ' - 'in with "None".\n' - '\n' - 'object.__getitem__(self, key)\n' - '\n' - ' Called to implement evaluation of "self[key]". For ' - 'sequence types,\n' - ' the accepted keys should be integers and slice ' - 'objects. Note that\n' - ' the special interpretation of negative indexes (if ' - 'the class wishes\n' - ' to emulate a sequence type) is up to the ' - '"__getitem__()" method. If\n' - ' *key* is of an inappropriate type, "TypeError" may be ' - 'raised; if of\n' - ' a value outside the set of indexes for the sequence ' - '(after any\n' - ' special interpretation of negative values), ' - '"IndexError" should be\n' - ' raised. For mapping types, if *key* is missing (not ' - 'in the\n' - ' container), "KeyError" should be raised.\n' - '\n' - ' Note: "for" loops expect that an "IndexError" will be ' - 'raised for\n' - ' illegal indexes to allow proper detection of the ' - 'end of the\n' - ' sequence.\n' - '\n' - 'object.__missing__(self, key)\n' - '\n' - ' Called by "dict"."__getitem__()" to implement ' - '"self[key]" for dict\n' - ' subclasses when key is not in the dictionary.\n' - '\n' - 'object.__setitem__(self, key, value)\n' - '\n' - ' Called to implement assignment to "self[key]". Same ' - 'note as for\n' - ' "__getitem__()". This should only be implemented for ' - 'mappings if\n' - ' the objects support changes to the values for keys, ' - 'or if new keys\n' - ' can be added, or for sequences if elements can be ' - 'replaced. The\n' - ' same exceptions should be raised for improper *key* ' - 'values as for\n' - ' the "__getitem__()" method.\n' - '\n' - 'object.__delitem__(self, key)\n' - '\n' - ' Called to implement deletion of "self[key]". Same ' - 'note as for\n' - ' "__getitem__()". This should only be implemented for ' - 'mappings if\n' - ' the objects support removal of keys, or for sequences ' - 'if elements\n' - ' can be removed from the sequence. 
The same ' - 'exceptions should be\n' - ' raised for improper *key* values as for the ' - '"__getitem__()" method.\n' - '\n' - 'object.__iter__(self)\n' - '\n' - ' This method is called when an iterator is required ' - 'for a container.\n' - ' This method should return a new iterator object that ' - 'can iterate\n' - ' over all the objects in the container. For mappings, ' - 'it should\n' - ' iterate over the keys of the container.\n' - '\n' - ' Iterator objects also need to implement this method; ' - 'they are\n' - ' required to return themselves. For more information ' - 'on iterator\n' - ' objects, see *Iterator Types*.\n' - '\n' - 'object.__reversed__(self)\n' - '\n' - ' Called (if present) by the "reversed()" built-in to ' - 'implement\n' - ' reverse iteration. It should return a new iterator ' - 'object that\n' - ' iterates over all the objects in the container in ' - 'reverse order.\n' - '\n' - ' If the "__reversed__()" method is not provided, the ' - '"reversed()"\n' - ' built-in will fall back to using the sequence ' - 'protocol ("__len__()"\n' - ' and "__getitem__()"). Objects that support the ' - 'sequence protocol\n' - ' should only provide "__reversed__()" if they can ' - 'provide an\n' - ' implementation that is more efficient than the one ' - 'provided by\n' - ' "reversed()".\n' - '\n' - 'The membership test operators ("in" and "not in") are ' - 'normally\n' - 'implemented as an iteration through a sequence. ' - 'However, container\n' - 'objects can supply the following special method with a ' - 'more efficient\n' - 'implementation, which also does not require the object ' - 'be a sequence.\n' - '\n' - 'object.__contains__(self, item)\n' - '\n' - ' Called to implement membership test operators. ' - 'Should return true\n' - ' if *item* is in *self*, false otherwise. For mapping ' - 'objects, this\n' - ' should consider the keys of the mapping rather than ' - 'the values or\n' - ' the key-item pairs.\n' - '\n' - ' For objects that don\'t define "__contains__()", the ' - 'membership test\n' - ' first tries iteration via "__iter__()", then the old ' - 'sequence\n' - ' iteration protocol via "__getitem__()", see *this ' - 'section in the\n' - ' language reference*.\n', - 'shifting': '\n' - 'Shifting operations\n' - '*******************\n' - '\n' - 'The shifting operations have lower priority than the ' - 'arithmetic\n' - 'operations:\n' - '\n' - ' shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n' - '\n' - 'These operators accept integers as arguments. They shift the ' - 'first\n' - 'argument to the left or right by the number of bits given by ' - 'the\n' - 'second argument.\n' - '\n' - 'A right shift by *n* bits is defined as floor division by ' - '"pow(2,n)".\n' - 'A left shift by *n* bits is defined as multiplication with ' - '"pow(2,n)".\n' - '\n' - 'Note: In the current implementation, the right-hand operand ' - 'is\n' - ' required to be at most "sys.maxsize". If the right-hand ' - 'operand is\n' - ' larger than "sys.maxsize" an "OverflowError" exception is ' - 'raised.\n', - 'slicings': '\n' - 'Slicings\n' - '********\n' - '\n' - 'A slicing selects a range of items in a sequence object (e.g., ' - 'a\n' - 'string, tuple or list). Slicings may be used as expressions ' - 'or as\n' - 'targets in assignment or "del" statements. 
The syntax for a ' - 'slicing:\n' - '\n' - ' slicing ::= primary "[" slice_list "]"\n' - ' slice_list ::= slice_item ("," slice_item)* [","]\n' - ' slice_item ::= expression | proper_slice\n' - ' proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" ' - '[stride] ]\n' - ' lower_bound ::= expression\n' - ' upper_bound ::= expression\n' - ' stride ::= expression\n' - '\n' - 'There is ambiguity in the formal syntax here: anything that ' - 'looks like\n' - 'an expression list also looks like a slice list, so any ' - 'subscription\n' - 'can be interpreted as a slicing. Rather than further ' - 'complicating the\n' - 'syntax, this is disambiguated by defining that in this case ' - 'the\n' - 'interpretation as a subscription takes priority over the\n' - 'interpretation as a slicing (this is the case if the slice ' - 'list\n' - 'contains no proper slice).\n' - '\n' - 'The semantics for a slicing are as follows. The primary is ' - 'indexed\n' - '(using the same "__getitem__()" method as normal subscription) ' - 'with a\n' - 'key that is constructed from the slice list, as follows. If ' - 'the slice\n' - 'list contains at least one comma, the key is a tuple ' - 'containing the\n' - 'conversion of the slice items; otherwise, the conversion of ' - 'the lone\n' - 'slice item is the key. The conversion of a slice item that is ' - 'an\n' - 'expression is that expression. The conversion of a proper ' - 'slice is a\n' - 'slice object (see section *The standard type hierarchy*) ' - 'whose\n' - '"start", "stop" and "step" attributes are the values of the\n' - 'expressions given as lower bound, upper bound and stride,\n' - 'respectively, substituting "None" for missing expressions.\n', - 'specialattrs': '\n' - 'Special Attributes\n' - '******************\n' - '\n' - 'The implementation adds a few special read-only attributes ' - 'to several\n' - 'object types, where they are relevant. Some of these are ' - 'not reported\n' - 'by the "dir()" built-in function.\n' - '\n' - 'object.__dict__\n' - '\n' - ' A dictionary or other mapping object used to store an ' - "object's\n" - ' (writable) attributes.\n' - '\n' - 'instance.__class__\n' - '\n' - ' The class to which a class instance belongs.\n' - '\n' - 'class.__bases__\n' - '\n' - ' The tuple of base classes of a class object.\n' - '\n' - 'class.__name__\n' - '\n' - ' The name of the class or type.\n' - '\n' - 'class.__qualname__\n' - '\n' - ' The *qualified name* of the class or type.\n' - '\n' - ' New in version 3.3.\n' - '\n' - 'class.__mro__\n' - '\n' - ' This attribute is a tuple of classes that are ' - 'considered when\n' - ' looking for base classes during method resolution.\n' - '\n' - 'class.mro()\n' - '\n' - ' This method can be overridden by a metaclass to ' - 'customize the\n' - ' method resolution order for its instances. It is ' - 'called at class\n' - ' instantiation, and its result is stored in "__mro__".\n' - '\n' - 'class.__subclasses__()\n' - '\n' - ' Each class keeps a list of weak references to its ' - 'immediate\n' - ' subclasses. This method returns a list of all those ' - 'references\n' - ' still alive. 
Example:\n' - '\n' - ' >>> int.__subclasses__()\n' - " []\n" - '\n' - '-[ Footnotes ]-\n' - '\n' - '[1] Additional information on these special methods may be ' - 'found\n' - ' in the Python Reference Manual (*Basic ' - 'customization*).\n' - '\n' - '[2] As a consequence, the list "[1, 2]" is considered ' - 'equal to\n' - ' "[1.0, 2.0]", and similarly for tuples.\n' - '\n' - "[3] They must have since the parser can't tell the type of " - 'the\n' - ' operands.\n' - '\n' - '[4] Cased characters are those with general category ' - 'property\n' - ' being one of "Lu" (Letter, uppercase), "Ll" (Letter, ' - 'lowercase),\n' - ' or "Lt" (Letter, titlecase).\n' - '\n' - '[5] To format only a tuple you should therefore provide a\n' - ' singleton tuple whose only element is the tuple to be ' - 'formatted.\n', - 'specialnames': '\n' - 'Special method names\n' - '********************\n' - '\n' - 'A class can implement certain operations that are invoked ' - 'by special\n' - 'syntax (such as arithmetic operations or subscripting and ' - 'slicing) by\n' - "defining methods with special names. This is Python's " - 'approach to\n' - '*operator overloading*, allowing classes to define their ' - 'own behavior\n' - 'with respect to language operators. For instance, if a ' - 'class defines\n' - 'a method named "__getitem__()", and "x" is an instance of ' - 'this class,\n' - 'then "x[i]" is roughly equivalent to ' - '"type(x).__getitem__(x, i)".\n' - 'Except where mentioned, attempts to execute an operation ' - 'raise an\n' - 'exception when no appropriate method is defined ' - '(typically\n' - '"AttributeError" or "TypeError").\n' - '\n' - 'When implementing a class that emulates any built-in type, ' - 'it is\n' - 'important that the emulation only be implemented to the ' - 'degree that it\n' - 'makes sense for the object being modelled. For example, ' - 'some\n' - 'sequences may work well with retrieval of individual ' - 'elements, but\n' - 'extracting a slice may not make sense. (One example of ' - 'this is the\n' - '"NodeList" interface in the W3C\'s Document Object ' - 'Model.)\n' - '\n' - '\n' - 'Basic customization\n' - '===================\n' - '\n' - 'object.__new__(cls[, ...])\n' - '\n' - ' Called to create a new instance of class *cls*. ' - '"__new__()" is a\n' - ' static method (special-cased so you need not declare it ' - 'as such)\n' - ' that takes the class of which an instance was requested ' - 'as its\n' - ' first argument. The remaining arguments are those ' - 'passed to the\n' - ' object constructor expression (the call to the class). 
' - 'The return\n' - ' value of "__new__()" should be the new object instance ' - '(usually an\n' - ' instance of *cls*).\n' - '\n' - ' Typical implementations create a new instance of the ' - 'class by\n' - ' invoking the superclass\'s "__new__()" method using\n' - ' "super(currentclass, cls).__new__(cls[, ...])" with ' - 'appropriate\n' - ' arguments and then modifying the newly-created instance ' - 'as\n' - ' necessary before returning it.\n' - '\n' - ' If "__new__()" returns an instance of *cls*, then the ' - 'new\n' - ' instance\'s "__init__()" method will be invoked like\n' - ' "__init__(self[, ...])", where *self* is the new ' - 'instance and the\n' - ' remaining arguments are the same as were passed to ' - '"__new__()".\n' - '\n' - ' If "__new__()" does not return an instance of *cls*, ' - 'then the new\n' - ' instance\'s "__init__()" method will not be invoked.\n' - '\n' - ' "__new__()" is intended mainly to allow subclasses of ' - 'immutable\n' - ' types (like int, str, or tuple) to customize instance ' - 'creation. It\n' - ' is also commonly overridden in custom metaclasses in ' - 'order to\n' - ' customize class creation.\n' - '\n' - 'object.__init__(self[, ...])\n' - '\n' - ' Called after the instance has been created (by ' - '"__new__()"), but\n' - ' before it is returned to the caller. The arguments are ' - 'those\n' - ' passed to the class constructor expression. If a base ' - 'class has an\n' - ' "__init__()" method, the derived class\'s "__init__()" ' - 'method, if\n' - ' any, must explicitly call it to ensure proper ' - 'initialization of the\n' - ' base class part of the instance; for example:\n' - ' "BaseClass.__init__(self, [args...])".\n' - '\n' - ' Because "__new__()" and "__init__()" work together in ' - 'constructing\n' - ' objects ("__new__()" to create it, and "__init__()" to ' - 'customise\n' - ' it), no non-"None" value may be returned by ' - '"__init__()"; doing so\n' - ' will cause a "TypeError" to be raised at runtime.\n' - '\n' - 'object.__del__(self)\n' - '\n' - ' Called when the instance is about to be destroyed. ' - 'This is also\n' - ' called a destructor. If a base class has a "__del__()" ' - 'method, the\n' - ' derived class\'s "__del__()" method, if any, must ' - 'explicitly call it\n' - ' to ensure proper deletion of the base class part of the ' - 'instance.\n' - ' Note that it is possible (though not recommended!) for ' - 'the\n' - ' "__del__()" method to postpone destruction of the ' - 'instance by\n' - ' creating a new reference to it. It may then be called ' - 'at a later\n' - ' time when this new reference is deleted. It is not ' - 'guaranteed that\n' - ' "__del__()" methods are called for objects that still ' - 'exist when\n' - ' the interpreter exits.\n' - '\n' - ' Note: "del x" doesn\'t directly call "x.__del__()" --- ' - 'the former\n' - ' decrements the reference count for "x" by one, and ' - 'the latter is\n' - ' only called when "x"\'s reference count reaches ' - 'zero. 
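A small, hedged sketch of the "__new__()"/"__init__()" cooperation described above, subclassing an immutable type; the name "Point" is illustrative and not part of the standard library:

    class Point(tuple):
        # __init__() would be too late here: a tuple's contents are fixed
        # once the instance exists, so customisation happens in __new__().
        def __new__(cls, x, y):
            return super().__new__(cls, (x, y))

    p = Point(2, 3)
    print(p)                     # (2, 3)
    print(isinstance(p, tuple))  # True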
Some common\n' - ' situations that may prevent the reference count of an ' - 'object from\n' - ' going to zero include: circular references between ' - 'objects (e.g.,\n' - ' a doubly-linked list or a tree data structure with ' - 'parent and\n' - ' child pointers); a reference to the object on the ' - 'stack frame of\n' - ' a function that caught an exception (the traceback ' - 'stored in\n' - ' "sys.exc_info()[2]" keeps the stack frame alive); or ' - 'a reference\n' - ' to the object on the stack frame that raised an ' - 'unhandled\n' - ' exception in interactive mode (the traceback stored ' - 'in\n' - ' "sys.last_traceback" keeps the stack frame alive). ' - 'The first\n' - ' situation can only be remedied by explicitly breaking ' - 'the cycles;\n' - ' the second can be resolved by freeing the reference ' - 'to the\n' - ' traceback object when it is no longer useful, and the ' - 'third can\n' - ' be resolved by storing "None" in ' - '"sys.last_traceback". Circular\n' - ' references which are garbage are detected and cleaned ' - 'up when the\n' - " cyclic garbage collector is enabled (it's on by " - 'default). Refer\n' - ' to the documentation for the "gc" module for more ' - 'information\n' - ' about this topic.\n' - '\n' - ' Warning: Due to the precarious circumstances under ' - 'which\n' - ' "__del__()" methods are invoked, exceptions that ' - 'occur during\n' - ' their execution are ignored, and a warning is printed ' - 'to\n' - ' "sys.stderr" instead. Also, when "__del__()" is ' - 'invoked in\n' - ' response to a module being deleted (e.g., when ' - 'execution of the\n' - ' program is done), other globals referenced by the ' - '"__del__()"\n' - ' method may already have been deleted or in the ' - 'process of being\n' - ' torn down (e.g. the import machinery shutting down). ' - 'For this\n' - ' reason, "__del__()" methods should do the absolute ' - 'minimum needed\n' - ' to maintain external invariants. Starting with ' - 'version 1.5,\n' - ' Python guarantees that globals whose name begins with ' - 'a single\n' - ' underscore are deleted from their module before other ' - 'globals are\n' - ' deleted; if no other references to such globals ' - 'exist, this may\n' - ' help in assuring that imported modules are still ' - 'available at the\n' - ' time when the "__del__()" method is called.\n' - '\n' - 'object.__repr__(self)\n' - '\n' - ' Called by the "repr()" built-in function to compute the ' - '"official"\n' - ' string representation of an object. If at all ' - 'possible, this\n' - ' should look like a valid Python expression that could ' - 'be used to\n' - ' recreate an object with the same value (given an ' - 'appropriate\n' - ' environment). If this is not possible, a string of the ' - 'form\n' - ' "<...some useful description...>" should be returned. ' - 'The return\n' - ' value must be a string object. If a class defines ' - '"__repr__()" but\n' - ' not "__str__()", then "__repr__()" is also used when an ' - '"informal"\n' - ' string representation of instances of that class is ' - 'required.\n' - '\n' - ' This is typically used for debugging, so it is ' - 'important that the\n' - ' representation is information-rich and unambiguous.\n' - '\n' - 'object.__str__(self)\n' - '\n' - ' Called by "str(object)" and the built-in functions ' - '"format()" and\n' - ' "print()" to compute the "informal" or nicely printable ' - 'string\n' - ' representation of an object. 
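A short sketch of the difference between "__repr__()" and "__str__()" described above; "Ratio" is an illustrative name:

    class Ratio:
        def __init__(self, num, den):
            self.num, self.den = num, den
        def __repr__(self):
            # Unambiguous, ideally eval()-able form, used for debugging.
            return 'Ratio(%r, %r)' % (self.num, self.den)
        def __str__(self):
            # Friendlier form used by print() and str().
            return '%d/%d' % (self.num, self.den)

    r = Ratio(1, 3)
    print(repr(r))   # Ratio(1, 3)
    print(r)         # 1/3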
The return value must be ' - 'a *string*\n' - ' object.\n' - '\n' - ' This method differs from "object.__repr__()" in that ' - 'there is no\n' - ' expectation that "__str__()" return a valid Python ' - 'expression: a\n' - ' more convenient or concise representation can be used.\n' - '\n' - ' The default implementation defined by the built-in type ' - '"object"\n' - ' calls "object.__repr__()".\n' - '\n' - 'object.__bytes__(self)\n' - '\n' - ' Called by "bytes()" to compute a byte-string ' - 'representation of an\n' - ' object. This should return a "bytes" object.\n' - '\n' - 'object.__format__(self, format_spec)\n' - '\n' - ' Called by the "format()" built-in function (and by ' - 'extension, the\n' - ' "str.format()" method of class "str") to produce a ' - '"formatted"\n' - ' string representation of an object. The "format_spec" ' - 'argument is a\n' - ' string that contains a description of the formatting ' - 'options\n' - ' desired. The interpretation of the "format_spec" ' - 'argument is up to\n' - ' the type implementing "__format__()", however most ' - 'classes will\n' - ' either delegate formatting to one of the built-in ' - 'types, or use a\n' - ' similar formatting option syntax.\n' - '\n' - ' See *Format Specification Mini-Language* for a ' - 'description of the\n' - ' standard formatting syntax.\n' - '\n' - ' The return value must be a string object.\n' - '\n' - ' Changed in version 3.4: The __format__ method of ' - '"object" itself\n' - ' raises a "TypeError" if passed any non-empty string.\n' - '\n' - 'object.__lt__(self, other)\n' - 'object.__le__(self, other)\n' - 'object.__eq__(self, other)\n' - 'object.__ne__(self, other)\n' - 'object.__gt__(self, other)\n' - 'object.__ge__(self, other)\n' - '\n' - ' These are the so-called "rich comparison" methods. The\n' - ' correspondence between operator symbols and method ' - 'names is as\n' - ' follows: "xy" calls\n' - ' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n' - '\n' - ' A rich comparison method may return the singleton ' - '"NotImplemented"\n' - ' if it does not implement the operation for a given pair ' - 'of\n' - ' arguments. By convention, "False" and "True" are ' - 'returned for a\n' - ' successful comparison. However, these methods can ' - 'return any value,\n' - ' so if the comparison operator is used in a Boolean ' - 'context (e.g.,\n' - ' in the condition of an "if" statement), Python will ' - 'call "bool()"\n' - ' on the value to determine if the result is true or ' - 'false.\n' - '\n' - ' By default, "__ne__()" delegates to "__eq__()" and ' - 'inverts the\n' - ' result unless it is "NotImplemented". There are no ' - 'other implied\n' - ' relationships among the comparison operators, for ' - 'example, the\n' - ' truth of "(x.__hash__".\n' - '\n' - ' If a class that does not override "__eq__()" wishes to ' - 'suppress\n' - ' hash support, it should include "__hash__ = None" in ' - 'the class\n' - ' definition. 
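A hedged sketch of the rich-comparison protocol above: returning "NotImplemented" for unsupported operands lets Python try the reflected method, and the standard-library helper "functools.total_ordering" can supply the remaining comparisons; the "Version" class itself is illustrative:

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, major, minor):
            self.key = (major, minor)
        def __eq__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return self.key == other.key
        def __lt__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return self.key < other.key
        def __hash__(self):
            # Defining __eq__() alone would otherwise set __hash__ to None.
            return hash(self.key)

    print(Version(3, 5) < Version(3, 6))    # True
    print(Version(3, 5) >= Version(3, 6))   # False -- supplied by total_ordering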
A class which defines its own "__hash__()" ' - 'that\n' - ' explicitly raises a "TypeError" would be incorrectly ' - 'identified as\n' - ' hashable by an "isinstance(obj, collections.Hashable)" ' - 'call.\n' - '\n' - ' Note: By default, the "__hash__()" values of str, bytes ' - 'and\n' - ' datetime objects are "salted" with an unpredictable ' - 'random value.\n' - ' Although they remain constant within an individual ' - 'Python\n' - ' process, they are not predictable between repeated ' - 'invocations of\n' - ' Python.This is intended to provide protection against ' - 'a denial-\n' - ' of-service caused by carefully-chosen inputs that ' - 'exploit the\n' - ' worst case performance of a dict insertion, O(n^2) ' - 'complexity.\n' - ' See ' - 'http://www.ocert.org/advisories/ocert-2011-003.html for\n' - ' details.Changing hash values affects the iteration ' - 'order of\n' - ' dicts, sets and other mappings. Python has never ' - 'made guarantees\n' - ' about this ordering (and it typically varies between ' - '32-bit and\n' - ' 64-bit builds).See also "PYTHONHASHSEED".\n' - '\n' - ' Changed in version 3.3: Hash randomization is enabled ' - 'by default.\n' - '\n' - 'object.__bool__(self)\n' - '\n' - ' Called to implement truth value testing and the ' - 'built-in operation\n' - ' "bool()"; should return "False" or "True". When this ' - 'method is not\n' - ' defined, "__len__()" is called, if it is defined, and ' - 'the object is\n' - ' considered true if its result is nonzero. If a class ' - 'defines\n' - ' neither "__len__()" nor "__bool__()", all its instances ' - 'are\n' - ' considered true.\n' - '\n' - '\n' - 'Customizing attribute access\n' - '============================\n' - '\n' - 'The following methods can be defined to customize the ' - 'meaning of\n' - 'attribute access (use of, assignment to, or deletion of ' - '"x.name") for\n' - 'class instances.\n' - '\n' - 'object.__getattr__(self, name)\n' - '\n' - ' Called when an attribute lookup has not found the ' - 'attribute in the\n' - ' usual places (i.e. it is not an instance attribute nor ' - 'is it found\n' - ' in the class tree for "self"). "name" is the attribute ' - 'name. This\n' - ' method should return the (computed) attribute value or ' - 'raise an\n' - ' "AttributeError" exception.\n' - '\n' - ' Note that if the attribute is found through the normal ' - 'mechanism,\n' - ' "__getattr__()" is not called. (This is an intentional ' - 'asymmetry\n' - ' between "__getattr__()" and "__setattr__()".) This is ' - 'done both for\n' - ' efficiency reasons and because otherwise ' - '"__getattr__()" would have\n' - ' no way to access other attributes of the instance. ' - 'Note that at\n' - ' least for instance variables, you can fake total ' - 'control by not\n' - ' inserting any values in the instance attribute ' - 'dictionary (but\n' - ' instead inserting them in another object). See the\n' - ' "__getattribute__()" method below for a way to actually ' - 'get total\n' - ' control over attribute access.\n' - '\n' - 'object.__getattribute__(self, name)\n' - '\n' - ' Called unconditionally to implement attribute accesses ' - 'for\n' - ' instances of the class. If the class also defines ' - '"__getattr__()",\n' - ' the latter will not be called unless ' - '"__getattribute__()" either\n' - ' calls it explicitly or raises an "AttributeError". This ' - 'method\n' - ' should return the (computed) attribute value or raise ' - 'an\n' - ' "AttributeError" exception. 
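A minimal sketch of the "__getattr__()" fallback described above; the "Record" class is illustrative:

    class Record:
        def __init__(self, **fields):
            self._fields = dict(fields)
        def __getattr__(self, name):
            # Only reached when normal attribute lookup has already failed.
            try:
                return self._fields[name]
            except KeyError:
                raise AttributeError(name)

    r = Record(spam=1)
    print(r.spam)      # 1 -- served by __getattr__()
    print(r._fields)   # {'spam': 1} -- found normally, __getattr__() not called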
In order to avoid infinite ' - 'recursion in\n' - ' this method, its implementation should always call the ' - 'base class\n' - ' method with the same name to access any attributes it ' - 'needs, for\n' - ' example, "object.__getattribute__(self, name)".\n' - '\n' - ' Note: This method may still be bypassed when looking up ' - 'special\n' - ' methods as the result of implicit invocation via ' - 'language syntax\n' - ' or built-in functions. See *Special method lookup*.\n' - '\n' - 'object.__setattr__(self, name, value)\n' - '\n' - ' Called when an attribute assignment is attempted. This ' - 'is called\n' - ' instead of the normal mechanism (i.e. store the value ' - 'in the\n' - ' instance dictionary). *name* is the attribute name, ' - '*value* is the\n' - ' value to be assigned to it.\n' - '\n' - ' If "__setattr__()" wants to assign to an instance ' - 'attribute, it\n' - ' should call the base class method with the same name, ' - 'for example,\n' - ' "object.__setattr__(self, name, value)".\n' - '\n' - 'object.__delattr__(self, name)\n' - '\n' - ' Like "__setattr__()" but for attribute deletion instead ' - 'of\n' - ' assignment. This should only be implemented if "del ' - 'obj.name" is\n' - ' meaningful for the object.\n' - '\n' - 'object.__dir__(self)\n' - '\n' - ' Called when "dir()" is called on the object. A sequence ' - 'must be\n' - ' returned. "dir()" converts the returned sequence to a ' - 'list and\n' - ' sorts it.\n' - '\n' - '\n' - 'Implementing Descriptors\n' - '------------------------\n' - '\n' - 'The following methods only apply when an instance of the ' - 'class\n' - 'containing the method (a so-called *descriptor* class) ' - 'appears in an\n' - '*owner* class (the descriptor must be in either the ' - "owner's class\n" - 'dictionary or in the class dictionary for one of its ' - 'parents). In the\n' - 'examples below, "the attribute" refers to the attribute ' - 'whose name is\n' - 'the key of the property in the owner class\' "__dict__".\n' - '\n' - 'object.__get__(self, instance, owner)\n' - '\n' - ' Called to get the attribute of the owner class (class ' - 'attribute\n' - ' access) or of an instance of that class (instance ' - 'attribute\n' - ' access). *owner* is always the owner class, while ' - '*instance* is the\n' - ' instance that the attribute was accessed through, or ' - '"None" when\n' - ' the attribute is accessed through the *owner*. This ' - 'method should\n' - ' return the (computed) attribute value or raise an ' - '"AttributeError"\n' - ' exception.\n' - '\n' - 'object.__set__(self, instance, value)\n' - '\n' - ' Called to set the attribute on an instance *instance* ' - 'of the owner\n' - ' class to a new value, *value*.\n' - '\n' - 'object.__delete__(self, instance)\n' - '\n' - ' Called to delete the attribute on an instance ' - '*instance* of the\n' - ' owner class.\n' - '\n' - 'The attribute "__objclass__" is interpreted by the ' - '"inspect" module as\n' - 'specifying the class where this object was defined ' - '(setting this\n' - 'appropriately can assist in runtime introspection of ' - 'dynamic class\n' - 'attributes). 
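A compact sketch of a data descriptor using the "__get__()" and "__set__()" methods described above; the names are illustrative, and the value is stored directly in the instance "__dict__" so that no recursive attribute access occurs:

    class Positive:
        def __init__(self, name):
            self.name = name
        def __get__(self, instance, owner):
            if instance is None:
                return self            # accessed on the class itself
            return instance.__dict__[self.name]
        def __set__(self, instance, value):
            if value <= 0:
                raise ValueError('%s must be positive' % self.name)
            instance.__dict__[self.name] = value

    class Order:
        quantity = Positive('quantity')

    o = Order()
    o.quantity = 5
    print(o.quantity)   # 5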
For callables, it may indicate that an ' - 'instance of the\n' - 'given type (or a subclass) is expected or required as the ' - 'first\n' - 'positional argument (for example, CPython sets this ' - 'attribute for\n' - 'unbound methods that are implemented in C).\n' - '\n' - '\n' - 'Invoking Descriptors\n' - '--------------------\n' - '\n' - 'In general, a descriptor is an object attribute with ' - '"binding\n' - 'behavior", one whose attribute access has been overridden ' - 'by methods\n' - 'in the descriptor protocol: "__get__()", "__set__()", ' - 'and\n' - '"__delete__()". If any of those methods are defined for an ' - 'object, it\n' - 'is said to be a descriptor.\n' - '\n' - 'The default behavior for attribute access is to get, set, ' - 'or delete\n' - "the attribute from an object's dictionary. For instance, " - '"a.x" has a\n' - 'lookup chain starting with "a.__dict__[\'x\']", then\n' - '"type(a).__dict__[\'x\']", and continuing through the base ' - 'classes of\n' - '"type(a)" excluding metaclasses.\n' - '\n' - 'However, if the looked-up value is an object defining one ' - 'of the\n' - 'descriptor methods, then Python may override the default ' - 'behavior and\n' - 'invoke the descriptor method instead. Where this occurs ' - 'in the\n' - 'precedence chain depends on which descriptor methods were ' - 'defined and\n' - 'how they were called.\n' - '\n' - 'The starting point for descriptor invocation is a binding, ' - '"a.x". How\n' - 'the arguments are assembled depends on "a":\n' - '\n' - 'Direct Call\n' - ' The simplest and least common call is when user code ' - 'directly\n' - ' invokes a descriptor method: "x.__get__(a)".\n' - '\n' - 'Instance Binding\n' - ' If binding to an object instance, "a.x" is transformed ' - 'into the\n' - ' call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n' - '\n' - 'Class Binding\n' - ' If binding to a class, "A.x" is transformed into the ' - 'call:\n' - ' "A.__dict__[\'x\'].__get__(None, A)".\n' - '\n' - 'Super Binding\n' - ' If "a" is an instance of "super", then the binding ' - '"super(B,\n' - ' obj).m()" searches "obj.__class__.__mro__" for the base ' - 'class "A"\n' - ' immediately preceding "B" and then invokes the ' - 'descriptor with the\n' - ' call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n' - '\n' - 'For instance bindings, the precedence of descriptor ' - 'invocation depends\n' - 'on the which descriptor methods are defined. A descriptor ' - 'can define\n' - 'any combination of "__get__()", "__set__()" and ' - '"__delete__()". If it\n' - 'does not define "__get__()", then accessing the attribute ' - 'will return\n' - 'the descriptor object itself unless there is a value in ' - "the object's\n" - 'instance dictionary. If the descriptor defines ' - '"__set__()" and/or\n' - '"__delete__()", it is a data descriptor; if it defines ' - 'neither, it is\n' - 'a non-data descriptor. Normally, data descriptors define ' - 'both\n' - '"__get__()" and "__set__()", while non-data descriptors ' - 'have just the\n' - '"__get__()" method. Data descriptors with "__set__()" and ' - '"__get__()"\n' - 'defined always override a redefinition in an instance ' - 'dictionary. In\n' - 'contrast, non-data descriptors can be overridden by ' - 'instances.\n' - '\n' - 'Python methods (including "staticmethod()" and ' - '"classmethod()") are\n' - 'implemented as non-data descriptors. Accordingly, ' - 'instances can\n' - 'redefine and override methods. 
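As a brief sketch of that precedence (class and attribute names are illustrative): an instance attribute can shadow a method, which is a non-data descriptor, but not a property, which is a data descriptor:

    class C:
        def greet(self):
            return 'method'
        @property
        def tag(self):
            return 'property'

    c = C()
    c.__dict__['greet'] = lambda: 'instance wins'
    print(c.greet())        # instance wins -- non-data descriptor is overridden
    try:
        c.tag = 'shadow'
    except AttributeError:
        print('cannot shadow a data descriptor')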
This allows individual ' - 'instances to\n' - 'acquire behaviors that differ from other instances of the ' - 'same class.\n' - '\n' - 'The "property()" function is implemented as a data ' - 'descriptor.\n' - 'Accordingly, instances cannot override the behavior of a ' - 'property.\n' - '\n' - '\n' - '__slots__\n' - '---------\n' - '\n' - 'By default, instances of classes have a dictionary for ' - 'attribute\n' - 'storage. This wastes space for objects having very few ' - 'instance\n' - 'variables. The space consumption can become acute when ' - 'creating large\n' - 'numbers of instances.\n' - '\n' - 'The default can be overridden by defining *__slots__* in a ' - 'class\n' - 'definition. The *__slots__* declaration takes a sequence ' - 'of instance\n' - 'variables and reserves just enough space in each instance ' - 'to hold a\n' - 'value for each variable. Space is saved because ' - '*__dict__* is not\n' - 'created for each instance.\n' - '\n' - 'object.__slots__\n' - '\n' - ' This class variable can be assigned a string, iterable, ' - 'or sequence\n' - ' of strings with variable names used by instances. ' - '*__slots__*\n' - ' reserves space for the declared variables and prevents ' - 'the\n' - ' automatic creation of *__dict__* and *__weakref__* for ' - 'each\n' - ' instance.\n' - '\n' - '\n' - 'Notes on using *__slots__*\n' - '~~~~~~~~~~~~~~~~~~~~~~~~~~\n' - '\n' - '* When inheriting from a class without *__slots__*, the ' - '*__dict__*\n' - ' attribute of that class will always be accessible, so a ' - '*__slots__*\n' - ' definition in the subclass is meaningless.\n' - '\n' - '* Without a *__dict__* variable, instances cannot be ' - 'assigned new\n' - ' variables not listed in the *__slots__* definition. ' - 'Attempts to\n' - ' assign to an unlisted variable name raises ' - '"AttributeError". If\n' - ' dynamic assignment of new variables is desired, then ' - 'add\n' - ' "\'__dict__\'" to the sequence of strings in the ' - '*__slots__*\n' - ' declaration.\n' - '\n' - '* Without a *__weakref__* variable for each instance, ' - 'classes\n' - ' defining *__slots__* do not support weak references to ' - 'its\n' - ' instances. If weak reference support is needed, then ' - 'add\n' - ' "\'__weakref__\'" to the sequence of strings in the ' - '*__slots__*\n' - ' declaration.\n' - '\n' - '* *__slots__* are implemented at the class level by ' - 'creating\n' - ' descriptors (*Implementing Descriptors*) for each ' - 'variable name. As\n' - ' a result, class attributes cannot be used to set default ' - 'values for\n' - ' instance variables defined by *__slots__*; otherwise, ' - 'the class\n' - ' attribute would overwrite the descriptor assignment.\n' - '\n' - '* The action of a *__slots__* declaration is limited to ' - 'the class\n' - ' where it is defined. As a result, subclasses will have ' - 'a *__dict__*\n' - ' unless they also define *__slots__* (which must only ' - 'contain names\n' - ' of any *additional* slots).\n' - '\n' - '* If a class defines a slot also defined in a base class, ' - 'the\n' - ' instance variable defined by the base class slot is ' - 'inaccessible\n' - ' (except by retrieving its descriptor directly from the ' - 'base class).\n' - ' This renders the meaning of the program undefined. In ' - 'the future, a\n' - ' check may be added to prevent this.\n' - '\n' - '* Nonempty *__slots__* does not work for classes derived ' - 'from\n' - ' "variable-length" built-in types such as "int", "bytes" ' - 'and "tuple".\n' - '\n' - '* Any non-string iterable may be assigned to *__slots__*. 
' - 'Mappings\n' - ' may also be used; however, in the future, special ' - 'meaning may be\n' - ' assigned to the values corresponding to each key.\n' - '\n' - '* *__class__* assignment works only if both classes have ' - 'the same\n' - ' *__slots__*.\n' - '\n' - '\n' - 'Customizing class creation\n' - '==========================\n' - '\n' - 'By default, classes are constructed using "type()". The ' - 'class body is\n' - 'executed in a new namespace and the class name is bound ' - 'locally to the\n' - 'result of "type(name, bases, namespace)".\n' - '\n' - 'The class creation process can be customised by passing ' - 'the\n' - '"metaclass" keyword argument in the class definition line, ' - 'or by\n' - 'inheriting from an existing class that included such an ' - 'argument. In\n' - 'the following example, both "MyClass" and "MySubclass" are ' - 'instances\n' - 'of "Meta":\n' - '\n' - ' class Meta(type):\n' - ' pass\n' - '\n' - ' class MyClass(metaclass=Meta):\n' - ' pass\n' - '\n' - ' class MySubclass(MyClass):\n' - ' pass\n' - '\n' - 'Any other keyword arguments that are specified in the ' - 'class definition\n' - 'are passed through to all metaclass operations described ' - 'below.\n' - '\n' - 'When a class definition is executed, the following steps ' - 'occur:\n' - '\n' - '* the appropriate metaclass is determined\n' - '\n' - '* the class namespace is prepared\n' - '\n' - '* the class body is executed\n' - '\n' - '* the class object is created\n' - '\n' - '\n' - 'Determining the appropriate metaclass\n' - '-------------------------------------\n' - '\n' - 'The appropriate metaclass for a class definition is ' - 'determined as\n' - 'follows:\n' - '\n' - '* if no bases and no explicit metaclass are given, then ' - '"type()" is\n' - ' used\n' - '\n' - '* if an explicit metaclass is given and it is *not* an ' - 'instance of\n' - ' "type()", then it is used directly as the metaclass\n' - '\n' - '* if an instance of "type()" is given as the explicit ' - 'metaclass, or\n' - ' bases are defined, then the most derived metaclass is ' - 'used\n' - '\n' - 'The most derived metaclass is selected from the explicitly ' - 'specified\n' - 'metaclass (if any) and the metaclasses (i.e. "type(cls)") ' - 'of all\n' - 'specified base classes. The most derived metaclass is one ' - 'which is a\n' - 'subtype of *all* of these candidate metaclasses. If none ' - 'of the\n' - 'candidate metaclasses meets that criterion, then the class ' - 'definition\n' - 'will fail with "TypeError".\n' - '\n' - '\n' - 'Preparing the class namespace\n' - '-----------------------------\n' - '\n' - 'Once the appropriate metaclass has been identified, then ' - 'the class\n' - 'namespace is prepared. If the metaclass has a ' - '"__prepare__" attribute,\n' - 'it is called as "namespace = metaclass.__prepare__(name, ' - 'bases,\n' - '**kwds)" (where the additional keyword arguments, if any, ' - 'come from\n' - 'the class definition).\n' - '\n' - 'If the metaclass has no "__prepare__" attribute, then the ' - 'class\n' - 'namespace is initialised as an empty "dict()" instance.\n' - '\n' - 'See also: **PEP 3115** - Metaclasses in Python 3000\n' - '\n' - ' Introduced the "__prepare__" namespace hook\n' - '\n' - '\n' - 'Executing the class body\n' - '------------------------\n' - '\n' - 'The class body is executed (approximately) as "exec(body, ' - 'globals(),\n' - 'namespace)". 
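A quick, hedged sketch of the "most derived metaclass" rule described above (the metaclass names are illustrative): when no candidate metaclass is a subtype of all the others, the class definition fails with "TypeError":

    class MetaA(type):
        pass

    class MetaB(type):
        pass

    class A(metaclass=MetaA):
        pass

    class B(metaclass=MetaB):
        pass

    try:
        class C(A, B):
            pass
    except TypeError as exc:
        print(exc)   # metaclass conflict: ...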
The key difference from a normal call to ' - '"exec()" is that\n' - 'lexical scoping allows the class body (including any ' - 'methods) to\n' - 'reference names from the current and outer scopes when the ' - 'class\n' - 'definition occurs inside a function.\n' - '\n' - 'However, even when the class definition occurs inside the ' - 'function,\n' - 'methods defined inside the class still cannot see names ' - 'defined at the\n' - 'class scope. Class variables must be accessed through the ' - 'first\n' - 'parameter of instance or class methods, and cannot be ' - 'accessed at all\n' - 'from static methods.\n' - '\n' - '\n' - 'Creating the class object\n' - '-------------------------\n' - '\n' - 'Once the class namespace has been populated by executing ' - 'the class\n' - 'body, the class object is created by calling ' - '"metaclass(name, bases,\n' - 'namespace, **kwds)" (the additional keywords passed here ' - 'are the same\n' - 'as those passed to "__prepare__").\n' - '\n' - 'This class object is the one that will be referenced by ' - 'the zero-\n' - 'argument form of "super()". "__class__" is an implicit ' - 'closure\n' - 'reference created by the compiler if any methods in a ' - 'class body refer\n' - 'to either "__class__" or "super". This allows the zero ' - 'argument form\n' - 'of "super()" to correctly identify the class being defined ' - 'based on\n' - 'lexical scoping, while the class or instance that was used ' - 'to make the\n' - 'current call is identified based on the first argument ' - 'passed to the\n' - 'method.\n' - '\n' - 'After the class object is created, it is passed to the ' - 'class\n' - 'decorators included in the class definition (if any) and ' - 'the resulting\n' - 'object is bound in the local namespace as the defined ' - 'class.\n' - '\n' - 'See also: **PEP 3135** - New super\n' - '\n' - ' Describes the implicit "__class__" closure reference\n' - '\n' - '\n' - 'Metaclass example\n' - '-----------------\n' - '\n' - 'The potential uses for metaclasses are boundless. Some ' - 'ideas that have\n' - 'been explored include logging, interface checking, ' - 'automatic\n' - 'delegation, automatic property creation, proxies, ' - 'frameworks, and\n' - 'automatic resource locking/synchronization.\n' - '\n' - 'Here is an example of a metaclass that uses an\n' - '"collections.OrderedDict" to remember the order that class ' - 'variables\n' - 'are defined:\n' - '\n' - ' class OrderedClass(type):\n' - '\n' - ' @classmethod\n' - ' def __prepare__(metacls, name, bases, **kwds):\n' - ' return collections.OrderedDict()\n' - '\n' - ' def __new__(cls, name, bases, namespace, **kwds):\n' - ' result = type.__new__(cls, name, bases, ' - 'dict(namespace))\n' - ' result.members = tuple(namespace)\n' - ' return result\n' - '\n' - ' class A(metaclass=OrderedClass):\n' - ' def one(self): pass\n' - ' def two(self): pass\n' - ' def three(self): pass\n' - ' def four(self): pass\n' - '\n' - ' >>> A.members\n' - " ('__module__', 'one', 'two', 'three', 'four')\n" - '\n' - 'When the class definition for *A* gets executed, the ' - 'process begins\n' - 'with calling the metaclass\'s "__prepare__()" method which ' - 'returns an\n' - 'empty "collections.OrderedDict". That mapping records the ' - 'methods and\n' - 'attributes of *A* as they are defined within the body of ' - 'the class\n' - 'statement. Once those definitions are executed, the ' - 'ordered dictionary\n' - 'is fully populated and the metaclass\'s "__new__()" method ' - 'gets\n' - 'invoked. 
That method builds the new type and it saves the ' - 'ordered\n' - 'dictionary keys in an attribute called "members".\n' - '\n' - '\n' - 'Customizing instance and subclass checks\n' - '========================================\n' - '\n' - 'The following methods are used to override the default ' - 'behavior of the\n' - '"isinstance()" and "issubclass()" built-in functions.\n' - '\n' - 'In particular, the metaclass "abc.ABCMeta" implements ' - 'these methods in\n' - 'order to allow the addition of Abstract Base Classes ' - '(ABCs) as\n' - '"virtual base classes" to any class or type (including ' - 'built-in\n' - 'types), including other ABCs.\n' - '\n' - 'class.__instancecheck__(self, instance)\n' - '\n' - ' Return true if *instance* should be considered a ' - '(direct or\n' - ' indirect) instance of *class*. If defined, called to ' - 'implement\n' - ' "isinstance(instance, class)".\n' - '\n' - 'class.__subclasscheck__(self, subclass)\n' - '\n' - ' Return true if *subclass* should be considered a ' - '(direct or\n' - ' indirect) subclass of *class*. If defined, called to ' - 'implement\n' - ' "issubclass(subclass, class)".\n' - '\n' - 'Note that these methods are looked up on the type ' - '(metaclass) of a\n' - 'class. They cannot be defined as class methods in the ' - 'actual class.\n' - 'This is consistent with the lookup of special methods that ' - 'are called\n' - 'on instances, only in this case the instance is itself a ' - 'class.\n' - '\n' - 'See also: **PEP 3119** - Introducing Abstract Base ' - 'Classes\n' - '\n' - ' Includes the specification for customizing ' - '"isinstance()" and\n' - ' "issubclass()" behavior through "__instancecheck__()" ' - 'and\n' - ' "__subclasscheck__()", with motivation for this ' - 'functionality in\n' - ' the context of adding Abstract Base Classes (see the ' - '"abc"\n' - ' module) to the language.\n' - '\n' - '\n' - 'Emulating callable objects\n' - '==========================\n' - '\n' - 'object.__call__(self[, args...])\n' - '\n' - ' Called when the instance is "called" as a function; if ' - 'this method\n' - ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' - ' "x.__call__(arg1, arg2, ...)".\n' - '\n' - '\n' - 'Emulating container types\n' - '=========================\n' - '\n' - 'The following methods can be defined to implement ' - 'container objects.\n' - 'Containers usually are sequences (such as lists or tuples) ' - 'or mappings\n' - '(like dictionaries), but can represent other containers as ' - 'well. The\n' - 'first set of methods is used either to emulate a sequence ' - 'or to\n' - 'emulate a mapping; the difference is that for a sequence, ' - 'the\n' - 'allowable keys should be the integers *k* for which "0 <= ' - 'k < N" where\n' - '*N* is the length of the sequence, or slice objects, which ' - 'define a\n' - 'range of items. It is also recommended that mappings ' - 'provide the\n' - 'methods "keys()", "values()", "items()", "get()", ' - '"clear()",\n' - '"setdefault()", "pop()", "popitem()", "copy()", and ' - '"update()"\n' - "behaving similar to those for Python's standard dictionary " - 'objects.\n' - 'The "collections" module provides a "MutableMapping" ' - 'abstract base\n' - 'class to help create those methods from a base set of ' - '"__getitem__()",\n' - '"__setitem__()", "__delitem__()", and "keys()". Mutable ' - 'sequences\n' - 'should provide methods "append()", "count()", "index()", ' - '"extend()",\n' - '"insert()", "pop()", "remove()", "reverse()" and "sort()", ' - 'like Python\n' - 'standard list objects. 
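A hedged sketch of the mapping-emulation advice above: the "MutableMapping" abstract base class (importable from "collections.abc") can derive most of the mapping API from a small core of methods; "LowerDict" is an illustrative name:

    import collections.abc

    class LowerDict(collections.abc.MutableMapping):
        # MutableMapping fills in get(), pop(), update(), setdefault(), ...
        def __init__(self):
            self._data = {}
        def __getitem__(self, key):
            return self._data[key.lower()]
        def __setitem__(self, key, value):
            self._data[key.lower()] = value
        def __delitem__(self, key):
            del self._data[key.lower()]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = LowerDict()
    d['Spam'] = 1
    print(d['spam'], len(d), 'SPAM' in d)   # 1 1 True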
Finally, sequence types should ' - 'implement\n' - 'addition (meaning concatenation) and multiplication ' - '(meaning\n' - 'repetition) by defining the methods "__add__()", ' - '"__radd__()",\n' - '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" ' - 'described\n' - 'below; they should not define other numerical operators. ' - 'It is\n' - 'recommended that both mappings and sequences implement ' - 'the\n' - '"__contains__()" method to allow efficient use of the "in" ' - 'operator;\n' - 'for mappings, "in" should search the mapping\'s keys; for ' - 'sequences, it\n' - 'should search through the values. It is further ' - 'recommended that both\n' - 'mappings and sequences implement the "__iter__()" method ' - 'to allow\n' - 'efficient iteration through the container; for mappings, ' - '"__iter__()"\n' - 'should be the same as "keys()"; for sequences, it should ' - 'iterate\n' - 'through the values.\n' - '\n' - 'object.__len__(self)\n' - '\n' - ' Called to implement the built-in function "len()". ' - 'Should return\n' - ' the length of the object, an integer ">=" 0. Also, an ' - 'object that\n' - ' doesn\'t define a "__bool__()" method and whose ' - '"__len__()" method\n' - ' returns zero is considered to be false in a Boolean ' - 'context.\n' - '\n' - 'object.__length_hint__(self)\n' - '\n' - ' Called to implement "operator.length_hint()". Should ' - 'return an\n' - ' estimated length for the object (which may be greater ' - 'or less than\n' - ' the actual length). The length must be an integer ">=" ' - '0. This\n' - ' method is purely an optimization and is never required ' - 'for\n' - ' correctness.\n' - '\n' - ' New in version 3.4.\n' - '\n' - 'Note: Slicing is done exclusively with the following three ' - 'methods.\n' - ' A call like\n' - '\n' - ' a[1:2] = b\n' - '\n' - ' is translated to\n' - '\n' - ' a[slice(1, 2, None)] = b\n' - '\n' - ' and so forth. Missing slice items are always filled in ' - 'with "None".\n' - '\n' - 'object.__getitem__(self, key)\n' - '\n' - ' Called to implement evaluation of "self[key]". For ' - 'sequence types,\n' - ' the accepted keys should be integers and slice ' - 'objects. Note that\n' - ' the special interpretation of negative indexes (if the ' - 'class wishes\n' - ' to emulate a sequence type) is up to the ' - '"__getitem__()" method. If\n' - ' *key* is of an inappropriate type, "TypeError" may be ' - 'raised; if of\n' - ' a value outside the set of indexes for the sequence ' - '(after any\n' - ' special interpretation of negative values), ' - '"IndexError" should be\n' - ' raised. For mapping types, if *key* is missing (not in ' - 'the\n' - ' container), "KeyError" should be raised.\n' - '\n' - ' Note: "for" loops expect that an "IndexError" will be ' - 'raised for\n' - ' illegal indexes to allow proper detection of the end ' - 'of the\n' - ' sequence.\n' - '\n' - 'object.__missing__(self, key)\n' - '\n' - ' Called by "dict"."__getitem__()" to implement ' - '"self[key]" for dict\n' - ' subclasses when key is not in the dictionary.\n' - '\n' - 'object.__setitem__(self, key, value)\n' - '\n' - ' Called to implement assignment to "self[key]". Same ' - 'note as for\n' - ' "__getitem__()". This should only be implemented for ' - 'mappings if\n' - ' the objects support changes to the values for keys, or ' - 'if new keys\n' - ' can be added, or for sequences if elements can be ' - 'replaced. 
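A small sketch of the "__getitem__()" and "__len__()" behaviour described above for a sequence type, including the "IndexError" convention that lets "for" loops terminate; the class name is illustrative:

    class Squares:
        def __init__(self, n):
            self.n = n
        def __len__(self):
            return self.n
        def __getitem__(self, index):
            if isinstance(index, slice):
                return [self[i] for i in range(*index.indices(self.n))]
            if index < 0:
                index += self.n
            if not 0 <= index < self.n:
                raise IndexError(index)
            return index * index

    s = Squares(5)
    print(list(s))    # [0, 1, 4, 9, 16]
    print(s[1:4])     # [1, 4, 9]
    print(9 in s)     # True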
The\n' - ' same exceptions should be raised for improper *key* ' - 'values as for\n' - ' the "__getitem__()" method.\n' - '\n' - 'object.__delitem__(self, key)\n' - '\n' - ' Called to implement deletion of "self[key]". Same note ' - 'as for\n' - ' "__getitem__()". This should only be implemented for ' - 'mappings if\n' - ' the objects support removal of keys, or for sequences ' - 'if elements\n' - ' can be removed from the sequence. The same exceptions ' - 'should be\n' - ' raised for improper *key* values as for the ' - '"__getitem__()" method.\n' - '\n' - 'object.__iter__(self)\n' - '\n' - ' This method is called when an iterator is required for ' - 'a container.\n' - ' This method should return a new iterator object that ' - 'can iterate\n' - ' over all the objects in the container. For mappings, ' - 'it should\n' - ' iterate over the keys of the container.\n' - '\n' - ' Iterator objects also need to implement this method; ' - 'they are\n' - ' required to return themselves. For more information on ' - 'iterator\n' - ' objects, see *Iterator Types*.\n' - '\n' - 'object.__reversed__(self)\n' - '\n' - ' Called (if present) by the "reversed()" built-in to ' - 'implement\n' - ' reverse iteration. It should return a new iterator ' - 'object that\n' - ' iterates over all the objects in the container in ' - 'reverse order.\n' - '\n' - ' If the "__reversed__()" method is not provided, the ' - '"reversed()"\n' - ' built-in will fall back to using the sequence protocol ' - '("__len__()"\n' - ' and "__getitem__()"). Objects that support the ' - 'sequence protocol\n' - ' should only provide "__reversed__()" if they can ' - 'provide an\n' - ' implementation that is more efficient than the one ' - 'provided by\n' - ' "reversed()".\n' - '\n' - 'The membership test operators ("in" and "not in") are ' - 'normally\n' - 'implemented as an iteration through a sequence. However, ' - 'container\n' - 'objects can supply the following special method with a ' - 'more efficient\n' - 'implementation, which also does not require the object be ' - 'a sequence.\n' - '\n' - 'object.__contains__(self, item)\n' - '\n' - ' Called to implement membership test operators. Should ' - 'return true\n' - ' if *item* is in *self*, false otherwise. 
For mapping ' - 'objects, this\n' - ' should consider the keys of the mapping rather than the ' - 'values or\n' - ' the key-item pairs.\n' - '\n' - ' For objects that don\'t define "__contains__()", the ' - 'membership test\n' - ' first tries iteration via "__iter__()", then the old ' - 'sequence\n' - ' iteration protocol via "__getitem__()", see *this ' - 'section in the\n' - ' language reference*.\n' - '\n' - '\n' - 'Emulating numeric types\n' - '=======================\n' - '\n' - 'The following methods can be defined to emulate numeric ' - 'objects.\n' - 'Methods corresponding to operations that are not supported ' - 'by the\n' - 'particular kind of number implemented (e.g., bitwise ' - 'operations for\n' - 'non-integral numbers) should be left undefined.\n' - '\n' - 'object.__add__(self, other)\n' - 'object.__sub__(self, other)\n' - 'object.__mul__(self, other)\n' - 'object.__matmul__(self, other)\n' - 'object.__truediv__(self, other)\n' - 'object.__floordiv__(self, other)\n' - 'object.__mod__(self, other)\n' - 'object.__divmod__(self, other)\n' - 'object.__pow__(self, other[, modulo])\n' - 'object.__lshift__(self, other)\n' - 'object.__rshift__(self, other)\n' - 'object.__and__(self, other)\n' - 'object.__xor__(self, other)\n' - 'object.__or__(self, other)\n' - '\n' - ' These methods are called to implement the binary ' - 'arithmetic\n' - ' operations ("+", "-", "*", "@", "/", "//", "%", ' - '"divmod()",\n' - ' "pow()", "**", "<<", ">>", "&", "^", "|"). For ' - 'instance, to\n' - ' evaluate the expression "x + y", where *x* is an ' - 'instance of a\n' - ' class that has an "__add__()" method, "x.__add__(y)" is ' - 'called.\n' - ' The "__divmod__()" method should be the equivalent to ' - 'using\n' - ' "__floordiv__()" and "__mod__()"; it should not be ' - 'related to\n' - ' "__truediv__()". Note that "__pow__()" should be ' - 'defined to accept\n' - ' an optional third argument if the ternary version of ' - 'the built-in\n' - ' "pow()" function is to be supported.\n' - '\n' - ' If one of those methods does not support the operation ' - 'with the\n' - ' supplied arguments, it should return "NotImplemented".\n' - '\n' - 'object.__radd__(self, other)\n' - 'object.__rsub__(self, other)\n' - 'object.__rmul__(self, other)\n' - 'object.__rmatmul__(self, other)\n' - 'object.__rtruediv__(self, other)\n' - 'object.__rfloordiv__(self, other)\n' - 'object.__rmod__(self, other)\n' - 'object.__rdivmod__(self, other)\n' - 'object.__rpow__(self, other)\n' - 'object.__rlshift__(self, other)\n' - 'object.__rrshift__(self, other)\n' - 'object.__rand__(self, other)\n' - 'object.__rxor__(self, other)\n' - 'object.__ror__(self, other)\n' - '\n' - ' These methods are called to implement the binary ' - 'arithmetic\n' - ' operations ("+", "-", "*", "@", "/", "//", "%", ' - '"divmod()",\n' - ' "pow()", "**", "<<", ">>", "&", "^", "|") with ' - 'reflected (swapped)\n' - ' operands. These functions are only called if the left ' - 'operand does\n' - ' not support the corresponding operation and the ' - 'operands are of\n' - ' different types. 
[2] For instance, to evaluate the ' - 'expression "x -\n' - ' y", where *y* is an instance of a class that has an ' - '"__rsub__()"\n' - ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' - 'returns\n' - ' *NotImplemented*.\n' - '\n' - ' Note that ternary "pow()" will not try calling ' - '"__rpow__()" (the\n' - ' coercion rules would become too complicated).\n' - '\n' - " Note: If the right operand's type is a subclass of the " - 'left\n' - " operand's type and that subclass provides the " - 'reflected method\n' - ' for the operation, this method will be called before ' - 'the left\n' - " operand's non-reflected method. This behavior allows " - 'subclasses\n' - " to override their ancestors' operations.\n" - '\n' - 'object.__iadd__(self, other)\n' - 'object.__isub__(self, other)\n' - 'object.__imul__(self, other)\n' - 'object.__imatmul__(self, other)\n' - 'object.__itruediv__(self, other)\n' - 'object.__ifloordiv__(self, other)\n' - 'object.__imod__(self, other)\n' - 'object.__ipow__(self, other[, modulo])\n' - 'object.__ilshift__(self, other)\n' - 'object.__irshift__(self, other)\n' - 'object.__iand__(self, other)\n' - 'object.__ixor__(self, other)\n' - 'object.__ior__(self, other)\n' - '\n' - ' These methods are called to implement the augmented ' - 'arithmetic\n' - ' assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", ' - '"**=",\n' - ' "<<=", ">>=", "&=", "^=", "|="). These methods should ' - 'attempt to\n' - ' do the operation in-place (modifying *self*) and return ' - 'the result\n' - ' (which could be, but does not have to be, *self*). If ' - 'a specific\n' - ' method is not defined, the augmented assignment falls ' - 'back to the\n' - ' normal methods. For instance, if *x* is an instance of ' - 'a class\n' - ' with an "__iadd__()" method, "x += y" is equivalent to ' - '"x =\n' - ' x.__iadd__(y)" . Otherwise, "x.__add__(y)" and ' - '"y.__radd__(x)" are\n' - ' considered, as with the evaluation of "x + y". In ' - 'certain\n' - ' situations, augmented assignment can result in ' - 'unexpected errors\n' - " (see *Why does a_tuple[i] += ['item'] raise an " - 'exception when the\n' - ' addition works?*), but this behavior is in fact part of ' - 'the data\n' - ' model.\n' - '\n' - 'object.__neg__(self)\n' - 'object.__pos__(self)\n' - 'object.__abs__(self)\n' - 'object.__invert__(self)\n' - '\n' - ' Called to implement the unary arithmetic operations ' - '("-", "+",\n' - ' "abs()" and "~").\n' - '\n' - 'object.__complex__(self)\n' - 'object.__int__(self)\n' - 'object.__float__(self)\n' - 'object.__round__(self[, n])\n' - '\n' - ' Called to implement the built-in functions "complex()", ' - '"int()",\n' - ' "float()" and "round()". Should return a value of the ' - 'appropriate\n' - ' type.\n' - '\n' - 'object.__index__(self)\n' - '\n' - ' Called to implement "operator.index()", and whenever ' - 'Python needs\n' - ' to losslessly convert the numeric object to an integer ' - 'object (such\n' - ' as in slicing, or in the built-in "bin()", "hex()" and ' - '"oct()"\n' - ' functions). Presence of this method indicates that the ' - 'numeric\n' - ' object is an integer type. 
Must return an integer.\n' - '\n' - ' Note: In order to have a coherent integer type class, ' - 'when\n' - ' "__index__()" is defined "__int__()" should also be ' - 'defined, and\n' - ' both should return the same value.\n' - '\n' - '\n' - 'With Statement Context Managers\n' - '===============================\n' - '\n' - 'A *context manager* is an object that defines the runtime ' - 'context to\n' - 'be established when executing a "with" statement. The ' - 'context manager\n' - 'handles the entry into, and the exit from, the desired ' - 'runtime context\n' - 'for the execution of the block of code. Context managers ' - 'are normally\n' - 'invoked using the "with" statement (described in section ' - '*The with\n' - 'statement*), but can also be used by directly invoking ' - 'their methods.\n' - '\n' - 'Typical uses of context managers include saving and ' - 'restoring various\n' - 'kinds of global state, locking and unlocking resources, ' - 'closing opened\n' - 'files, etc.\n' - '\n' - 'For more information on context managers, see *Context ' - 'Manager Types*.\n' - '\n' - 'object.__enter__(self)\n' - '\n' - ' Enter the runtime context related to this object. The ' - '"with"\n' - " statement will bind this method's return value to the " - 'target(s)\n' - ' specified in the "as" clause of the statement, if any.\n' - '\n' - 'object.__exit__(self, exc_type, exc_value, traceback)\n' - '\n' - ' Exit the runtime context related to this object. The ' - 'parameters\n' - ' describe the exception that caused the context to be ' - 'exited. If the\n' - ' context was exited without an exception, all three ' - 'arguments will\n' - ' be "None".\n' - '\n' - ' If an exception is supplied, and the method wishes to ' - 'suppress the\n' - ' exception (i.e., prevent it from being propagated), it ' - 'should\n' - ' return a true value. Otherwise, the exception will be ' - 'processed\n' - ' normally upon exit from this method.\n' - '\n' - ' Note that "__exit__()" methods should not reraise the ' - 'passed-in\n' - " exception; this is the caller's responsibility.\n" - '\n' - 'See also: **PEP 0343** - The "with" statement\n' - '\n' - ' The specification, background, and examples for the ' - 'Python "with"\n' - ' statement.\n' - '\n' - '\n' - 'Special method lookup\n' - '=====================\n' - '\n' - 'For custom classes, implicit invocations of special ' - 'methods are only\n' - "guaranteed to work correctly if defined on an object's " - 'type, not in\n' - "the object's instance dictionary. That behaviour is the " - 'reason why\n' - 'the following code raises an exception:\n' - '\n' - ' >>> class C:\n' - ' ... pass\n' - ' ...\n' - ' >>> c = C()\n' - ' >>> c.__len__ = lambda: 5\n' - ' >>> len(c)\n' - ' Traceback (most recent call last):\n' - ' File "", line 1, in \n' - " TypeError: object of type 'C' has no len()\n" - '\n' - 'The rationale behind this behaviour lies with a number of ' - 'special\n' - 'methods such as "__hash__()" and "__repr__()" that are ' - 'implemented by\n' - 'all objects, including type objects. 
If the implicit ' - 'lookup of these\n' - 'methods used the conventional lookup process, they would ' - 'fail when\n' - 'invoked on the type object itself:\n' - '\n' - ' >>> 1 .__hash__() == hash(1)\n' - ' True\n' - ' >>> int.__hash__() == hash(int)\n' - ' Traceback (most recent call last):\n' - ' File "", line 1, in \n' - " TypeError: descriptor '__hash__' of 'int' object needs " - 'an argument\n' - '\n' - 'Incorrectly attempting to invoke an unbound method of a ' - 'class in this\n' - "way is sometimes referred to as 'metaclass confusion', and " - 'is avoided\n' - 'by bypassing the instance when looking up special ' - 'methods:\n' - '\n' - ' >>> type(1).__hash__(1) == hash(1)\n' - ' True\n' - ' >>> type(int).__hash__(int) == hash(int)\n' - ' True\n' - '\n' - 'In addition to bypassing any instance attributes in the ' - 'interest of\n' - 'correctness, implicit special method lookup generally also ' - 'bypasses\n' - 'the "__getattribute__()" method even of the object\'s ' - 'metaclass:\n' - '\n' - ' >>> class Meta(type):\n' - ' ... def __getattribute__(*args):\n' - ' ... print("Metaclass getattribute invoked")\n' - ' ... return type.__getattribute__(*args)\n' - ' ...\n' - ' >>> class C(object, metaclass=Meta):\n' - ' ... def __len__(self):\n' - ' ... return 10\n' - ' ... def __getattribute__(*args):\n' - ' ... print("Class getattribute invoked")\n' - ' ... return object.__getattribute__(*args)\n' - ' ...\n' - ' >>> c = C()\n' - ' >>> c.__len__() # Explicit lookup via ' - 'instance\n' - ' Class getattribute invoked\n' - ' 10\n' - ' >>> type(c).__len__(c) # Explicit lookup via ' - 'type\n' - ' Metaclass getattribute invoked\n' - ' 10\n' - ' >>> len(c) # Implicit lookup\n' - ' 10\n' - '\n' - 'Bypassing the "__getattribute__()" machinery in this ' - 'fashion provides\n' - 'significant scope for speed optimisations within the ' - 'interpreter, at\n' - 'the cost of some flexibility in the handling of special ' - 'methods (the\n' - 'special method *must* be set on the class object itself in ' - 'order to be\n' - 'consistently invoked by the interpreter).\n', - 'string-methods': '\n' - 'String Methods\n' - '**************\n' - '\n' - 'Strings implement all of the *common* sequence ' - 'operations, along with\n' - 'the additional methods described below.\n' - '\n' - 'Strings also support two styles of string formatting, ' - 'one providing a\n' - 'large degree of flexibility and customization (see ' - '"str.format()",\n' - '*Format String Syntax* and *String Formatting*) and the ' - 'other based on\n' - 'C "printf" style formatting that handles a narrower ' - 'range of types and\n' - 'is slightly harder to use correctly, but is often faster ' - 'for the cases\n' - 'it can handle (*printf-style String Formatting*).\n' - '\n' - 'The *Text Processing Services* section of the standard ' - 'library covers\n' - 'a number of other modules that provide various text ' - 'related utilities\n' - '(including regular expression support in the "re" ' - 'module).\n' - '\n' - 'str.capitalize()\n' - '\n' - ' Return a copy of the string with its first character ' - 'capitalized\n' - ' and the rest lowercased.\n' - '\n' - 'str.casefold()\n' - '\n' - ' Return a casefolded copy of the string. Casefolded ' - 'strings may be\n' - ' used for caseless matching.\n' - '\n' - ' Casefolding is similar to lowercasing but more ' - 'aggressive because\n' - ' it is intended to remove all case distinctions in a ' - 'string. 
For\n' - ' example, the German lowercase letter "\'ß\'" is ' - 'equivalent to ""ss"".\n' - ' Since it is already lowercase, "lower()" would do ' - 'nothing to "\'ß\'";\n' - ' "casefold()" converts it to ""ss"".\n' - '\n' - ' The casefolding algorithm is described in section ' - '3.13 of the\n' - ' Unicode Standard.\n' - '\n' - ' New in version 3.3.\n' - '\n' - 'str.center(width[, fillchar])\n' - '\n' - ' Return centered in a string of length *width*. ' - 'Padding is done\n' - ' using the specified *fillchar* (default is an ASCII ' - 'space). The\n' - ' original string is returned if *width* is less than ' - 'or equal to\n' - ' "len(s)".\n' - '\n' - 'str.count(sub[, start[, end]])\n' - '\n' - ' Return the number of non-overlapping occurrences of ' - 'substring *sub*\n' - ' in the range [*start*, *end*]. Optional arguments ' - '*start* and\n' - ' *end* are interpreted as in slice notation.\n' - '\n' - 'str.encode(encoding="utf-8", errors="strict")\n' - '\n' - ' Return an encoded version of the string as a bytes ' - 'object. Default\n' - ' encoding is "\'utf-8\'". *errors* may be given to set ' - 'a different\n' - ' error handling scheme. The default for *errors* is ' - '"\'strict\'",\n' - ' meaning that encoding errors raise a "UnicodeError". ' - 'Other possible\n' - ' values are "\'ignore\'", "\'replace\'", ' - '"\'xmlcharrefreplace\'",\n' - ' "\'backslashreplace\'" and any other name registered ' - 'via\n' - ' "codecs.register_error()", see section *Error ' - 'Handlers*. For a list\n' - ' of possible encodings, see section *Standard ' - 'Encodings*.\n' - '\n' - ' Changed in version 3.1: Support for keyword arguments ' - 'added.\n' - '\n' - 'str.endswith(suffix[, start[, end]])\n' - '\n' - ' Return "True" if the string ends with the specified ' - '*suffix*,\n' - ' otherwise return "False". *suffix* can also be a ' - 'tuple of suffixes\n' - ' to look for. With optional *start*, test beginning ' - 'at that\n' - ' position. With optional *end*, stop comparing at ' - 'that position.\n' - '\n' - 'str.expandtabs(tabsize=8)\n' - '\n' - ' Return a copy of the string where all tab characters ' - 'are replaced\n' - ' by one or more spaces, depending on the current ' - 'column and the\n' - ' given tab size. Tab positions occur every *tabsize* ' - 'characters\n' - ' (default is 8, giving tab positions at columns 0, 8, ' - '16 and so on).\n' - ' To expand the string, the current column is set to ' - 'zero and the\n' - ' string is examined character by character. If the ' - 'character is a\n' - ' tab ("\\t"), one or more space characters are ' - 'inserted in the result\n' - ' until the current column is equal to the next tab ' - 'position. (The\n' - ' tab character itself is not copied.) If the ' - 'character is a newline\n' - ' ("\\n") or return ("\\r"), it is copied and the ' - 'current column is\n' - ' reset to zero. Any other character is copied ' - 'unchanged and the\n' - ' current column is incremented by one regardless of ' - 'how the\n' - ' character is represented when printed.\n' - '\n' - " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n" - " '01 012 0123 01234'\n" - " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n" - " '01 012 0123 01234'\n" - '\n' - 'str.find(sub[, start[, end]])\n' - '\n' - ' Return the lowest index in the string where substring ' - '*sub* is\n' - ' found, such that *sub* is contained in the slice ' - '"s[start:end]".\n' - ' Optional arguments *start* and *end* are interpreted ' - 'as in slice\n' - ' notation. 
Return "-1" if *sub* is not found.\n' - '\n' - ' Note: The "find()" method should be used only if you ' - 'need to know\n' - ' the position of *sub*. To check if *sub* is a ' - 'substring or not,\n' - ' use the "in" operator:\n' - '\n' - " >>> 'Py' in 'Python'\n" - ' True\n' - '\n' - 'str.format(*args, **kwargs)\n' - '\n' - ' Perform a string formatting operation. The string on ' - 'which this\n' - ' method is called can contain literal text or ' - 'replacement fields\n' - ' delimited by braces "{}". Each replacement field ' - 'contains either\n' - ' the numeric index of a positional argument, or the ' - 'name of a\n' - ' keyword argument. Returns a copy of the string where ' - 'each\n' - ' replacement field is replaced with the string value ' - 'of the\n' - ' corresponding argument.\n' - '\n' - ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n' - " 'The sum of 1 + 2 is 3'\n" - '\n' - ' See *Format String Syntax* for a description of the ' - 'various\n' - ' formatting options that can be specified in format ' - 'strings.\n' - '\n' - 'str.format_map(mapping)\n' - '\n' - ' Similar to "str.format(**mapping)", except that ' - '"mapping" is used\n' - ' directly and not copied to a "dict". This is useful ' - 'if for example\n' - ' "mapping" is a dict subclass:\n' - '\n' - ' >>> class Default(dict):\n' - ' ... def __missing__(self, key):\n' - ' ... return key\n' - ' ...\n' - " >>> '{name} was born in " - "{country}'.format_map(Default(name='Guido'))\n" - " 'Guido was born in country'\n" - '\n' - ' New in version 3.2.\n' - '\n' - 'str.index(sub[, start[, end]])\n' - '\n' - ' Like "find()", but raise "ValueError" when the ' - 'substring is not\n' - ' found.\n' - '\n' - 'str.isalnum()\n' - '\n' - ' Return true if all characters in the string are ' - 'alphanumeric and\n' - ' there is at least one character, false otherwise. A ' - 'character "c"\n' - ' is alphanumeric if one of the following returns ' - '"True":\n' - ' "c.isalpha()", "c.isdecimal()", "c.isdigit()", or ' - '"c.isnumeric()".\n' - '\n' - 'str.isalpha()\n' - '\n' - ' Return true if all characters in the string are ' - 'alphabetic and\n' - ' there is at least one character, false otherwise. ' - 'Alphabetic\n' - ' characters are those characters defined in the ' - 'Unicode character\n' - ' database as "Letter", i.e., those with general ' - 'category property\n' - ' being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note ' - 'that this is\n' - ' different from the "Alphabetic" property defined in ' - 'the Unicode\n' - ' Standard.\n' - '\n' - 'str.isdecimal()\n' - '\n' - ' Return true if all characters in the string are ' - 'decimal characters\n' - ' and there is at least one character, false otherwise. ' - 'Decimal\n' - ' characters are those from general category "Nd". This ' - 'category\n' - ' includes digit characters, and all characters that ' - 'can be used to\n' - ' form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC ' - 'DIGIT ZERO.\n' - '\n' - 'str.isdigit()\n' - '\n' - ' Return true if all characters in the string are ' - 'digits and there is\n' - ' at least one character, false otherwise. Digits ' - 'include decimal\n' - ' characters and digits that need special handling, ' - 'such as the\n' - ' compatibility superscript digits. 
Formally, a digit ' - 'is a character\n' - ' that has the property value Numeric_Type=Digit or\n' - ' Numeric_Type=Decimal.\n' - '\n' - 'str.isidentifier()\n' - '\n' - ' Return true if the string is a valid identifier ' - 'according to the\n' - ' language definition, section *Identifiers and ' - 'keywords*.\n' - '\n' - ' Use "keyword.iskeyword()" to test for reserved ' - 'identifiers such as\n' - ' "def" and "class".\n' - '\n' - 'str.islower()\n' - '\n' - ' Return true if all cased characters [4] in the string ' - 'are lowercase\n' - ' and there is at least one cased character, false ' - 'otherwise.\n' - '\n' - 'str.isnumeric()\n' - '\n' - ' Return true if all characters in the string are ' - 'numeric characters,\n' - ' and there is at least one character, false otherwise. ' - 'Numeric\n' - ' characters include digit characters, and all ' - 'characters that have\n' - ' the Unicode numeric value property, e.g. U+2155, ' - 'VULGAR FRACTION\n' - ' ONE FIFTH. Formally, numeric characters are those ' - 'with the\n' - ' property value Numeric_Type=Digit, ' - 'Numeric_Type=Decimal or\n' - ' Numeric_Type=Numeric.\n' - '\n' - 'str.isprintable()\n' - '\n' - ' Return true if all characters in the string are ' - 'printable or the\n' - ' string is empty, false otherwise. Nonprintable ' - 'characters are\n' - ' those characters defined in the Unicode character ' - 'database as\n' - ' "Other" or "Separator", excepting the ASCII space ' - '(0x20) which is\n' - ' considered printable. (Note that printable ' - 'characters in this\n' - ' context are those which should not be escaped when ' - '"repr()" is\n' - ' invoked on a string. It has no bearing on the ' - 'handling of strings\n' - ' written to "sys.stdout" or "sys.stderr".)\n' - '\n' - 'str.isspace()\n' - '\n' - ' Return true if there are only whitespace characters ' - 'in the string\n' - ' and there is at least one character, false ' - 'otherwise. Whitespace\n' - ' characters are those characters defined in the ' - 'Unicode character\n' - ' database as "Other" or "Separator" and those with ' - 'bidirectional\n' - ' property being one of "WS", "B", or "S".\n' - '\n' - 'str.istitle()\n' - '\n' - ' Return true if the string is a titlecased string and ' - 'there is at\n' - ' least one character, for example uppercase characters ' - 'may only\n' - ' follow uncased characters and lowercase characters ' - 'only cased ones.\n' - ' Return false otherwise.\n' - '\n' - 'str.isupper()\n' - '\n' - ' Return true if all cased characters [4] in the string ' - 'are uppercase\n' - ' and there is at least one cased character, false ' - 'otherwise.\n' - '\n' - 'str.join(iterable)\n' - '\n' - ' Return a string which is the concatenation of the ' - 'strings in the\n' - ' *iterable* *iterable*. A "TypeError" will be raised ' - 'if there are\n' - ' any non-string values in *iterable*, including ' - '"bytes" objects.\n' - ' The separator between elements is the string ' - 'providing this method.\n' - '\n' - 'str.ljust(width[, fillchar])\n' - '\n' - ' Return the string left justified in a string of ' - 'length *width*.\n' - ' Padding is done using the specified *fillchar* ' - '(default is an ASCII\n' - ' space). 
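The classification methods and "join()" above can be sanity-checked interactively; a small sketch with arbitrary sample data:

    >>> 'valid_name'.isidentifier(), 'class'.isidentifier()
    (True, True)
    >>> import keyword; keyword.iskeyword('class')
    True
    >>> '½'.isnumeric(), '½'.isdigit()
    (True, False)
    >>> ', '.join(['red', 'green', 'blue'])
    'red, green, blue'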
The original string is returned if *width* is ' - 'less than or\n' - ' equal to "len(s)".\n' - '\n' - 'str.lower()\n' - '\n' - ' Return a copy of the string with all the cased ' - 'characters [4]\n' - ' converted to lowercase.\n' - '\n' - ' The lowercasing algorithm used is described in ' - 'section 3.13 of the\n' - ' Unicode Standard.\n' - '\n' - 'str.lstrip([chars])\n' - '\n' - ' Return a copy of the string with leading characters ' - 'removed. The\n' - ' *chars* argument is a string specifying the set of ' - 'characters to be\n' - ' removed. If omitted or "None", the *chars* argument ' - 'defaults to\n' - ' removing whitespace. The *chars* argument is not a ' - 'prefix; rather,\n' - ' all combinations of its values are stripped:\n' - '\n' - " >>> ' spacious '.lstrip()\n" - " 'spacious '\n" - " >>> 'www.example.com'.lstrip('cmowz.')\n" - " 'example.com'\n" - '\n' - 'static str.maketrans(x[, y[, z]])\n' - '\n' - ' This static method returns a translation table usable ' - 'for\n' - ' "str.translate()".\n' - '\n' - ' If there is only one argument, it must be a ' - 'dictionary mapping\n' - ' Unicode ordinals (integers) or characters (strings of ' - 'length 1) to\n' - ' Unicode ordinals, strings (of arbitrary lengths) or ' - 'None.\n' - ' Character keys will then be converted to ordinals.\n' - '\n' - ' If there are two arguments, they must be strings of ' - 'equal length,\n' - ' and in the resulting dictionary, each character in x ' - 'will be mapped\n' - ' to the character at the same position in y. If there ' - 'is a third\n' - ' argument, it must be a string, whose characters will ' - 'be mapped to\n' - ' None in the result.\n' - '\n' - 'str.partition(sep)\n' - '\n' - ' Split the string at the first occurrence of *sep*, ' - 'and return a\n' - ' 3-tuple containing the part before the separator, the ' - 'separator\n' - ' itself, and the part after the separator. If the ' - 'separator is not\n' - ' found, return a 3-tuple containing the string itself, ' - 'followed by\n' - ' two empty strings.\n' - '\n' - 'str.replace(old, new[, count])\n' - '\n' - ' Return a copy of the string with all occurrences of ' - 'substring *old*\n' - ' replaced by *new*. If the optional argument *count* ' - 'is given, only\n' - ' the first *count* occurrences are replaced.\n' - '\n' - 'str.rfind(sub[, start[, end]])\n' - '\n' - ' Return the highest index in the string where ' - 'substring *sub* is\n' - ' found, such that *sub* is contained within ' - '"s[start:end]".\n' - ' Optional arguments *start* and *end* are interpreted ' - 'as in slice\n' - ' notation. Return "-1" on failure.\n' - '\n' - 'str.rindex(sub[, start[, end]])\n' - '\n' - ' Like "rfind()" but raises "ValueError" when the ' - 'substring *sub* is\n' - ' not found.\n' - '\n' - 'str.rjust(width[, fillchar])\n' - '\n' - ' Return the string right justified in a string of ' - 'length *width*.\n' - ' Padding is done using the specified *fillchar* ' - '(default is an ASCII\n' - ' space). The original string is returned if *width* is ' - 'less than or\n' - ' equal to "len(s)".\n' - '\n' - 'str.rpartition(sep)\n' - '\n' - ' Split the string at the last occurrence of *sep*, and ' - 'return a\n' - ' 3-tuple containing the part before the separator, the ' - 'separator\n' - ' itself, and the part after the separator. 
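A short sketch of "maketrans()"/"translate()", "partition()" and "replace()" as described above; "table" is just a local name used for the example:

    >>> table = str.maketrans('abc', 'xyz')
    >>> 'aabbcc'.translate(table)
    'xxyyzz'
    >>> 'key=value'.partition('=')
    ('key', '=', 'value')
    >>> 'banana'.replace('a', 'o', 2)
    'bonona'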
If the ' - 'separator is not\n' - ' found, return a 3-tuple containing two empty strings, ' - 'followed by\n' - ' the string itself.\n' - '\n' - 'str.rsplit(sep=None, maxsplit=-1)\n' - '\n' - ' Return a list of the words in the string, using *sep* ' - 'as the\n' - ' delimiter string. If *maxsplit* is given, at most ' - '*maxsplit* splits\n' - ' are done, the *rightmost* ones. If *sep* is not ' - 'specified or\n' - ' "None", any whitespace string is a separator. Except ' - 'for splitting\n' - ' from the right, "rsplit()" behaves like "split()" ' - 'which is\n' - ' described in detail below.\n' - '\n' - 'str.rstrip([chars])\n' - '\n' - ' Return a copy of the string with trailing characters ' - 'removed. The\n' - ' *chars* argument is a string specifying the set of ' - 'characters to be\n' - ' removed. If omitted or "None", the *chars* argument ' - 'defaults to\n' - ' removing whitespace. The *chars* argument is not a ' - 'suffix; rather,\n' - ' all combinations of its values are stripped:\n' - '\n' - " >>> ' spacious '.rstrip()\n" - " ' spacious'\n" - " >>> 'mississippi'.rstrip('ipz')\n" - " 'mississ'\n" - '\n' - 'str.split(sep=None, maxsplit=-1)\n' - '\n' - ' Return a list of the words in the string, using *sep* ' - 'as the\n' - ' delimiter string. If *maxsplit* is given, at most ' - '*maxsplit*\n' - ' splits are done (thus, the list will have at most ' - '"maxsplit+1"\n' - ' elements). If *maxsplit* is not specified or "-1", ' - 'then there is\n' - ' no limit on the number of splits (all possible splits ' - 'are made).\n' - '\n' - ' If *sep* is given, consecutive delimiters are not ' - 'grouped together\n' - ' and are deemed to delimit empty strings (for ' - 'example,\n' - ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', ' - '\'2\']"). The *sep* argument\n' - ' may consist of multiple characters (for example,\n' - ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', ' - '\'3\']"). Splitting an\n' - ' empty string with a specified separator returns ' - '"[\'\']".\n' - '\n' - ' For example:\n' - '\n' - " >>> '1,2,3'.split(',')\n" - " ['1', '2', '3']\n" - " >>> '1,2,3'.split(',', maxsplit=1)\n" - " ['1', '2,3']\n" - " >>> '1,2,,3,'.split(',')\n" - " ['1', '2', '', '3', '']\n" - '\n' - ' If *sep* is not specified or is "None", a different ' - 'splitting\n' - ' algorithm is applied: runs of consecutive whitespace ' - 'are regarded\n' - ' as a single separator, and the result will contain no ' - 'empty strings\n' - ' at the start or end if the string has leading or ' - 'trailing\n' - ' whitespace. Consequently, splitting an empty string ' - 'or a string\n' - ' consisting of just whitespace with a "None" separator ' - 'returns "[]".\n' - '\n' - ' For example:\n' - '\n' - " >>> '1 2 3'.split()\n" - " ['1', '2', '3']\n" - " >>> '1 2 3'.split(maxsplit=1)\n" - " ['1', '2 3']\n" - " >>> ' 1 2 3 '.split()\n" - " ['1', '2', '3']\n" - '\n' - 'str.splitlines([keepends])\n' - '\n' - ' Return a list of the lines in the string, breaking at ' - 'line\n' - ' boundaries. Line breaks are not included in the ' - 'resulting list\n' - ' unless *keepends* is given and true.\n' - '\n' - ' This method splits on the following line boundaries. 
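The difference between "split()" and "rsplit()" with a *maxsplit* limit is easiest to see side by side; a minimal sketch:

    >>> 'a.b.c'.split('.', maxsplit=1)
    ['a', 'b.c']
    >>> 'a.b.c'.rsplit('.', maxsplit=1)
    ['a.b', 'c']
    >>> 'x,y,z'.rpartition(',')
    ('x,y', ',', 'z')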
' - 'In\n' - ' particular, the boundaries are a superset of ' - '*universal newlines*.\n' - '\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | Representation | ' - 'Description |\n' - ' ' - '+=========================+===============================+\n' - ' | "\\n" | Line ' - 'Feed |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\r" | Carriage ' - 'Return |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\r\\n" | Carriage Return + Line ' - 'Feed |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\v" or "\\x0b" | Line ' - 'Tabulation |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\f" or "\\x0c" | Form ' - 'Feed |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\x1c" | File ' - 'Separator |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\x1d" | Group ' - 'Separator |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\x1e" | Record ' - 'Separator |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\x85" | Next Line (C1 Control ' - 'Code) |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\u2028" | Line ' - 'Separator |\n' - ' ' - '+-------------------------+-------------------------------+\n' - ' | "\\u2029" | Paragraph ' - 'Separator |\n' - ' ' - '+-------------------------+-------------------------------+\n' - '\n' - ' Changed in version 3.2: "\\v" and "\\f" added to list ' - 'of line\n' - ' boundaries.\n' - '\n' - ' For example:\n' - '\n' - " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines()\n" - " ['ab c', '', 'de fg', 'kl']\n" - " >>> 'ab c\\n\\nde " - "fg\\rkl\\r\\n'.splitlines(keepends=True)\n" - " ['ab c\\n', '\\n', 'de fg\\r', 'kl\\r\\n']\n" - '\n' - ' Unlike "split()" when a delimiter string *sep* is ' - 'given, this\n' - ' method returns an empty list for the empty string, ' - 'and a terminal\n' - ' line break does not result in an extra line:\n' - '\n' - ' >>> "".splitlines()\n' - ' []\n' - ' >>> "One line\\n".splitlines()\n' - " ['One line']\n" - '\n' - ' For comparison, "split(\'\\n\')" gives:\n' - '\n' - " >>> ''.split('\\n')\n" - " ['']\n" - " >>> 'Two lines\\n'.split('\\n')\n" - " ['Two lines', '']\n" - '\n' - 'str.startswith(prefix[, start[, end]])\n' - '\n' - ' Return "True" if string starts with the *prefix*, ' - 'otherwise return\n' - ' "False". *prefix* can also be a tuple of prefixes to ' - 'look for.\n' - ' With optional *start*, test string beginning at that ' - 'position.\n' - ' With optional *end*, stop comparing string at that ' - 'position.\n' - '\n' - 'str.strip([chars])\n' - '\n' - ' Return a copy of the string with the leading and ' - 'trailing\n' - ' characters removed. The *chars* argument is a string ' - 'specifying the\n' - ' set of characters to be removed. If omitted or ' - '"None", the *chars*\n' - ' argument defaults to removing whitespace. The *chars* ' - 'argument is\n' - ' not a prefix or suffix; rather, all combinations of ' - 'its values are\n' - ' stripped:\n' - '\n' - " >>> ' spacious '.strip()\n" - " 'spacious'\n" - " >>> 'www.example.com'.strip('cmowz.')\n" - " 'example'\n" - '\n' - ' The outermost leading and trailing *chars* argument ' - 'values are\n' - ' stripped from the string. 
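A quick sketch of "splitlines()" with mixed line boundaries, and of passing a tuple of prefixes to "startswith()":

    >>> 'one\ntwo\r\nthree\x85four'.splitlines()
    ['one', 'two', 'three', 'four']
    >>> 'unhappy'.startswith(('un', 'in'))
    True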
Characters are removed from ' - 'the leading\n' - ' end until reaching a string character that is not ' - 'contained in the\n' - ' set of characters in *chars*. A similar action takes ' - 'place on the\n' - ' trailing end. For example:\n' - '\n' - " >>> comment_string = '#....... Section 3.2.1 Issue " - "#32 .......'\n" - " >>> comment_string.strip('.#! ')\n" - " 'Section 3.2.1 Issue #32'\n" - '\n' - 'str.swapcase()\n' - '\n' - ' Return a copy of the string with uppercase characters ' - 'converted to\n' - ' lowercase and vice versa. Note that it is not ' - 'necessarily true that\n' - ' "s.swapcase().swapcase() == s".\n' - '\n' - 'str.title()\n' - '\n' - ' Return a titlecased version of the string where words ' - 'start with an\n' - ' uppercase character and the remaining characters are ' - 'lowercase.\n' - '\n' - ' For example:\n' - '\n' - " >>> 'Hello world'.title()\n" - " 'Hello World'\n" - '\n' - ' The algorithm uses a simple language-independent ' - 'definition of a\n' - ' word as groups of consecutive letters. The ' - 'definition works in\n' - ' many contexts but it means that apostrophes in ' - 'contractions and\n' - ' possessives form word boundaries, which may not be ' - 'the desired\n' - ' result:\n' - '\n' - ' >>> "they\'re bill\'s friends from the ' - 'UK".title()\n' - ' "They\'Re Bill\'S Friends From The Uk"\n' - '\n' - ' A workaround for apostrophes can be constructed using ' - 'regular\n' - ' expressions:\n' - '\n' - ' >>> import re\n' - ' >>> def titlecase(s):\n' - ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n' - ' ... lambda mo: ' - 'mo.group(0)[0].upper() +\n' - ' ... ' - 'mo.group(0)[1:].lower(),\n' - ' ... s)\n' - ' ...\n' - ' >>> titlecase("they\'re bill\'s friends.")\n' - ' "They\'re Bill\'s Friends."\n' - '\n' - 'str.translate(table)\n' - '\n' - ' Return a copy of the string in which each character ' - 'has been mapped\n' - ' through the given translation table. The table must ' - 'be an object\n' - ' that implements indexing via "__getitem__()", ' - 'typically a *mapping*\n' - ' or *sequence*. When indexed by a Unicode ordinal (an ' - 'integer), the\n' - ' table object can do any of the following: return a ' - 'Unicode ordinal\n' - ' or a string, to map the character to one or more ' - 'other characters;\n' - ' return "None", to delete the character from the ' - 'return string; or\n' - ' raise a "LookupError" exception, to map the character ' - 'to itself.\n' - '\n' - ' You can use "str.maketrans()" to create a translation ' - 'map from\n' - ' character-to-character mappings in different ' - 'formats.\n' - '\n' - ' See also the "codecs" module for a more flexible ' - 'approach to custom\n' - ' character mappings.\n' - '\n' - 'str.upper()\n' - '\n' - ' Return a copy of the string with all the cased ' - 'characters [4]\n' - ' converted to uppercase. Note that ' - '"str.upper().isupper()" might be\n' - ' "False" if "s" contains uncased characters or if the ' - 'Unicode\n' - ' category of the resulting character(s) is not "Lu" ' - '(Letter,\n' - ' uppercase), but e.g. "Lt" (Letter, titlecase).\n' - '\n' - ' The uppercasing algorithm used is described in ' - 'section 3.13 of the\n' - ' Unicode Standard.\n' - '\n' - 'str.zfill(width)\n' - '\n' - ' Return a copy of the string left filled with ASCII ' - '"\'0\'" digits to\n' - ' make a string of length *width*. A leading sign ' - 'prefix\n' - ' ("\'+\'"/"\'-\'") is handled by inserting the padding ' - '*after* the sign\n' - ' character rather than before. 
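A minimal sketch of "swapcase()", of deleting characters with "maketrans()"/"translate()", and of the "upper()" caveat mentioned above (the sample text is arbitrary):

    >>> 'Hello World'.swapcase()
    'hELLO wORLD'
    >>> 'read the docs'.translate(str.maketrans('', '', 'aeiou'))
    'rd th dcs'
    >>> 'HELLO'.upper().isupper()
    True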
The original string is ' - 'returned if\n' - ' *width* is less than or equal to "len(s)".\n' - '\n' - ' For example:\n' - '\n' - ' >>> "42".zfill(5)\n' - " '00042'\n" - ' >>> "-42".zfill(5)\n' - " '-0042'\n", - 'strings': '\n' - 'String and Bytes literals\n' - '*************************\n' - '\n' - 'String literals are described by the following lexical ' - 'definitions:\n' - '\n' - ' stringliteral ::= [stringprefix](shortstring | ' - 'longstring)\n' - ' stringprefix ::= "r" | "u" | "R" | "U"\n' - ' shortstring ::= "\'" shortstringitem* "\'" | \'"\' ' - 'shortstringitem* \'"\'\n' - ' longstring ::= "\'\'\'" longstringitem* "\'\'\'" | ' - '\'"""\' longstringitem* \'"""\'\n' - ' shortstringitem ::= shortstringchar | stringescapeseq\n' - ' longstringitem ::= longstringchar | stringescapeseq\n' - ' shortstringchar ::= \n' - ' longstringchar ::= \n' - ' stringescapeseq ::= "\\" \n' - '\n' - ' bytesliteral ::= bytesprefix(shortbytes | longbytes)\n' - ' bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | ' - '"rb" | "rB" | "Rb" | "RB"\n' - ' shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' ' - 'shortbytesitem* \'"\'\n' - ' longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | ' - '\'"""\' longbytesitem* \'"""\'\n' - ' shortbytesitem ::= shortbyteschar | bytesescapeseq\n' - ' longbytesitem ::= longbyteschar | bytesescapeseq\n' - ' shortbyteschar ::= \n' - ' longbyteschar ::= \n' - ' bytesescapeseq ::= "\\" \n' - '\n' - 'One syntactic restriction not indicated by these productions is ' - 'that\n' - 'whitespace is not allowed between the "stringprefix" or ' - '"bytesprefix"\n' - 'and the rest of the literal. The source character set is ' - 'defined by\n' - 'the encoding declaration; it is UTF-8 if no encoding ' - 'declaration is\n' - 'given in the source file; see section *Encoding declarations*.\n' - '\n' - 'In plain English: Both types of literals can be enclosed in ' - 'matching\n' - 'single quotes ("\'") or double quotes ("""). They can also be ' - 'enclosed\n' - 'in matching groups of three single or double quotes (these are\n' - 'generally referred to as *triple-quoted strings*). The ' - 'backslash\n' - '("\\") character is used to escape characters that otherwise ' - 'have a\n' - 'special meaning, such as newline, backslash itself, or the ' - 'quote\n' - 'character.\n' - '\n' - 'Bytes literals are always prefixed with "\'b\'" or "\'B\'"; ' - 'they produce\n' - 'an instance of the "bytes" type instead of the "str" type. ' - 'They may\n' - 'only contain ASCII characters; bytes with a numeric value of ' - '128 or\n' - 'greater must be expressed with escapes.\n' - '\n' - 'As of Python 3.3 it is possible again to prefix string literals ' - 'with a\n' - '"u" prefix to simplify maintenance of dual 2.x and 3.x ' - 'codebases.\n' - '\n' - 'Both string and bytes literals may optionally be prefixed with ' - 'a\n' - 'letter "\'r\'" or "\'R\'"; such strings are called *raw ' - 'strings* and treat\n' - 'backslashes as literal characters. As a result, in string ' - 'literals,\n' - '"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated ' - 'specially.\n' - "Given that Python 2.x's raw unicode literals behave differently " - 'than\n' - 'Python 3.x\'s the "\'ur\'" syntax is not supported.\n' - '\n' - 'New in version 3.3: The "\'rb\'" prefix of raw bytes literals ' - 'has been\n' - 'added as a synonym of "\'br\'".\n' - '\n' - 'New in version 3.3: Support for the unicode legacy literal\n' - '("u\'value\'") was reintroduced to simplify the maintenance of ' - 'dual\n' - 'Python 2.x and 3.x codebases. 
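A small sketch of the literal prefixes described above: raw strings keep backslashes, bytes literals produce "bytes", and the legacy "u" prefix is a no-op in Python 3:

    >>> len('\n'), len(r'\n')
    (1, 2)
    >>> type(b'abc')
    <class 'bytes'>
    >>> u'legacy' == 'legacy'
    True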
See **PEP 414** for more ' - 'information.\n' - '\n' - 'In triple-quoted literals, unescaped newlines and quotes are ' - 'allowed\n' - '(and are retained), except that three unescaped quotes in a ' - 'row\n' - 'terminate the literal. (A "quote" is the character used to ' - 'open the\n' - 'literal, i.e. either "\'" or """.)\n' - '\n' - 'Unless an "\'r\'" or "\'R\'" prefix is present, escape ' - 'sequences in string\n' - 'and bytes literals are interpreted according to rules similar ' - 'to those\n' - 'used by Standard C. The recognized escape sequences are:\n' - '\n' - '+-------------------+-----------------------------------+---------+\n' - '| Escape Sequence | Meaning | ' - 'Notes |\n' - '+===================+===================================+=========+\n' - '| "\\newline" | Backslash and newline ignored ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\\\" | Backslash ("\\") ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\\'" | Single quote ("\'") ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\"" | Double quote (""") ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\a" | ASCII Bell (BEL) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\b" | ASCII Backspace (BS) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\f" | ASCII Formfeed (FF) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\n" | ASCII Linefeed (LF) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\r" | ASCII Carriage Return (CR) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\t" | ASCII Horizontal Tab (TAB) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\v" | ASCII Vertical Tab (VT) ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\ooo" | Character with octal value *ooo* | ' - '(1,3) |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\xhh" | Character with hex value *hh* | ' - '(2,3) |\n' - '+-------------------+-----------------------------------+---------+\n' - '\n' - 'Escape sequences only recognized in string literals are:\n' - '\n' - '+-------------------+-----------------------------------+---------+\n' - '| Escape Sequence | Meaning | ' - 'Notes |\n' - '+===================+===================================+=========+\n' - '| "\\N{name}" | Character named *name* in the | ' - '(4) |\n' - '| | Unicode database ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\uxxxx" | Character with 16-bit hex value | ' - '(5) |\n' - '| | *xxxx* ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '| "\\Uxxxxxxxx" | Character with 32-bit hex value | ' - '(6) |\n' - '| | *xxxxxxxx* ' - '| |\n' - '+-------------------+-----------------------------------+---------+\n' - '\n' - 'Notes:\n' - '\n' - '1. As in Standard C, up to three octal digits are accepted.\n' - '\n' - '2. Unlike in Standard C, exactly two hex digits are required.\n' - '\n' - '3. In a bytes literal, hexadecimal and octal escapes denote ' - 'the\n' - ' byte with the given value. 
In a string literal, these ' - 'escapes\n' - ' denote a Unicode character with the given value.\n' - '\n' - '4. Changed in version 3.3: Support for name aliases [1] has ' - 'been\n' - ' added.\n' - '\n' - '5. Individual code units which form parts of a surrogate pair ' - 'can\n' - ' be encoded using this escape sequence. Exactly four hex ' - 'digits are\n' - ' required.\n' - '\n' - '6. Any Unicode character can be encoded this way. Exactly ' - 'eight\n' - ' hex digits are required.\n' - '\n' - 'Unlike Standard C, all unrecognized escape sequences are left ' - 'in the\n' - 'string unchanged, i.e., *the backslash is left in the result*. ' - '(This\n' - 'behavior is useful when debugging: if an escape sequence is ' - 'mistyped,\n' - 'the resulting output is more easily recognized as broken.) It ' - 'is also\n' - 'important to note that the escape sequences only recognized in ' - 'string\n' - 'literals fall into the category of unrecognized escapes for ' - 'bytes\n' - 'literals.\n' - '\n' - 'Even in a raw literal, quotes can be escaped with a backslash, ' - 'but the\n' - 'backslash remains in the result; for example, "r"\\""" is a ' - 'valid\n' - 'string literal consisting of two characters: a backslash and a ' - 'double\n' - 'quote; "r"\\"" is not a valid string literal (even a raw string ' - 'cannot\n' - 'end in an odd number of backslashes). Specifically, *a raw ' - 'literal\n' - 'cannot end in a single backslash* (since the backslash would ' - 'escape\n' - 'the following quote character). Note also that a single ' - 'backslash\n' - 'followed by a newline is interpreted as those two characters as ' - 'part\n' - 'of the literal, *not* as a line continuation.\n', - 'subscriptions': '\n' - 'Subscriptions\n' - '*************\n' - '\n' - 'A subscription selects an item of a sequence (string, ' - 'tuple or list)\n' - 'or mapping (dictionary) object:\n' - '\n' - ' subscription ::= primary "[" expression_list "]"\n' - '\n' - 'The primary must evaluate to an object that supports ' - 'subscription\n' - '(lists or dictionaries for example). User-defined ' - 'objects can support\n' - 'subscription by defining a "__getitem__()" method.\n' - '\n' - 'For built-in objects, there are two types of objects that ' - 'support\n' - 'subscription:\n' - '\n' - 'If the primary is a mapping, the expression list must ' - 'evaluate to an\n' - 'object whose value is one of the keys of the mapping, and ' - 'the\n' - 'subscription selects the value in the mapping that ' - 'corresponds to that\n' - 'key. (The expression list is a tuple except if it has ' - 'exactly one\n' - 'item.)\n' - '\n' - 'If the primary is a sequence, the expression (list) must ' - 'evaluate to\n' - 'an integer or a slice (as discussed in the following ' - 'section).\n' - '\n' - 'The formal syntax makes no special provision for negative ' - 'indices in\n' - 'sequences; however, built-in sequences all provide a ' - '"__getitem__()"\n' - 'method that interprets negative indices by adding the ' - 'length of the\n' - 'sequence to the index (so that "x[-1]" selects the last ' - 'item of "x").\n' - 'The resulting value must be a nonnegative integer less ' - 'than the number\n' - 'of items in the sequence, and the subscription selects ' - 'the item whose\n' - 'index is that value (counting from zero). Since the ' - 'support for\n' - "negative indices and slicing occurs in the object's " - '"__getitem__()"\n' - 'method, subclasses overriding this method will need to ' - 'explicitly add\n' - 'that support.\n' - '\n' - "A string's items are characters. 
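The escape-sequence and subscription rules above can be checked interactively; a minimal sketch in which "data" and "seq" are illustrative names:

    >>> '\x41\u0042\N{LATIN CAPITAL LETTER C}'    # hex, 16-bit and named escapes
    'ABC'
    >>> '\q'                                      # unrecognized escape: backslash kept
    '\\q'
    >>> data = {'answer': 42}
    >>> data['answer']                            # mapping subscription
    42
    >>> seq = ['a', 'b', 'c']
    >>> seq[-1]                                   # negative index handled by __getitem__
    'c'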
A character is not a " - 'separate data\n' - 'type but a string of exactly one character.\n', - 'truth': '\n' - 'Truth Value Testing\n' - '*******************\n' - '\n' - 'Any object can be tested for truth value, for use in an "if" or\n' - '"while" condition or as operand of the Boolean operations below. ' - 'The\n' - 'following values are considered false:\n' - '\n' - '* "None"\n' - '\n' - '* "False"\n' - '\n' - '* zero of any numeric type, for example, "0", "0.0", "0j".\n' - '\n' - '* any empty sequence, for example, "\'\'", "()", "[]".\n' - '\n' - '* any empty mapping, for example, "{}".\n' - '\n' - '* instances of user-defined classes, if the class defines a\n' - ' "__bool__()" or "__len__()" method, when that method returns ' - 'the\n' - ' integer zero or "bool" value "False". [1]\n' - '\n' - 'All other values are considered true --- so objects of many types ' - 'are\n' - 'always true.\n' - '\n' - 'Operations and built-in functions that have a Boolean result ' - 'always\n' - 'return "0" or "False" for false and "1" or "True" for true, ' - 'unless\n' - 'otherwise stated. (Important exception: the Boolean operations ' - '"or"\n' - 'and "and" always return one of their operands.)\n', - 'try': '\n' - 'The "try" statement\n' - '*******************\n' - '\n' - 'The "try" statement specifies exception handlers and/or cleanup ' - 'code\n' - 'for a group of statements:\n' - '\n' - ' try_stmt ::= try1_stmt | try2_stmt\n' - ' try1_stmt ::= "try" ":" suite\n' - ' ("except" [expression ["as" identifier]] ":" ' - 'suite)+\n' - ' ["else" ":" suite]\n' - ' ["finally" ":" suite]\n' - ' try2_stmt ::= "try" ":" suite\n' - ' "finally" ":" suite\n' - '\n' - 'The "except" clause(s) specify one or more exception handlers. When ' - 'no\n' - 'exception occurs in the "try" clause, no exception handler is\n' - 'executed. When an exception occurs in the "try" suite, a search for ' - 'an\n' - 'exception handler is started. This search inspects the except ' - 'clauses\n' - 'in turn until one is found that matches the exception. An ' - 'expression-\n' - 'less except clause, if present, must be last; it matches any\n' - 'exception. For an except clause with an expression, that ' - 'expression\n' - 'is evaluated, and the clause matches the exception if the ' - 'resulting\n' - 'object is "compatible" with the exception. An object is ' - 'compatible\n' - 'with an exception if it is the class or a base class of the ' - 'exception\n' - 'object or a tuple containing an item compatible with the ' - 'exception.\n' - '\n' - 'If no except clause matches the exception, the search for an ' - 'exception\n' - 'handler continues in the surrounding code and on the invocation ' - 'stack.\n' - '[1]\n' - '\n' - 'If the evaluation of an expression in the header of an except ' - 'clause\n' - 'raises an exception, the original search for a handler is canceled ' - 'and\n' - 'a search starts for the new exception in the surrounding code and ' - 'on\n' - 'the call stack (it is treated as if the entire "try" statement ' - 'raised\n' - 'the exception).\n' - '\n' - 'When a matching except clause is found, the exception is assigned ' - 'to\n' - 'the target specified after the "as" keyword in that except clause, ' - 'if\n' - "present, and the except clause's suite is executed. All except\n" - 'clauses must have an executable block. 
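A brief sketch of truth value testing as summarized above; "Empty" is a hypothetical class used only for illustration:

    >>> bool(''), bool('x'), bool([]), bool({'k': 1})
    (False, True, False, True)
    >>> class Empty:
    ...     def __len__(self):
    ...         return 0
    ...
    >>> bool(Empty())
    False
    >>> 0 or 'fallback'                # "or" returns one of its operands
    'fallback'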
When the end of this block ' - 'is\n' - 'reached, execution continues normally after the entire try ' - 'statement.\n' - '(This means that if two nested handlers exist for the same ' - 'exception,\n' - 'and the exception occurs in the try clause of the inner handler, ' - 'the\n' - 'outer handler will not handle the exception.)\n' - '\n' - 'When an exception has been assigned using "as target", it is ' - 'cleared\n' - 'at the end of the except clause. This is as if\n' - '\n' - ' except E as N:\n' - ' foo\n' - '\n' - 'was translated to\n' - '\n' - ' except E as N:\n' - ' try:\n' - ' foo\n' - ' finally:\n' - ' del N\n' - '\n' - 'This means the exception must be assigned to a different name to ' - 'be\n' - 'able to refer to it after the except clause. Exceptions are ' - 'cleared\n' - 'because with the traceback attached to them, they form a reference\n' - 'cycle with the stack frame, keeping all locals in that frame alive\n' - 'until the next garbage collection occurs.\n' - '\n' - "Before an except clause's suite is executed, details about the\n" - 'exception are stored in the "sys" module and can be accessed via\n' - '"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of ' - 'the\n' - 'exception class, the exception instance and a traceback object ' - '(see\n' - 'section *The standard type hierarchy*) identifying the point in ' - 'the\n' - 'program where the exception occurred. "sys.exc_info()" values are\n' - 'restored to their previous values (before the call) when returning\n' - 'from a function that handled an exception.\n' - '\n' - 'The optional "else" clause is executed if and when control flows ' - 'off\n' - 'the end of the "try" clause. [2] Exceptions in the "else" clause ' - 'are\n' - 'not handled by the preceding "except" clauses.\n' - '\n' - 'If "finally" is present, it specifies a \'cleanup\' handler. The ' - '"try"\n' - 'clause is executed, including any "except" and "else" clauses. If ' - 'an\n' - 'exception occurs in any of the clauses and is not handled, the\n' - 'exception is temporarily saved. The "finally" clause is executed. ' - 'If\n' - 'there is a saved exception it is re-raised at the end of the ' - '"finally"\n' - 'clause. If the "finally" clause raises another exception, the ' - 'saved\n' - 'exception is set as the context of the new exception. If the ' - '"finally"\n' - 'clause executes a "return" or "break" statement, the saved ' - 'exception\n' - 'is discarded:\n' - '\n' - ' >>> def f():\n' - ' ... try:\n' - ' ... 1/0\n' - ' ... finally:\n' - ' ... return 42\n' - ' ...\n' - ' >>> f()\n' - ' 42\n' - '\n' - 'The exception information is not available to the program during\n' - 'execution of the "finally" clause.\n' - '\n' - 'When a "return", "break" or "continue" statement is executed in ' - 'the\n' - '"try" suite of a "try"..."finally" statement, the "finally" clause ' - 'is\n' - 'also executed \'on the way out.\' A "continue" statement is illegal ' - 'in\n' - 'the "finally" clause. (The reason is a problem with the current\n' - 'implementation --- this restriction may be lifted in the future).\n' - '\n' - 'The return value of a function is determined by the last "return"\n' - 'statement executed. Since the "finally" clause always executes, a\n' - '"return" statement executed in the "finally" clause will always be ' - 'the\n' - 'last one executed:\n' - '\n' - ' >>> def foo():\n' - ' ... try:\n' - " ... return 'try'\n" - ' ... finally:\n' - " ... 
return 'finally'\n" - ' ...\n' - ' >>> foo()\n' - " 'finally'\n" - '\n' - 'Additional information on exceptions can be found in section\n' - '*Exceptions*, and information on using the "raise" statement to\n' - 'generate exceptions may be found in section *The raise statement*.\n', - 'types': '\n' - 'The standard type hierarchy\n' - '***************************\n' - '\n' - 'Below is a list of the types that are built into Python. ' - 'Extension\n' - 'modules (written in C, Java, or other languages, depending on ' - 'the\n' - 'implementation) can define additional types. Future versions of\n' - 'Python may add types to the type hierarchy (e.g., rational ' - 'numbers,\n' - 'efficiently stored arrays of integers, etc.), although such ' - 'additions\n' - 'will often be provided via the standard library instead.\n' - '\n' - 'Some of the type descriptions below contain a paragraph listing\n' - "'special attributes.' These are attributes that provide access " - 'to the\n' - 'implementation and are not intended for general use. Their ' - 'definition\n' - 'may change in the future.\n' - '\n' - 'None\n' - ' This type has a single value. There is a single object with ' - 'this\n' - ' value. This object is accessed through the built-in name ' - '"None". It\n' - ' is used to signify the absence of a value in many situations, ' - 'e.g.,\n' - " it is returned from functions that don't explicitly return\n" - ' anything. Its truth value is false.\n' - '\n' - 'NotImplemented\n' - ' This type has a single value. There is a single object with ' - 'this\n' - ' value. This object is accessed through the built-in name\n' - ' "NotImplemented". Numeric methods and rich comparison methods\n' - ' should return this value if they do not implement the ' - 'operation for\n' - ' the operands provided. (The interpreter will then try the\n' - ' reflected operation, or some other fallback, depending on the\n' - ' operator.) Its truth value is true.\n' - '\n' - ' See *Implementing the arithmetic operations* for more ' - 'details.\n' - '\n' - 'Ellipsis\n' - ' This type has a single value. There is a single object with ' - 'this\n' - ' value. This object is accessed through the literal "..." or ' - 'the\n' - ' built-in name "Ellipsis". Its truth value is true.\n' - '\n' - '"numbers.Number"\n' - ' These are created by numeric literals and returned as results ' - 'by\n' - ' arithmetic operators and arithmetic built-in functions. ' - 'Numeric\n' - ' objects are immutable; once created their value never ' - 'changes.\n' - ' Python numbers are of course strongly related to mathematical\n' - ' numbers, but subject to the limitations of numerical ' - 'representation\n' - ' in computers.\n' - '\n' - ' Python distinguishes between integers, floating point numbers, ' - 'and\n' - ' complex numbers:\n' - '\n' - ' "numbers.Integral"\n' - ' These represent elements from the mathematical set of ' - 'integers\n' - ' (positive and negative).\n' - '\n' - ' There are two types of integers:\n' - '\n' - ' Integers ("int")\n' - '\n' - ' These represent numbers in an unlimited range, subject ' - 'to\n' - ' available (virtual) memory only. For the purpose of ' - 'shift\n' - ' and mask operations, a binary representation is assumed, ' - 'and\n' - " negative numbers are represented in a variant of 2's\n" - ' complement which gives the illusion of an infinite ' - 'string of\n' - ' sign bits extending to the left.\n' - '\n' - ' Booleans ("bool")\n' - ' These represent the truth values False and True. 
The ' - 'two\n' - ' objects representing the values "False" and "True" are ' - 'the\n' - ' only Boolean objects. The Boolean type is a subtype of ' - 'the\n' - ' integer type, and Boolean values behave like the values ' - '0 and\n' - ' 1, respectively, in almost all contexts, the exception ' - 'being\n' - ' that when converted to a string, the strings ""False"" ' - 'or\n' - ' ""True"" are returned, respectively.\n' - '\n' - ' The rules for integer representation are intended to give ' - 'the\n' - ' most meaningful interpretation of shift and mask ' - 'operations\n' - ' involving negative integers.\n' - '\n' - ' "numbers.Real" ("float")\n' - ' These represent machine-level double precision floating ' - 'point\n' - ' numbers. You are at the mercy of the underlying machine\n' - ' architecture (and C or Java implementation) for the ' - 'accepted\n' - ' range and handling of overflow. Python does not support ' - 'single-\n' - ' precision floating point numbers; the savings in processor ' - 'and\n' - ' memory usage that are usually the reason for using these ' - 'are\n' - ' dwarfed by the overhead of using objects in Python, so ' - 'there is\n' - ' no reason to complicate the language with two kinds of ' - 'floating\n' - ' point numbers.\n' - '\n' - ' "numbers.Complex" ("complex")\n' - ' These represent complex numbers as a pair of machine-level\n' - ' double precision floating point numbers. The same caveats ' - 'apply\n' - ' as for floating point numbers. The real and imaginary parts ' - 'of a\n' - ' complex number "z" can be retrieved through the read-only\n' - ' attributes "z.real" and "z.imag".\n' - '\n' - 'Sequences\n' - ' These represent finite ordered sets indexed by non-negative\n' - ' numbers. The built-in function "len()" returns the number of ' - 'items\n' - ' of a sequence. When the length of a sequence is *n*, the index ' - 'set\n' - ' contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence ' - '*a* is\n' - ' selected by "a[i]".\n' - '\n' - ' Sequences also support slicing: "a[i:j]" selects all items ' - 'with\n' - ' index *k* such that *i* "<=" *k* "<" *j*. When used as an\n' - ' expression, a slice is a sequence of the same type. This ' - 'implies\n' - ' that the index set is renumbered so that it starts at 0.\n' - '\n' - ' Some sequences also support "extended slicing" with a third ' - '"step"\n' - ' parameter: "a[i:j:k]" selects all items of *a* with index *x* ' - 'where\n' - ' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n' - '\n' - ' Sequences are distinguished according to their mutability:\n' - '\n' - ' Immutable sequences\n' - ' An object of an immutable sequence type cannot change once ' - 'it is\n' - ' created. (If the object contains references to other ' - 'objects,\n' - ' these other objects may be mutable and may be changed; ' - 'however,\n' - ' the collection of objects directly referenced by an ' - 'immutable\n' - ' object cannot change.)\n' - '\n' - ' The following types are immutable sequences:\n' - '\n' - ' Strings\n' - ' A string is a sequence of values that represent Unicode ' - 'code\n' - ' points. All the code points in the range "U+0000 - ' - 'U+10FFFF"\n' - " can be represented in a string. Python doesn't have a " - '"char"\n' - ' type; instead, every code point in the string is ' - 'represented\n' - ' as a string object with length "1". 
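A minimal sketch of the numeric and sequence behaviour described above (the values are arbitrary):

    >>> True + True                    # bool is a subtype of int
    2
    >>> z = 2 + 3j
    >>> z.real, z.imag
    (2.0, 3.0)
    >>> 'abcdef'[1:4]                  # a slice of a str is again a str
    'bcd'
    >>> 'abcdef'[::2]                  # extended slicing with a step
    'ace'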
The built-in ' - 'function\n' - ' "ord()" converts a code point from its string form to ' - 'an\n' - ' integer in the range "0 - 10FFFF"; "chr()" converts an\n' - ' integer in the range "0 - 10FFFF" to the corresponding ' - 'length\n' - ' "1" string object. "str.encode()" can be used to convert ' - 'a\n' - ' "str" to "bytes" using the given text encoding, and\n' - ' "bytes.decode()" can be used to achieve the opposite.\n' - '\n' - ' Tuples\n' - ' The items of a tuple are arbitrary Python objects. ' - 'Tuples of\n' - ' two or more items are formed by comma-separated lists ' - 'of\n' - " expressions. A tuple of one item (a 'singleton') can " - 'be\n' - ' formed by affixing a comma to an expression (an ' - 'expression by\n' - ' itself does not create a tuple, since parentheses must ' - 'be\n' - ' usable for grouping of expressions). An empty tuple can ' - 'be\n' - ' formed by an empty pair of parentheses.\n' - '\n' - ' Bytes\n' - ' A bytes object is an immutable array. The items are ' - '8-bit\n' - ' bytes, represented by integers in the range 0 <= x < ' - '256.\n' - ' Bytes literals (like "b\'abc\'") and the built-in ' - 'function\n' - ' "bytes()" can be used to construct bytes objects. ' - 'Also,\n' - ' bytes objects can be decoded to strings via the ' - '"decode()"\n' - ' method.\n' - '\n' - ' Mutable sequences\n' - ' Mutable sequences can be changed after they are created. ' - 'The\n' - ' subscription and slicing notations can be used as the ' - 'target of\n' - ' assignment and "del" (delete) statements.\n' - '\n' - ' There are currently two intrinsic mutable sequence types:\n' - '\n' - ' Lists\n' - ' The items of a list are arbitrary Python objects. Lists ' - 'are\n' - ' formed by placing a comma-separated list of expressions ' - 'in\n' - ' square brackets. (Note that there are no special cases ' - 'needed\n' - ' to form lists of length 0 or 1.)\n' - '\n' - ' Byte Arrays\n' - ' A bytearray object is a mutable array. They are created ' - 'by\n' - ' the built-in "bytearray()" constructor. Aside from ' - 'being\n' - ' mutable (and hence unhashable), byte arrays otherwise ' - 'provide\n' - ' the same interface and functionality as immutable bytes\n' - ' objects.\n' - '\n' - ' The extension module "array" provides an additional example ' - 'of a\n' - ' mutable sequence type, as does the "collections" module.\n' - '\n' - 'Set types\n' - ' These represent unordered, finite sets of unique, immutable\n' - ' objects. As such, they cannot be indexed by any subscript. ' - 'However,\n' - ' they can be iterated over, and the built-in function "len()"\n' - ' returns the number of items in a set. Common uses for sets are ' - 'fast\n' - ' membership testing, removing duplicates from a sequence, and\n' - ' computing mathematical operations such as intersection, ' - 'union,\n' - ' difference, and symmetric difference.\n' - '\n' - ' For set elements, the same immutability rules apply as for\n' - ' dictionary keys. Note that numeric types obey the normal rules ' - 'for\n' - ' numeric comparison: if two numbers compare equal (e.g., "1" ' - 'and\n' - ' "1.0"), only one of them can be contained in a set.\n' - '\n' - ' There are currently two intrinsic set types:\n' - '\n' - ' Sets\n' - ' These represent a mutable set. They are created by the ' - 'built-in\n' - ' "set()" constructor and can be modified afterwards by ' - 'several\n' - ' methods, such as "add()".\n' - '\n' - ' Frozen sets\n' - ' These represent an immutable set. They are created by the\n' - ' built-in "frozenset()" constructor. 
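A short sketch of the immutable and mutable sequence types and of the set rules above; "ba" is just a local name:

    >>> ord('é'), chr(233)
    (233, 'é')
    >>> ('lone',)                      # a one-item tuple needs the trailing comma
    ('lone',)
    >>> ba = bytearray(b'abc')
    >>> ba[0] = 0x41
    >>> ba
    bytearray(b'Abc')
    >>> {1, 1.0}                       # 1 == 1.0, so only one element is kept
    {1}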
As a frozenset is ' - 'immutable\n' - ' and *hashable*, it can be used again as an element of ' - 'another\n' - ' set, or as a dictionary key.\n' - '\n' - 'Mappings\n' - ' These represent finite sets of objects indexed by arbitrary ' - 'index\n' - ' sets. The subscript notation "a[k]" selects the item indexed ' - 'by "k"\n' - ' from the mapping "a"; this can be used in expressions and as ' - 'the\n' - ' target of assignments or "del" statements. The built-in ' - 'function\n' - ' "len()" returns the number of items in a mapping.\n' - '\n' - ' There is currently a single intrinsic mapping type:\n' - '\n' - ' Dictionaries\n' - ' These represent finite sets of objects indexed by nearly\n' - ' arbitrary values. The only types of values not acceptable ' - 'as\n' - ' keys are values containing lists or dictionaries or other\n' - ' mutable types that are compared by value rather than by ' - 'object\n' - ' identity, the reason being that the efficient ' - 'implementation of\n' - " dictionaries requires a key's hash value to remain " - 'constant.\n' - ' Numeric types used for keys obey the normal rules for ' - 'numeric\n' - ' comparison: if two numbers compare equal (e.g., "1" and ' - '"1.0")\n' - ' then they can be used interchangeably to index the same\n' - ' dictionary entry.\n' - '\n' - ' Dictionaries are mutable; they can be created by the ' - '"{...}"\n' - ' notation (see section *Dictionary displays*).\n' - '\n' - ' The extension modules "dbm.ndbm" and "dbm.gnu" provide\n' - ' additional examples of mapping types, as does the ' - '"collections"\n' - ' module.\n' - '\n' - 'Callable types\n' - ' These are the types to which the function call operation (see\n' - ' section *Calls*) can be applied:\n' - '\n' - ' User-defined functions\n' - ' A user-defined function object is created by a function\n' - ' definition (see section *Function definitions*). It should ' - 'be\n' - ' called with an argument list containing the same number of ' - 'items\n' - " as the function's formal parameter list.\n" - '\n' - ' Special attributes:\n' - '\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | Attribute | ' - 'Meaning | |\n' - ' ' - '+===========================+=================================+=============+\n' - ' | "__doc__" | The function\'s ' - 'documentation | Writable |\n' - ' | | string, or "None" ' - 'if | |\n' - ' | | unavailable; not inherited ' - 'by | |\n' - ' | | ' - 'subclasses | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__name__" | The function\'s ' - 'name | Writable |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__qualname__" | The function\'s *qualified ' - 'name* | Writable |\n' - ' | | New in version ' - '3.3. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__module__" | The name of the module ' - 'the | Writable |\n' - ' | | function was defined in, ' - 'or | |\n' - ' | | "None" if ' - 'unavailable. 
| |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__defaults__" | A tuple containing ' - 'default | Writable |\n' - ' | | argument values for ' - 'those | |\n' - ' | | arguments that have ' - 'defaults, | |\n' - ' | | or "None" if no arguments ' - 'have | |\n' - ' | | a default ' - 'value | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__code__" | The code object ' - 'representing | Writable |\n' - ' | | the compiled function ' - 'body. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__globals__" | A reference to the ' - 'dictionary | Read-only |\n' - ' | | that holds the ' - "function's | |\n" - ' | | global variables --- the ' - 'global | |\n' - ' | | namespace of the module ' - 'in | |\n' - ' | | which the function was ' - 'defined. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__dict__" | The namespace ' - 'supporting | Writable |\n' - ' | | arbitrary function ' - 'attributes. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__closure__" | "None" or a tuple of cells ' - 'that | Read-only |\n' - ' | | contain bindings for ' - 'the | |\n' - " | | function's free " - 'variables. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__annotations__" | A dict containing ' - 'annotations | Writable |\n' - ' | | of parameters. The keys of ' - 'the | |\n' - ' | | dict are the parameter ' - 'names, | |\n' - ' | | and "\'return\'" for the ' - 'return | |\n' - ' | | annotation, if ' - 'provided. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - ' | "__kwdefaults__" | A dict containing defaults ' - 'for | Writable |\n' - ' | | keyword-only ' - 'parameters. | |\n' - ' ' - '+---------------------------+---------------------------------+-------------+\n' - '\n' - ' Most of the attributes labelled "Writable" check the type ' - 'of the\n' - ' assigned value.\n' - '\n' - ' Function objects also support getting and setting ' - 'arbitrary\n' - ' attributes, which can be used, for example, to attach ' - 'metadata\n' - ' to functions. Regular attribute dot-notation is used to ' - 'get and\n' - ' set such attributes. *Note that the current implementation ' - 'only\n' - ' supports function attributes on user-defined functions. 
' - 'Function\n' - ' attributes on built-in functions may be supported in the\n' - ' future.*\n' - '\n' - " Additional information about a function's definition can " - 'be\n' - ' retrieved from its code object; see the description of ' - 'internal\n' - ' types below.\n' - '\n' - ' Instance methods\n' - ' An instance method object combines a class, a class ' - 'instance and\n' - ' any callable object (normally a user-defined function).\n' - '\n' - ' Special read-only attributes: "__self__" is the class ' - 'instance\n' - ' object, "__func__" is the function object; "__doc__" is ' - 'the\n' - ' method\'s documentation (same as "__func__.__doc__"); ' - '"__name__"\n' - ' is the method name (same as "__func__.__name__"); ' - '"__module__"\n' - ' is the name of the module the method was defined in, or ' - '"None"\n' - ' if unavailable.\n' - '\n' - ' Methods also support accessing (but not setting) the ' - 'arbitrary\n' - ' function attributes on the underlying function object.\n' - '\n' - ' User-defined method objects may be created when getting an\n' - ' attribute of a class (perhaps via an instance of that ' - 'class), if\n' - ' that attribute is a user-defined function object or a ' - 'class\n' - ' method object.\n' - '\n' - ' When an instance method object is created by retrieving a ' - 'user-\n' - ' defined function object from a class via one of its ' - 'instances,\n' - ' its "__self__" attribute is the instance, and the method ' - 'object\n' - ' is said to be bound. The new method\'s "__func__" ' - 'attribute is\n' - ' the original function object.\n' - '\n' - ' When a user-defined method object is created by retrieving\n' - ' another method object from a class or instance, the ' - 'behaviour is\n' - ' the same as for a function object, except that the ' - '"__func__"\n' - ' attribute of the new instance is not the original method ' - 'object\n' - ' but its "__func__" attribute.\n' - '\n' - ' When an instance method object is created by retrieving a ' - 'class\n' - ' method object from a class or instance, its "__self__" ' - 'attribute\n' - ' is the class itself, and its "__func__" attribute is the\n' - ' function object underlying the class method.\n' - '\n' - ' When an instance method object is called, the underlying\n' - ' function ("__func__") is called, inserting the class ' - 'instance\n' - ' ("__self__") in front of the argument list. For instance, ' - 'when\n' - ' "C" is a class which contains a definition for a function ' - '"f()",\n' - ' and "x" is an instance of "C", calling "x.f(1)" is ' - 'equivalent to\n' - ' calling "C.f(x, 1)".\n' - '\n' - ' When an instance method object is derived from a class ' - 'method\n' - ' object, the "class instance" stored in "__self__" will ' - 'actually\n' - ' be the class itself, so that calling either "x.f(1)" or ' - '"C.f(1)"\n' - ' is equivalent to calling "f(C,1)" where "f" is the ' - 'underlying\n' - ' function.\n' - '\n' - ' Note that the transformation from function object to ' - 'instance\n' - ' method object happens each time the attribute is retrieved ' - 'from\n' - ' the instance. In some cases, a fruitful optimization is ' - 'to\n' - ' assign the attribute to a local variable and call that ' - 'local\n' - ' variable. Also notice that this transformation only happens ' - 'for\n' - ' user-defined functions; other callable objects (and all ' - 'non-\n' - ' callable objects) are retrieved without transformation. 
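A minimal sketch of bound-method creation as described above; the class "C" and the names "c" and "m" are purely illustrative:

    >>> class C:
    ...     def f(self, x):
    ...         return x
    ...
    >>> c = C()
    >>> m = c.f                        # bound method created on attribute access
    >>> m.__self__ is c, m.__func__ is C.f
    (True, True)
    >>> m(1) == C.f(c, 1)
    True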
It ' - 'is\n' - ' also important to note that user-defined functions which ' - 'are\n' - ' attributes of a class instance are not converted to bound\n' - ' methods; this *only* happens when the function is an ' - 'attribute\n' - ' of the class.\n' - '\n' - ' Generator functions\n' - ' A function or method which uses the "yield" statement (see\n' - ' section *The yield statement*) is called a *generator ' - 'function*.\n' - ' Such a function, when called, always returns an iterator ' - 'object\n' - ' which can be used to execute the body of the function: ' - 'calling\n' - ' the iterator\'s "iterator.__next__()" method will cause ' - 'the\n' - ' function to execute until it provides a value using the ' - '"yield"\n' - ' statement. When the function executes a "return" statement ' - 'or\n' - ' falls off the end, a "StopIteration" exception is raised ' - 'and the\n' - ' iterator will have reached the end of the set of values to ' - 'be\n' - ' returned.\n' - '\n' - ' Coroutine functions\n' - ' A function or method which is defined using "async def" is\n' - ' called a *coroutine function*. Such a function, when ' - 'called,\n' - ' returns a *coroutine* object. It may contain "await"\n' - ' expressions, as well as "async with" and "async for" ' - 'statements.\n' - ' See also the *Coroutine Objects* section.\n' - '\n' - ' Built-in functions\n' - ' A built-in function object is a wrapper around a C ' - 'function.\n' - ' Examples of built-in functions are "len()" and ' - '"math.sin()"\n' - ' ("math" is a standard built-in module). The number and type ' - 'of\n' - ' the arguments are determined by the C function. Special ' - 'read-\n' - ' only attributes: "__doc__" is the function\'s ' - 'documentation\n' - ' string, or "None" if unavailable; "__name__" is the ' - "function's\n" - ' name; "__self__" is set to "None" (but see the next item);\n' - ' "__module__" is the name of the module the function was ' - 'defined\n' - ' in or "None" if unavailable.\n' - '\n' - ' Built-in methods\n' - ' This is really a different disguise of a built-in function, ' - 'this\n' - ' time containing an object passed to the C function as an\n' - ' implicit extra argument. An example of a built-in method ' - 'is\n' - ' "alist.append()", assuming *alist* is a list object. In ' - 'this\n' - ' case, the special read-only attribute "__self__" is set to ' - 'the\n' - ' object denoted by *alist*.\n' - '\n' - ' Classes\n' - ' Classes are callable. These objects normally act as ' - 'factories\n' - ' for new instances of themselves, but variations are ' - 'possible for\n' - ' class types that override "__new__()". The arguments of ' - 'the\n' - ' call are passed to "__new__()" and, in the typical case, ' - 'to\n' - ' "__init__()" to initialize the new instance.\n' - '\n' - ' Class Instances\n' - ' Instances of arbitrary classes can be made callable by ' - 'defining\n' - ' a "__call__()" method in their class.\n' - '\n' - 'Modules\n' - ' Modules are a basic organizational unit of Python code, and ' - 'are\n' - ' created by the *import system* as invoked either by the ' - '"import"\n' - ' statement (see "import"), or by calling functions such as\n' - ' "importlib.import_module()" and built-in "__import__()". A ' - 'module\n' - ' object has a namespace implemented by a dictionary object ' - '(this is\n' - ' the dictionary referenced by the "__globals__" attribute of\n' - ' functions defined in the module). 
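A small sketch of a generator function and of the "__self__" attribute of a built-in method, per the descriptions above ("gen", "it" and "items" are illustrative names):

    >>> def gen():
    ...     yield 1
    ...     yield 2
    ...
    >>> it = gen()
    >>> next(it), next(it)
    (1, 2)
    >>> items = []
    >>> items.append.__self__ is items
    True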
Attribute references are\n' - ' translated to lookups in this dictionary, e.g., "m.x" is ' - 'equivalent\n' - ' to "m.__dict__["x"]". A module object does not contain the ' - 'code\n' - " object used to initialize the module (since it isn't needed " - 'once\n' - ' the initialization is done).\n' - '\n' - " Attribute assignment updates the module's namespace " - 'dictionary,\n' - ' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n' - '\n' - ' Special read-only attribute: "__dict__" is the module\'s ' - 'namespace\n' - ' as a dictionary object.\n' - '\n' - ' **CPython implementation detail:** Because of the way CPython\n' - ' clears module dictionaries, the module dictionary will be ' - 'cleared\n' - ' when the module falls out of scope even if the dictionary ' - 'still has\n' - ' live references. To avoid this, copy the dictionary or keep ' - 'the\n' - ' module around while using its dictionary directly.\n' - '\n' - ' Predefined (writable) attributes: "__name__" is the module\'s ' - 'name;\n' - ' "__doc__" is the module\'s documentation string, or "None" if\n' - ' unavailable; "__file__" is the pathname of the file from which ' - 'the\n' - ' module was loaded, if it was loaded from a file. The ' - '"__file__"\n' - ' attribute may be missing for certain types of modules, such as ' - 'C\n' - ' modules that are statically linked into the interpreter; for\n' - ' extension modules loaded dynamically from a shared library, it ' - 'is\n' - ' the pathname of the shared library file.\n' - '\n' - 'Custom classes\n' - ' Custom class types are typically created by class definitions ' - '(see\n' - ' section *Class definitions*). A class has a namespace ' - 'implemented\n' - ' by a dictionary object. Class attribute references are ' - 'translated\n' - ' to lookups in this dictionary, e.g., "C.x" is translated to\n' - ' "C.__dict__["x"]" (although there are a number of hooks which ' - 'allow\n' - ' for other means of locating attributes). When the attribute ' - 'name is\n' - ' not found there, the attribute search continues in the base\n' - ' classes. This search of the base classes uses the C3 method\n' - ' resolution order which behaves correctly even in the presence ' - 'of\n' - " 'diamond' inheritance structures where there are multiple\n" - ' inheritance paths leading back to a common ancestor. ' - 'Additional\n' - ' details on the C3 MRO used by Python can be found in the\n' - ' documentation accompanying the 2.3 release at\n' - ' https://www.python.org/download/releases/2.3/mro/.\n' - '\n' - ' When a class attribute reference (for class "C", say) would ' - 'yield a\n' - ' class method object, it is transformed into an instance ' - 'method\n' - ' object whose "__self__" attributes is "C". When it would ' - 'yield a\n' - ' static method object, it is transformed into the object ' - 'wrapped by\n' - ' the static method object. 
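A minimal sketch of module attribute access being a dictionary lookup, as described above:

    >>> import math
    >>> math.pi is math.__dict__['pi']
    True
    >>> math.__name__
    'math'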
See section *Implementing ' - 'Descriptors*\n' - ' for another way in which attributes retrieved from a class ' - 'may\n' - ' differ from those actually contained in its "__dict__".\n' - '\n' - " Class attribute assignments update the class's dictionary, " - 'never\n' - ' the dictionary of a base class.\n' - '\n' - ' A class object can be called (see above) to yield a class ' - 'instance\n' - ' (see below).\n' - '\n' - ' Special attributes: "__name__" is the class name; "__module__" ' - 'is\n' - ' the module name in which the class was defined; "__dict__" is ' - 'the\n' - ' dictionary containing the class\'s namespace; "__bases__" is a ' - 'tuple\n' - ' (possibly empty or a singleton) containing the base classes, ' - 'in the\n' - ' order of their occurrence in the base class list; "__doc__" is ' - 'the\n' - " class's documentation string, or None if undefined.\n" - '\n' - 'Class instances\n' - ' A class instance is created by calling a class object (see ' - 'above).\n' - ' A class instance has a namespace implemented as a dictionary ' - 'which\n' - ' is the first place in which attribute references are ' - 'searched.\n' - " When an attribute is not found there, and the instance's class " - 'has\n' - ' an attribute by that name, the search continues with the ' - 'class\n' - ' attributes. If a class attribute is found that is a ' - 'user-defined\n' - ' function object, it is transformed into an instance method ' - 'object\n' - ' whose "__self__" attribute is the instance. Static method ' - 'and\n' - ' class method objects are also transformed; see above under\n' - ' "Classes". See section *Implementing Descriptors* for another ' - 'way\n' - ' in which attributes of a class retrieved via its instances ' - 'may\n' - " differ from the objects actually stored in the class's " - '"__dict__".\n' - " If no class attribute is found, and the object's class has a\n" - ' "__getattr__()" method, that is called to satisfy the lookup.\n' - '\n' - " Attribute assignments and deletions update the instance's\n" - " dictionary, never a class's dictionary. If the class has a\n" - ' "__setattr__()" or "__delattr__()" method, this is called ' - 'instead\n' - ' of updating the instance dictionary directly.\n' - '\n' - ' Class instances can pretend to be numbers, sequences, or ' - 'mappings\n' - ' if they have methods with certain special names. See section\n' - ' *Special method names*.\n' - '\n' - ' Special attributes: "__dict__" is the attribute dictionary;\n' - ' "__class__" is the instance\'s class.\n' - '\n' - 'I/O objects (also known as file objects)\n' - ' A *file object* represents an open file. Various shortcuts ' - 'are\n' - ' available to create file objects: the "open()" built-in ' - 'function,\n' - ' and also "os.popen()", "os.fdopen()", and the "makefile()" ' - 'method\n' - ' of socket objects (and perhaps by other functions or methods\n' - ' provided by extension modules).\n' - '\n' - ' The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n' - ' initialized to file objects corresponding to the ' - "interpreter's\n" - ' standard input, output and error streams; they are all open in ' - 'text\n' - ' mode and therefore follow the interface defined by the\n' - ' "io.TextIOBase" abstract class.\n' - '\n' - 'Internal types\n' - ' A few types used internally by the interpreter are exposed to ' - 'the\n' - ' user. 
Their definitions may change with future versions of ' - 'the\n' - ' interpreter, but they are mentioned here for completeness.\n' - '\n' - ' Code objects\n' - ' Code objects represent *byte-compiled* executable Python ' - 'code,\n' - ' or *bytecode*. The difference between a code object and a\n' - ' function object is that the function object contains an ' - 'explicit\n' - " reference to the function's globals (the module in which it " - 'was\n' - ' defined), while a code object contains no context; also ' - 'the\n' - ' default argument values are stored in the function object, ' - 'not\n' - ' in the code object (because they represent values ' - 'calculated at\n' - ' run-time). Unlike function objects, code objects are ' - 'immutable\n' - ' and contain no references (directly or indirectly) to ' - 'mutable\n' - ' objects.\n' - '\n' - ' Special read-only attributes: "co_name" gives the function ' - 'name;\n' - ' "co_argcount" is the number of positional arguments ' - '(including\n' - ' arguments with default values); "co_nlocals" is the number ' - 'of\n' - ' local variables used by the function (including ' - 'arguments);\n' - ' "co_varnames" is a tuple containing the names of the local\n' - ' variables (starting with the argument names); "co_cellvars" ' - 'is a\n' - ' tuple containing the names of local variables that are\n' - ' referenced by nested functions; "co_freevars" is a tuple\n' - ' containing the names of free variables; "co_code" is a ' - 'string\n' - ' representing the sequence of bytecode instructions; ' - '"co_consts"\n' - ' is a tuple containing the literals used by the bytecode;\n' - ' "co_names" is a tuple containing the names used by the ' - 'bytecode;\n' - ' "co_filename" is the filename from which the code was ' - 'compiled;\n' - ' "co_firstlineno" is the first line number of the function;\n' - ' "co_lnotab" is a string encoding the mapping from bytecode\n' - ' offsets to line numbers (for details see the source code of ' - 'the\n' - ' interpreter); "co_stacksize" is the required stack size\n' - ' (including local variables); "co_flags" is an integer ' - 'encoding a\n' - ' number of flags for the interpreter.\n' - '\n' - ' The following flag bits are defined for "co_flags": bit ' - '"0x04"\n' - ' is set if the function uses the "*arguments" syntax to ' - 'accept an\n' - ' arbitrary number of positional arguments; bit "0x08" is set ' - 'if\n' - ' the function uses the "**keywords" syntax to accept ' - 'arbitrary\n' - ' keyword arguments; bit "0x20" is set if the function is a\n' - ' generator.\n' - '\n' - ' Future feature declarations ("from __future__ import ' - 'division")\n' - ' also use bits in "co_flags" to indicate whether a code ' - 'object\n' - ' was compiled with a particular feature enabled: bit ' - '"0x2000" is\n' - ' set if the function was compiled with future division ' - 'enabled;\n' - ' bits "0x10" and "0x1000" were used in earlier versions of\n' - ' Python.\n' - '\n' - ' Other bits in "co_flags" are reserved for internal use.\n' - '\n' - ' If a code object represents a function, the first item in\n' - ' "co_consts" is the documentation string of the function, ' - 'or\n' - ' "None" if undefined.\n' - '\n' - ' Frame objects\n' - ' Frame objects represent execution frames. 
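Before moving on to frame objects, a short sketch making the code-object attributes listed above concrete (illustrative only; "f" is an arbitrary function):

   >>> def f(a, b=1):
   ...     x = a + b
   ...     return x
   ...
   >>> f.__code__.co_name, f.__code__.co_argcount
   ('f', 2)
   >>> f.__code__.co_varnames            # argument names first, then locals
   ('a', 'b', 'x')
   >>> bool(f.__code__.co_flags & 0x20)  # the generator bit is not set
   False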
They may occur ' - 'in\n' - ' traceback objects (see below).\n' - '\n' - ' Special read-only attributes: "f_back" is to the previous ' - 'stack\n' - ' frame (towards the caller), or "None" if this is the ' - 'bottom\n' - ' stack frame; "f_code" is the code object being executed in ' - 'this\n' - ' frame; "f_locals" is the dictionary used to look up local\n' - ' variables; "f_globals" is used for global variables;\n' - ' "f_builtins" is used for built-in (intrinsic) names; ' - '"f_lasti"\n' - ' gives the precise instruction (this is an index into the\n' - ' bytecode string of the code object).\n' - '\n' - ' Special writable attributes: "f_trace", if not "None", is ' - 'a\n' - ' function called at the start of each source code line (this ' - 'is\n' - ' used by the debugger); "f_lineno" is the current line ' - 'number of\n' - ' the frame --- writing to this from within a trace function ' - 'jumps\n' - ' to the given line (only for the bottom-most frame). A ' - 'debugger\n' - ' can implement a Jump command (aka Set Next Statement) by ' - 'writing\n' - ' to f_lineno.\n' - '\n' - ' Frame objects support one method:\n' - '\n' - ' frame.clear()\n' - '\n' - ' This method clears all references to local variables ' - 'held by\n' - ' the frame. Also, if the frame belonged to a generator, ' - 'the\n' - ' generator is finalized. This helps break reference ' - 'cycles\n' - ' involving frame objects (for example when catching an\n' - ' exception and storing its traceback for later use).\n' - '\n' - ' "RuntimeError" is raised if the frame is currently ' - 'executing.\n' - '\n' - ' New in version 3.4.\n' - '\n' - ' Traceback objects\n' - ' Traceback objects represent a stack trace of an exception. ' - 'A\n' - ' traceback object is created when an exception occurs. When ' - 'the\n' - ' search for an exception handler unwinds the execution ' - 'stack, at\n' - ' each unwound level a traceback object is inserted in front ' - 'of\n' - ' the current traceback. When an exception handler is ' - 'entered,\n' - ' the stack trace is made available to the program. (See ' - 'section\n' - ' *The try statement*.) It is accessible as the third item of ' - 'the\n' - ' tuple returned by "sys.exc_info()". When the program ' - 'contains no\n' - ' suitable handler, the stack trace is written (nicely ' - 'formatted)\n' - ' to the standard error stream; if the interpreter is ' - 'interactive,\n' - ' it is also made available to the user as ' - '"sys.last_traceback".\n' - '\n' - ' Special read-only attributes: "tb_next" is the next level ' - 'in the\n' - ' stack trace (towards the frame where the exception ' - 'occurred), or\n' - ' "None" if there is no next level; "tb_frame" points to the\n' - ' execution frame of the current level; "tb_lineno" gives the ' - 'line\n' - ' number where the exception occurred; "tb_lasti" indicates ' - 'the\n' - ' precise instruction. The line number and last instruction ' - 'in\n' - ' the traceback may differ from the line number of its frame\n' - ' object if the exception occurred in a "try" statement with ' - 'no\n' - ' matching except clause or with a finally clause.\n' - '\n' - ' Slice objects\n' - ' Slice objects are used to represent slices for ' - '"__getitem__()"\n' - ' methods. They are also created by the built-in "slice()"\n' - ' function.\n' - '\n' - ' Special read-only attributes: "start" is the lower bound; ' - '"stop"\n' - ' is the upper bound; "step" is the step value; each is ' - '"None" if\n' - ' omitted. 
These attributes can have any type.\n' - '\n' - ' Slice objects support one method:\n' - '\n' - ' slice.indices(self, length)\n' - '\n' - ' This method takes a single integer argument *length* ' - 'and\n' - ' computes information about the slice that the slice ' - 'object\n' - ' would describe if applied to a sequence of *length* ' - 'items.\n' - ' It returns a tuple of three integers; respectively these ' - 'are\n' - ' the *start* and *stop* indices and the *step* or stride\n' - ' length of the slice. Missing or out-of-bounds indices ' - 'are\n' - ' handled in a manner consistent with regular slices.\n' - '\n' - ' Static method objects\n' - ' Static method objects provide a way of defeating the\n' - ' transformation of function objects to method objects ' - 'described\n' - ' above. A static method object is a wrapper around any ' - 'other\n' - ' object, usually a user-defined method object. When a ' - 'static\n' - ' method object is retrieved from a class or a class ' - 'instance, the\n' - ' object actually returned is the wrapped object, which is ' - 'not\n' - ' subject to any further transformation. Static method ' - 'objects are\n' - ' not themselves callable, although the objects they wrap ' - 'usually\n' - ' are. Static method objects are created by the built-in\n' - ' "staticmethod()" constructor.\n' - '\n' - ' Class method objects\n' - ' A class method object, like a static method object, is a ' - 'wrapper\n' - ' around another object that alters the way in which that ' - 'object\n' - ' is retrieved from classes and class instances. The ' - 'behaviour of\n' - ' class method objects upon such retrieval is described ' - 'above,\n' - ' under "User-defined methods". Class method objects are ' - 'created\n' - ' by the built-in "classmethod()" constructor.\n', - 'typesfunctions': '\n' - 'Functions\n' - '*********\n' - '\n' - 'Function objects are created by function definitions. ' - 'The only\n' - 'operation on a function object is to call it: ' - '"func(argument-list)".\n' - '\n' - 'There are really two flavors of function objects: ' - 'built-in functions\n' - 'and user-defined functions. Both support the same ' - 'operation (to call\n' - 'the function), but the implementation is different, ' - 'hence the\n' - 'different object types.\n' - '\n' - 'See *Function definitions* for more information.\n', - 'typesmapping': '\n' - 'Mapping Types --- "dict"\n' - '************************\n' - '\n' - 'A *mapping* object maps *hashable* values to arbitrary ' - 'objects.\n' - 'Mappings are mutable objects. There is currently only one ' - 'standard\n' - 'mapping type, the *dictionary*. (For other containers see ' - 'the built-\n' - 'in "list", "set", and "tuple" classes, and the ' - '"collections" module.)\n' - '\n' - "A dictionary's keys are *almost* arbitrary values. Values " - 'that are\n' - 'not *hashable*, that is, values containing lists, ' - 'dictionaries or\n' - 'other mutable types (that are compared by value rather ' - 'than by object\n' - 'identity) may not be used as keys. Numeric types used for ' - 'keys obey\n' - 'the normal rules for numeric comparison: if two numbers ' - 'compare equal\n' - '(such as "1" and "1.0") then they can be used ' - 'interchangeably to index\n' - 'the same dictionary entry. 
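For example (an illustrative sketch, not part of the generated text), "1" and "1.0" select the same entry:

   >>> d = {1: 'one'}
   >>> d[1.0]
   'one'
   >>> d[1.0] = 'uno'         # rebinds the value; the stored key stays 1
   >>> d
   {1: 'uno'}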
(Note however, that since ' - 'computers store\n' - 'floating-point numbers as approximations it is usually ' - 'unwise to use\n' - 'them as dictionary keys.)\n' - '\n' - 'Dictionaries can be created by placing a comma-separated ' - 'list of "key:\n' - 'value" pairs within braces, for example: "{\'jack\': 4098, ' - "'sjoerd':\n" - '4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the ' - '"dict"\n' - 'constructor.\n' - '\n' - 'class class dict(**kwarg)\n' - 'class class dict(mapping, **kwarg)\n' - 'class class dict(iterable, **kwarg)\n' - '\n' - ' Return a new dictionary initialized from an optional ' - 'positional\n' - ' argument and a possibly empty set of keyword ' - 'arguments.\n' - '\n' - ' If no positional argument is given, an empty dictionary ' - 'is created.\n' - ' If a positional argument is given and it is a mapping ' - 'object, a\n' - ' dictionary is created with the same key-value pairs as ' - 'the mapping\n' - ' object. Otherwise, the positional argument must be an ' - '*iterable*\n' - ' object. Each item in the iterable must itself be an ' - 'iterable with\n' - ' exactly two objects. The first object of each item ' - 'becomes a key\n' - ' in the new dictionary, and the second object the ' - 'corresponding\n' - ' value. If a key occurs more than once, the last value ' - 'for that key\n' - ' becomes the corresponding value in the new dictionary.\n' - '\n' - ' If keyword arguments are given, the keyword arguments ' - 'and their\n' - ' values are added to the dictionary created from the ' - 'positional\n' - ' argument. If a key being added is already present, the ' - 'value from\n' - ' the keyword argument replaces the value from the ' - 'positional\n' - ' argument.\n' - '\n' - ' To illustrate, the following examples all return a ' - 'dictionary equal\n' - ' to "{"one": 1, "two": 2, "three": 3}":\n' - '\n' - ' >>> a = dict(one=1, two=2, three=3)\n' - " >>> b = {'one': 1, 'two': 2, 'three': 3}\n" - " >>> c = dict(zip(['one', 'two', 'three'], [1, 2, " - '3]))\n' - " >>> d = dict([('two', 2), ('one', 1), ('three', " - '3)])\n' - " >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n" - ' >>> a == b == c == d == e\n' - ' True\n' - '\n' - ' Providing keyword arguments as in the first example ' - 'only works for\n' - ' keys that are valid Python identifiers. Otherwise, any ' - 'valid keys\n' - ' can be used.\n' - '\n' - ' These are the operations that dictionaries support (and ' - 'therefore,\n' - ' custom mapping types should support too):\n' - '\n' - ' len(d)\n' - '\n' - ' Return the number of items in the dictionary *d*.\n' - '\n' - ' d[key]\n' - '\n' - ' Return the item of *d* with key *key*. Raises a ' - '"KeyError" if\n' - ' *key* is not in the map.\n' - '\n' - ' If a subclass of dict defines a method ' - '"__missing__()" and *key*\n' - ' is not present, the "d[key]" operation calls that ' - 'method with\n' - ' the key *key* as argument. The "d[key]" operation ' - 'then returns\n' - ' or raises whatever is returned or raised by the\n' - ' "__missing__(key)" call. No other operations or ' - 'methods invoke\n' - ' "__missing__()". If "__missing__()" is not defined, ' - '"KeyError"\n' - ' is raised. "__missing__()" must be a method; it ' - 'cannot be an\n' - ' instance variable:\n' - '\n' - ' >>> class Counter(dict):\n' - ' ... def __missing__(self, key):\n' - ' ... 
return 0\n' - ' >>> c = Counter()\n' - " >>> c['red']\n" - ' 0\n' - " >>> c['red'] += 1\n" - " >>> c['red']\n" - ' 1\n' - '\n' - ' The example above shows part of the implementation ' - 'of\n' - ' "collections.Counter". A different "__missing__" ' - 'method is used\n' - ' by "collections.defaultdict".\n' - '\n' - ' d[key] = value\n' - '\n' - ' Set "d[key]" to *value*.\n' - '\n' - ' del d[key]\n' - '\n' - ' Remove "d[key]" from *d*. Raises a "KeyError" if ' - '*key* is not\n' - ' in the map.\n' - '\n' - ' key in d\n' - '\n' - ' Return "True" if *d* has a key *key*, else "False".\n' - '\n' - ' key not in d\n' - '\n' - ' Equivalent to "not key in d".\n' - '\n' - ' iter(d)\n' - '\n' - ' Return an iterator over the keys of the dictionary. ' - 'This is a\n' - ' shortcut for "iter(d.keys())".\n' - '\n' - ' clear()\n' - '\n' - ' Remove all items from the dictionary.\n' - '\n' - ' copy()\n' - '\n' - ' Return a shallow copy of the dictionary.\n' - '\n' - ' classmethod fromkeys(seq[, value])\n' - '\n' - ' Create a new dictionary with keys from *seq* and ' - 'values set to\n' - ' *value*.\n' - '\n' - ' "fromkeys()" is a class method that returns a new ' - 'dictionary.\n' - ' *value* defaults to "None".\n' - '\n' - ' get(key[, default])\n' - '\n' - ' Return the value for *key* if *key* is in the ' - 'dictionary, else\n' - ' *default*. If *default* is not given, it defaults to ' - '"None", so\n' - ' that this method never raises a "KeyError".\n' - '\n' - ' items()\n' - '\n' - ' Return a new view of the dictionary\'s items ("(key, ' - 'value)"\n' - ' pairs). See the *documentation of view objects*.\n' - '\n' - ' keys()\n' - '\n' - " Return a new view of the dictionary's keys. See " - 'the\n' - ' *documentation of view objects*.\n' - '\n' - ' pop(key[, default])\n' - '\n' - ' If *key* is in the dictionary, remove it and return ' - 'its value,\n' - ' else return *default*. If *default* is not given ' - 'and *key* is\n' - ' not in the dictionary, a "KeyError" is raised.\n' - '\n' - ' popitem()\n' - '\n' - ' Remove and return an arbitrary "(key, value)" pair ' - 'from the\n' - ' dictionary.\n' - '\n' - ' "popitem()" is useful to destructively iterate over ' - 'a\n' - ' dictionary, as often used in set algorithms. If the ' - 'dictionary\n' - ' is empty, calling "popitem()" raises a "KeyError".\n' - '\n' - ' setdefault(key[, default])\n' - '\n' - ' If *key* is in the dictionary, return its value. If ' - 'not, insert\n' - ' *key* with a value of *default* and return ' - '*default*. *default*\n' - ' defaults to "None".\n' - '\n' - ' update([other])\n' - '\n' - ' Update the dictionary with the key/value pairs from ' - '*other*,\n' - ' overwriting existing keys. Return "None".\n' - '\n' - ' "update()" accepts either another dictionary object ' - 'or an\n' - ' iterable of key/value pairs (as tuples or other ' - 'iterables of\n' - ' length two). If keyword arguments are specified, ' - 'the dictionary\n' - ' is then updated with those key/value pairs: ' - '"d.update(red=1,\n' - ' blue=2)".\n' - '\n' - ' values()\n' - '\n' - " Return a new view of the dictionary's values. See " - 'the\n' - ' *documentation of view objects*.\n' - '\n' - ' Dictionaries compare equal if and only if they have the ' - 'same "(key,\n' - ' value)" pairs. 
Order comparisons (\'<\', \'<=\', ' - "'>=', '>') raise\n" - ' "TypeError".\n' - '\n' - 'See also: "types.MappingProxyType" can be used to create a ' - 'read-only\n' - ' view of a "dict".\n' - '\n' - '\n' - 'Dictionary view objects\n' - '=======================\n' - '\n' - 'The objects returned by "dict.keys()", "dict.values()" ' - 'and\n' - '"dict.items()" are *view objects*. They provide a dynamic ' - 'view on the\n' - "dictionary's entries, which means that when the dictionary " - 'changes,\n' - 'the view reflects these changes.\n' - '\n' - 'Dictionary views can be iterated over to yield their ' - 'respective data,\n' - 'and support membership tests:\n' - '\n' - 'len(dictview)\n' - '\n' - ' Return the number of entries in the dictionary.\n' - '\n' - 'iter(dictview)\n' - '\n' - ' Return an iterator over the keys, values or items ' - '(represented as\n' - ' tuples of "(key, value)") in the dictionary.\n' - '\n' - ' Keys and values are iterated over in an arbitrary order ' - 'which is\n' - ' non-random, varies across Python implementations, and ' - 'depends on\n' - " the dictionary's history of insertions and deletions. " - 'If keys,\n' - ' values and items views are iterated over with no ' - 'intervening\n' - ' modifications to the dictionary, the order of items ' - 'will directly\n' - ' correspond. This allows the creation of "(value, key)" ' - 'pairs using\n' - ' "zip()": "pairs = zip(d.values(), d.keys())". Another ' - 'way to\n' - ' create the same list is "pairs = [(v, k) for (k, v) in ' - 'd.items()]".\n' - '\n' - ' Iterating views while adding or deleting entries in the ' - 'dictionary\n' - ' may raise a "RuntimeError" or fail to iterate over all ' - 'entries.\n' - '\n' - 'x in dictview\n' - '\n' - ' Return "True" if *x* is in the underlying dictionary\'s ' - 'keys, values\n' - ' or items (in the latter case, *x* should be a "(key, ' - 'value)"\n' - ' tuple).\n' - '\n' - 'Keys views are set-like since their entries are unique and ' - 'hashable.\n' - 'If all values are hashable, so that "(key, value)" pairs ' - 'are unique\n' - 'and hashable, then the items view is also set-like. ' - '(Values views are\n' - 'not treated as set-like since the entries are generally ' - 'not unique.)\n' - 'For set-like views, all of the operations defined for the ' - 'abstract\n' - 'base class "collections.abc.Set" are available (for ' - 'example, "==",\n' - '"<", or "^").\n' - '\n' - 'An example of dictionary view usage:\n' - '\n' - " >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, " - "'spam': 500}\n" - ' >>> keys = dishes.keys()\n' - ' >>> values = dishes.values()\n' - '\n' - ' >>> # iteration\n' - ' >>> n = 0\n' - ' >>> for val in values:\n' - ' ... n += val\n' - ' >>> print(n)\n' - ' 504\n' - '\n' - ' >>> # keys and values are iterated over in the same ' - 'order\n' - ' >>> list(keys)\n' - " ['eggs', 'bacon', 'sausage', 'spam']\n" - ' >>> list(values)\n' - ' [2, 1, 1, 500]\n' - '\n' - ' >>> # view objects are dynamic and reflect dict ' - 'changes\n' - " >>> del dishes['eggs']\n" - " >>> del dishes['sausage']\n" - ' >>> list(keys)\n' - " ['spam', 'bacon']\n" - '\n' - ' >>> # set operations\n' - " >>> keys & {'eggs', 'bacon', 'salad'}\n" - " {'bacon'}\n" - " >>> keys ^ {'sausage', 'juice'}\n" - " {'juice', 'sausage', 'bacon', 'spam'}\n", - 'typesmethods': '\n' - 'Methods\n' - '*******\n' - '\n' - 'Methods are functions that are called using the attribute ' - 'notation.\n' - 'There are two flavors: built-in methods (such as ' - '"append()" on lists)\n' - 'and class instance methods. 
Built-in methods are ' - 'described with the\n' - 'types that support them.\n' - '\n' - 'If you access a method (a function defined in a class ' - 'namespace)\n' - 'through an instance, you get a special object: a *bound ' - 'method* (also\n' - 'called *instance method*) object. When called, it will add ' - 'the "self"\n' - 'argument to the argument list. Bound methods have two ' - 'special read-\n' - 'only attributes: "m.__self__" is the object on which the ' - 'method\n' - 'operates, and "m.__func__" is the function implementing ' - 'the method.\n' - 'Calling "m(arg-1, arg-2, ..., arg-n)" is completely ' - 'equivalent to\n' - 'calling "m.__func__(m.__self__, arg-1, arg-2, ..., ' - 'arg-n)".\n' - '\n' - 'Like function objects, bound method objects support ' - 'getting arbitrary\n' - 'attributes. However, since method attributes are actually ' - 'stored on\n' - 'the underlying function object ("meth.__func__"), setting ' - 'method\n' - 'attributes on bound methods is disallowed. Attempting to ' - 'set an\n' - 'attribute on a method results in an "AttributeError" being ' - 'raised. In\n' - 'order to set a method attribute, you need to explicitly ' - 'set it on the\n' - 'underlying function object:\n' - '\n' - ' >>> class C:\n' - ' ... def method(self):\n' - ' ... pass\n' - ' ...\n' - ' >>> c = C()\n' - " >>> c.method.whoami = 'my name is method' # can't set " - 'on the method\n' - ' Traceback (most recent call last):\n' - ' File "", line 1, in \n' - " AttributeError: 'method' object has no attribute " - "'whoami'\n" - " >>> c.method.__func__.whoami = 'my name is method'\n" - ' >>> c.method.whoami\n' - " 'my name is method'\n" - '\n' - 'See *The standard type hierarchy* for more information.\n', - 'typesmodules': '\n' - 'Modules\n' - '*******\n' - '\n' - 'The only special operation on a module is attribute ' - 'access: "m.name",\n' - 'where *m* is a module and *name* accesses a name defined ' - "in *m*'s\n" - 'symbol table. Module attributes can be assigned to. (Note ' - 'that the\n' - '"import" statement is not, strictly speaking, an operation ' - 'on a module\n' - 'object; "import foo" does not require a module object ' - 'named *foo* to\n' - 'exist, rather it requires an (external) *definition* for a ' - 'module\n' - 'named *foo* somewhere.)\n' - '\n' - 'A special attribute of every module is "__dict__". This is ' - 'the\n' - "dictionary containing the module's symbol table. Modifying " - 'this\n' - "dictionary will actually change the module's symbol table, " - 'but direct\n' - 'assignment to the "__dict__" attribute is not possible ' - '(you can write\n' - '"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", ' - "but you can't\n" - 'write "m.__dict__ = {}"). Modifying "__dict__" directly ' - 'is not\n' - 'recommended.\n' - '\n' - 'Modules built into the interpreter are written like this: ' - '"". If loaded from a file, they are ' - 'written as\n' - '"".\n', - 'typesseq': '\n' - 'Sequence Types --- "list", "tuple", "range"\n' - '*******************************************\n' - '\n' - 'There are three basic sequence types: lists, tuples, and ' - 'range\n' - 'objects. Additional sequence types tailored for processing of ' - '*binary\n' - 'data* and *text strings* are described in dedicated sections.\n' - '\n' - '\n' - 'Common Sequence Operations\n' - '==========================\n' - '\n' - 'The operations in the following table are supported by most ' - 'sequence\n' - 'types, both mutable and immutable. 
The ' - '"collections.abc.Sequence" ABC\n' - 'is provided to make it easier to correctly implement these ' - 'operations\n' - 'on custom sequence types.\n' - '\n' - 'This table lists the sequence operations sorted in ascending ' - 'priority.\n' - 'In the table, *s* and *t* are sequences of the same type, *n*, ' - '*i*,\n' - '*j* and *k* are integers and *x* is an arbitrary object that ' - 'meets any\n' - 'type and value restrictions imposed by *s*.\n' - '\n' - 'The "in" and "not in" operations have the same priorities as ' - 'the\n' - 'comparison operations. The "+" (concatenation) and "*" ' - '(repetition)\n' - 'operations have the same priority as the corresponding ' - 'numeric\n' - 'operations.\n' - '\n' - '+----------------------------+----------------------------------+------------+\n' - '| Operation | ' - 'Result | Notes |\n' - '+============================+==================================+============+\n' - '| "x in s" | "True" if an item of *s* ' - 'is | (1) |\n' - '| | equal to *x*, else ' - '"False" | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "x not in s" | "False" if an item of *s* ' - 'is | (1) |\n' - '| | equal to *x*, else ' - '"True" | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s + t" | the concatenation of *s* and ' - '*t* | (6)(7) |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s * n" or "n * s" | *n* shallow copies of ' - '*s* | (2)(7) |\n' - '| | ' - 'concatenated | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s[i]" | *i*th item of *s*, origin ' - '0 | (3) |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s[i:j]" | slice of *s* from *i* to ' - '*j* | (3)(4) |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s[i:j:k]" | slice of *s* from *i* to ' - '*j* | (3)(5) |\n' - '| | with step ' - '*k* | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "len(s)" | length of ' - '*s* | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "min(s)" | smallest item of ' - '*s* | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "max(s)" | largest item of ' - '*s* | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s.index(x[, i[, j]])" | index of the first occurrence ' - 'of | (8) |\n' - '| | *x* in *s* (at or after ' - 'index | |\n' - '| | *i* and before index ' - '*j*) | |\n' - '+----------------------------+----------------------------------+------------+\n' - '| "s.count(x)" | total number of occurrences ' - 'of | |\n' - '| | *x* in ' - '*s* | |\n' - '+----------------------------+----------------------------------+------------+\n' - '\n' - 'Sequences of the same type also support comparisons. In ' - 'particular,\n' - 'tuples and lists are compared lexicographically by comparing\n' - 'corresponding elements. This means that to compare equal, ' - 'every\n' - 'element must compare equal and the two sequences must be of ' - 'the same\n' - 'type and have the same length. (For full details see ' - '*Comparisons* in\n' - 'the language reference.)\n' - '\n' - 'Notes:\n' - '\n' - '1. 
While the "in" and "not in" operations are used only for ' - 'simple\n' - ' containment testing in the general case, some specialised ' - 'sequences\n' - ' (such as "str", "bytes" and "bytearray") also use them for\n' - ' subsequence testing:\n' - '\n' - ' >>> "gg" in "eggs"\n' - ' True\n' - '\n' - '2. Values of *n* less than "0" are treated as "0" (which ' - 'yields an\n' - ' empty sequence of the same type as *s*). Note also that ' - 'the copies\n' - ' are shallow; nested structures are not copied. This often ' - 'haunts\n' - ' new Python programmers; consider:\n' - '\n' - ' >>> lists = [[]] * 3\n' - ' >>> lists\n' - ' [[], [], []]\n' - ' >>> lists[0].append(3)\n' - ' >>> lists\n' - ' [[3], [3], [3]]\n' - '\n' - ' What has happened is that "[[]]" is a one-element list ' - 'containing\n' - ' an empty list, so all three elements of "[[]] * 3" are ' - '(pointers\n' - ' to) this single empty list. Modifying any of the elements ' - 'of\n' - ' "lists" modifies this single list. You can create a list ' - 'of\n' - ' different lists this way:\n' - '\n' - ' >>> lists = [[] for i in range(3)]\n' - ' >>> lists[0].append(3)\n' - ' >>> lists[1].append(5)\n' - ' >>> lists[2].append(7)\n' - ' >>> lists\n' - ' [[3], [5], [7]]\n' - '\n' - '3. If *i* or *j* is negative, the index is relative to the end ' - 'of\n' - ' the string: "len(s) + i" or "len(s) + j" is substituted. ' - 'But note\n' - ' that "-0" is still "0".\n' - '\n' - '4. The slice of *s* from *i* to *j* is defined as the sequence ' - 'of\n' - ' items with index *k* such that "i <= k < j". If *i* or *j* ' - 'is\n' - ' greater than "len(s)", use "len(s)". If *i* is omitted or ' - '"None",\n' - ' use "0". If *j* is omitted or "None", use "len(s)". If ' - '*i* is\n' - ' greater than or equal to *j*, the slice is empty.\n' - '\n' - '5. The slice of *s* from *i* to *j* with step *k* is defined ' - 'as the\n' - ' sequence of items with index "x = i + n*k" such that "0 <= ' - 'n <\n' - ' (j-i)/k". In other words, the indices are "i", "i+k", ' - '"i+2*k",\n' - ' "i+3*k" and so on, stopping when *j* is reached (but never\n' - ' including *j*). If *i* or *j* is greater than "len(s)", ' - 'use\n' - ' "len(s)". If *i* or *j* are omitted or "None", they become ' - '"end"\n' - ' values (which end depends on the sign of *k*). Note, *k* ' - 'cannot be\n' - ' zero. If *k* is "None", it is treated like "1".\n' - '\n' - '6. Concatenating immutable sequences always results in a new\n' - ' object. This means that building up a sequence by repeated\n' - ' concatenation will have a quadratic runtime cost in the ' - 'total\n' - ' sequence length. To get a linear runtime cost, you must ' - 'switch to\n' - ' one of the alternatives below:\n' - '\n' - ' * if concatenating "str" objects, you can build a list and ' - 'use\n' - ' "str.join()" at the end or else write to a "io.StringIO" ' - 'instance\n' - ' and retrieve its value when complete\n' - '\n' - ' * if concatenating "bytes" objects, you can similarly use\n' - ' "bytes.join()" or "io.BytesIO", or you can do in-place\n' - ' concatenation with a "bytearray" object. "bytearray" ' - 'objects are\n' - ' mutable and have an efficient overallocation mechanism\n' - '\n' - ' * if concatenating "tuple" objects, extend a "list" ' - 'instead\n' - '\n' - ' * for other types, investigate the relevant class ' - 'documentation\n' - '\n' - '7. Some sequence types (such as "range") only support item\n' - " sequences that follow specific patterns, and hence don't " - 'support\n' - ' sequence concatenation or repetition.\n' - '\n' - '8. 
"index" raises "ValueError" when *x* is not found in *s*. ' - 'When\n' - ' supported, the additional arguments to the index method ' - 'allow\n' - ' efficient searching of subsections of the sequence. Passing ' - 'the\n' - ' extra arguments is roughly equivalent to using ' - '"s[i:j].index(x)",\n' - ' only without copying any data and with the returned index ' - 'being\n' - ' relative to the start of the sequence rather than the start ' - 'of the\n' - ' slice.\n' - '\n' - '\n' - 'Immutable Sequence Types\n' - '========================\n' - '\n' - 'The only operation that immutable sequence types generally ' - 'implement\n' - 'that is not also implemented by mutable sequence types is ' - 'support for\n' - 'the "hash()" built-in.\n' - '\n' - 'This support allows immutable sequences, such as "tuple" ' - 'instances, to\n' - 'be used as "dict" keys and stored in "set" and "frozenset" ' - 'instances.\n' - '\n' - 'Attempting to hash an immutable sequence that contains ' - 'unhashable\n' - 'values will result in "TypeError".\n' - '\n' - '\n' - 'Mutable Sequence Types\n' - '======================\n' - '\n' - 'The operations in the following table are defined on mutable ' - 'sequence\n' - 'types. The "collections.abc.MutableSequence" ABC is provided ' - 'to make\n' - 'it easier to correctly implement these operations on custom ' - 'sequence\n' - 'types.\n' - '\n' - 'In the table *s* is an instance of a mutable sequence type, ' - '*t* is any\n' - 'iterable object and *x* is an arbitrary object that meets any ' - 'type and\n' - 'value restrictions imposed by *s* (for example, "bytearray" ' - 'only\n' - 'accepts integers that meet the value restriction "0 <= x <= ' - '255").\n' - '\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| Operation | ' - 'Result | Notes |\n' - '+================================+==================================+=======================+\n' - '| "s[i] = x" | item *i* of *s* is replaced ' - 'by | |\n' - '| | ' - '*x* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s[i:j] = t" | slice of *s* from *i* to ' - '*j* is | |\n' - '| | replaced by the contents of ' - 'the | |\n' - '| | iterable ' - '*t* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "del s[i:j]" | same as "s[i:j] = ' - '[]" | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s[i:j:k] = t" | the elements of "s[i:j:k]" ' - 'are | (1) |\n' - '| | replaced by those of ' - '*t* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "del s[i:j:k]" | removes the elements ' - 'of | |\n' - '| | "s[i:j:k]" from the ' - 'list | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.append(x)" | appends *x* to the end of ' - 'the | |\n' - '| | sequence (same ' - 'as | |\n' - '| | "s[len(s):len(s)] = ' - '[x]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.clear()" | removes all items from "s" ' - '(same | (5) |\n' - '| | as "del ' - 's[:]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.copy()" | creates a shallow copy of ' - '"s" | (5) |\n' - '| | (same as ' - '"s[:]") | |\n' - 
'+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.extend(t)" | extends *s* with the ' - 'contents of | |\n' - '| | *t* (same as ' - '"s[len(s):len(s)] = | |\n' - '| | ' - 't") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.insert(i, x)" | inserts *x* into *s* at ' - 'the | |\n' - '| | index given by *i* (same ' - 'as | |\n' - '| | "s[i:i] = ' - '[x]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.pop([i])" | retrieves the item at *i* ' - 'and | (2) |\n' - '| | also removes it from ' - '*s* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.remove(x)" | remove the first item from ' - '*s* | (3) |\n' - '| | where "s[i] == ' - 'x" | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.reverse()" | reverses the items of *s* ' - 'in | (4) |\n' - '| | ' - 'place | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '\n' - 'Notes:\n' - '\n' - '1. *t* must have the same length as the slice it is ' - 'replacing.\n' - '\n' - '2. The optional argument *i* defaults to "-1", so that by ' - 'default\n' - ' the last item is removed and returned.\n' - '\n' - '3. "remove" raises "ValueError" when *x* is not found in *s*.\n' - '\n' - '4. The "reverse()" method modifies the sequence in place for\n' - ' economy of space when reversing a large sequence. To ' - 'remind users\n' - ' that it operates by side effect, it does not return the ' - 'reversed\n' - ' sequence.\n' - '\n' - '5. "clear()" and "copy()" are included for consistency with ' - 'the\n' - " interfaces of mutable containers that don't support " - 'slicing\n' - ' operations (such as "dict" and "set")\n' - '\n' - ' New in version 3.3: "clear()" and "copy()" methods.\n' - '\n' - '\n' - 'Lists\n' - '=====\n' - '\n' - 'Lists are mutable sequences, typically used to store ' - 'collections of\n' - 'homogeneous items (where the precise degree of similarity will ' - 'vary by\n' - 'application).\n' - '\n' - 'class class list([iterable])\n' - '\n' - ' Lists may be constructed in several ways:\n' - '\n' - ' * Using a pair of square brackets to denote the empty list: ' - '"[]"\n' - '\n' - ' * Using square brackets, separating items with commas: ' - '"[a]",\n' - ' "[a, b, c]"\n' - '\n' - ' * Using a list comprehension: "[x for x in iterable]"\n' - '\n' - ' * Using the type constructor: "list()" or "list(iterable)"\n' - '\n' - ' The constructor builds a list whose items are the same and ' - 'in the\n' - " same order as *iterable*'s items. *iterable* may be either " - 'a\n' - ' sequence, a container that supports iteration, or an ' - 'iterator\n' - ' object. If *iterable* is already a list, a copy is made ' - 'and\n' - ' returned, similar to "iterable[:]". For example, ' - '"list(\'abc\')"\n' - ' returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" ' - 'returns "[1, 2,\n' - ' 3]". If no argument is given, the constructor creates a new ' - 'empty\n' - ' list, "[]".\n' - '\n' - ' Many other operations also produce lists, including the ' - '"sorted()"\n' - ' built-in.\n' - '\n' - ' Lists implement all of the *common* and *mutable* sequence\n' - ' operations. 
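For instance (an illustrative sketch of the operations tabulated above; the names are arbitrary):

   >>> s = [1, 2, 3]
   >>> s.append(4)            # same as s[len(s):len(s)] = [4]
   >>> s[1:3] = [20, 30]      # slice assignment replaces items 1 and 2
   >>> s.pop()                # i defaults to -1, so the last item is returned
   4
   >>> s
   [1, 20, 30]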
Lists also provide the following additional ' - 'method:\n' - '\n' - ' sort(*, key=None, reverse=None)\n' - '\n' - ' This method sorts the list in place, using only "<" ' - 'comparisons\n' - ' between items. Exceptions are not suppressed - if any ' - 'comparison\n' - ' operations fail, the entire sort operation will fail ' - '(and the\n' - ' list will likely be left in a partially modified ' - 'state).\n' - '\n' - ' "sort()" accepts two arguments that can only be passed ' - 'by\n' - ' keyword (*keyword-only arguments*):\n' - '\n' - ' *key* specifies a function of one argument that is used ' - 'to\n' - ' extract a comparison key from each list element (for ' - 'example,\n' - ' "key=str.lower"). The key corresponding to each item in ' - 'the list\n' - ' is calculated once and then used for the entire sorting ' - 'process.\n' - ' The default value of "None" means that list items are ' - 'sorted\n' - ' directly without calculating a separate key value.\n' - '\n' - ' The "functools.cmp_to_key()" utility is available to ' - 'convert a\n' - ' 2.x style *cmp* function to a *key* function.\n' - '\n' - ' *reverse* is a boolean value. If set to "True", then ' - 'the list\n' - ' elements are sorted as if each comparison were ' - 'reversed.\n' - '\n' - ' This method modifies the sequence in place for economy ' - 'of space\n' - ' when sorting a large sequence. To remind users that it ' - 'operates\n' - ' by side effect, it does not return the sorted sequence ' - '(use\n' - ' "sorted()" to explicitly request a new sorted list ' - 'instance).\n' - '\n' - ' The "sort()" method is guaranteed to be stable. A sort ' - 'is\n' - ' stable if it guarantees not to change the relative order ' - 'of\n' - ' elements that compare equal --- this is helpful for ' - 'sorting in\n' - ' multiple passes (for example, sort by department, then ' - 'by salary\n' - ' grade).\n' - '\n' - ' **CPython implementation detail:** While a list is being ' - 'sorted,\n' - ' the effect of attempting to mutate, or even inspect, the ' - 'list is\n' - ' undefined. The C implementation of Python makes the ' - 'list appear\n' - ' empty for the duration, and raises "ValueError" if it ' - 'can detect\n' - ' that the list has been mutated during a sort.\n' - '\n' - '\n' - 'Tuples\n' - '======\n' - '\n' - 'Tuples are immutable sequences, typically used to store ' - 'collections of\n' - 'heterogeneous data (such as the 2-tuples produced by the ' - '"enumerate()"\n' - 'built-in). Tuples are also used for cases where an immutable ' - 'sequence\n' - 'of homogeneous data is needed (such as allowing storage in a ' - '"set" or\n' - '"dict" instance).\n' - '\n' - 'class class tuple([iterable])\n' - '\n' - ' Tuples may be constructed in a number of ways:\n' - '\n' - ' * Using a pair of parentheses to denote the empty tuple: ' - '"()"\n' - '\n' - ' * Using a trailing comma for a singleton tuple: "a," or ' - '"(a,)"\n' - '\n' - ' * Separating items with commas: "a, b, c" or "(a, b, c)"\n' - '\n' - ' * Using the "tuple()" built-in: "tuple()" or ' - '"tuple(iterable)"\n' - '\n' - ' The constructor builds a tuple whose items are the same and ' - 'in the\n' - " same order as *iterable*'s items. *iterable* may be either " - 'a\n' - ' sequence, a container that supports iteration, or an ' - 'iterator\n' - ' object. If *iterable* is already a tuple, it is returned\n' - ' unchanged. For example, "tuple(\'abc\')" returns "(\'a\', ' - '\'b\', \'c\')"\n' - ' and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". 
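Referring back to the "sort()" description above, a small sketch of the *key* and *reverse* arguments (illustrative only; the word list is arbitrary):

   >>> words = ['banana', 'Apple', 'cherry']
   >>> words.sort(key=str.lower)          # case-insensitive comparison key
   >>> words
   ['Apple', 'banana', 'cherry']
   >>> words.sort(key=len, reverse=True)  # stable: equal lengths keep their order
   >>> words
   ['banana', 'cherry', 'Apple']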
If no ' - 'argument is\n' - ' given, the constructor creates a new empty tuple, "()".\n' - '\n' - ' Note that it is actually the comma which makes a tuple, not ' - 'the\n' - ' parentheses. The parentheses are optional, except in the ' - 'empty\n' - ' tuple case, or when they are needed to avoid syntactic ' - 'ambiguity.\n' - ' For example, "f(a, b, c)" is a function call with three ' - 'arguments,\n' - ' while "f((a, b, c))" is a function call with a 3-tuple as ' - 'the sole\n' - ' argument.\n' - '\n' - ' Tuples implement all of the *common* sequence operations.\n' - '\n' - 'For heterogeneous collections of data where access by name is ' - 'clearer\n' - 'than access by index, "collections.namedtuple()" may be a ' - 'more\n' - 'appropriate choice than a simple tuple object.\n' - '\n' - '\n' - 'Ranges\n' - '======\n' - '\n' - 'The "range" type represents an immutable sequence of numbers ' - 'and is\n' - 'commonly used for looping a specific number of times in "for" ' - 'loops.\n' - '\n' - 'class class range(stop)\n' - 'class class range(start, stop[, step])\n' - '\n' - ' The arguments to the range constructor must be integers ' - '(either\n' - ' built-in "int" or any object that implements the ' - '"__index__"\n' - ' special method). If the *step* argument is omitted, it ' - 'defaults to\n' - ' "1". If the *start* argument is omitted, it defaults to ' - '"0". If\n' - ' *step* is zero, "ValueError" is raised.\n' - '\n' - ' For a positive *step*, the contents of a range "r" are ' - 'determined\n' - ' by the formula "r[i] = start + step*i" where "i >= 0" and ' - '"r[i] <\n' - ' stop".\n' - '\n' - ' For a negative *step*, the contents of the range are still\n' - ' determined by the formula "r[i] = start + step*i", but the\n' - ' constraints are "i >= 0" and "r[i] > stop".\n' - '\n' - ' A range object will be empty if "r[0]" does not meet the ' - 'value\n' - ' constraint. 
Ranges do support negative indices, but these ' - 'are\n' - ' interpreted as indexing from the end of the sequence ' - 'determined by\n' - ' the positive indices.\n' - '\n' - ' Ranges containing absolute values larger than "sys.maxsize" ' - 'are\n' - ' permitted but some features (such as "len()") may raise\n' - ' "OverflowError".\n' - '\n' - ' Range examples:\n' - '\n' - ' >>> list(range(10))\n' - ' [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n' - ' >>> list(range(1, 11))\n' - ' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n' - ' >>> list(range(0, 30, 5))\n' - ' [0, 5, 10, 15, 20, 25]\n' - ' >>> list(range(0, 10, 3))\n' - ' [0, 3, 6, 9]\n' - ' >>> list(range(0, -10, -1))\n' - ' [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n' - ' >>> list(range(0))\n' - ' []\n' - ' >>> list(range(1, 0))\n' - ' []\n' - '\n' - ' Ranges implement all of the *common* sequence operations ' - 'except\n' - ' concatenation and repetition (due to the fact that range ' - 'objects\n' - ' can only represent sequences that follow a strict pattern ' - 'and\n' - ' repetition and concatenation will usually violate that ' - 'pattern).\n' - '\n' - 'The advantage of the "range" type over a regular "list" or ' - '"tuple" is\n' - 'that a "range" object will always take the same (small) amount ' - 'of\n' - 'memory, no matter the size of the range it represents (as it ' - 'only\n' - 'stores the "start", "stop" and "step" values, calculating ' - 'individual\n' - 'items and subranges as needed).\n' - '\n' - 'Range objects implement the "collections.abc.Sequence" ABC, ' - 'and\n' - 'provide features such as containment tests, element index ' - 'lookup,\n' - 'slicing and support for negative indices (see *Sequence Types ' - '---\n' - 'list, tuple, range*):\n' - '\n' - '>>> r = range(0, 20, 2)\n' - '>>> r\n' - 'range(0, 20, 2)\n' - '>>> 11 in r\n' - 'False\n' - '>>> 10 in r\n' - 'True\n' - '>>> r.index(10)\n' - '5\n' - '>>> r[5]\n' - '10\n' - '>>> r[:5]\n' - 'range(0, 10, 2)\n' - '>>> r[-1]\n' - '18\n' - '\n' - 'Testing range objects for equality with "==" and "!=" compares ' - 'them as\n' - 'sequences. That is, two range objects are considered equal if ' - 'they\n' - 'represent the same sequence of values. (Note that two range ' - 'objects\n' - 'that compare equal might have different "start", "stop" and ' - '"step"\n' - 'attributes, for example "range(0) == range(2, 1, 3)" or ' - '"range(0, 3,\n' - '2) == range(0, 4, 2)".)\n' - '\n' - 'Changed in version 3.2: Implement the Sequence ABC. Support ' - 'slicing\n' - 'and negative indices. Test "int" objects for membership in ' - 'constant\n' - 'time instead of iterating through all items.\n' - '\n' - "Changed in version 3.3: Define '==' and '!=' to compare range " - 'objects\n' - 'based on the sequence of values they define (instead of ' - 'comparing\n' - 'based on object identity).\n' - '\n' - 'New in version 3.3: The "start", "stop" and "step" ' - 'attributes.\n', - 'typesseq-mutable': '\n' - 'Mutable Sequence Types\n' - '**********************\n' - '\n' - 'The operations in the following table are defined on ' - 'mutable sequence\n' - 'types. 
The "collections.abc.MutableSequence" ABC is ' - 'provided to make\n' - 'it easier to correctly implement these operations on ' - 'custom sequence\n' - 'types.\n' - '\n' - 'In the table *s* is an instance of a mutable sequence ' - 'type, *t* is any\n' - 'iterable object and *x* is an arbitrary object that ' - 'meets any type and\n' - 'value restrictions imposed by *s* (for example, ' - '"bytearray" only\n' - 'accepts integers that meet the value restriction "0 <= ' - 'x <= 255").\n' - '\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| Operation | ' - 'Result | ' - 'Notes |\n' - '+================================+==================================+=======================+\n' - '| "s[i] = x" | item *i* of *s* is ' - 'replaced by | |\n' - '| | ' - '*x* ' - '| |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s[i:j] = t" | slice of *s* from ' - '*i* to *j* is | |\n' - '| | replaced by the ' - 'contents of the | |\n' - '| | iterable ' - '*t* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "del s[i:j]" | same as "s[i:j] = ' - '[]" | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s[i:j:k] = t" | the elements of ' - '"s[i:j:k]" are | (1) |\n' - '| | replaced by those ' - 'of *t* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "del s[i:j:k]" | removes the ' - 'elements of | |\n' - '| | "s[i:j:k]" from the ' - 'list | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.append(x)" | appends *x* to the ' - 'end of the | |\n' - '| | sequence (same ' - 'as | |\n' - '| | "s[len(s):len(s)] = ' - '[x]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.clear()" | removes all items ' - 'from "s" (same | (5) |\n' - '| | as "del ' - 's[:]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.copy()" | creates a shallow ' - 'copy of "s" | (5) |\n' - '| | (same as ' - '"s[:]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.extend(t)" | extends *s* with ' - 'the contents of | |\n' - '| | *t* (same as ' - '"s[len(s):len(s)] = | |\n' - '| | ' - 't") ' - '| |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.insert(i, x)" | inserts *x* into ' - '*s* at the | |\n' - '| | index given by *i* ' - '(same as | |\n' - '| | "s[i:i] = ' - '[x]") | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.pop([i])" | retrieves the item ' - 'at *i* and | (2) |\n' - '| | also removes it ' - 'from *s* | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.remove(x)" | remove the first ' - 'item from *s* | (3) |\n' - '| | where "s[i] == ' - 'x" | |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '| "s.reverse()" | reverses the items ' - 'of *s* in | (4) |\n' - '| | ' - 'place ' - '| |\n' - '+--------------------------------+----------------------------------+-----------------------+\n' - '\n' - 'Notes:\n' - '\n' - '1. 
*t* must have the same length as the slice it is ' - 'replacing.\n' - '\n' - '2. The optional argument *i* defaults to "-1", so that ' - 'by default\n' - ' the last item is removed and returned.\n' - '\n' - '3. "remove" raises "ValueError" when *x* is not found ' - 'in *s*.\n' - '\n' - '4. The "reverse()" method modifies the sequence in ' - 'place for\n' - ' economy of space when reversing a large sequence. ' - 'To remind users\n' - ' that it operates by side effect, it does not return ' - 'the reversed\n' - ' sequence.\n' - '\n' - '5. "clear()" and "copy()" are included for consistency ' - 'with the\n' - " interfaces of mutable containers that don't support " - 'slicing\n' - ' operations (such as "dict" and "set")\n' - '\n' - ' New in version 3.3: "clear()" and "copy()" ' - 'methods.\n', - 'unary': '\n' - 'Unary arithmetic and bitwise operations\n' - '***************************************\n' - '\n' - 'All unary arithmetic and bitwise operations have the same ' - 'priority:\n' - '\n' - ' u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n' - '\n' - 'The unary "-" (minus) operator yields the negation of its ' - 'numeric\n' - 'argument.\n' - '\n' - 'The unary "+" (plus) operator yields its numeric argument ' - 'unchanged.\n' - '\n' - 'The unary "~" (invert) operator yields the bitwise inversion of ' - 'its\n' - 'integer argument. The bitwise inversion of "x" is defined as\n' - '"-(x+1)". It only applies to integral numbers.\n' - '\n' - 'In all three cases, if the argument does not have the proper ' - 'type, a\n' - '"TypeError" exception is raised.\n', - 'while': '\n' - 'The "while" statement\n' - '*********************\n' - '\n' - 'The "while" statement is used for repeated execution as long as ' - 'an\n' - 'expression is true:\n' - '\n' - ' while_stmt ::= "while" expression ":" suite\n' - ' ["else" ":" suite]\n' - '\n' - 'This repeatedly tests the expression and, if it is true, executes ' - 'the\n' - 'first suite; if the expression is false (which may be the first ' - 'time\n' - 'it is tested) the suite of the "else" clause, if present, is ' - 'executed\n' - 'and the loop terminates.\n' - '\n' - 'A "break" statement executed in the first suite terminates the ' - 'loop\n' - 'without executing the "else" clause\'s suite. A "continue" ' - 'statement\n' - 'executed in the first suite skips the rest of the suite and goes ' - 'back\n' - 'to testing the expression.\n', - 'with': '\n' - 'The "with" statement\n' - '********************\n' - '\n' - 'The "with" statement is used to wrap the execution of a block ' - 'with\n' - 'methods defined by a context manager (see section *With Statement\n' - 'Context Managers*). This allows common ' - '"try"..."except"..."finally"\n' - 'usage patterns to be encapsulated for convenient reuse.\n' - '\n' - ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n' - ' with_item ::= expression ["as" target]\n' - '\n' - 'The execution of the "with" statement with one "item" proceeds as\n' - 'follows:\n' - '\n' - '1. The context expression (the expression given in the ' - '"with_item")\n' - ' is evaluated to obtain a context manager.\n' - '\n' - '2. The context manager\'s "__exit__()" is loaded for later use.\n' - '\n' - '3. The context manager\'s "__enter__()" method is invoked.\n' - '\n' - '4. 
If a target was included in the "with" statement, the return\n' - ' value from "__enter__()" is assigned to it.\n' - '\n' - ' Note: The "with" statement guarantees that if the ' - '"__enter__()"\n' - ' method returns without an error, then "__exit__()" will ' - 'always be\n' - ' called. Thus, if an error occurs during the assignment to ' - 'the\n' - ' target list, it will be treated the same as an error ' - 'occurring\n' - ' within the suite would be. See step 6 below.\n' - '\n' - '5. The suite is executed.\n' - '\n' - '6. The context manager\'s "__exit__()" method is invoked. If an\n' - ' exception caused the suite to be exited, its type, value, and\n' - ' traceback are passed as arguments to "__exit__()". Otherwise, ' - 'three\n' - ' "None" arguments are supplied.\n' - '\n' - ' If the suite was exited due to an exception, and the return ' - 'value\n' - ' from the "__exit__()" method was false, the exception is ' - 'reraised.\n' - ' If the return value was true, the exception is suppressed, and\n' - ' execution continues with the statement following the "with"\n' - ' statement.\n' - '\n' - ' If the suite was exited for any reason other than an exception, ' - 'the\n' - ' return value from "__exit__()" is ignored, and execution ' - 'proceeds\n' - ' at the normal location for the kind of exit that was taken.\n' - '\n' - 'With more than one item, the context managers are processed as if\n' - 'multiple "with" statements were nested:\n' - '\n' - ' with A() as a, B() as b:\n' - ' suite\n' - '\n' - 'is equivalent to\n' - '\n' - ' with A() as a:\n' - ' with B() as b:\n' - ' suite\n' - '\n' - 'Changed in version 3.1: Support for multiple context expressions.\n' - '\n' - 'See also: **PEP 0343** - The "with" statement\n' - '\n' - ' The specification, background, and examples for the Python ' - '"with"\n' - ' statement.\n', - 'yield': '\n' - 'The "yield" statement\n' - '*********************\n' - '\n' - ' yield_stmt ::= yield_expression\n' - '\n' - 'A "yield" statement is semantically equivalent to a *yield\n' - 'expression*. The yield statement can be used to omit the ' - 'parentheses\n' - 'that would otherwise be required in the equivalent yield ' - 'expression\n' - 'statement. For example, the yield statements\n' - '\n' - ' yield \n' - ' yield from \n' - '\n' - 'are equivalent to the yield expression statements\n' - '\n' - ' (yield )\n' - ' (yield from )\n' - '\n' - 'Yield expressions and statements are only used when defining a\n' - '*generator* function, and are only used in the body of the ' - 'generator\n' - 'function. Using yield in a function definition is sufficient to ' - 'cause\n' - 'that definition to create a generator function instead of a ' - 'normal\n' - 'function.\n' - '\n' - 'For full details of "yield" semantics, refer to the *Yield\n' - 'expressions* section.\n'} +# Autogenerated by Sphinx on Sat Dec 5 17:02:49 2015 +topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. 
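For example (an illustrative sketch of the extended form above):

   >>> x = -1
   >>> assert x >= 0, 'x must be non-negative'
   Traceback (most recent call last):
     ...
   AssertionError: x must be non-negative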
In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', + 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. 
The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', + 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. 
When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', + 'atom-literals': u"\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", + 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. 
In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
*__slots__*\n reserves space for the declared variables and prevents the\n automatic creation of *__dict__* and *__weakref__* for each\n instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n', + 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier. This\nproduction can be customized by overriding the "__getattr__()" method.\nIf this attribute is not available, the exception "AttributeError" is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. 
Multiple evaluations of the same attribute\nreference may yield different objects.\n', + 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', + 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "@" m_expr |\n m_expr "//" u_expr| m_expr "/" u_expr |\n m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger and the other must be a sequence. In the former case, the\nnumbers are converted to a common type and then multiplied together.\nIn the latter case, sequence repetition is performed; a negative\nrepetition factor yields an empty sequence.\n\nThe "@" (at) operator is intended to be used for matrix\nmultiplication. No builtin Python types implement this operator.\n\nNew in version 3.5.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Division of integers yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. 
Division by zero raises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: "x == (x//y)*y + (x%y)". Floor division and modulo are also\nconnected with the built-in function "divmod()": "divmod(x, y) ==\n(x//y, x%y)". [2].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the "divmod()"\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both be sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n', + 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n', + 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "__code__" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec()" or "eval()" built-in functions.\n\nSee *The standard type hierarchy* for more information.\n', + 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n"Ellipsis" (a built-in name). "type(Ellipsis)()" produces the\n"Ellipsis" singleton.\n\nIt is written as "Ellipsis" or "...".\n', + 'bltin-null-object': u'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. 
There is exactly one null\nobject, named "None" (a built-in name). "type(None)()" produces the\nsame singleton.\n\nIt is written as "None".\n', + 'bltin-type-objects': u'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "".\n', + 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a "__bool__()" method.\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. 
Because "not" has to create a\nnew value, it returns a boolean value regardless of the type of its\nargument (for example, "not \'foo\'" produces "False" rather than "\'\'".)\n', + 'break': u'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n', + 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', + 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nAn optional trailing comma may be present after the positional and\nkeyword arguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. 
Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', + 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n', + 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\n\nValue comparisons\n=================\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects do not need to have the same type.\n\nChapter *Objects, values and types* states that objects have a value\n(in addition to type and identity). The value of an object is a\nrather abstract notion in Python: For example, there is no canonical\naccess method for an object\'s value. Also, there is no requirement\nthat the value of an object should be constructed in a particular way,\ne.g. comprised of all its data attributes. Comparison operators\nimplement a particular notion of what the value of an object is. One\ncan think of them as defining the value of an object indirectly, by\nmeans of their comparison implementation.\n\nBecause all types are (direct or indirect) subtypes of "object", they\ninherit the default comparison behavior from "object". Types can\ncustomize their comparison behavior by implementing *rich comparison\nmethods* like "__lt__()", described in *Basic customization*.\n\nThe default behavior for equality comparison ("==" and "!=") is based\non the identity of the objects. Hence, equality comparison of\ninstances with the same identity results in equality, and equality\ncomparison of instances with different identities results in\ninequality. A motivation for this default behavior is the desire that\nall objects should be reflexive (i.e. "x is y" implies "x == y").\n\nA default order comparison ("<", ">", "<=", and ">=") is not provided;\nan attempt raises "TypeError". A motivation for this default behavior\nis the lack of a similar invariant as for equality.\n\nThe behavior of the default equality comparison, that instances with\ndifferent identities are always unequal, may be in contrast to what\ntypes will need that have a sensible definition of object value and\nvalue-based equality. Such types will need to customize their\ncomparison behavior, and in fact, a number of built-in types have done\nthat.\n\nThe following list describes the comparison behavior of the most\nimportant built-in types.\n\n* Numbers of built-in numeric types (*Numeric Types --- int, float,\n complex*) and of the standard library types "fractions.Fraction" and\n "decimal.Decimal" can be compared within and across their types,\n with the restriction that complex numbers do not support order\n comparison. Within the limits of the types involved, they compare\n mathematically (algorithmically) correct without loss of precision.\n\n The not-a-number values "float(\'NaN\')" and "Decimal(\'NaN\')" are\n special. 
They are identical to themselves ("x is x" is true) but\n are not equal to themselves ("x == x" is false). Additionally,\n comparing any number to a not-a-number value will return "False".\n For example, both "3 < float(\'NaN\')" and "float(\'NaN\') < 3" will\n return "False".\n\n* Binary sequences (instances of "bytes" or "bytearray") can be\n compared within and across their types. They compare\n lexicographically using the numeric values of their elements.\n\n* Strings (instances of "str") compare lexicographically using the\n numerical Unicode code points (the result of the built-in function\n "ord()") of their characters. [3]\n\n Strings and binary sequences cannot be directly compared.\n\n* Sequences (instances of "tuple", "list", or "range") can be\n compared only within each of their types, with the restriction that\n ranges do not support order comparison. Equality comparison across\n these types results in inequality, and ordering comparison across\n these types raises "TypeError".\n\n Sequences compare lexicographically using comparison of\n corresponding elements, whereby reflexivity of the elements is\n enforced.\n\n In enforcing reflexivity of elements, the comparison of collections\n assumes that for a collection element "x", "x == x" is always true.\n Based on that assumption, element identity is compared first, and\n element comparison is performed only for distinct elements. This\n approach yields the same result as a strict element comparison\n would, if the compared elements are reflexive. For non-reflexive\n elements, the result is different than for strict element\n comparison, and may be surprising: The non-reflexive not-a-number\n values for example result in the following comparison behavior when\n used in a list:\n\n >>> nan = float(\'NaN\')\n >>> nan is nan\n True\n >>> nan == nan\n False <-- the defined non-reflexive behavior of NaN\n >>> [nan] == [nan]\n True <-- list enforces reflexivity and tests identity first\n\n Lexicographical comparison between built-in collections works as\n follows:\n\n * For two collections to compare equal, they must be of the same\n type, have the same length, and each pair of corresponding\n elements must compare equal (for example, "[1,2] == (1,2)" is\n false because the type is not the same).\n\n * Collections that support order comparison are ordered the same\n as their first unequal elements (for example, "[1,2,x] <= [1,2,y]"\n has the same value as "x <= y"). If a corresponding element does\n not exist, the shorter collection is ordered first (for example,\n "[1,2] < [1,2,3]" is true).\n\n* Mappings (instances of "dict") compare equal if and only if they\n have equal *(key, value)* pairs. Equality comparison of the keys and\n elements enforces reflexivity.\n\n Order comparisons ("<", ">", "<=", and ">=") raise "TypeError".\n\n* Sets (instances of "set" or "frozenset") can be compared within\n and across their types.\n\n They define order comparison operators to mean subset and superset\n tests. Those relations do not define total orderings (for example,\n the two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering\n (for example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs).\n\n Comparison of sets enforces reflexivity of its elements.\n\n* Most other built-in types have no comparison methods implemented,\n so they inherit the default comparison behavior.\n\nUser-defined classes that customize their comparison behavior should\nfollow some consistency rules, if possible:\n\n* Equality comparison should be reflexive. In other words, identical\n objects should compare equal:\n\n "x is y" implies "x == y"\n\n* Comparison should be symmetric. In other words, the following\n expressions should have the same result:\n\n "x == y" and "y == x"\n\n "x != y" and "y != x"\n\n "x < y" and "y > x"\n\n "x <= y" and "y >= x"\n\n* Comparison should be transitive. The following (non-exhaustive)\n examples illustrate that:\n\n "x > y and y > z" implies "x > z"\n\n "x < y and y <= z" implies "x < z"\n\n* Inverse comparison should result in the boolean negation. In other\n words, the following expressions should have the same result:\n\n "x == y" and "not x != y"\n\n "x < y" and "not x >= y" (for total ordering)\n\n "x > y" and "not x <= y" (for total ordering)\n\n The last two expressions apply to totally ordered collections (e.g.\n to sequences, but not to sets or mappings). See also the\n "total_ordering()" decorator.\n\nPython does not enforce these consistency rules. In fact, the\nnot-a-number values are an example for not following these rules.\n\n\nMembership test operations\n==========================\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\n\nIdentity comparisons\n====================\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. 
[4]\n', + 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode. Function and class definitions are also syntactically compound\nstatements.\n\nA compound statement consists of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of a suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | async_with_stmt\n | async_for_stmt\n | async_funcdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variable(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. 
Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
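A minimal sketch of keeping the exception object around (the name\n"saved" is only illustrative):\n\n   try:\n       1/0\n   except ZeroDivisionError as exc:\n       saved = exc     # bind to another name before the clause ends\n   # \'exc\' has been deleted here, but \'saved\' still refers to it\n\n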
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed using keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation, even those\nof the form "*identifier" or "**identifier". Functions may have a\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. 
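For instance, a function may build and return such a local function\n(a small sketch; the names are only illustrative):\n\n   def make_adder(n):\n       def add(x):          # redefined on every call to make_adder\n           return x + n     # \'n\' is a free variable of \'add\'\n       return add\n\n   add_two = make_adder(2)\n   add_two(5)               # returns 7\n\n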
Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n\nCoroutines\n==========\n\nNew in version 3.5.\n\n\nCoroutine function definition\n-----------------------------\n\n async_funcdef ::= [decorators] "async" "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n\nExecution of Python coroutines can be suspended and resumed at many\npoints (see *coroutine*). 
In the body of a coroutine, any "await" and\n"async" identifiers become reserved keywords; "await" expressions,\n"async for" and "async with" can only be used in coroutine bodies.\n\nFunctions defined with "async def" syntax are always coroutine\nfunctions, even if they do not contain "await" or "async" keywords.\n\nIt is a "SyntaxError" to use "yield" expressions in "async def"\ncoroutines.\n\nAn example of a coroutine function:\n\n async def func(param1, param2):\n do_stuff()\n await some_coroutine()\n\n\nThe "async for" statement\n-------------------------\n\n async_for_stmt ::= "async" for_stmt\n\nAn *asynchronous iterable* is able to call asynchronous code in its\n*iter* implementation, and *asynchronous iterator* can call\nasynchronous code in its *next* method.\n\nThe "async for" statement allows convenient iteration over\nasynchronous iterators.\n\nThe following code:\n\n async for TARGET in ITER:\n BLOCK\n else:\n BLOCK2\n\nIs semantically equivalent to:\n\n iter = (ITER)\n iter = await type(iter).__aiter__(iter)\n running = True\n while running:\n try:\n TARGET = await type(iter).__anext__(iter)\n except StopAsyncIteration:\n running = False\n else:\n BLOCK\n else:\n BLOCK2\n\nSee also "__aiter__()" and "__anext__()" for details.\n\nIt is a "SyntaxError" to use "async for" statement outside of an\n"async def" function.\n\n\nThe "async with" statement\n--------------------------\n\n async_with_stmt ::= "async" with_stmt\n\nAn *asynchronous context manager* is a *context manager* that is able\nto suspend execution in its *enter* and *exit* methods.\n\nThe following code:\n\n async with EXPR as VAR:\n BLOCK\n\nIs semantically equivalent to:\n\n mgr = (EXPR)\n aexit = type(mgr).__aexit__\n aenter = type(mgr).__aenter__(mgr)\n exc = True\n\n VAR = await aenter\n try:\n BLOCK\n except:\n if not await aexit(mgr, *sys.exc_info()):\n raise\n else:\n await aexit(mgr, None, None, None)\n\nSee also "__aenter__()" and "__aexit__()" for details.\n\nIt is a "SyntaxError" to use "async with" statement outside of an\n"async def" function.\n\nSee also: **PEP 492** - Coroutines with async and await syntax\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', + 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. 
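For instance, a context manager that times a block might be sketched\nas follows (the class name and attributes are only illustrative):\n\n   import time\n\n   class Timer:\n       def __enter__(self):\n           self.start = time.monotonic()\n           return self\n       def __exit__(self, exc_type, exc_value, traceback):\n           self.elapsed = time.monotonic() - self.start\n           return False      # do not suppress exceptions\n\n   with Timer() as t:\n       sum(range(1000000))\n\n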
Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', + 'continue': u'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', + 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works as follows:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string as a\nleft argument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', + 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the second can be resolved by freeing the reference to the\n traceback object when it is no longer useful, and the third can\n be resolved by storing "None" in "sys.last_traceback". Circular\n references which are garbage are detected and cleaned up when the\n cyclic garbage collector is enabled (it\'s on by default). 
Refer\n to the documentation for the "gc" module for more information\n about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. 
The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n By default, "__ne__()" delegates to "__eq__()" and inverts the\n result unless it is "NotImplemented". There are no other implied\n relationships among the comparison operators, for example, the\n truth of "(x<y or x==y)" does not imply "x<=y". To automatically\n generate ordering operations from a single root operation, see\n "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value.\n\n If a class that overrides "__eq__()" needs to retain the hash\n implementation from a parent class, the interpreter must be told\n this explicitly by setting "__hash__ = <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python. This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details. Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds). See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n', + 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. 
the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When "runeval()" returns, it returns the\n value of the expression. Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. 
If you want\nto access further features, you have to do this yourself:\n\nclass pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses "Ctrl-C" on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing "Ctrl-C". If you want Pdb not to touch the\n SIGINT handler, set *nosigint* to true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallow one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". 
Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. 
An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". 
If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command\n --- this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interactive interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. 
The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n is determined by the "__name__" in the frame globals.\n', + 'del': u'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', + 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', + 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nName resolution of free variables occurs at runtime, not at compile\ntime. This means that the following code will print 42:\n\n i = 10\n def f():\n print(i)\n i = 42\n f()\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. 
If only one namespace is\nspecified, it is used for both.\n', + 'else': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', + 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', + 'execmodel': u'\nExecution model\n***************\n\n\nStructure of a program\n======================\n\nA Python program is constructed from code blocks. A *block* is a piece\nof Python program text that is executed as a unit. The following are\nblocks: a module, a function body, and a class definition. Each\ncommand typed interactively is a block. A script file (a file given\nas standard input to the interpreter or specified as a command line\nargument to the interpreter) is a code block. A script command (a\ncommand specified on the interpreter command line with the \'**-c**\'\noption) is a code block. 
The string argument passed to the built-in\nfunctions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\n\nNaming and binding\n==================\n\n\nBinding of names\n----------------\n\n*Names* refer to objects. Names are introduced by name binding\noperations.\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal" or "global". If a name is bound at the\nmodule level, it is a global variable. (The variables of the module\ncode block are local and global.) If a variable is used in a code\nblock but not defined there, it is a *free variable*.\n\nEach occurrence of a name in the program text refers to the *binding*\nof that name established by the following name resolution rules.\n\n\nResolution of names\n-------------------\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nWhen a name is not found at all, a "NameError" exception is raised. If\nthe current scope is a function scope, and the name refers to a local\nvariable that has not yet been bound to a value at the point where the\nname is used, an "UnboundLocalError" exception is raised.\n"UnboundLocalError" is a subclass of "NameError".\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. 
If\nthe name is not found there, the builtins namespace is searched. The\n"global" statement must precede all uses of the name.\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nThe "nonlocal" statement causes corresponding names to refer to\npreviously bound variables in the nearest enclosing function scope.\n"SyntaxError" is raised at compile time if the given name does not\nexist in any enclosing function scope.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nClass definition blocks and arguments to "exec()" and "eval()" are\nspecial in the context of name resolution. A class definition is an\nexecutable statement that may use and define names. These references\nfollow the normal rules for name resolution with an exception that\nunbound local variables are looked up in the global namespace. The\nnamespace of the class definition becomes the attribute dictionary of\nthe class. The scope of names defined in a class block is limited to\nthe class block; it does not extend to the code blocks of methods --\nthis includes comprehensions and generator expressions since they are\nimplemented using a function scope. This means that the following\nwill fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\n\nBuiltins and restricted execution\n---------------------------------\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\n\nInteraction with dynamic features\n---------------------------------\n\nName resolution of free variables occurs at runtime, not at compile\ntime. This means that the following code will print 42:\n\n i = 10\n def f():\n print(i)\n i = 42\n f()\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', + 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', + 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. 
.001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', + 'for': u'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variable(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', + 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. 
If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s" | "a"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nThree conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, "\'!r\'" which calls "repr()" and "\'!a\'" which\ncalls "ascii()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. 
Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. 
|\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective "\'0b\'", "\'0o\'", or\n"\'0x\'" to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for "\'g\'" and "\'G\'"\nconversions, trailing zeros are not removed from the result.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 3.1: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". 
|\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'", but converts "nan" to "NAN" |\n | | and "inf" to "INF". |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". 
Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to "\'g\'", except that fixed-point notation, when |\n | | used, has at least one digit past the decimal point. The |\n | | default precision is as high as needed to represent the |\n | | particular value. The overall effect is to match the |\n | | output of "str()" as altered by the other format |\n | | modifiers. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', + 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed using keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. 
The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n', + 'global': u'\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the two restrictions, but programs should not abuse this\nfreedom, as future implementations may enforce them or silently change\nthe meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in a string\nor code object supplied to the built-in "exec()" function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by "global" statements in the\ncode containing the function call. The same applies to the "eval()"\nand "compile()" functions.\n', + 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). 
Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', + 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters "A" through "Z", the underscore "_" and, except for the first\ncharacter, the digits "0" through "9".\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n"unicodedata" module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= \n id_continue ::= \n xid_start ::= \n xid_continue ::= \n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. 
These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', + 'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', + 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', + 'import': u'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope\n where the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individual import statements.\n\nThe details of the first step, finding and loading modules, are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. 
Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following\n "as" is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its fully qualified name\n rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause, loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, "ImportError" is raised.\n\n 4. otherwise, a reference to that value is stored in the local\n namespace, using the name in the "as" clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe wild card form of import --- "from module import *" --- is only\nallowed at the module level. Attempting to use it in class or\nfunction definitions will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. 
So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine dynamically the modules to be loaded.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python where the feature\nbecomes standard.\n\nThe future statement is intended to ease migration to future versions\nof Python that introduce incompatible changes to the language. It\nallows use of the new features on a per-module basis before the\nrelease in which the feature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement". They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement. This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n The original proposal for the __future__ mechanism.\n', + 'in': u'\nMembership test operations\n**************************\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n', + 'integers': u'\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0xdeadbeef\n', + 'lambda': u'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) are used to create\nanonymous functions. The expression "lambda arguments: expression"\nyields a function object. 
The unnamed object behaves like a function\nobject defined with\n\n def (arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n', + 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', + 'naming': u'\nNaming and binding\n******************\n\n\nBinding of names\n================\n\n*Names* refer to objects. Names are introduced by name binding\noperations.\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal" or "global". If a name is bound at the\nmodule level, it is a global variable. (The variables of the module\ncode block are local and global.) If a variable is used in a code\nblock but not defined there, it is a *free variable*.\n\nEach occurrence of a name in the program text refers to the *binding*\nof that name established by the following name resolution rules.\n\n\nResolution of names\n===================\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nWhen a name is not found at all, a "NameError" exception is raised. If\nthe current scope is a function scope, and the name refers to a local\nvariable that has not yet been bound to a value at the point where the\nname is used, an "UnboundLocalError" exception is raised.\n"UnboundLocalError" is a subclass of "NameError".\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. 
Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\n"global" statement must precede all uses of the name.\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nThe "nonlocal" statement causes corresponding names to refer to\npreviously bound variables in the nearest enclosing function scope.\n"SyntaxError" is raised at compile time if the given name does not\nexist in any enclosing function scope.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nClass definition blocks and arguments to "exec()" and "eval()" are\nspecial in the context of name resolution. A class definition is an\nexecutable statement that may use and define names. These references\nfollow the normal rules for name resolution with an exception that\nunbound local variables are looked up in the global namespace. The\nnamespace of the class definition becomes the attribute dictionary of\nthe class. The scope of names defined in a class block is limited to\nthe class block; it does not extend to the code blocks of methods --\nthis includes comprehensions and generator expressions since they are\nimplemented using a function scope. This means that the following\nwill fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\n\nBuiltins and restricted execution\n=================================\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\n\nInteraction with dynamic features\n=================================\n\nName resolution of free variables occurs at runtime, not at compile\ntime. This means that the following code will print 42:\n\n i = 10\n def f():\n print(i)\n i = 42\n f()\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
An error will be reported at compile time.\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', + 'nonlocal': u'\nThe "nonlocal" statement\n************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope excluding\nglobals. This is important because the default behavior for binding is\nto search the local namespace first. The statement allows\nencapsulated code to rebind variables outside of the local scope\nbesides the global (module) scope.\n\nNames listed in a "nonlocal" statement, unlike those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also: **PEP 3104** - Access to Names in Outer Scopes\n\n The specification for the "nonlocal" statement.\n', + 'numbers': u'\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', + 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__matmul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",\n "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to\n evaluate the expression "x + y", where *x* is an instance of a\n class that has an "__add__()" method, "x.__add__(y)" is called.\n The "__divmod__()" method should be the equivalent to using\n "__floordiv__()" and "__mod__()"; it should not be related to\n "__truediv__()". 
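# Editorial note: an illustrative sketch of the "nonlocal" statement documented
# above; "counter" rebinds "count" in the enclosing function scope rather than
# creating a new local or touching the module-level namespace.
def make_counter():
    count = 0
    def counter():
        nonlocal count
        count += 1
        return count
    return counter

tick = make_counter()
assert tick() == 1
assert tick() == 2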
Note that "__pow__()" should be defined to accept\n an optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rmatmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",\n "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped)\n operands. These functions are only called if the left operand does\n not support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__imatmul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=",\n "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to\n do the operation in-place (modifying *self*) and return the result\n (which could be, but does not have to be, *self*). If a specific\n method is not defined, the augmented assignment falls back to the\n normal methods. For instance, if *x* is an instance of a class\n with an "__iadd__()" method, "x += y" is equivalent to "x =\n x.__iadd__(y)" . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are\n considered, as with the evaluation of "x + y". In certain\n situations, augmented assignment can result in unexpected errors\n (see *Why does a_tuple[i] += [\'item\'] raise an exception when the\n addition works?*), but this behavior is in fact part of the data\n model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". 
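# Editorial note: a hypothetical minimal numeric wrapper showing the
# reflected-operand fallback described above: "3 + Metres(2)" first tries
# "(3).__add__(Metres(2))", which returns NotImplemented, so Python then
# calls "Metres(2).__radd__(3)".
class Metres:
    def __init__(self, value):
        self.value = value
    def __add__(self, other):
        if isinstance(other, (int, float)):
            return Metres(self.value + other)
        return NotImplemented
    __radd__ = __add__   # addition is commutative here

assert (Metres(2) + 3).value == 5
assert (3 + Metres(2)).value == 5     # resolved via __radd__()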
Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n', + 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The "type()" function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. 
Do not depend\non immediate finalization of objects when they become unreachable (so\nyou should always close files explicitly).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', + 'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedence in Python, from\nlowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
Operators in\nthe same box group left to right (except for exponentiation, which\ngroups from right to left).\n\nNote that comparisons, membership tests, and identity tests, all have\nthe same precedence and have a left-to-right chaining feature as\ndescribed in the *Comparisons* section.\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "@", "/", "//", "%" | Multiplication, matrix multiplication |\n| | division, remainder [5] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "await" "x" | Await expression |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". 
The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] The Unicode standard distinguishes between *code points* (e.g.\n U+0041) and *abstract characters* (e.g. "LATIN CAPITAL LETTER A").\n While most abstract characters in Unicode are only represented\n using one code point, there is a number of abstract characters\n that can in addition be represented using a sequence of more than\n one code point. For example, the abstract character "LATIN\n CAPITAL LETTER C WITH CEDILLA" can be represented as a single\n *precomposed character* at code position U+00C7, or as a sequence\n of a *base character* at code position U+0043 (LATIN CAPITAL\n LETTER C), followed by a *combining character* at code position\n U+0327 (COMBINING CEDILLA).\n\n The comparison operators on strings compare at the level of\n Unicode code points. This may be counter-intuitive to humans. For\n example, ""\\u00C7" == "\\u0043\\u0327"" is "False", even though both\n strings represent the same abstract character "LATIN CAPITAL\n LETTER C WITH CEDILLA".\n\n To compare strings at the level of abstract characters (that is,\n in a way intuitive to humans), use "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', + 'pass': u'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', + 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= await ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. 
For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n', + 'raise': u'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable). If the raised exception is not handled, both\nexceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler or a "finally" clause: the previous exception is\nthen attached as the new exception\'s "__context__" attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... 
raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', + 'return': u'\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement indicates that the\ngenerator is done and will cause "StopIteration" to be raised. The\nreturned value (if any) is used as an argument to construct\n"StopIteration" and becomes the "StopIteration.value" attribute.\n', + 'sequence-types': u'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. 
Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. 
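# Editorial note: an illustrative sketch of the sequence protocol described
# above -- with only "__len__()" and "__getitem__()" defined, iteration,
# "reversed()" and the "in" operator all work through the fallback paths.
class Squares:
    def __init__(self, n):
        self.n = n
    def __len__(self):
        return self.n
    def __getitem__(self, index):
        if 0 <= index < self.n:
            return index * index
        raise IndexError(index)

s = Squares(4)
assert list(s) == [0, 1, 4, 9]
assert list(reversed(s)) == [9, 4, 1, 0]
assert 4 in s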
However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n', + 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n', + 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary is indexed\n(using the same "__getitem__()" method as normal subscription) with a\nkey that is constructed from the slice list, as follows. If the slice\nlist contains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of a proper slice is a\nslice object (see section *The standard type hierarchy*) whose\n"start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n', + 'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. 
Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. Example:\n\n >>> int.__subclasses__()\n []\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n', + 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
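# Editorial note: a hypothetical sketch of the customary use of "__new__()"
# mentioned above -- customising creation of an immutable type (here a "tuple"
# subclass), where "__init__()" would be too late to change the value.
class Point(tuple):
    def __new__(cls, x, y):
        # __new__ receives the class plus the constructor arguments
        return super(Point, cls).__new__(cls, (x, y))
    @property
    def x(self):
        return self[0]
    @property
    def y(self):
        return self[1]

p = Point(2, 3)
assert p == (2, 3) and p.x == 2 and p.y == 3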
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the second can be resolved by freeing the reference to the\n traceback object when it is no longer useful, and the third can\n be resolved by storing "None" in "sys.last_traceback". Circular\n references which are garbage are detected and cleaned up when the\n cyclic garbage collector is enabled (it\'s on by default). 
Refer\n to the documentation for the "gc" module for more information\n about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. 
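# Editorial note: an illustrative sketch of the rich-comparison methods listed
# below -- comparisons against unrelated types return NotImplemented so that
# Python can try the reflected operation; "functools.total_ordering()" fills in
# the remaining operators from "__eq__()" and "__lt__()".
import functools

@functools.total_ordering
class Version:
    def __init__(self, major, minor):
        self.key = (major, minor)
    def __eq__(self, other):
        if isinstance(other, Version):
            return self.key == other.key
        return NotImplemented
    def __lt__(self, other):
        if isinstance(other, Version):
            return self.key < other.key
        return NotImplemented
    def __hash__(self):
        return hash(self.key)   # keep instances usable as dict/set keys

assert Version(3, 5) < Version(3, 6)
assert Version(3, 5) >= Version(3, 5)   # supplied by total_ordering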
The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n By default, "__ne__()" delegates to "__eq__()" and inverts the\n result unless it is "NotImplemented". There are no other implied\n relationships among the comparison operators, for example, the\n truth of "(x<y or x==y)" does not imply "x<=y". To automatically\n generate ordering operations from a single root operation, see\n "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer; the only required property\n is that objects which compare equal have the same hash value.\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None", and its\n instances will raise a "TypeError" when their hash value is\n requested.\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python. This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details. Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds). See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). 
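# Editorial note: a hypothetical sketch of "__getattr__()" as described above --
# the named fields are kept in a separate mapping rather than as instance
# attributes, so any lookup that misses the usual places falls through here.
class Record:
    def __init__(self, **fields):
        self._fields = dict(fields)   # only _fields lives in the instance __dict__
    def __getattr__(self, name):
        try:
            return self._fields[name]
        except KeyError:
            raise AttributeError(name)

r = Record(host='localhost', port=8080)
assert r.port == 8080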
See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). 
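# Editorial note: an illustrative sketch of the descriptor protocol described
# above -- "Typed" is a data descriptor (it defines both __get__() and
# __set__()) stored in the owner class dictionary under the attribute name.
class Typed:
    def __init__(self, name, kind):
        self.name = name
        self.kind = kind
    def __get__(self, instance, owner):
        if instance is None:
            return self          # class attribute access returns the descriptor
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        if not isinstance(value, self.kind):
            raise TypeError('%s must be %s' % (self.name, self.kind.__name__))
        instance.__dict__[self.name] = value

class Order:
    quantity = Typed('quantity', int)

o = Order()
o.quantity = 3
assert o.quantity == 3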
For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. 
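# Editorial note: a minimal sketch (not from the original text) of the
# *__slots__* mechanism introduced above -- instances get no __dict__, so only
# the listed names can be assigned and per-instance memory use drops.
class Pixel:
    __slots__ = ('x', 'y')

p = Pixel()
p.x, p.y = 3, 4
assert not hasattr(p, '__dict__')
try:
    p.colour = 'red'        # not listed in __slots__
except AttributeError:
    pass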
The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. *__slots__*\n reserves space for the declared variables and prevents the\n automatic creation of *__dict__* and *__weakref__* for each\n instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. 
In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". 
This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class variables\nare defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
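For illustration (a sketch added by the editor; "EvenMeta" and "Even"
are invented names), a metaclass can hook "isinstance()" by defining
"__instancecheck__()":

   >>> class EvenMeta(type):
   ...     def __instancecheck__(cls, instance):
   ...         # treat any even integer as an instance of Even
   ...         return isinstance(instance, int) and instance % 2 == 0
   ...
   >>> class Even(metaclass=EvenMeta):
   ...     pass
   ...
   >>> isinstance(4, Even)
   True
   >>> isinstance(3, Even)
   False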
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. 
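   A minimal sketch (added by the editor for illustration; the
   "Stream" class is invented) of providing a length hint:

      >>> import operator
      >>> class Stream:
      ...     def __length_hint__(self):
      ...         return 10            # an estimate, not an exact length
      ...
      >>> operator.length_hint(Stream())
      10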
This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
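   A minimal sketch for the sequence case (added by the editor; the
   "Deck" class is invented for this illustration):

      >>> class Deck:
      ...     def __init__(self, cards):
      ...         self._cards = list(cards)
      ...     def __contains__(self, item):
      ...         return item in self._cards
      ...
      >>> deck = Deck(['ace', 'king'])
      >>> 'ace' in deck
      True
      >>> 'joker' in deck
      False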
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__matmul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",\n "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, to\n evaluate the expression "x + y", where *x* is an instance of a\n class that has an "__add__()" method, "x.__add__(y)" is called.\n The "__divmod__()" method should be the equivalent to using\n "__floordiv__()" and "__mod__()"; it should not be related to\n "__truediv__()". Note that "__pow__()" should be defined to accept\n an optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rmatmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "@", "/", "//", "%", "divmod()",\n "pow()", "**", "<<", ">>", "&", "^", "|") with reflected (swapped)\n operands. These functions are only called if the left operand does\n not support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__imatmul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=",\n "<<=", ">>=", "&=", "^=", "|="). These methods should attempt to\n do the operation in-place (modifying *self*) and return the result\n (which could be, but does not have to be, *self*). If a specific\n method is not defined, the augmented assignment falls back to the\n normal methods. For instance, if *x* is an instance of a class\n with an "__iadd__()" method, "x += y" is equivalent to "x =\n x.__iadd__(y)" . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are\n considered, as with the evaluation of "x + y". In certain\n situations, augmented assignment can result in unexpected errors\n (see *Why does a_tuple[i] += [\'item\'] raise an exception when the\n addition works?*), but this behavior is in fact part of the data\n model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
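   A minimal sketch (an illustration added by the editor; the
   "Reporter" class is invented) of a context manager that simply
   reports these parameters:

      >>> class Reporter:
      ...     def __enter__(self):
      ...         return self                  # bound to the "as" target
      ...     def __exit__(self, exc_type, exc_value, traceback):
      ...         print('exiting:', exc_type)
      ...         return False                 # do not suppress exceptions
      ...
      >>> with Reporter():
      ...     pass
      ...
      exiting: None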
If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n', + 'string-methods': u'\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xdf\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xdf\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is an ASCII space). The\n original string is returned if *width* is less than or equal to\n "len(s)".\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Error Handlers*. For a list\n of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. 
With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". 
Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). 
The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n For example:\n\n >>> \'1,2,3\'.split(\',\')\n [\'1\', \'2\', \'3\']\n >>> \'1,2,3\'.split(\',\', maxsplit=1)\n [\'1\', \'2,3\']\n >>> \'1,2,,3,\'.split(\',\')\n [\'1\', \'2\', \'\', \'3\', \'\']\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example:\n\n >>> \'1 2 3\'.split()\n [\'1\', \'2\', \'3\']\n >>> \'1 2 3\'.split(maxsplit=1)\n [\'1\', \'2 3\']\n >>> \' 1 2 3 \'.split()\n [\'1\', \'2\', \'3\']\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n This method splits on the following line boundaries. 
In\n particular, the boundaries are a superset of *universal newlines*.\n\n +-------------------------+-------------------------------+\n | Representation | Description |\n +=========================+===============================+\n | "\\n" | Line Feed |\n +-------------------------+-------------------------------+\n | "\\r" | Carriage Return |\n +-------------------------+-------------------------------+\n | "\\r\\n" | Carriage Return + Line Feed |\n +-------------------------+-------------------------------+\n | "\\v" or "\\x0b" | Line Tabulation |\n +-------------------------+-------------------------------+\n | "\\f" or "\\x0c" | Form Feed |\n +-------------------------+-------------------------------+\n | "\\x1c" | File Separator |\n +-------------------------+-------------------------------+\n | "\\x1d" | Group Separator |\n +-------------------------+-------------------------------+\n | "\\x1e" | Record Separator |\n +-------------------------+-------------------------------+\n | "\\x85" | Next Line (C1 Control Code) |\n +-------------------------+-------------------------------+\n | "\\u2028" | Line Separator |\n +-------------------------+-------------------------------+\n | "\\u2029" | Paragraph Separator |\n +-------------------------+-------------------------------+\n\n Changed in version 3.2: "\\v" and "\\f" added to list of line\n boundaries.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(keepends=True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', \'\']\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n The outermost leading and trailing *chars* argument values are\n stripped from the string. Characters are removed from the leading\n end until reaching a string character that is not contained in the\n set of characters in *chars*. A similar action takes place on the\n trailing end. For example:\n\n >>> comment_string = \'#....... Section 3.2.1 Issue #32 .......\'\n >>> comment_string.strip(\'.#! \')\n \'Section 3.2.1 Issue #32\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. 
Note that it is not necessarily true that\n "s.swapcase().swapcase() == s".\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n For example:\n\n >>> \'Hello world\'.title()\n \'Hello World\'\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(table)\n\n Return a copy of the string in which each character has been mapped\n through the given translation table. The table must be an object\n that implements indexing via "__getitem__()", typically a *mapping*\n or *sequence*. When indexed by a Unicode ordinal (an integer), the\n table object can do any of the following: return a Unicode ordinal\n or a string, to map the character to one or more other characters;\n return "None", to delete the character from the return string; or\n raise a "LookupError" exception, to map the character to itself.\n\n You can use "str.maketrans()" to create a translation map from\n character-to-character mappings in different formats.\n\n See also the "codecs" module for a more flexible approach to custom\n character mappings.\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return a copy of the string left filled with ASCII "\'0\'" digits to\n make a string of length *width*. A leading sign prefix\n ("\'+\'"/"\'-\'") is handled by inserting the padding *after* the sign\n character rather than before. 
The original string is returned if\n *width* is less than or equal to "len(s)".\n\n For example:\n\n >>> "42".zfill(5)\n \'00042\'\n >>> "-42".zfill(5)\n \'-0042\'\n', + 'strings': u'\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= \n longstringchar ::= \n stringescapeseq ::= "\\" \n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= \n longbyteschar ::= \n bytesescapeseq ::= "\\" \n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes ("""). They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*). The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type. They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix string literals with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters. As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\nNew in version 3.3: The "\'rb\'" prefix of raw bytes literals has been\nadded as a synonym of "\'br\'".\n\nNew in version 3.3: Support for the unicode legacy literal\n("u\'value\'") was reintroduced to simplify the maintenance of dual\nPython 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted literals, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the literal. (A "quote" is the character used to open the\nliteral, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in string\nand bytes literals are interpreted according to rules similar to those\nused by Standard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\N{name}" | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the\n byte with the given value. In a string literal, these escapes\n denote a Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight\n hex digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the result*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
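For example (an illustrative sketch added by the editor, not part of
the original table or notes):

   >>> '\d'            # '\d' is not a recognized escape sequence
   '\\d'
   >>> len('\d')       # the backslash is kept, so this is two characters
   2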
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw literal, quotes can be escaped with a backslash, but the\nbackslash remains in the result; for example, "r"\\""" is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; "r"\\"" is not a valid string literal (even a raw string cannot\nend in an odd number of backslashes). Specifically, *a raw literal\ncannot end in a single backslash* (since the backslash would escape\nthe following quote character). Note also that a single backslash\nfollowed by a newline is interpreted as those two characters as part\nof the literal, *not* as a line continuation.\n', + 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription\n(lists or dictionaries for example). User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', + 'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__bool__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. 
(Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', + 'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. 
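As an illustrative sketch (added by the editor; the function and
messages are invented), the four clauses combine as follows, with the
"finally" suite running whether or not an exception was raised:

   >>> def divide(a, b):
   ...     try:
   ...         result = a / b
   ...     except ZeroDivisionError:
   ...         print('division by zero')
   ...     else:
   ...         print('result is', result)
   ...     finally:
   ...         print('cleaning up')
   ...
   >>> divide(4, 2)
   result is 2.0
   cleaning up
   >>> divide(4, 0)
   division by zero
   cleaning up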
The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n', + 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods\n should return this value if they do not implement the operation for\n the operands provided. (The interpreter will then try the\n reflected operation, or some other fallback, depending on the\n operator.) Its truth value is true.\n\n See *Implementing the arithmetic operations* for more details.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal "..." or the\n built-in name "Ellipsis". Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers ("int")\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans ("bool")\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of the\n integer type, and Boolean values behave like the values 0 and\n 1, respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex" ("complex")\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. 
(If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode code\n points. All the code points in the range "U+0000 - U+10FFFF"\n can be represented in a string. Python doesn\'t have a "char"\n type; instead, every code point in the string is represented\n as a string object with length "1". The built-in function\n "ord()" converts a code point from its string form to an\n integer in the range "0 - 10FFFF"; "chr()" converts an\n integer in the range "0 - 10FFFF" to the corresponding length\n "1" string object. "str.encode()" can be used to convert a\n "str" to "bytes" using the given text encoding, and\n "bytes.decode()" can be used to achieve the opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like "b\'abc\'") and the built-in function\n "bytes()" can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the "decode()"\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type, as does the "collections" module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. 
They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm.ndbm" and "dbm.gnu" provide\n additional examples of mapping types, as does the "collections"\n module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | "__doc__" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable; not inherited by | |\n | | subclasses | |\n +---------------------------+---------------------------------+-------------+\n | "__name__" | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__qualname__" | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value | |\n +---------------------------+---------------------------------+-------------+\n | "__code__" | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +---------------------------+---------------------------------+-------------+\n | "__dict__" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | | contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | "__annotations__" | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | and "\'return\'" for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | "__kwdefaults__" | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: "__self__" is the class instance\n object, "__func__" is the function object; "__doc__" is the\n method\'s documentation (same as "__func__.__doc__"); "__name__"\n is the method name (same as "__func__.__name__"); "__module__"\n is the name of the module the method was defined in, or "None"\n if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its "__self__" attribute is the instance, and the method object\n is said to be bound. The new method\'s "__func__" attribute is\n the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "__func__"\n attribute of the new instance is not the original method object\n but its "__func__" attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its "__self__" attribute\n is the class itself, and its "__func__" attribute is the\n function object underlying the class method.\n\n When an instance method object is called, the underlying\n function ("__func__") is called, inserting the class instance\n ("__self__") in front of the argument list. 
For instance, when\n "C" is a class which contains a definition for a function "f()",\n and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n calling "C.f(x, 1)".\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in "__self__" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "iterator.__next__()" method will cause the\n function to execute until it provides a value using the "yield"\n statement. When the function executes a "return" statement or\n falls off the end, a "StopIteration" exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Coroutine functions\n A function or method which is defined using "async def" is\n called a *coroutine function*. Such a function, when called,\n returns a *coroutine* object. It may contain "await"\n expressions, as well as "async with" and "async for" statements.\n See also the *Coroutine Objects* section.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override "__new__()". 
The arguments of the\n call are passed to "__new__()" and, in the typical case, to\n "__init__()" to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a "__call__()" method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the "import"\n statement (see "import"), or by calling functions such as\n "importlib.import_module()" and built-in "__import__()". A module\n object has a namespace implemented by a dictionary object (this is\n the dictionary referenced by the "__globals__" attribute of\n functions defined in the module). Attribute references are\n translated to lookups in this dictionary, e.g., "m.x" is equivalent\n to "m.__dict__["x"]". A module object does not contain the code\n object used to initialize the module (since it isn\'t needed once\n the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute may be missing for certain types of modules, such as C\n modules that are statically linked into the interpreter; for\n extension modules loaded dynamically from a shared library, it is\n the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., "C.x" is translated to\n "C.__dict__["x"]" (although there are a number of hooks which allow\n for other means of locating attributes). When the attribute name is\n not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n https://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n class method object, it is transformed into an instance method\n object whose "__self__" attributes is "C". When it would yield a\n static method object, it is transformed into the object wrapped by\n the static method object. 
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its "__dict__".\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose "__self__" attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s "__dict__".\n If no class attribute is found, and the object\'s class has a\n "__getattr__()" method, that is called to satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the "open()" built-in function,\n and also "os.popen()", "os.fdopen()", and the "makefile()" method\n of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n "io.TextIOBase" abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n gives the precise instruction (this is an index into the\n bytecode string of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_lineno" is the current line number of\n the frame --- writing to this from within a trace function jumps\n to the given line (only for the bottom-most frame). A debugger\n can implement a Jump command (aka Set Next Statement) by writing\n to f_lineno.\n\n Frame objects support one method:\n\n frame.clear()\n\n This method clears all references to local variables held by\n the frame. Also, if the frame belonged to a generator, the\n generator is finalized. 
This helps break reference cycles\n involving frame objects (for example when catching an\n exception and storing its traceback for later use).\n\n "RuntimeError" is raised if the frame is currently executing.\n\n New in version 3.4.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by "sys.exc_info()". When the program contains no\n suitable handler, the stack trace is written (nicely formatted)\n to the standard error stream; if the interpreter is interactive,\n it is also made available to the user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for "__getitem__()"\n methods. They are also created by the built-in "slice()"\n function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n', + 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. 
The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', + 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. 
Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()" and *key*\n is not present, the "d[key]" operation calls that method with\n the key *key* as argument. The "d[key]" operation then returns\n or raises whatever is returned or raised by the\n "__missing__(key)" call. No other operations or methods invoke\n "__missing__()". If "__missing__()" is not defined, "KeyError"\n is raised. "__missing__()" must be a method; it cannot be an\n instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n The example above shows part of the implementation of\n "collections.Counter". A different "__missing__" method is used\n by "collections.defaultdict".\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\n Dictionaries compare equal if and only if they have the same "(key,\n value)" pairs. Order comparisons (\'<\', \'<=\', \'>=\', \'>\') raise\n "TypeError".\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. 
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', + 'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list. Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed. 
Attempting to set an\nattribute on a method results in an "AttributeError" being raised. In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "", line 1, in \n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', + 'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "". If loaded from a file, they are written as\n"".\n', + 'typesseq': u'\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type, *n*, *i*,\n*j* and *k* are integers and *x* is an arbitrary object that meets any\ntype and value restrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. 
The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) |\n| | itself *n* times | |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note that items in the\n sequence *s* are not copied; they are referenced multiple times.\n This often haunts new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are references\n to this single empty list. Modifying any of the elements of\n "lists" modifies this single list. 
You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n Further explanation is available in the FAQ entry *How do I create\n a multidimensional list?*.\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n object. This means that building up a sequence by repeated\n concatenation will have a quadratic runtime cost in the total\n sequence length. To get a linear runtime cost, you must switch to\n one of the alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to an "io.StringIO"\n instance and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item\n sequences that follow specific patterns, and hence don\'t support\n sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" or "s += t" | extends *s* with the contents of | |\n| | *t* (for the most part the same | |\n| | as "s[len(s):len(s)] = t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (6) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. 
The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n6. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the sequence.\n Items in the sequence are not copied; they are referenced multiple\n times, as explained for "s * n" under *Common Sequence Operations*.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]",\n "[a, b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. 
A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. 
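A short sketch of the negative-step and empty cases just described:

   >>> list(range(10, 0, -2))
   [10, 8, 6, 4, 2]
   >>> list(range(5, 10, -1))
   []

In the second example "r[0]" would be "5", which does not satisfy the
constraint "r[0] > stop" for a negative *step* (here "stop" is "10"),
so the range is empty.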
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n', + 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
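As a minimal sketch of the "collections.abc.MutableSequence" ABC in use
(the class name "Bag" is illustrative only): implement "__getitem__()",
"__setitem__()", "__delitem__()", "__len__()" and "insert()", and the
ABC supplies "append()", "extend()", "pop()", "remove()" and the other
operations in the table.

   >>> from collections.abc import MutableSequence
   >>> class Bag(MutableSequence):
   ...     def __init__(self, iterable=()):
   ...         self._items = list(iterable)
   ...     def __getitem__(self, index):
   ...         return self._items[index]
   ...     def __setitem__(self, index, value):
   ...         self._items[index] = value
   ...     def __delitem__(self, index):
   ...         del self._items[index]
   ...     def __len__(self):
   ...         return len(self._items)
   ...     def insert(self, index, value):
   ...         self._items.insert(index, value)
   ...
   >>> b = Bag([1, 2])
   >>> b.append(3)
   >>> b.extend([4, 5])
   >>> list(b)
   [1, 2, 3, 4, 5]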
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" or "s += t" | extends *s* with the contents of | |\n| | *t* (for the most part the same | |\n| | as "s[len(s):len(s)] = t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (6) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. 
The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n6. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the sequence.\n Items in the sequence are not copied; they are referenced multiple\n times, as explained for "s * n" under *Common Sequence Operations*.\n', + 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of "x" is defined as\n"-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', + 'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', + 'with': u'\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', + 'yield': u'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nA "yield" statement is semantically equivalent to a *yield\nexpression*. The yield statement can be used to omit the parentheses\nthat would otherwise be required in the equivalent yield expression\nstatement. For example, the yield statements\n\n yield \n yield from \n\nare equivalent to the yield expression statements\n\n (yield )\n (yield from )\n\nYield expressions and statements are only used when defining a\n*generator* function, and are only used in the body of the generator\nfunction. Using yield in a function definition is sufficient to cause\nthat definition to create a generator function instead of a normal\nfunction.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n'} diff --git a/Lib/rlcompleter.py b/Lib/rlcompleter.py index d517c0e2..be8aee0f 100644 --- a/Lib/rlcompleter.py +++ b/Lib/rlcompleter.py @@ -136,20 +136,23 @@ class Completer: return [] # get the content of the object, except __builtins__ - words = dir(thisobject) - if "__builtins__" in words: - words.remove("__builtins__") + words = set(dir(thisobject)) + words.discard("__builtins__") if hasattr(thisobject, '__class__'): - words.append('__class__') - words.extend(get_class_members(thisobject.__class__)) + words.add('__class__') + words.update(get_class_members(thisobject.__class__)) matches = [] n = len(attr) for word in words: - if word[:n] == attr and hasattr(thisobject, word): - val = getattr(thisobject, word) + if word[:n] == attr: + try: + val = getattr(thisobject, word) + except Exception: + continue # Exclude properties that are not set word = self._callable_postfix(val, "%s.%s" % (expr, word)) matches.append(word) + matches.sort() return matches def get_class_members(klass): diff --git a/Lib/shutil.py b/Lib/shutil.py index a5da5879..3f4b6bf6 100644 --- a/Lib/shutil.py +++ b/Lib/shutil.py @@ -679,7 +679,16 @@ def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): if not dry_run: with zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) as zf: + path = os.path.normpath(base_dir) + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) for dirpath, dirnames, filenames in os.walk(base_dir): + for name in sorted(dirnames): + path = os.path.normpath(os.path.join(dirpath, name)) + zf.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): diff --git a/Lib/signal.py 
b/Lib/signal.py index 371d7128..9f05c919 100644 --- a/Lib/signal.py +++ b/Lib/signal.py @@ -34,7 +34,7 @@ def _int_to_enum(value, enum_klass): def _enum_to_int(value): """Convert an IntEnum member to a numeric value. - If it's not a IntEnum member return the value itself. + If it's not an IntEnum member return the value itself. """ try: return int(value) diff --git a/Lib/site.py b/Lib/site.py index 6f66c07d..3a8d1c37 100644 --- a/Lib/site.py +++ b/Lib/site.py @@ -465,7 +465,9 @@ def venv(known_paths): config_line = re.compile(CONFIG_LINE) virtual_conf = candidate_confs[0] system_site = "true" - with open(virtual_conf) as f: + # Issue 25185: Use UTF-8, as that's what the venv module uses when + # writing the file. + with open(virtual_conf, encoding='utf-8') as f: for line in f: line = line.strip() m = config_line.match(line) diff --git a/Lib/smtpd.py b/Lib/smtpd.py index ff86e7d2..732066ef 100755 --- a/Lib/smtpd.py +++ b/Lib/smtpd.py @@ -137,8 +137,8 @@ class SMTPChannel(asynchat.async_chat): self.enable_SMTPUTF8 = enable_SMTPUTF8 if enable_SMTPUTF8: if decode_data: - ValueError("decode_data and enable_SMTPUTF8 cannot be set to" - " True at the same time") + raise ValueError("decode_data and enable_SMTPUTF8 cannot" + " be set to True at the same time") decode_data = False if decode_data is None: warn("The decode_data default of True will change to False in 3.6;" diff --git a/Lib/smtplib.py b/Lib/smtplib.py index 4fe6aaf8..47569738 100755 --- a/Lib/smtplib.py +++ b/Lib/smtplib.py @@ -630,12 +630,12 @@ class SMTP: (code, resp) = self.docmd("AUTH", mechanism + " " + response) else: (code, resp) = self.docmd("AUTH", mechanism) - # Server replies with 334 (challenge) or 535 (not supported) - if code == 334: - challenge = base64.decodebytes(resp) - response = encode_base64( - authobject(challenge).encode('ascii'), eol='') - (code, resp) = self.docmd(response) + # If server responds with a challenge, send the response. + if code == 334: + challenge = base64.decodebytes(resp) + response = encode_base64( + authobject(challenge).encode('ascii'), eol='') + (code, resp) = self.docmd(response) if code in (235, 503): return (code, resp) raise SMTPAuthenticationError(code, resp) @@ -657,11 +657,10 @@ class SMTP: def auth_login(self, challenge=None): """ Authobject to use with LOGIN authentication. Requires self.user and self.password to be set.""" - (code, resp) = self.docmd( - encode_base64(self.user.encode('ascii'), eol='')) - if code == 334: + if challenge is None: + return self.user + else: return self.password - raise SMTPAuthenticationError(code, resp) def login(self, user, password, *, initial_response_ok=True): """Log in on an SMTP server that requires authentication. @@ -1080,8 +1079,6 @@ class LMTP(SMTP): # Test the sendmail method, which tests most of the others. # Note: This always sends to localhost. if __name__ == '__main__': - import sys - def prompt(prompt): sys.stdout.write(prompt + ": ") sys.stdout.flush() diff --git a/Lib/socket.py b/Lib/socket.py index db34ab37..ed1b10a2 100644 --- a/Lib/socket.py +++ b/Lib/socket.py @@ -193,7 +193,11 @@ class socket(_socket.socket): For IP sockets, the address info is a pair (hostaddr, port). """ fd, addr = self._accept() - sock = socket(self.family, self.type, self.proto, fileno=fd) + # If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the + # new socket. We do not currently allow passing SOCK_NONBLOCK to + # accept4, so the returned socket is always blocking. 
+ type = self.type & ~globals().get("SOCK_NONBLOCK", 0) + sock = socket(self.family, type, self.proto, fileno=fd) # Issue #7995: if no default timeout is set and the listening # socket had a (non-zero) timeout, force the new socket in blocking # mode to override platform-specific socket flags inheritance. diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py index 67f84e94..f6a88517 100644 --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -70,14 +70,14 @@ class Pattern: def __init__(self): self.flags = 0 self.groupdict = {} - self.subpatterns = [None] # group 0 + self.groupwidths = [None] # group 0 self.lookbehindgroups = None @property def groups(self): - return len(self.subpatterns) + return len(self.groupwidths) def opengroup(self, name=None): gid = self.groups - self.subpatterns.append(None) + self.groupwidths.append(None) if self.groups > MAXGROUPS: raise error("too many groups") if name is not None: @@ -88,9 +88,9 @@ class Pattern: self.groupdict[name] = gid return gid def closegroup(self, gid, p): - self.subpatterns[gid] = p + self.groupwidths[gid] = p.getwidth() def checkgroup(self, gid): - return gid < self.groups and self.subpatterns[gid] is not None + return gid < self.groups and self.groupwidths[gid] is not None def checklookbehindgroup(self, gid, source): if self.lookbehindgroups is not None: @@ -195,7 +195,7 @@ class SubPattern: lo = lo + 1 hi = hi + 1 elif op is GROUPREF: - i, j = self.pattern.subpatterns[av].getwidth() + i, j = self.pattern.groupwidths[av] lo = lo + i hi = hi + j elif op is GROUPREF_EXISTS: diff --git a/Lib/statistics.py b/Lib/statistics.py index 3972ed2e..9203cf10 100644 --- a/Lib/statistics.py +++ b/Lib/statistics.py @@ -360,7 +360,7 @@ def median_high(data): def median_grouped(data, interval=1): - """"Return the 50th percentile (median) of grouped continuous data. + """Return the 50th percentile (median) of grouped continuous data. >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5]) 3.7 diff --git a/Lib/string.py b/Lib/string.py index f3365c67..62e8f2f0 100644 --- a/Lib/string.py +++ b/Lib/string.py @@ -188,7 +188,7 @@ class Formatter: def vformat(self, format_string, args, kwargs): used_args = set() - result = self._vformat(format_string, args, kwargs, used_args, 2) + result, _ = self._vformat(format_string, args, kwargs, used_args, 2) self.check_unused_args(used_args, args, kwargs) return result @@ -235,14 +235,15 @@ class Formatter: obj = self.convert_field(obj, conversion) # expand the format spec, if needed - format_spec = self._vformat(format_spec, args, kwargs, - used_args, recursion_depth-1, - auto_arg_index=auto_arg_index) + format_spec, auto_arg_index = self._vformat( + format_spec, args, kwargs, + used_args, recursion_depth-1, + auto_arg_index=auto_arg_index) # format the object and append to the result result.append(self.format_field(obj, format_spec)) - return ''.join(result) + return ''.join(result), auto_arg_index def get_value(self, key, args, kwargs): diff --git a/Lib/subprocess.py b/Lib/subprocess.py index b6c43743..bbf50ce7 100644 --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -1333,8 +1333,10 @@ class Popen(object): return (stdout, stderr) def send_signal(self, sig): - """Send a signal to the process - """ + """Send a signal to the process.""" + # Don't signal a process that we know has already died. 
+ if self.returncode is not None: + return if sig == signal.SIGTERM: self.terminate() elif sig == signal.CTRL_C_EVENT: @@ -1345,8 +1347,10 @@ class Popen(object): raise ValueError("Unsupported signal: {}".format(sig)) def terminate(self): - """Terminates the process - """ + """Terminates the process.""" + # Don't terminate a process that we know has already died. + if self.returncode is not None: + return try: _winapi.TerminateProcess(self._handle, 1) except PermissionError: @@ -1754,9 +1758,10 @@ class Popen(object): def send_signal(self, sig): - """Send a signal to the process - """ - os.kill(self.pid, sig) + """Send a signal to the process.""" + # Skip signalling a process that we know has already died. + if self.returncode is None: + os.kill(self.pid, sig) def terminate(self): """Terminate the process with SIGTERM diff --git a/Lib/tempfile.py b/Lib/tempfile.py index 2f1f9fda..d381a25d 100644 --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -307,22 +307,22 @@ def mkstemp(suffix=None, prefix=None, dir=None, text=False): file. The return value is a pair (fd, name) where fd is the file descriptor returned by os.open, and name is the filename. - If 'suffix' is specified, the file name will end with that suffix, + If 'suffix' is not None, the file name will end with that suffix, otherwise there will be no suffix. - If 'prefix' is specified, the file name will begin with that prefix, + If 'prefix' is not None, the file name will begin with that prefix, otherwise a default prefix is used. - If 'dir' is specified, the file will be created in that directory, + If 'dir' is not None, the file will be created in that directory, otherwise a default directory is used. If 'text' is specified and true, the file is opened in text mode. Else (the default) the file is opened in binary mode. On some operating systems, this makes no difference. - suffix, prefix and dir must all contain the same type if specified. - If they are bytes, the returned name will be bytes; str otherwise. - A value of None will cause an appropriate default to be used. + If any of 'suffix', 'prefix' and 'dir' are not None, they must be the + same type. If they are bytes, the returned name will be bytes; str + otherwise. The file is readable and writable only by the creating user ID. If the operating system uses permission bits to indicate whether a @@ -385,8 +385,9 @@ def mktemp(suffix="", prefix=template, dir=None): """User-callable function to return a unique temporary file name. The file is not created. - Arguments are as for mkstemp, except that the 'text' argument is - not accepted. + Arguments are similar to mkstemp, except that the 'text' argument is + not accepted, and suffix=None, prefix=None and bytes file names are not + supported. THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may refer to a file that did not exist at some point, but by the time @@ -591,12 +592,20 @@ else: flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT fd = _os.open(dir, flags2, 0o600) except IsADirectoryError: - # Linux kernel older than 3.11 ignores O_TMPFILE flag. - # Set flag to False to not try again. + # Linux kernel older than 3.11 ignores the O_TMPFILE flag: + # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory + # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a + # directory cannot be open to write. Set flag to False to not + # try again. _O_TMPFILE_WORKS = False except OSError: # The filesystem of the directory does not support O_TMPFILE. # For example, OSError(95, 'Operation not supported'). 
+ # + # On Linux kernel older than 3.11, trying to open a regular + # file (or a symbolic link to a regular file) with O_TMPFILE + # fails with NotADirectoryError, because O_TMPFILE is read as + # O_DIRECTORY. pass else: try: diff --git a/Lib/test/datetimetester.py b/Lib/test/datetimetester.py index babeb44c..6365c605 100644 --- a/Lib/test/datetimetester.py +++ b/Lib/test/datetimetester.py @@ -3,6 +3,7 @@ See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases """ +import copy import decimal import sys import pickle @@ -192,6 +193,29 @@ class TestTZInfo(unittest.TestCase): self.assertEqual(derived.utcoffset(None), offset) self.assertEqual(derived.tzname(None), oname) + def test_issue23600(self): + DSTDIFF = DSTOFFSET = timedelta(hours=1) + + class UKSummerTime(tzinfo): + """Simple time zone which pretends to always be in summer time, since + that's what shows the failure. + """ + + def utcoffset(self, dt): + return DSTOFFSET + + def dst(self, dt): + return DSTDIFF + + def tzname(self, dt): + return 'UKSummerTime' + + tz = UKSummerTime() + u = datetime(2014, 4, 26, 12, 1, tzinfo=tz) + t = tz.fromutc(u) + self.assertEqual(t - t.utcoffset(), u) + + class TestTimeZone(unittest.TestCase): def setUp(self): @@ -212,7 +236,6 @@ class TestTimeZone(unittest.TestCase): tzrep = repr(tz) self.assertEqual(tz, eval(tzrep)) - def test_class_members(self): limit = timedelta(hours=23, minutes=59) self.assertEqual(timezone.utc.utcoffset(None), ZERO) @@ -299,6 +322,33 @@ class TestTimeZone(unittest.TestCase): self.assertEqual(tz.dst(t), t.replace(tzinfo=tz).dst()) + def test_pickle(self): + for tz in self.ACDT, self.EST, timezone.min, timezone.max: + for pickler, unpickler, proto in pickle_choices: + tz_copy = unpickler.loads(pickler.dumps(tz, proto)) + self.assertEqual(tz_copy, tz) + tz = timezone.utc + for pickler, unpickler, proto in pickle_choices: + tz_copy = unpickler.loads(pickler.dumps(tz, proto)) + self.assertIs(tz_copy, tz) + + def test_copy(self): + for tz in self.ACDT, self.EST, timezone.min, timezone.max: + tz_copy = copy.copy(tz) + self.assertEqual(tz_copy, tz) + tz = timezone.utc + tz_copy = copy.copy(tz) + self.assertIs(tz_copy, tz) + + def test_deepcopy(self): + for tz in self.ACDT, self.EST, timezone.min, timezone.max: + tz_copy = copy.deepcopy(tz) + self.assertEqual(tz_copy, tz) + tz = timezone.utc + tz_copy = copy.deepcopy(tz) + self.assertIs(tz_copy, tz) + + ############################################################################# # Base class for testing a particular aspect of timedelta, time, date and # datetime comparisons. @@ -663,11 +713,15 @@ class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase): eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0 eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0 eq(td(milliseconds=0.5/1000), td(microseconds=0)) - eq(td(milliseconds=-0.5/1000), td(microseconds=0)) + eq(td(milliseconds=-0.5/1000), td(microseconds=-0)) eq(td(milliseconds=0.6/1000), td(microseconds=1)) eq(td(milliseconds=-0.6/1000), td(microseconds=-1)) + eq(td(milliseconds=1.5/1000), td(microseconds=2)) + eq(td(milliseconds=-1.5/1000), td(microseconds=-2)) eq(td(seconds=0.5/10**6), td(microseconds=0)) - eq(td(seconds=-0.5/10**6), td(microseconds=0)) + eq(td(seconds=-0.5/10**6), td(microseconds=-0)) + eq(td(seconds=1/2**7), td(microseconds=7812)) + eq(td(seconds=-1/2**7), td(microseconds=-7812)) # Rounding due to contributions from more than one field. 
us_per_hour = 3600e6 @@ -1851,6 +1905,7 @@ class TestDateTime(TestDate): zero = fts(0) self.assertEqual(zero.second, 0) self.assertEqual(zero.microsecond, 0) + one = fts(1e-6) try: minus_one = fts(-1e-6) except OSError: @@ -1861,22 +1916,28 @@ class TestDateTime(TestDate): self.assertEqual(minus_one.microsecond, 999999) t = fts(-1e-8) - self.assertEqual(t, minus_one) + self.assertEqual(t, zero) t = fts(-9e-7) self.assertEqual(t, minus_one) t = fts(-1e-7) - self.assertEqual(t, minus_one) + self.assertEqual(t, zero) + t = fts(-1/2**7) + self.assertEqual(t.second, 59) + self.assertEqual(t.microsecond, 992188) t = fts(1e-7) self.assertEqual(t, zero) t = fts(9e-7) - self.assertEqual(t, zero) + self.assertEqual(t, one) t = fts(0.99999949) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 999999) t = fts(0.9999999) + self.assertEqual(t.second, 1) + self.assertEqual(t.microsecond, 0) + t = fts(1/2**7) self.assertEqual(t.second, 0) - self.assertEqual(t.microsecond, 999999) + self.assertEqual(t.microsecond, 7812) def test_insane_fromtimestamp(self): # It's possible that some platform maps time_t to double, diff --git a/Lib/test/decimaltestdata/fma.decTest b/Lib/test/decimaltestdata/fma.decTest index b0a81ca1..0b188fa4 100644 --- a/Lib/test/decimaltestdata/fma.decTest +++ b/Lib/test/decimaltestdata/fma.decTest @@ -148,7 +148,7 @@ fmax2018 fma 9.999999 -9.999999 0E+999999 -> -100.000 Inexact Rounded fmax2019 fma -9.999999 9.999999 0E+999999 -> -100.000 Inexact Rounded fmax2020 fma -9.999999 -9.999999 0E+999999 -> 100.000 Inexact Rounded --- 1999.12.21: next one is a edge case if intermediate longs are used +-- 1999.12.21: next one is an edge case if intermediate longs are used precision: 15 fmax2059 fma 999999999999 9765625 0E+999999 -> 9.76562499999023E+18 Inexact Rounded precision: 30 diff --git a/Lib/test/decimaltestdata/multiply.decTest b/Lib/test/decimaltestdata/multiply.decTest index 6a23d5a6..e8bd77a8 100644 --- a/Lib/test/decimaltestdata/multiply.decTest +++ b/Lib/test/decimaltestdata/multiply.decTest @@ -49,7 +49,7 @@ mulx018 multiply 9.999999999 -9.999999999 -> -100.000 Inexact Rounded mulx019 multiply -9.999999999 9.999999999 -> -100.000 Inexact Rounded mulx020 multiply -9.999999999 -9.999999999 -> 100.000 Inexact Rounded --- 1999.12.21: next one is a edge case if intermediate longs are used +-- 1999.12.21: next one is an edge case if intermediate longs are used precision: 15 mulx059 multiply 999999999999 9765625 -> 9.76562499999023E+18 Inexact Rounded precision: 30 diff --git a/Lib/test/eintrdata/eintr_tester.py b/Lib/test/eintrdata/eintr_tester.py index f7558805..e3c36f9c 100644 --- a/Lib/test/eintrdata/eintr_tester.py +++ b/Lib/test/eintrdata/eintr_tester.py @@ -8,16 +8,29 @@ Signals are generated in-process using setitimer(ITIMER_REAL), which allows sub-second periodicity (contrarily to signal()). 
""" +import contextlib import io import os import select import signal import socket +import subprocess +import sys import time import unittest from test import support +@contextlib.contextmanager +def kill_on_error(proc): + """Context manager killing the subprocess if a Python exception is raised.""" + with proc: + try: + yield proc + except: + proc.kill() + raise + @unittest.skipUnless(hasattr(signal, "setitimer"), "requires setitimer()") class EINTRBaseTest(unittest.TestCase): @@ -28,7 +41,7 @@ class EINTRBaseTest(unittest.TestCase): # signal delivery periodicity signal_period = 0.1 # default sleep time for tests - should obviously have: - # sleep_time > signal_period + # sleep_time > signal_period sleep_time = 0.2 @classmethod @@ -46,23 +59,22 @@ class EINTRBaseTest(unittest.TestCase): cls.stop_alarm() signal.signal(signal.SIGALRM, cls.orig_handler) - @classmethod - def _sleep(cls): - # default sleep time - time.sleep(cls.sleep_time) + def subprocess(self, *args, **kw): + cmd_args = (sys.executable, '-c') + args + return subprocess.Popen(cmd_args, **kw) @unittest.skipUnless(hasattr(signal, "setitimer"), "requires setitimer()") class OSEINTRTest(EINTRBaseTest): """ EINTR tests for the os module. """ + def new_sleep_process(self): + code = 'import time; time.sleep(%r)' % self.sleep_time + return self.subprocess(code) + def _test_wait_multiple(self, wait_func): num = 3 - for _ in range(num): - pid = os.fork() - if pid == 0: - self._sleep() - os._exit(0) + processes = [self.new_sleep_process() for _ in range(num)] for _ in range(num): wait_func() @@ -74,12 +86,8 @@ class OSEINTRTest(EINTRBaseTest): self._test_wait_multiple(lambda: os.wait3(0)) def _test_wait_single(self, wait_func): - pid = os.fork() - if pid == 0: - self._sleep() - os._exit(0) - else: - wait_func(pid) + proc = self.new_sleep_process() + wait_func(proc.pid) def test_waitpid(self): self._test_wait_single(lambda pid: os.waitpid(pid, 0)) @@ -97,19 +105,25 @@ class OSEINTRTest(EINTRBaseTest): # atomic datas = [b"hello", b"world", b"spam"] - pid = os.fork() - if pid == 0: - os.close(rd) - for data in datas: - # let the parent block on read() - self._sleep() - os.write(wr, data) - os._exit(0) - else: - self.addCleanup(os.waitpid, pid, 0) + code = '\n'.join(( + 'import os, sys, time', + '', + 'wr = int(sys.argv[1])', + 'datas = %r' % datas, + 'sleep_time = %r' % self.sleep_time, + '', + 'for data in datas:', + ' # let the parent block on read()', + ' time.sleep(sleep_time)', + ' os.write(wr, data)', + )) + + proc = self.subprocess(code, str(wr), pass_fds=[wr]) + with kill_on_error(proc): os.close(wr) for data in datas: self.assertEqual(data, os.read(rd, len(data))) + self.assertEqual(proc.wait(), 0) def test_write(self): rd, wr = os.pipe() @@ -117,25 +131,37 @@ class OSEINTRTest(EINTRBaseTest): # rd closed explicitly by parent # we must write enough data for the write() to block - data = b"xyz" * support.PIPE_MAX_SIZE - - pid = os.fork() - if pid == 0: - os.close(wr) - read_data = io.BytesIO() - # let the parent block on write() - self._sleep() - while len(read_data.getvalue()) < len(data): - chunk = os.read(rd, 2 * len(data)) - read_data.write(chunk) - self.assertEqual(read_data.getvalue(), data) - os._exit(0) - else: + data = b"x" * support.PIPE_MAX_SIZE + + code = '\n'.join(( + 'import io, os, sys, time', + '', + 'rd = int(sys.argv[1])', + 'sleep_time = %r' % self.sleep_time, + 'data = b"x" * %s' % support.PIPE_MAX_SIZE, + 'data_len = len(data)', + '', + '# let the parent block on write()', + 'time.sleep(sleep_time)', + '', + 
'read_data = io.BytesIO()', + 'while len(read_data.getvalue()) < data_len:', + ' chunk = os.read(rd, 2 * data_len)', + ' read_data.write(chunk)', + '', + 'value = read_data.getvalue()', + 'if value != data:', + ' raise Exception("read error: %s vs %s bytes"', + ' % (len(value), data_len))', + )) + + proc = self.subprocess(code, str(rd), pass_fds=[rd]) + with kill_on_error(proc): os.close(rd) written = 0 while written < len(data): written += os.write(wr, memoryview(data)[written:]) - self.assertEqual(0, os.waitpid(pid, 0)[1]) + self.assertEqual(proc.wait(), 0) @unittest.skipUnless(hasattr(signal, "setitimer"), "requires setitimer()") @@ -151,19 +177,32 @@ class SocketEINTRTest(EINTRBaseTest): # single-byte payload guard us against partial recv datas = [b"x", b"y", b"z"] - pid = os.fork() - if pid == 0: - rd.close() - for data in datas: - # let the parent block on recv() - self._sleep() - wr.sendall(data) - os._exit(0) - else: - self.addCleanup(os.waitpid, pid, 0) + code = '\n'.join(( + 'import os, socket, sys, time', + '', + 'fd = int(sys.argv[1])', + 'family = %s' % int(wr.family), + 'sock_type = %s' % int(wr.type), + 'datas = %r' % datas, + 'sleep_time = %r' % self.sleep_time, + '', + 'wr = socket.fromfd(fd, family, sock_type)', + 'os.close(fd)', + '', + 'with wr:', + ' for data in datas:', + ' # let the parent block on recv()', + ' time.sleep(sleep_time)', + ' wr.sendall(data)', + )) + + fd = wr.fileno() + proc = self.subprocess(code, str(fd), pass_fds=[fd]) + with kill_on_error(proc): wr.close() for data in datas: self.assertEqual(data, recv_func(rd, len(data))) + self.assertEqual(proc.wait(), 0) def test_recv(self): self._test_recv(socket.socket.recv) @@ -180,25 +219,43 @@ class SocketEINTRTest(EINTRBaseTest): # we must send enough data for the send() to block data = b"xyz" * (support.SOCK_MAX_SIZE // 3) - pid = os.fork() - if pid == 0: - wr.close() - # let the parent block on send() - self._sleep() - received_data = bytearray(len(data)) - n = 0 - while n < len(data): - n += rd.recv_into(memoryview(received_data)[n:]) - self.assertEqual(received_data, data) - os._exit(0) - else: + code = '\n'.join(( + 'import os, socket, sys, time', + '', + 'fd = int(sys.argv[1])', + 'family = %s' % int(rd.family), + 'sock_type = %s' % int(rd.type), + 'sleep_time = %r' % self.sleep_time, + 'data = b"xyz" * %s' % (support.SOCK_MAX_SIZE // 3), + 'data_len = len(data)', + '', + 'rd = socket.fromfd(fd, family, sock_type)', + 'os.close(fd)', + '', + 'with rd:', + ' # let the parent block on send()', + ' time.sleep(sleep_time)', + '', + ' received_data = bytearray(data_len)', + ' n = 0', + ' while n < data_len:', + ' n += rd.recv_into(memoryview(received_data)[n:])', + '', + 'if received_data != data:', + ' raise Exception("recv error: %s vs %s bytes"', + ' % (len(received_data), data_len))', + )) + + fd = rd.fileno() + proc = self.subprocess(code, str(fd), pass_fds=[fd]) + with kill_on_error(proc): rd.close() written = 0 while written < len(data): sent = send_func(wr, memoryview(data)[written:]) # sendall() returns None written += len(data) if sent is None else sent - self.assertEqual(0, os.waitpid(pid, 0)[1]) + self.assertEqual(proc.wait(), 0) def test_send(self): self._test_send(socket.socket.send) @@ -215,46 +272,75 @@ class SocketEINTRTest(EINTRBaseTest): self.addCleanup(sock.close) sock.bind((support.HOST, 0)) - _, port = sock.getsockname() + port = sock.getsockname()[1] sock.listen() - pid = os.fork() - if pid == 0: - # let parent block on accept() - self._sleep() - with 
socket.create_connection((support.HOST, port)): - self._sleep() - os._exit(0) - else: - self.addCleanup(os.waitpid, pid, 0) + code = '\n'.join(( + 'import socket, time', + '', + 'host = %r' % support.HOST, + 'port = %s' % port, + 'sleep_time = %r' % self.sleep_time, + '', + '# let parent block on accept()', + 'time.sleep(sleep_time)', + 'with socket.create_connection((host, port)):', + ' time.sleep(sleep_time)', + )) + + proc = self.subprocess(code) + with kill_on_error(proc): client_sock, _ = sock.accept() client_sock.close() + self.assertEqual(proc.wait(), 0) + # Issue #25122: There is a race condition in the FreeBSD kernel on + # handling signals in the FIFO device. Skip the test until the bug is + # fixed in the kernel. + # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=203162 + @support.requires_freebsd_version(10, 3) @unittest.skipUnless(hasattr(os, 'mkfifo'), 'needs mkfifo()') def _test_open(self, do_open_close_reader, do_open_close_writer): + filename = support.TESTFN + # Use a fifo: until the child opens it for reading, the parent will # block when trying to open it for writing. - support.unlink(support.TESTFN) - os.mkfifo(support.TESTFN) - self.addCleanup(support.unlink, support.TESTFN) - - pid = os.fork() - if pid == 0: - # let the parent block - self._sleep() - do_open_close_reader(support.TESTFN) - os._exit(0) - else: - self.addCleanup(os.waitpid, pid, 0) - do_open_close_writer(support.TESTFN) + support.unlink(filename) + os.mkfifo(filename) + self.addCleanup(support.unlink, filename) + + code = '\n'.join(( + 'import os, time', + '', + 'path = %a' % filename, + 'sleep_time = %r' % self.sleep_time, + '', + '# let the parent block', + 'time.sleep(sleep_time)', + '', + do_open_close_reader, + )) + + proc = self.subprocess(code) + with kill_on_error(proc): + do_open_close_writer(filename) + self.assertEqual(proc.wait(), 0) + + def python_open(self, path): + fp = open(path, 'w') + fp.close() def test_open(self): - self._test_open(lambda path: open(path, 'r').close(), - lambda path: open(path, 'w').close()) + self._test_open("fp = open(path, 'r')\nfp.close()", + self.python_open) + + def os_open(self, path): + fd = os.open(path, os.O_WRONLY) + os.close(fd) def test_os_open(self): - self._test_open(lambda path: os.close(os.open(path, os.O_RDONLY)), - lambda path: os.close(os.open(path, os.O_WRONLY))) + self._test_open("fd = os.open(path, os.O_RDONLY)\nos.close(fd)", + self.os_open) @unittest.skipUnless(hasattr(signal, "setitimer"), "requires setitimer()") @@ -284,26 +370,33 @@ class SignalEINTRTest(EINTRBaseTest): @unittest.skipUnless(hasattr(signal, 'sigwaitinfo'), 'need signal.sigwaitinfo()') def test_sigwaitinfo(self): + # Issue #25277: The sleep is a weak synchronization between the parent + # and the child process. If the sleep is too low, the test hangs on + # slow or highly loaded systems. 
+ self.sleep_time = 2.0 + signum = signal.SIGUSR1 pid = os.getpid() old_handler = signal.signal(signum, lambda *args: None) self.addCleanup(signal.signal, signum, old_handler) + code = '\n'.join(( + 'import os, time', + 'pid = %s' % os.getpid(), + 'signum = %s' % int(signum), + 'sleep_time = %r' % self.sleep_time, + 'time.sleep(sleep_time)', + 'os.kill(pid, signum)', + )) + t0 = time.monotonic() - child_pid = os.fork() - if child_pid == 0: - # child - try: - self._sleep() - os.kill(pid, signum) - finally: - os._exit(0) - else: + proc = self.subprocess(code) + with kill_on_error(proc): # parent signal.sigwaitinfo([signum]) dt = time.monotonic() - t0 - os.waitpid(child_pid, 0) + self.assertEqual(proc.wait(), 0) self.assertGreaterEqual(dt, self.sleep_time) diff --git a/Lib/test/inspect_fodder.py b/Lib/test/inspect_fodder.py index 068d8258..711badad 100644 --- a/Lib/test/inspect_fodder.py +++ b/Lib/test/inspect_fodder.py @@ -45,14 +45,17 @@ class StupidGit: self.ex = sys.exc_info() self.tr = inspect.trace() + @property def contradiction(self): 'The automatic gainsaying.' pass -# line 48 +# line 53 class MalodorousPervert(StupidGit): def abuse(self, a, b, c): pass + + @property def contradiction(self): pass @@ -64,6 +67,8 @@ class ParrotDroppings: class FesteringGob(MalodorousPervert, ParrotDroppings): def abuse(self, a, b, c): pass + + @property def contradiction(self): pass diff --git a/Lib/test/lock_tests.py b/Lib/test/lock_tests.py index b325bce8..afd68736 100644 --- a/Lib/test/lock_tests.py +++ b/Lib/test/lock_tests.py @@ -394,6 +394,14 @@ class EventTests(BaseTestCase): b.wait_for_finished() self.assertEqual(results, [True] * N) + def test_reset_internal_locks(self): + evt = self.eventtype() + old_lock = evt._cond._lock + evt._reset_internal_locks() + new_lock = evt._cond._lock + self.assertIsNot(new_lock, old_lock) + self.assertIs(type(new_lock), type(old_lock)) + class ConditionTests(BaseTestCase): """ diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 78b54839..6d971aa2 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -18,6 +18,9 @@ from test.support import ( from pickle import bytes_types +requires_32b = unittest.skipUnless(sys.maxsize < 2**32, + "test is only meaningful on 32-bit builds") + # Tests that try a number of pickle protocols should have a # for proto in protocols: # kind of outer loop. @@ -100,6 +103,15 @@ class E(C): class H(object): pass +# Hashable mutable key +class K(object): + def __init__(self, value): + self.value = value + + def __reduce__(self): + # Shouldn't support the recursion itself + return K, (self.value,) + import __main__ __main__.C = C C.__module__ = "__main__" @@ -109,6 +121,8 @@ __main__.E = E E.__module__ = "__main__" __main__.H = H H.__module__ = "__main__" +__main__.K = K +K.__module__ = "__main__" class myint(int): def __init__(self, x): @@ -142,7 +156,7 @@ def create_dynamic_class(name, bases): result.reduce_args = (name, bases) return result -# DATA0 .. DATA2 are the pickles we expect under the various protocols, for +# DATA0 .. DATA4 are the pickles we expect under the various protocols, for # the object returned by create_data(). 
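# Side note: disassembly listings such as DATA3_DIS and DATA4_DIS below can be
# regenerated with pickletools; a rough sketch, where `sample` is only a
# stand-in for the richer object built by create_data():
#
#     import pickle, pickletools
#     sample = [0, 1, 2.0, 'abc']
#     for proto in range(pickle.HIGHEST_PROTOCOL + 1):
#         pickletools.dis(pickle.dumps(sample, proto))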
DATA0 = ( @@ -398,22 +412,172 @@ DATA2_DIS = """\ highest protocol among opcodes = 2 """ +DATA3 = ( + b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c' + b'builtins\ncomplex\nq\x01G' + b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02' + b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff' + b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f' + b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq' + b'\x04h\x04c__main__\nC\nq\x05)\x81q' + b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00' + b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05' + b'e.' +) + +# Disassembly of DATA3 +DATA3_DIS = """\ + 0: \x80 PROTO 3 + 2: ] EMPTY_LIST + 3: q BINPUT 0 + 5: ( MARK + 6: K BININT1 0 + 8: K BININT1 1 + 10: G BINFLOAT 2.0 + 19: c GLOBAL 'builtins complex' + 37: q BINPUT 1 + 39: G BINFLOAT 3.0 + 48: G BINFLOAT 0.0 + 57: \x86 TUPLE2 + 58: q BINPUT 2 + 60: R REDUCE + 61: q BINPUT 3 + 63: K BININT1 1 + 65: J BININT -1 + 70: K BININT1 255 + 72: J BININT -255 + 77: J BININT -256 + 82: M BININT2 65535 + 85: J BININT -65535 + 90: J BININT -65536 + 95: J BININT 2147483647 + 100: J BININT -2147483647 + 105: J BININT -2147483648 + 110: ( MARK + 111: X BINUNICODE 'abc' + 119: q BINPUT 4 + 121: h BINGET 4 + 123: c GLOBAL '__main__ C' + 135: q BINPUT 5 + 137: ) EMPTY_TUPLE + 138: \x81 NEWOBJ + 139: q BINPUT 6 + 141: } EMPTY_DICT + 142: q BINPUT 7 + 144: ( MARK + 145: X BINUNICODE 'bar' + 153: q BINPUT 8 + 155: K BININT1 2 + 157: X BINUNICODE 'foo' + 165: q BINPUT 9 + 167: K BININT1 1 + 169: u SETITEMS (MARK at 144) + 170: b BUILD + 171: h BINGET 6 + 173: t TUPLE (MARK at 110) + 174: q BINPUT 10 + 176: h BINGET 10 + 178: K BININT1 5 + 180: e APPENDS (MARK at 5) + 181: . STOP +highest protocol among opcodes = 2 +""" + +DATA4 = ( + b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@' + b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07' + b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK' + b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ' + b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(' + b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c' + b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c' + b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.' 
+) + +# Disassembly of DATA4 +DATA4_DIS = """\ + 0: \x80 PROTO 4 + 2: \x95 FRAME 168 + 11: ] EMPTY_LIST + 12: \x94 MEMOIZE + 13: ( MARK + 14: K BININT1 0 + 16: K BININT1 1 + 18: G BINFLOAT 2.0 + 27: \x8c SHORT_BINUNICODE 'builtins' + 37: \x94 MEMOIZE + 38: \x8c SHORT_BINUNICODE 'complex' + 47: \x94 MEMOIZE + 48: \x93 STACK_GLOBAL + 49: \x94 MEMOIZE + 50: G BINFLOAT 3.0 + 59: G BINFLOAT 0.0 + 68: \x86 TUPLE2 + 69: \x94 MEMOIZE + 70: R REDUCE + 71: \x94 MEMOIZE + 72: K BININT1 1 + 74: J BININT -1 + 79: K BININT1 255 + 81: J BININT -255 + 86: J BININT -256 + 91: M BININT2 65535 + 94: J BININT -65535 + 99: J BININT -65536 + 104: J BININT 2147483647 + 109: J BININT -2147483647 + 114: J BININT -2147483648 + 119: ( MARK + 120: \x8c SHORT_BINUNICODE 'abc' + 125: \x94 MEMOIZE + 126: h BINGET 6 + 128: \x8c SHORT_BINUNICODE '__main__' + 138: \x94 MEMOIZE + 139: \x8c SHORT_BINUNICODE 'C' + 142: \x94 MEMOIZE + 143: \x93 STACK_GLOBAL + 144: \x94 MEMOIZE + 145: ) EMPTY_TUPLE + 146: \x81 NEWOBJ + 147: \x94 MEMOIZE + 148: } EMPTY_DICT + 149: \x94 MEMOIZE + 150: ( MARK + 151: \x8c SHORT_BINUNICODE 'bar' + 156: \x94 MEMOIZE + 157: K BININT1 2 + 159: \x8c SHORT_BINUNICODE 'foo' + 164: \x94 MEMOIZE + 165: K BININT1 1 + 167: u SETITEMS (MARK at 150) + 168: b BUILD + 169: h BINGET 10 + 171: t TUPLE (MARK at 119) + 172: \x94 MEMOIZE + 173: h BINGET 14 + 175: K BININT1 5 + 177: e APPENDS (MARK at 13) + 178: . STOP +highest protocol among opcodes = 4 +""" + # set([1,2]) pickled from 2.x with protocol 2 -DATA3 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.' +DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.' # xrange(5) pickled from 2.x with protocol 2 -DATA4 = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.' +DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.' # a SimpleCookie() object pickled from 2.x with protocol 2 -DATA5 = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key' - b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U' - b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07' - b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U' - b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b' - b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.') +DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key' + b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U' + b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07' + b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U' + b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b' + b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.') # set([3]) pickled from 2.x with protocol 2 -DATA6 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.' +DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.' python2_exceptions_without_args = ( ArithmeticError, @@ -465,20 +629,10 @@ python2_exceptions_without_args = ( exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.' 
-# Exception objects without arguments pickled from 2.x with protocol 2 -DATA7 = { - exception : - exception_pickle.replace(b'?', exception.__name__.encode("ascii")) - for exception in python2_exceptions_without_args -} - -# StandardError is mapped to Exception, test that separately -DATA8 = exception_pickle.replace(b'?', b'StandardError') - # UnicodeEncodeError object pickled from 2.x with protocol 2 -DATA9 = (b'\x80\x02cexceptions\nUnicodeEncodeError\n' - b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01' - b'U\x03badq\x03tq\x04Rq\x05.') +DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n' + b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01' + b'U\x03badq\x03tq\x04Rq\x05.') def create_data(): @@ -502,16 +656,11 @@ def create_data(): return x -class AbstractPickleTests(unittest.TestCase): - # Subclass must define self.dumps, self.loads. - - optimized = False +class AbstractUnpickleTests(unittest.TestCase): + # Subclass must define self.loads. _testdata = create_data() - def setUp(self): - pass - def assert_is_copy(self, obj, objcopy, msg=None): """Utility method to verify if two objects are copies of each others. """ @@ -530,33 +679,6 @@ class AbstractPickleTests(unittest.TestCase): self.assertEqual(getattr(obj, slot, None), getattr(objcopy, slot, None), msg=msg) - def test_misc(self): - # test various datatypes not tested by testdata - for proto in protocols: - x = myint(4) - s = self.dumps(x, proto) - y = self.loads(s) - self.assert_is_copy(x, y) - - x = (1, ()) - s = self.dumps(x, proto) - y = self.loads(s) - self.assert_is_copy(x, y) - - x = initarg(1, x) - s = self.dumps(x, proto) - y = self.loads(s) - self.assert_is_copy(x, y) - - # XXX test __reduce__ protocol? - - def test_roundtrip_equality(self): - expected = self._testdata - for proto in protocols: - s = self.dumps(expected, proto) - got = self.loads(s) - self.assert_is_copy(expected, got) - def test_load_from_data0(self): self.assert_is_copy(self._testdata, self.loads(DATA0)) @@ -566,6 +688,12 @@ class AbstractPickleTests(unittest.TestCase): def test_load_from_data2(self): self.assert_is_copy(self._testdata, self.loads(DATA2)) + def test_load_from_data3(self): + self.assert_is_copy(self._testdata, self.loads(DATA3)) + + def test_load_from_data4(self): + self.assert_is_copy(self._testdata, self.loads(DATA4)) + def test_load_classic_instance(self): # See issue5180. Test loading 2.x pickles that # contain an instance of old style class. @@ -623,6 +751,282 @@ class AbstractPickleTests(unittest.TestCase): b'q\x00oq\x01}q\x02b.').replace(b'X', xname) self.assert_is_copy(X(*args), self.loads(pickle2)) + def test_maxint64(self): + maxint64 = (1 << 63) - 1 + data = b'I' + str(maxint64).encode("ascii") + b'\n.' + got = self.loads(data) + self.assert_is_copy(maxint64, got) + + # Try too with a bogus literal. + data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.' + self.assertRaises(ValueError, self.loads, data) + + def test_pop_empty_stack(self): + # Test issue7455 + s = b'0' + self.assertRaises((pickle.UnpicklingError, IndexError), self.loads, s) + + def test_unpickle_from_2x(self): + # Unpickle non-trivial data from Python 2.x. 
+ loaded = self.loads(DATA_SET) + self.assertEqual(loaded, set([1, 2])) + loaded = self.loads(DATA_XRANGE) + self.assertEqual(type(loaded), type(range(0))) + self.assertEqual(list(loaded), list(range(5))) + loaded = self.loads(DATA_COOKIE) + self.assertEqual(type(loaded), SimpleCookie) + self.assertEqual(list(loaded.keys()), ["key"]) + self.assertEqual(loaded["key"].value, "value") + + # Exception objects without arguments pickled from 2.x with protocol 2 + for exc in python2_exceptions_without_args: + data = exception_pickle.replace(b'?', exc.__name__.encode("ascii")) + loaded = self.loads(data) + self.assertIs(type(loaded), exc) + + # StandardError is mapped to Exception, test that separately + loaded = self.loads(exception_pickle.replace(b'?', b'StandardError')) + self.assertIs(type(loaded), Exception) + + loaded = self.loads(DATA_UEERR) + self.assertIs(type(loaded), UnicodeEncodeError) + self.assertEqual(loaded.object, "foo") + self.assertEqual(loaded.encoding, "ascii") + self.assertEqual(loaded.start, 0) + self.assertEqual(loaded.end, 1) + self.assertEqual(loaded.reason, "bad") + + def test_load_python2_str_as_bytes(self): + # From Python 2: pickle.dumps('a\x00\xa0', protocol=0) + self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.", + encoding="bytes"), b'a\x00\xa0') + # From Python 2: pickle.dumps('a\x00\xa0', protocol=1) + self.assertEqual(self.loads(b'U\x03a\x00\xa0.', + encoding="bytes"), b'a\x00\xa0') + # From Python 2: pickle.dumps('a\x00\xa0', protocol=2) + self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.', + encoding="bytes"), b'a\x00\xa0') + + def test_load_python2_unicode_as_str(self): + # From Python 2: pickle.dumps(u'π', protocol=0) + self.assertEqual(self.loads(b'V\\u03c0\n.', + encoding='bytes'), 'π') + # From Python 2: pickle.dumps(u'π', protocol=1) + self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.', + encoding="bytes"), 'π') + # From Python 2: pickle.dumps(u'π', protocol=2) + self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.', + encoding="bytes"), 'π') + + def test_load_long_python2_str_as_bytes(self): + # From Python 2: pickle.dumps('x' * 300, protocol=1) + self.assertEqual(self.loads(pickle.BINSTRING + + struct.pack("', '<\\\u1234>', '<\n>', @@ -754,7 +1230,6 @@ class AbstractPickleTests(unittest.TestCase): self.assert_is_copy(s, self.loads(p)) def test_ints(self): - import sys for proto in protocols: n = sys.maxsize while n: @@ -764,16 +1239,6 @@ class AbstractPickleTests(unittest.TestCase): self.assert_is_copy(expected, n2) n = n >> 1 - def test_maxint64(self): - maxint64 = (1 << 63) - 1 - data = b'I' + str(maxint64).encode("ascii") + b'\n.' - got = self.loads(data) - self.assert_is_copy(maxint64, got) - - # Try too with a bogus literal. - data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.' - self.assertRaises(ValueError, self.loads, data) - def test_long(self): for proto in protocols: # 256 bytes is where LONG4 begins. @@ -826,11 +1291,6 @@ class AbstractPickleTests(unittest.TestCase): loaded = self.loads(dumped) self.assert_is_copy(inst, loaded) - def test_pop_empty_stack(self): - # Test issue7455 - s = b'0' - self.assertRaises((pickle.UnpicklingError, IndexError), self.loads, s) - def test_metaclass(self): a = use_metaclass() for proto in protocols: @@ -1335,71 +1795,15 @@ class AbstractPickleTests(unittest.TestCase): for x_key, y_key in zip(x_keys, y_keys): self.assertIs(x_key, y_key) - def test_unpickle_from_2x(self): - # Unpickle non-trivial data from Python 2.x. 
- loaded = self.loads(DATA3) - self.assertEqual(loaded, set([1, 2])) - loaded = self.loads(DATA4) - self.assertEqual(type(loaded), type(range(0))) - self.assertEqual(list(loaded), list(range(5))) - loaded = self.loads(DATA5) - self.assertEqual(type(loaded), SimpleCookie) - self.assertEqual(list(loaded.keys()), ["key"]) - self.assertEqual(loaded["key"].value, "value") - - for (exc, data) in DATA7.items(): - loaded = self.loads(data) - self.assertIs(type(loaded), exc) - - loaded = self.loads(DATA8) - self.assertIs(type(loaded), Exception) - - loaded = self.loads(DATA9) - self.assertIs(type(loaded), UnicodeEncodeError) - self.assertEqual(loaded.object, "foo") - self.assertEqual(loaded.encoding, "ascii") - self.assertEqual(loaded.start, 0) - self.assertEqual(loaded.end, 1) - self.assertEqual(loaded.reason, "bad") - def test_pickle_to_2x(self): # Pickle non-trivial data with protocol 2, expecting that it yields # the same result as Python 2.x did. # NOTE: this test is a bit too strong since we can produce different # bytecode that 2.x will still understand. dumped = self.dumps(range(5), 2) - self.assertEqual(dumped, DATA4) + self.assertEqual(dumped, DATA_XRANGE) dumped = self.dumps(set([3]), 2) - self.assertEqual(dumped, DATA6) - - def test_load_python2_str_as_bytes(self): - # From Python 2: pickle.dumps('a\x00\xa0', protocol=0) - self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.", - encoding="bytes"), b'a\x00\xa0') - # From Python 2: pickle.dumps('a\x00\xa0', protocol=1) - self.assertEqual(self.loads(b'U\x03a\x00\xa0.', - encoding="bytes"), b'a\x00\xa0') - # From Python 2: pickle.dumps('a\x00\xa0', protocol=2) - self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.', - encoding="bytes"), b'a\x00\xa0') - - def test_load_python2_unicode_as_str(self): - # From Python 2: pickle.dumps(u'π', protocol=0) - self.assertEqual(self.loads(b'V\\u03c0\n.', - encoding='bytes'), 'π') - # From Python 2: pickle.dumps(u'π', protocol=1) - self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.', - encoding="bytes"), 'π') - # From Python 2: pickle.dumps(u'π', protocol=2) - self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.', - encoding="bytes"), 'π') - - def test_load_long_python2_str_as_bytes(self): - # From Python 2: pickle.dumps('x' * 300, protocol=1) - self.assertEqual(self.loads(pickle.BINSTRING + - struct.pack(" 2**32: - self.skipTest("test is only meaningful on 32-bit builds") - # XXX Pure Python pickle reads lengths as signed and passes - # them directly to read() (hence the EOFError) - with self.assertRaises((pickle.UnpicklingError, EOFError, - ValueError, OverflowError)): - self.loads(dumped) - - def test_negative_32b_binbytes(self): - # On 32-bit builds, a BINBYTES of 2**31 or more is refused - self.check_negative_32b_binXXX(b'\x80\x03B\xff\xff\xff\xffxyzq\x00.') - - def test_negative_32b_binunicode(self): - # On 32-bit builds, a BINUNICODE of 2**31 or more is refused - self.check_negative_32b_binXXX(b'\x80\x03X\xff\xff\xff\xffxyzq\x00.') - - def test_negative_put(self): - # Issue #12847 - dumped = b'Va\np-1\n.' - self.assertRaises(ValueError, self.loads, dumped) - - def test_negative_32b_binput(self): - # Issue #12847 - if sys.maxsize > 2**32: - self.skipTest("test is only meaningful on 32-bit builds") - dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.' 
- self.assertRaises(ValueError, self.loads, dumped) - - def test_badly_escaped_string(self): - self.assertRaises(ValueError, self.loads, b"S'\\'\n.") - - def test_badly_quoted_string(self): - # Issue #17710 - badpickles = [b"S'\n.", - b'S"\n.', - b'S\' \n.', - b'S" \n.', - b'S\'"\n.', - b'S"\'\n.', - b"S' ' \n.", - b'S" " \n.', - b"S ''\n.", - b'S ""\n.', - b'S \n.', - b'S\n.', - b'S.'] - for p in badpickles: - self.assertRaises(pickle.UnpicklingError, self.loads, p) - - def test_correctly_quoted_string(self): - goodpickles = [(b"S''\n.", ''), - (b'S""\n.', ''), - (b'S"\\n"\n.', '\n'), - (b"S'\\n'\n.", '\n')] - for p, expected in goodpickles: - self.assertEqual(self.loads(p), expected) - def _check_pickling_with_opcode(self, obj, opcode, proto): pickled = self.dumps(obj, proto) self.assertTrue(opcode_in_pickle(opcode, pickled)) @@ -1599,14 +1940,6 @@ class AbstractPickleTests(unittest.TestCase): count_opcode(pickle.FRAME, pickled)) self.assertEqual(obj, self.loads(some_frames_pickle)) - def test_frame_readline(self): - pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.' - # 0: \x80 PROTO 4 - # 2: \x95 FRAME 5 - # 11: I INT 42 - # 15: . STOP - self.assertEqual(self.loads(pickled), 42) - def test_nested_names(self): global Nested class Nested: @@ -1734,33 +2067,6 @@ class AbstractPickleTests(unittest.TestCase): self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled) self.assertIs(type(self.loads(pickled)), type(val)) - def test_compat_unpickle(self): - # xrange(1, 7) - pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.' - unpickled = self.loads(pickled) - self.assertIs(type(unpickled), range) - self.assertEqual(unpickled, range(1, 7)) - self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6]) - # reduce - pickled = b'\x80\x02c__builtin__\nreduce\n.' - self.assertIs(self.loads(pickled), functools.reduce) - # whichdb.whichdb - pickled = b'\x80\x02cwhichdb\nwhichdb\n.' - self.assertIs(self.loads(pickled), dbm.whichdb) - # Exception(), StandardError() - for name in (b'Exception', b'StandardError'): - pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.') - unpickled = self.loads(pickled) - self.assertIs(type(unpickled), Exception) - self.assertEqual(str(unpickled), 'ugh') - # UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2}) - for name in (b'UserDict', b'IterableUserDict'): - pickled = (b'\x80\x02(cUserDict\n' + name + - b'\no}U\x04data}K\x01K\x02ssb.') - unpickled = self.loads(pickled) - self.assertIs(type(unpickled), collections.UserDict) - self.assertEqual(unpickled, collections.UserDict({1: 2})) - def test_local_lookup_error(self): # Test that whichmodule() errors out cleanly when looking up # an assumed globally-reachable object fails. @@ -2392,7 +2698,7 @@ if __name__ == "__main__": # Print some stuff that can be used to rewrite DATA{0,1,2} from pickletools import dis x = create_data() - for i in range(3): + for i in range(pickle.HIGHEST_PROTOCOL+1): p = pickle.dumps(x, i) print("DATA{0} = (".format(i)) for j in range(0, len(p), 20): diff --git a/Lib/test/pystone.py b/Lib/test/pystone.py index 1f67e66e..cf1692ec 100755 --- a/Lib/test/pystone.py +++ b/Lib/test/pystone.py @@ -35,7 +35,7 @@ Version History: Under Python 3 version 1.1 would use the normal division operator, resulting in some of the operations mistakenly yielding floats. Version 1.2 instead uses floor division - making the benchmark a integer benchmark again. + making the benchmark an integer benchmark again. 
""" diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py index b49e66be..431042ee 100755 --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -27,7 +27,7 @@ python -E -Wd -m test [options] [test_name1 ...] EPILOG = """\ Additional option details: --r randomizes test execution order. You can use --randseed=int to provide a +-r randomizes test execution order. You can use --randseed=int to provide an int seed value for the randomizer; this is useful for reproducing troublesome test orders. @@ -322,6 +322,8 @@ def _create_parser(): group.add_argument('-F', '--forever', action='store_true', help='run the specified tests in a loop, until an ' 'error happens') + group.add_argument('-P', '--pgo', dest='pgo', action='store_true', + help='enable Profile Guided Optimization training') parser.add_argument('args', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) @@ -361,7 +363,7 @@ def _parse_args(args, **kwargs): findleaks=False, use_resources=None, trace=False, coverdir='coverage', runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, random_seed=None, use_mp=None, verbose3=False, forever=False, - header=False, failfast=False, match_tests=None) + header=False, failfast=False, match_tests=None, pgo=False) for k, v in kwargs.items(): if not hasattr(ns, k): raise TypeError('%r is an invalid keyword argument ' @@ -435,14 +437,16 @@ def run_test_in_subprocess(testname, ns): from subprocess import Popen, PIPE base_cmd = ([sys.executable] + support.args_from_interpreter_flags() + ['-X', 'faulthandler', '-m', 'test.regrtest']) - + # required to spawn a new process with PGO flag on/off + if ns.pgo: + base_cmd = base_cmd + ['--pgo'] slaveargs = ( (testname, ns.verbose, ns.quiet), dict(huntrleaks=ns.huntrleaks, use_resources=ns.use_resources, output_on_failure=ns.verbose3, timeout=ns.timeout, failfast=ns.failfast, - match_tests=ns.match_tests)) + match_tests=ns.match_tests, pgo=ns.pgo)) # Running the child from the same working directory as regrtest's original # invocation ensures that TEMPDIR for the child is the same when # sysconfig.is_python_build() is true. See issue 15300. @@ -507,7 +511,13 @@ def main(tests=None, **kwargs): import gc gc.set_threshold(ns.threshold) if ns.nowindows: + print('The --nowindows (-n) option is deprecated. ' + 'Use -vv to display assertions in stderr.') + try: import msvcrt + except ImportError: + pass + else: msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS| msvcrt.SEM_NOALIGNMENTFAULTEXCEPT| msvcrt.SEM_NOGPFAULTERRORBOX| @@ -519,8 +529,11 @@ def main(tests=None, **kwargs): pass else: for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]: - msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) - msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) + if ns.verbose and ns.verbose >= 2: + msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) + msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) + else: + msvcrt.CrtSetReportMode(m, 0) if ns.wait: input("Press any key to continue...") @@ -596,13 +609,14 @@ def main(tests=None, **kwargs): ns.args = [] # For a partial run, we do not need to clutter the output. 
- if ns.verbose or ns.header or not (ns.quiet or ns.single or tests or ns.args): + if (ns.verbose or ns.header or + not (ns.pgo or ns.quiet or ns.single or tests or ns.args)): # Print basic platform information print("==", platform.python_implementation(), *sys.version.split()) print("== ", platform.platform(aliased=True), - "%s-endian" % sys.byteorder) + "%s-endian" % sys.byteorder) print("== ", "hash algorithm:", sys.hash_info.algorithm, - "64bit" if sys.maxsize > 2**32 else "32bit") + "64bit" if sys.maxsize > 2**32 else "32bit") print("== ", os.getcwd()) print("Testing with flags:", sys.flags) @@ -645,7 +659,8 @@ def main(tests=None, **kwargs): def accumulate_result(test, result): ok, test_time = result - test_times.append((test_time, test)) + if ok not in (CHILD_ERROR, INTERRUPTED): + test_times.append((test_time, test)) if ok == PASSED: good.append(test) elif ok == FAILED: @@ -722,13 +737,16 @@ def main(tests=None, **kwargs): continue accumulate_result(test, result) if not ns.quiet: - fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" + if bad and not ns.pgo: + fmt = "[{1:{0}}{2}/{3}] {4}" + else: + fmt = "[{1:{0}}{2}] {4}" print(fmt.format( test_count_width, test_index, test_count, len(bad), test)) if stdout: print(stdout) - if stderr: + if stderr and not ns.pgo: print(stderr, file=sys.stderr) sys.stdout.flush() sys.stderr.flush() @@ -745,7 +763,10 @@ def main(tests=None, **kwargs): else: for test_index, test in enumerate(tests, 1): if not ns.quiet: - fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" + if bad and not ns.pgo: + fmt = "[{1:{0}}{2}/{3}] {4}" + else: + fmt = "[{1:{0}}{2}] {4}" print(fmt.format( test_count_width, test_index, test_count, len(bad), test)) sys.stdout.flush() @@ -760,7 +781,7 @@ def main(tests=None, **kwargs): ns.huntrleaks, output_on_failure=ns.verbose3, timeout=ns.timeout, failfast=ns.failfast, - match_tests=ns.match_tests) + match_tests=ns.match_tests, pgo=ns.pgo) accumulate_result(test, result) except KeyboardInterrupt: interrupted = True @@ -779,14 +800,14 @@ def main(tests=None, **kwargs): if module not in save_modules and module.startswith("test."): support.unload(module) - if interrupted: + if interrupted and not ns.pgo: # print a newline after ^C print() print("Test suite interrupted by signal SIGINT.") omitted = set(selected) - set(good) - set(bad) - set(skipped) print(count(len(omitted), "test"), "omitted:") printlist(omitted) - if good and not ns.quiet: + if good and not ns.quiet and not ns.pgo: if not bad and not skipped and not interrupted and len(good) > 1: print("All", end=' ') print(count(len(good), "test"), "OK.") @@ -795,26 +816,27 @@ def main(tests=None, **kwargs): print("10 slowest tests:") for time, test in test_times[:10]: print("%s: %.1fs" % (test, time)) - if bad: + if bad and not ns.pgo: print(count(len(bad), "test"), "failed:") printlist(bad) - if environment_changed: + if environment_changed and not ns.pgo: print("{} altered the execution environment:".format( count(len(environment_changed), "test"))) printlist(environment_changed) - if skipped and not ns.quiet: + if skipped and not ns.quiet and not ns.pgo: print(count(len(skipped), "test"), "skipped:") printlist(skipped) if ns.verbose2 and bad: print("Re-running failed tests in verbose mode") for test in bad[:]: - print("Re-running test %r in verbose mode" % test) + if not ns.pgo: + print("Re-running test %r in verbose mode" % test) sys.stdout.flush() try: ns.verbose = True ok = runtest(test, True, ns.quiet, ns.huntrleaks, - timeout=ns.timeout) + 
timeout=ns.timeout, pgo=ns.pgo) except KeyboardInterrupt: # print a newline separate from the ^C print() @@ -913,7 +935,7 @@ def replace_stdout(): def runtest(test, verbose, quiet, huntrleaks=False, use_resources=None, output_on_failure=False, failfast=False, match_tests=None, - timeout=None): + timeout=None, *, pgo=False): """Run a single test. test -- the name of the test @@ -926,6 +948,8 @@ def runtest(test, verbose, quiet, timeout -- dump the traceback and exit if a test takes more than timeout seconds failfast, match_tests -- See regrtest command-line flags for these. + pgo -- if true, do not print unnecessary info when running the test + for Profile Guided Optimization build Returns the tuple result, test_time, where result is one of the constants: INTERRUPTED KeyboardInterrupt when run under -j @@ -935,7 +959,6 @@ def runtest(test, verbose, quiet, FAILED test failed PASSED test passed """ - if use_resources is not None: support.use_resources = use_resources use_timeout = (timeout is not None) @@ -965,8 +988,8 @@ def runtest(test, verbose, quiet, sys.stdout = stream sys.stderr = stream result = runtest_inner(test, verbose, quiet, huntrleaks, - display_failure=False) - if result[0] == FAILED: + display_failure=False, pgo=pgo) + if result[0] == FAILED and not pgo: output = stream.getvalue() orig_stderr.write(output) orig_stderr.flush() @@ -976,7 +999,7 @@ def runtest(test, verbose, quiet, else: support.verbose = verbose # Tell tests to be moderately quiet result = runtest_inner(test, verbose, quiet, huntrleaks, - display_failure=not verbose) + display_failure=not verbose, pgo=pgo) return result finally: if use_timeout: @@ -1008,10 +1031,11 @@ class saved_test_environment: changed = False - def __init__(self, testname, verbose=0, quiet=False): + def __init__(self, testname, verbose=0, quiet=False, *, pgo=False): self.testname = testname self.verbose = verbose self.quiet = quiet + self.pgo = pgo # To add things to save and restore, add a name XXX to the resources list # and add corresponding get_XXX/restore_XXX functions. get_XXX should @@ -1240,11 +1264,11 @@ class saved_test_environment: if current != original: self.changed = True restore(original) - if not self.quiet: + if not self.quiet and not self.pgo: print("Warning -- {} was modified by {}".format( name, self.testname), file=sys.stderr) - if self.verbose > 1: + if self.verbose > 1 and not self.pgo: print(" Before: {}\n After: {} ".format( original, current), file=sys.stderr) @@ -1252,7 +1276,7 @@ class saved_test_environment: def runtest_inner(test, verbose, quiet, - huntrleaks=False, display_failure=True): + huntrleaks=False, display_failure=True, pgo=False): support.unload(test) test_time = 0.0 @@ -1263,7 +1287,7 @@ def runtest_inner(test, verbose, quiet, else: # Always import it from the test package abstest = 'test.' 
+ test - with saved_test_environment(test, verbose, quiet) as environment: + with saved_test_environment(test, verbose, quiet, pgo=pgo) as environment: start_time = time.time() the_module = importlib.import_module(abstest) # If the test has a test_main, that will run the appropriate @@ -1283,27 +1307,29 @@ def runtest_inner(test, verbose, quiet, refleak = dash_R(the_module, test, test_runner, huntrleaks) test_time = time.time() - start_time except support.ResourceDenied as msg: - if not quiet: + if not quiet and not pgo: print(test, "skipped --", msg) sys.stdout.flush() return RESOURCE_DENIED, test_time except unittest.SkipTest as msg: - if not quiet: + if not quiet and not pgo: print(test, "skipped --", msg) sys.stdout.flush() return SKIPPED, test_time except KeyboardInterrupt: raise except support.TestFailed as msg: - if display_failure: - print("test", test, "failed --", msg, file=sys.stderr) - else: - print("test", test, "failed", file=sys.stderr) + if not pgo: + if display_failure: + print("test", test, "failed --", msg, file=sys.stderr) + else: + print("test", test, "failed", file=sys.stderr) sys.stderr.flush() return FAILED, test_time except: msg = traceback.format_exc() - print("test", test, "crashed --", msg, file=sys.stderr) + if not pgo: + print("test", test, "crashed --", msg, file=sys.stderr) sys.stderr.flush() return FAILED, test_time else: diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py index 8b180b56..dfb4c254 100644 --- a/Lib/test/support/__init__.py +++ b/Lib/test/support/__init__.py @@ -309,7 +309,7 @@ if sys.platform.startswith("win"): # The exponential backoff of the timeout amounts to a total # of ~1 second after which the deletion is probably an error # anyway. - # Testing on a i7@4.3GHz shows that usually only 1 iteration is + # Testing on an i7@4.3GHz shows that usually only 1 iteration is # required when contention occurs. timeout = 0.001 while timeout < 1.0: @@ -1608,12 +1608,15 @@ class _MemoryWatchdog: def bigmemtest(size, memuse, dry_run=True): """Decorator for bigmem tests. - 'minsize' is the minimum useful size for the test (in arbitrary, - test-interpreted units.) 'memuse' is the number of 'bytes per size' for - the test, or a good estimate of it. + 'size' is a requested size for the test (in arbitrary, test-interpreted + units.) 'memuse' is the number of bytes per unit for the test, or a good + estimate of it. For example, a test that needs two byte buffers, of 4 GiB + each, could be decorated with @bigmemtest(size=_4G, memuse=2). - if 'dry_run' is False, it means the test doesn't support dummy runs - when -M is not specified. + The 'size' argument is normally passed to the decorated test method as an + extra argument. If 'dry_run' is true, the value passed to the test method + may be less than the requested value. If 'dry_run' is false, it means the + test doesn't support dummy runs when -M is not specified. """ def decorator(f): def wrapper(self): diff --git a/Lib/test/test_argparse.py b/Lib/test/test_argparse.py index 27bfad5f..f48e85c8 100644 --- a/Lib/test/test_argparse.py +++ b/Lib/test/test_argparse.py @@ -3026,7 +3026,7 @@ class TestShortColumns(HelpTestCase): '''Test extremely small number of columns. TestCase prevents "COLUMNS" from being too small in the tests themselves, - but we don't want any exceptions thrown in such case. Only ugly representation. + but we don't want any exceptions thrown in such cases. Only ugly representation. 
''' def setUp(self): env = support.EnvironmentVarGuard() diff --git a/Lib/test/test_asdl_parser.py b/Lib/test/test_asdl_parser.py index 7a6426a4..15bc6849 100644 --- a/Lib/test/test_asdl_parser.py +++ b/Lib/test/test_asdl_parser.py @@ -21,7 +21,7 @@ class TestAsdlParser(unittest.TestCase): def setUpClass(cls): # Loads the asdl module dynamically, since it's not in a real importable # package. - # Parses Python.asdl into a ast.Module and run the check on it. + # Parses Python.asdl into an ast.Module and run the check on it. # There's no need to do this for each test method, hence setUpClass. sys.path.insert(0, parser_dir) loader = importlib.machinery.SourceFileLoader( diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py index f1c655bd..d3e6d359 100644 --- a/Lib/test/test_ast.py +++ b/Lib/test/test_ast.py @@ -983,15 +983,15 @@ exec_results = [ ('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]), ('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 12), [('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 20), 'c', ('Load',)), [])]))]), ('Module', [('Expr', (1, 0), ('GeneratorExp', (2, 4), ('Tuple', (3, 4), [('Name', (3, 4), 'Aa', ('Load',)), ('Name', (5, 7), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4), [('Name', (8, 4), 'Aa', ('Store',)), ('Name', (10, 4), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10), 'Cc', ('Load',)), [])]))]), -('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]), -('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]), -('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]), -('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('DictComp', (1, 0), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]), +('Module', [('Expr', (1, 0), ('DictComp', (1, 0), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', 
('Load',)), [])]))]), +('Module', [('Expr', (1, 0), ('SetComp', (1, 0), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]), +('Module', [('Expr', (1, 0), ('SetComp', (1, 0), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]), ('Module', [('AsyncFunctionDef', (1, 6), 'f', ('arguments', [], None, [], [], None, []), [('Expr', (2, 1), ('Await', (2, 1), ('Call', (2, 7), ('Name', (2, 7), 'something', ('Load',)), [], [])))], [], None)]), ('Module', [('AsyncFunctionDef', (1, 6), 'f', ('arguments', [], None, [], [], None, []), [('AsyncFor', (2, 7), ('Name', (2, 11), 'e', ('Store',)), ('Name', (2, 16), 'i', ('Load',)), [('Expr', (2, 19), ('Num', (2, 19), 1))], [('Expr', (3, 7), ('Num', (3, 7), 2))])], [], None)]), ('Module', [('AsyncFunctionDef', (1, 6), 'f', ('arguments', [], None, [], [], None, []), [('AsyncWith', (2, 7), [('withitem', ('Name', (2, 12), 'a', ('Load',)), ('Name', (2, 17), 'b', ('Store',)))], [('Expr', (2, 20), ('Num', (2, 20), 1))])], [], None)]), -('Module', [('Expr', (1, 0), ('Dict', (1, 1), [None, ('Num', (1, 10), 2)], [('Dict', (1, 4), [('Num', (1, 4), 1)], [('Num', (1, 6), 2)]), ('Num', (1, 12), 3)]))]), -('Module', [('Expr', (1, 0), ('Set', (1, 1), [('Starred', (1, 1), ('Set', (1, 3), [('Num', (1, 3), 1), ('Num', (1, 6), 2)]), ('Load',)), ('Num', (1, 10), 3)]))]), +('Module', [('Expr', (1, 0), ('Dict', (1, 0), [None, ('Num', (1, 10), 2)], [('Dict', (1, 3), [('Num', (1, 4), 1)], [('Num', (1, 6), 2)]), ('Num', (1, 12), 3)]))]), +('Module', [('Expr', (1, 0), ('Set', (1, 0), [('Starred', (1, 1), ('Set', (1, 2), [('Num', (1, 3), 1), ('Num', (1, 6), 2)]), ('Load',)), ('Num', (1, 10), 3)]))]), ] single_results = [ ('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]), @@ -1002,10 +1002,10 @@ eval_results = [ ('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))), ('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))), ('Expression', ('Lambda', (1, 0), ('arguments', [], None, [], [], None, []), ('NameConstant', (1, 7), None))), -('Expression', ('Dict', (1, 2), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])), +('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])), ('Expression', ('Dict', (1, 0), [], [])), -('Expression', ('Set', (1, 1), [('NameConstant', (1, 1), None)])), -('Expression', ('Dict', (2, 6), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])), +('Expression', ('Set', (1, 0), [('NameConstant', (1, 1), None)])), +('Expression', ('Dict', (1, 0), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])), ('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])), ('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])), ('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])), diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py index b1f1e56c..072fe2f9 100644 --- 
a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -3,6 +3,7 @@ import errno import logging import math +import os import socket import sys import threading @@ -756,6 +757,59 @@ class BaseEventLoopTests(test_utils.TestCase): pass self.assertTrue(func.called) + def test_single_selecter_event_callback_after_stopping(self): + # Python issue #25593: A stopped event loop may cause event callbacks + # to run more than once. + event_sentinel = object() + callcount = 0 + doer = None + + def proc_events(event_list): + nonlocal doer + if event_sentinel in event_list: + doer = self.loop.call_soon(do_event) + + def do_event(): + nonlocal callcount + callcount += 1 + self.loop.call_soon(clear_selector) + + def clear_selector(): + doer.cancel() + self.loop._selector.select.return_value = () + + self.loop._process_events = proc_events + self.loop._selector.select.return_value = (event_sentinel,) + + for i in range(1, 3): + with self.subTest('Loop %d/2' % i): + self.loop.call_soon(self.loop.stop) + self.loop.run_forever() + self.assertEqual(callcount, 1) + + def test_run_once(self): + # Simple test for test_utils.run_once(). It may seem strange + # to have a test for this (the function isn't even used!) but + # it's a de-factor standard API for library tests. This tests + # the idiom: loop.call_soon(loop.stop); loop.run_forever(). + count = 0 + + def callback(): + nonlocal count + count += 1 + + self.loop._process_events = mock.Mock() + self.loop.call_soon(callback) + test_utils.run_once(self.loop) + self.assertEqual(count, 1) + + def test_run_forever_pre_stopped(self): + # Test that the old idiom for pre-stopping the loop works. + self.loop._process_events = mock.Mock() + self.loop.stop() + self.loop.run_forever() + self.loop._selector.select.assert_called_once_with(0) + class MyProto(asyncio.Protocol): done = None @@ -790,11 +844,11 @@ class MyProto(asyncio.Protocol): class MyDatagramProto(asyncio.DatagramProtocol): done = None - def __init__(self, create_future=False): + def __init__(self, create_future=False, loop=None): self.state = 'INITIAL' self.nbytes = 0 if create_future: - self.done = asyncio.Future() + self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport @@ -1099,6 +1153,19 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase): f = self.loop.create_server(MyProto, '0.0.0.0', 0) self.assertRaises(OSError, self.loop.run_until_complete, f) + @mock.patch('asyncio.base_events.socket') + def test_create_server_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_STREAM = socket.SOCK_STREAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + f = self.loop.create_server( + MyProto, '0.0.0.0', 0, reuse_port=True) + + self.assertRaises(ValueError, self.loop.run_until_complete, f) + @mock.patch('asyncio.base_events.socket') def test_create_server_cant_bind(self, m_socket): @@ -1199,6 +1266,125 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase): self.assertRaises(Err, self.loop.run_until_complete, fut) self.assertTrue(m_sock.close.called) + def test_create_datagram_endpoint_sock(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(('127.0.0.1', 0)) + fut = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + sock=sock) + transport, protocol = self.loop.run_until_complete(fut) + transport.close() + 
self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + def test_create_datagram_endpoint_sock_sockopts(self): + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, family=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, proto=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, flags=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_address=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_port=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, allow_broadcast=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + def test_create_datagram_endpoint_sockopts(self): + # Socket options should not be applied unless asked for. + # SO_REUSEADDR defaults to on for UNIX. + # SO_REUSEPORT is not available on all platforms. + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0)) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + reuse_address_default_on = ( + os.name == 'posix' and sys.platform != 'cygwin') + reuseport_supported = hasattr(socket, 'SO_REUSEPORT') + + if reuse_address_default_on: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=True, + reuse_port=reuseport_supported, + allow_broadcast=True) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + @mock.patch('asyncio.base_events.socket') + def test_create_datagram_endpoint_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_DGRAM = socket.SOCK_DGRAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + 
m_socket.socket.return_value = mock.Mock() + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=False, + reuse_port=True) + + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + def test_accept_connection_retry(self): sock = mock.Mock() sock.accept.side_effect = BlockingIOError() diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py index 8fbba8fe..141fde74 100644 --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -57,6 +57,17 @@ ONLYCERT = data_file('ssl_cert.pem') ONLYKEY = data_file('ssl_key.pem') SIGNED_CERTFILE = data_file('keycert3.pem') SIGNING_CA = data_file('pycacert.pem') +PEERCERT = {'serialNumber': 'B09264B1F2DA21D1', + 'version': 1, + 'subject': ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)), + 'issuer': ((('countryName', 'XY'),), + (('organizationName', 'Python Software Foundation CA'),), + (('commonName', 'our-ca-server'),)), + 'notAfter': 'Nov 13 19:47:07 2022 GMT', + 'notBefore': 'Jan 4 19:47:07 2013 GMT'} class MyBaseProto(asyncio.Protocol): @@ -596,22 +607,55 @@ class EventLoopTestsMixin: self.assertGreater(pr.nbytes, 0) tr.close() + def check_ssl_extra_info(self, client, check_sockname=True, + peername=None, peercert={}): + if check_sockname: + self.assertIsNotNone(client.get_extra_info('sockname')) + if peername: + self.assertEqual(peername, + client.get_extra_info('peername')) + else: + self.assertIsNotNone(client.get_extra_info('peername')) + self.assertEqual(peercert, + client.get_extra_info('peercert')) + + # test SSL cipher + cipher = client.get_extra_info('cipher') + self.assertIsInstance(cipher, tuple) + self.assertEqual(len(cipher), 3, cipher) + self.assertIsInstance(cipher[0], str) + self.assertIsInstance(cipher[1], str) + self.assertIsInstance(cipher[2], int) + + # test SSL object + sslobj = client.get_extra_info('ssl_object') + self.assertIsNotNone(sslobj) + self.assertEqual(sslobj.compression(), + client.get_extra_info('compression')) + self.assertEqual(sslobj.cipher(), + client.get_extra_info('cipher')) + self.assertEqual(sslobj.getpeercert(), + client.get_extra_info('peercert')) + self.assertEqual(sslobj.compression(), + client.get_extra_info('compression')) + def _basetest_create_ssl_connection(self, connection_fut, - check_sockname=True): + check_sockname=True, + peername=None): tr, pr = self.loop.run_until_complete(connection_fut) self.assertIsInstance(tr, asyncio.Transport) self.assertIsInstance(pr, asyncio.Protocol) self.assertTrue('ssl' in tr.__class__.__name__.lower()) - if check_sockname: - self.assertIsNotNone(tr.get_extra_info('sockname')) + self.check_ssl_extra_info(tr, check_sockname, peername) self.loop.run_until_complete(pr.done) self.assertGreater(pr.nbytes, 0) tr.close() def _test_create_ssl_connection(self, httpd, create_connection, - check_sockname=True): + check_sockname=True, peername=None): conn_fut = create_connection(ssl=test_utils.dummy_ssl_context()) - self._basetest_create_ssl_connection(conn_fut, check_sockname) + self._basetest_create_ssl_connection(conn_fut, check_sockname, + peername) # ssl.Purpose was introduced in Python 3.4 if hasattr(ssl, 'Purpose'): @@ -629,7 +673,8 @@ class EventLoopTestsMixin: with mock.patch('ssl.create_default_context', side_effect=_dummy_ssl_create_context) as m: conn_fut = create_connection(ssl=True) - 
self._basetest_create_ssl_connection(conn_fut, check_sockname) + self._basetest_create_ssl_connection(conn_fut, check_sockname, + peername) self.assertEqual(m.call_count, 1) # With the real ssl.create_default_context(), certificate @@ -638,7 +683,8 @@ class EventLoopTestsMixin: conn_fut = create_connection(ssl=True) # Ignore the "SSL handshake failed" log in debug mode with test_utils.disable_logger(): - self._basetest_create_ssl_connection(conn_fut, check_sockname) + self._basetest_create_ssl_connection(conn_fut, check_sockname, + peername) self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED') @@ -649,7 +695,8 @@ class EventLoopTestsMixin: self.loop.create_connection, lambda: MyProto(loop=self.loop), *httpd.address) - self._test_create_ssl_connection(httpd, create_connection) + self._test_create_ssl_connection(httpd, create_connection, + peername=httpd.address) def test_legacy_create_ssl_connection(self): with test_utils.force_legacy_ssl_support(): @@ -669,7 +716,8 @@ class EventLoopTestsMixin: server_hostname='127.0.0.1') self._test_create_ssl_connection(httpd, create_connection, - check_sockname) + check_sockname, + peername=httpd.address) def test_legacy_create_ssl_unix_connection(self): with test_utils.force_legacy_ssl_support(): @@ -696,6 +744,40 @@ class EventLoopTestsMixin: self.assertEqual(cm.exception.errno, errno.EADDRINUSE) self.assertIn(str(httpd.address), cm.exception.strerror) + @mock.patch('asyncio.base_events.socket') + def create_server_multiple_hosts(self, family, hosts, mock_sock): + @asyncio.coroutine + def getaddrinfo(host, port, *args, **kw): + if family == socket.AF_INET: + return [[family, socket.SOCK_STREAM, 6, '', (host, port)]] + else: + return [[family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0)]] + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + if family == socket.AF_INET: + mock_sock.socket().getsockbyname.side_effect = [(host, 80) + for host in hosts] + else: + mock_sock.socket().getsockbyname.side_effect = [(host, 80, 0, 0) + for host in hosts] + self.loop.getaddrinfo = getaddrinfo_task + self.loop._start_serving = mock.Mock() + self.loop._stop_serving = mock.Mock() + f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80) + server = self.loop.run_until_complete(f) + self.addCleanup(server.close) + server_hosts = [sock.getsockbyname()[0] for sock in server.sockets] + self.assertEqual(server_hosts, hosts) + + def test_create_server_multiple_hosts_ipv4(self): + self.create_server_multiple_hosts(socket.AF_INET, + ['1.2.3.4', '5.6.7.8']) + + def test_create_server_multiple_hosts_ipv6(self): + self.create_server_multiple_hosts(socket.AF_INET6, ['::1', '::2']) + def test_create_server(self): proto = MyProto(self.loop) f = self.loop.create_server(lambda: proto, '0.0.0.0', 0) @@ -732,6 +814,32 @@ class EventLoopTestsMixin: # close server server.close() + @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT') + def test_create_server_reuse_port(self): + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + + test_utils.run_briefly(self.loop) + + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0, reuse_port=True) + server = self.loop.run_until_complete(f) + 
self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + def _make_unix_server(self, factory, **kwargs): path = test_utils.gen_unix_socket_path() self.addCleanup(lambda: os.path.exists(path) and os.unlink(path)) @@ -819,9 +927,7 @@ class EventLoopTestsMixin: self.assertEqual(3, proto.nbytes) # extra info is available - self.assertIsNotNone(proto.transport.get_extra_info('sockname')) - self.assertEqual('127.0.0.1', - proto.transport.get_extra_info('peername')[0]) + self.check_ssl_extra_info(client, peername=(host, port)) # close connection proto.transport.close() @@ -1023,6 +1129,10 @@ class EventLoopTestsMixin: server_hostname='localhost') client, pr = self.loop.run_until_complete(f_c) + # extra info is available + self.check_ssl_extra_info(client,peername=(host, port), + peercert=PEERCERT) + # close connection proto.transport.close() client.close() @@ -1180,6 +1290,32 @@ class EventLoopTestsMixin: self.assertEqual('CLOSED', client.state) server.transport.close() + def test_create_datagram_endpoint_sock(self): + sock = None + local_address = ('127.0.0.1', 0) + infos = self.loop.run_until_complete( + self.loop.getaddrinfo( + *local_address, type=socket.SOCK_DGRAM)) + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + sock.bind(address) + except: + pass + else: + break + else: + assert False, 'Can not create socket.' + + f = self.loop.create_connection( + lambda: MyDatagramProto(loop=self.loop), sock=sock) + tr, pr = self.loop.run_until_complete(f) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, MyDatagramProto) + tr.close() + self.loop.run_until_complete(pr.done) + def test_internal_fds(self): loop = self.create_event_loop() if not isinstance(loop, selector_events.BaseSelectorEventLoop): diff --git a/Lib/test/test_asyncio/test_futures.py b/Lib/test/test_asyncio/test_futures.py index c8b6829f..55fdff3f 100644 --- a/Lib/test/test_asyncio/test_futures.py +++ b/Lib/test/test_asyncio/test_futures.py @@ -174,13 +174,13 @@ class FutureTests(test_utils.TestCase): '') def test_copy_state(self): - # Test the internal _copy_state method since it's being directly - # invoked in other modules. 
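# NOTE (editorial sketch, not part of the upstream patch): the test_futures.py hunk
# below replaces the removed Future._copy_state() method with the private
# module-level helper asyncio.futures._copy_future_state(source, dest).  A minimal
# usage sketch, mirroring only what the updated test itself asserts:
import asyncio
from asyncio.futures import _copy_future_state

def _copy_state_demo():
    loop = asyncio.new_event_loop()
    try:
        src = asyncio.Future(loop=loop)
        dst = asyncio.Future(loop=loop)
        src.set_result(10)
        _copy_future_state(src, dst)   # copies result/exception/cancellation state
        assert dst.done() and dst.result() == 10
    finally:
        loop.close()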
+ from asyncio.futures import _copy_future_state + f = asyncio.Future(loop=self.loop) f.set_result(10) newf = asyncio.Future(loop=self.loop) - newf._copy_state(f) + _copy_future_state(f, newf) self.assertTrue(newf.done()) self.assertEqual(newf.result(), 10) @@ -188,7 +188,7 @@ class FutureTests(test_utils.TestCase): f_exception.set_exception(RuntimeError()) newf_exception = asyncio.Future(loop=self.loop) - newf_exception._copy_state(f_exception) + _copy_future_state(f_exception, newf_exception) self.assertTrue(newf_exception.done()) self.assertRaises(RuntimeError, newf_exception.result) @@ -196,7 +196,7 @@ class FutureTests(test_utils.TestCase): f_cancelled.cancel() newf_cancelled = asyncio.Future(loop=self.loop) - newf_cancelled._copy_state(f_cancelled) + _copy_future_state(f_cancelled, newf_cancelled) self.assertTrue(newf_cancelled.cancelled()) def test_iter(self): @@ -384,9 +384,10 @@ class FutureTests(test_utils.TestCase): self.check_future_exception_never_retrieved(True) def test_set_result_unless_cancelled(self): + from asyncio import futures fut = asyncio.Future(loop=self.loop) fut.cancel() - fut._set_result_unless_cancelled(2) + futures._set_result_unless_cancelled(fut, 2) self.assertTrue(fut.cancelled()) diff --git a/Lib/test/test_asyncio/test_locks.py b/Lib/test/test_asyncio/test_locks.py index dda4577a..cdf5d9d3 100644 --- a/Lib/test/test_asyncio/test_locks.py +++ b/Lib/test/test_asyncio/test_locks.py @@ -7,7 +7,6 @@ import re import asyncio from asyncio import test_utils - STR_RGX_REPR = ( r'^<(?P.*?) object at (?P
.*?)' r'\[(?P' @@ -783,22 +782,20 @@ class SemaphoreTests(test_utils.TestCase): test_utils.run_briefly(self.loop) self.assertEqual(0, sem._value) - self.assertEqual([1, 2, 3], result) + self.assertEqual(3, len(result)) self.assertTrue(sem.locked()) self.assertEqual(1, len(sem._waiters)) self.assertEqual(0, sem._value) self.assertTrue(t1.done()) self.assertTrue(t1.result()) - self.assertTrue(t2.done()) - self.assertTrue(t2.result()) - self.assertTrue(t3.done()) - self.assertTrue(t3.result()) - self.assertFalse(t4.done()) + race_tasks = [t2, t3, t4] + done_tasks = [t for t in race_tasks if t.done() and t.result()] + self.assertTrue(2, len(done_tasks)) # cleanup locked semaphore sem.release() - self.loop.run_until_complete(t4) + self.loop.run_until_complete(asyncio.gather(*race_tasks)) def test_acquire_cancel(self): sem = asyncio.Semaphore(loop=self.loop) @@ -809,7 +806,44 @@ class SemaphoreTests(test_utils.TestCase): self.assertRaises( asyncio.CancelledError, self.loop.run_until_complete, acquire) - self.assertFalse(sem._waiters) + self.assertTrue((not sem._waiters) or + all(waiter.done() for waiter in sem._waiters)) + + def test_acquire_cancel_before_awoken(self): + sem = asyncio.Semaphore(value=0, loop=self.loop) + + t1 = asyncio.Task(sem.acquire(), loop=self.loop) + t2 = asyncio.Task(sem.acquire(), loop=self.loop) + t3 = asyncio.Task(sem.acquire(), loop=self.loop) + t4 = asyncio.Task(sem.acquire(), loop=self.loop) + + test_utils.run_briefly(self.loop) + + sem.release() + t1.cancel() + t2.cancel() + + test_utils.run_briefly(self.loop) + num_done = sum(t.done() for t in [t3, t4]) + self.assertEqual(num_done, 1) + + t3.cancel() + t4.cancel() + test_utils.run_briefly(self.loop) + + def test_acquire_hang(self): + sem = asyncio.Semaphore(value=0, loop=self.loop) + + t1 = asyncio.Task(sem.acquire(), loop=self.loop) + t2 = asyncio.Task(sem.acquire(), loop=self.loop) + + test_utils.run_briefly(self.loop) + + sem.release() + t1.cancel() + + test_utils.run_briefly(self.loop) + self.assertTrue(sem.locked()) def test_release_not_acquired(self): sem = asyncio.BoundedSemaphore(loop=self.loop) diff --git a/Lib/test/test_asyncio/test_pep492.py b/Lib/test/test_asyncio/test_pep492.py index 41e1b8a9..404a7489 100644 --- a/Lib/test/test_asyncio/test_pep492.py +++ b/Lib/test/test_asyncio/test_pep492.py @@ -203,6 +203,26 @@ class CoroutineTests(BaseTest): self.loop.run_until_complete(runner()) + def test_double_await(self): + async def afunc(): + await asyncio.sleep(0.1, loop=self.loop) + + async def runner(): + coro = afunc() + t = asyncio.Task(coro, loop=self.loop) + try: + await asyncio.sleep(0, loop=self.loop) + await coro + finally: + t.cancel() + + self.loop.set_debug(True) + with self.assertRaisesRegex( + RuntimeError, + r'Cannot await.*test_double_await.*\bafunc\b.*while.*\bsleep\b'): + + self.loop.run_until_complete(runner()) + if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_asyncio/test_proactor_events.py b/Lib/test/test_asyncio/test_proactor_events.py index fcd9ab1e..5a0f0881 100644 --- a/Lib/test/test_asyncio/test_proactor_events.py +++ b/Lib/test/test_asyncio/test_proactor_events.py @@ -204,7 +204,7 @@ class ProactorSocketTransportTests(test_utils.TestCase): tr.close() test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertEqual(tr._conn_lost, 1) self.protocol.connection_lost.reset_mock() @@ -298,7 +298,7 @@ class ProactorSocketTransportTests(test_utils.TestCase): 
self.loop, self.sock, self.protocol) self.assertTrue(tr.can_write_eof()) tr.write_eof() - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.loop._run_once() self.assertTrue(self.sock.close.called) tr.close() @@ -309,7 +309,7 @@ class ProactorSocketTransportTests(test_utils.TestCase): tr._loop._proactor.send.return_value = f tr.write(b'data') tr.write_eof() - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertFalse(self.sock.shutdown.called) tr._loop._proactor.send.assert_called_with(self.sock, b'data') f.set_result(4) diff --git a/Lib/test/test_asyncio/test_queues.py b/Lib/test/test_asyncio/test_queues.py index 8e38175e..591a9bb5 100644 --- a/Lib/test/test_asyncio/test_queues.py +++ b/Lib/test/test_asyncio/test_queues.py @@ -271,6 +271,29 @@ class QueueGetTests(_QueueTestBase): self.assertEqual(self.loop.run_until_complete(q.get()), 'a') self.assertEqual(self.loop.run_until_complete(q.get()), 'b') + def test_why_are_getters_waiting(self): + # From issue #268. + + @asyncio.coroutine + def consumer(queue, num_expected): + for _ in range(num_expected): + yield from queue.get() + + @asyncio.coroutine + def producer(queue, num_items): + for i in range(num_items): + yield from queue.put(i) + + queue_size = 1 + producer_num_items = 5 + q = asyncio.Queue(queue_size, loop=self.loop) + + self.loop.run_until_complete( + asyncio.gather(producer(q, producer_num_items), + consumer(q, producer_num_items), + loop=self.loop), + ) + class QueuePutTests(_QueueTestBase): @@ -377,13 +400,8 @@ class QueuePutTests(_QueueTestBase): loop.run_until_complete(reader3) - # reader2 will receive `2`, because it was added to the - # queue of pending readers *before* put_nowaits were called. - self.assertEqual(reader2.result(), 2) - # reader3 will receive `1`, because reader1 was cancelled - # before is had a chance to execute, and `2` was already - # pushed to reader2 by second `put_nowait`. - self.assertEqual(reader3.result(), 1) + # It is undefined in which order concurrent readers receive results. + self.assertEqual({reader2.result(), reader3.result()}, {1, 2}) def test_put_cancel_drop(self): @@ -479,6 +497,29 @@ class QueuePutTests(_QueueTestBase): self.loop.run_until_complete(q.put('a')) self.assertEqual(self.loop.run_until_complete(t), 'a') + def test_why_are_putters_waiting(self): + # From issue #265. 
+ + queue = asyncio.Queue(2, loop=self.loop) + + @asyncio.coroutine + def putter(item): + yield from queue.put(item) + + @asyncio.coroutine + def getter(): + yield + num = queue.qsize() + for _ in range(num): + item = queue.get_nowait() + + t0 = putter(0) + t1 = putter(1) + t2 = putter(2) + t3 = putter(3) + self.loop.run_until_complete( + asyncio.gather(getter(), t0, t1, t2, t3, loop=self.loop)) + class LifoQueueTests(_QueueTestBase): diff --git a/Lib/test/test_asyncio/test_selector_events.py b/Lib/test/test_asyncio/test_selector_events.py index f0fcdd22..135b5abf 100644 --- a/Lib/test/test_asyncio/test_selector_events.py +++ b/Lib/test/test_asyncio/test_selector_events.py @@ -698,7 +698,7 @@ class SelectorTransportTests(test_utils.TestCase): tr = self.create_transport() tr.close() - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertEqual(1, self.loop.remove_reader_count[7]) self.protocol.connection_lost(None) self.assertEqual(tr._conn_lost, 1) @@ -723,7 +723,7 @@ class SelectorTransportTests(test_utils.TestCase): self.loop.add_writer(7, mock.sentinel) tr._force_close(None) - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertEqual(tr._buffer, list_to_buffer()) self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) @@ -1436,7 +1436,7 @@ class SelectorSslTransportTests(test_utils.TestCase): tr = self._make_one() tr.close() - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertEqual(1, self.loop.remove_reader_count[1]) self.assertEqual(tr._conn_lost, 1) diff --git a/Lib/test/test_asyncio/test_streams.py b/Lib/test/test_asyncio/test_streams.py index ef6f6030..6f657ad5 100644 --- a/Lib/test/test_asyncio/test_streams.py +++ b/Lib/test/test_asyncio/test_streams.py @@ -2,8 +2,10 @@ import gc import os +import queue import socket import sys +import threading import unittest from unittest import mock try: @@ -632,6 +634,89 @@ os.close(fd) protocol = asyncio.StreamReaderProtocol(reader) self.assertIs(protocol._loop, self.loop) + def test_drain_raises(self): + # See http://bugs.python.org/issue25441 + + # This test should not use asyncio for the mock server; the + # whole point of the test is to test for a bug in drain() + # where it never gives up the event loop but the socket is + # closed on the server side. + + q = queue.Queue() + + def server(): + # Runs in a separate thread. + sock = socket.socket() + sock.bind(('localhost', 0)) + sock.listen(1) + addr = sock.getsockname() + q.put(addr) + clt, _ = sock.accept() + clt.close() + + @asyncio.coroutine + def client(host, port): + reader, writer = yield from asyncio.open_connection(host, port, loop=self.loop) + while True: + writer.write(b"foo\n") + yield from writer.drain() + + # Start the server thread and wait for it to be listening. + thread = threading.Thread(target=server) + thread.setDaemon(True) + thread.start() + addr = q.get() + + # Should not be stuck in an infinite loop. + with self.assertRaises((ConnectionResetError, BrokenPipeError)): + self.loop.run_until_complete(client(*addr)) + + # Clean up the thread. (Only on success; on failure, it may + # be stuck in accept().) 
+ thread.join() + + def test___repr__(self): + stream = asyncio.StreamReader(loop=self.loop) + self.assertEqual("", repr(stream)) + + def test___repr__nondefault_limit(self): + stream = asyncio.StreamReader(loop=self.loop, limit=123) + self.assertEqual("", repr(stream)) + + def test___repr__eof(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_eof() + self.assertEqual("", repr(stream)) + + def test___repr__data(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'data') + self.assertEqual("", repr(stream)) + + def test___repr__exception(self): + stream = asyncio.StreamReader(loop=self.loop) + exc = RuntimeError() + stream.set_exception(exc) + self.assertEqual("", repr(stream)) + + def test___repr__waiter(self): + stream = asyncio.StreamReader(loop=self.loop) + stream._waiter = asyncio.Future(loop=self.loop) + self.assertRegex( + repr(stream), + ">") + stream._waiter.set_result(None) + self.loop.run_until_complete(stream._waiter) + stream._waiter = None + self.assertEqual("", repr(stream)) + + def test___repr__transport(self): + stream = asyncio.StreamReader(loop=self.loop) + stream._transport = mock.Mock() + stream._transport.__repr__ = mock.Mock() + stream._transport.__repr__.return_value = "" + self.assertEqual(">", repr(stream)) + if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_asyncio/test_subprocess.py b/Lib/test/test_asyncio/test_subprocess.py index 38f0ceeb..e90f17dd 100644 --- a/Lib/test/test_asyncio/test_subprocess.py +++ b/Lib/test/test_asyncio/test_subprocess.py @@ -61,7 +61,7 @@ class SubprocessTransportTests(test_utils.TestCase): self.assertTrue(protocol.connection_lost.called) self.assertEqual(protocol.connection_lost.call_args[0], (None,)) - self.assertFalse(transport._closed) + self.assertFalse(transport.is_closing()) self.assertIsNone(transport._loop) self.assertIsNone(transport._proc) self.assertIsNone(transport._protocol) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py index 04267873..04d19ac6 100644 --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -153,6 +153,24 @@ class TaskTests(test_utils.TestCase): t = asyncio.ensure_future(t_orig, loop=self.loop) self.assertIs(t, t_orig) + @unittest.skipUnless(PY35, 'need python 3.5 or later') + def test_ensure_future_awaitable(self): + class Aw: + def __init__(self, coro): + self.coro = coro + def __await__(self): + return (yield from self.coro) + + @asyncio.coroutine + def coro(): + return 'ok' + + loop = asyncio.new_event_loop() + self.set_event_loop(loop) + fut = asyncio.ensure_future(Aw(coro()), loop=loop) + loop.run_until_complete(fut) + assert fut.result() == 'ok' + def test_ensure_future_neither(self): with self.assertRaises(TypeError): asyncio.ensure_future('ok') @@ -2082,5 +2100,121 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase): self.assertIsInstance(f.exception(), RuntimeError) +class RunCoroutineThreadsafeTests(test_utils.TestCase): + """Test case for asyncio.run_coroutine_threadsafe.""" + + def setUp(self): + self.loop = asyncio.new_event_loop() + self.set_event_loop(self.loop) # Will cleanup properly + + @asyncio.coroutine + def add(self, a, b, fail=False, cancel=False): + """Wait 0.05 second and return a + b.""" + yield from asyncio.sleep(0.05, loop=self.loop) + if fail: + raise RuntimeError("Fail!") + if cancel: + asyncio.tasks.Task.current_task(self.loop).cancel() + yield + return a + b + + def target(self, fail=False, cancel=False, timeout=None, + 
               advance_coro=False):
+        """Run add coroutine in the event loop."""
+        coro = self.add(1, 2, fail=fail, cancel=cancel)
+        future = asyncio.run_coroutine_threadsafe(coro, self.loop)
+        if advance_coro:
+            # this is for test_run_coroutine_threadsafe_task_factory_exception;
+            # otherwise it spills errors and breaks **other** unittests, since
+            # 'target' is interacting with threads.
+
+            # With this call, `coro` will be advanced, so that
+            # CoroWrapper.__del__ won't do anything when asyncio tests run
+            # in debug mode.
+            self.loop.call_soon_threadsafe(coro.send, None)
+        try:
+            return future.result(timeout)
+        finally:
+            future.done() or future.cancel()
+
+    def test_run_coroutine_threadsafe(self):
+        """Test coroutine submission from a thread to an event loop."""
+        future = self.loop.run_in_executor(None, self.target)
+        result = self.loop.run_until_complete(future)
+        self.assertEqual(result, 3)
+
+    def test_run_coroutine_threadsafe_with_exception(self):
+        """Test coroutine submission from a thread to an event loop
+        when an exception is raised."""
+        future = self.loop.run_in_executor(None, self.target, True)
+        with self.assertRaises(RuntimeError) as exc_context:
+            self.loop.run_until_complete(future)
+        self.assertIn("Fail!", exc_context.exception.args)
+
+    def test_run_coroutine_threadsafe_with_timeout(self):
+        """Test coroutine submission from a thread to an event loop
+        when a timeout is raised."""
+        callback = lambda: self.target(timeout=0)
+        future = self.loop.run_in_executor(None, callback)
+        with self.assertRaises(asyncio.TimeoutError):
+            self.loop.run_until_complete(future)
+        test_utils.run_briefly(self.loop)
+        # Check that there's no pending task (add has been cancelled)
+        for task in asyncio.Task.all_tasks(self.loop):
+            self.assertTrue(task.done())
+
+    def test_run_coroutine_threadsafe_task_cancelled(self):
+        """Test coroutine submission from a thread to an event loop
+        when the task is cancelled."""
+        callback = lambda: self.target(cancel=True)
+        future = self.loop.run_in_executor(None, callback)
+        with self.assertRaises(asyncio.CancelledError):
+            self.loop.run_until_complete(future)
+
+    def test_run_coroutine_threadsafe_task_factory_exception(self):
+        """Test coroutine submission from a thread to an event loop
+        when the task factory raises an exception."""
+        # Schedule the target
+        future = self.loop.run_in_executor(
+            None, lambda: self.target(advance_coro=True))
+        # Set corrupted task factory
+        self.loop.set_task_factory(lambda loop, coro: wrong_name)
+        # Set exception handler
+        callback = test_utils.MockCallback()
+        self.loop.set_exception_handler(callback)
+        # Run event loop
+        with self.assertRaises(NameError) as exc_context:
+            self.loop.run_until_complete(future)
+        # Check exceptions
+        self.assertIn('wrong_name', exc_context.exception.args[0])
+        self.assertEqual(len(callback.call_args_list), 1)
+        (loop, context), kwargs = callback.call_args
+        self.assertEqual(context['exception'], exc_context.exception)
+
+
+class SleepTests(test_utils.TestCase):
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(None)
+
+    def test_sleep_zero(self):
+        result = 0
+
+        def inc_result(num):
+            nonlocal result
+            result += num
+
+        @asyncio.coroutine
+        def coro():
+            self.loop.call_soon(inc_result, 1)
+            self.assertEqual(result, 0)
+            num = yield from asyncio.sleep(0, loop=self.loop, result=10)
+            self.assertEqual(result, 1)  # inc'ed by call_soon
+            inc_result(num)  # num should be 11
+
+        self.loop.run_until_complete(coro())
+        self.assertEqual(result, 11)
+
+
 if __name__ == '__main__':
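For readers following the new asyncio tests above: RunCoroutineThreadsafeTests exercises the asyncio.run_coroutine_threadsafe() API covered by this release. A minimal standalone sketch of the call pattern those tests cover; the coroutine, thread and variable names here are illustrative and not taken from the patch:

import asyncio
import threading

async def add(a, b):
    await asyncio.sleep(0.05)
    return a + b

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = []

def worker():
    # Called from a plain thread: hand the coroutine to the event-loop
    # thread and block on the returned concurrent.futures.Future.
    future = asyncio.run_coroutine_threadsafe(add(1, 2), loop)
    result.append(future.result(timeout=5))
    loop.call_soon_threadsafe(loop.stop)

t = threading.Thread(target=worker)
t.start()
loop.run_forever()   # serves the submitted coroutine until the worker stops the loop
t.join()
loop.close()
assert result == [3]

The returned future is a concurrent.futures.Future, so the submitting thread can block on result() with a timeout, which is the behaviour target() above relies on.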
unittest.main() diff --git a/Lib/test/test_asyncio/test_unix_events.py b/Lib/test/test_asyncio/test_unix_events.py index dc0835c5..22dc6880 100644 --- a/Lib/test/test_asyncio/test_unix_events.py +++ b/Lib/test/test_asyncio/test_unix_events.py @@ -440,7 +440,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase): tr = self.read_pipe_transport() err = object() tr._close(err) - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(err) @@ -598,7 +598,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase): tr._read_ready() self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) @@ -658,7 +658,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase): self.assertFalse(self.loop.writers) self.assertFalse(self.loop.readers) self.assertEqual([], tr._buffer) - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) m_logexc.assert_called_with( test_utils.MockPattern( 'Fatal write error on pipe transport' @@ -694,7 +694,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase): self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) @@ -743,7 +743,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase): def test_write_eof(self): tr = self.write_pipe_transport() tr.write_eof() - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) @@ -752,7 +752,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase): tr = self.write_pipe_transport() tr._buffer = [b'data'] tr.write_eof() - self.assertTrue(tr._closing) + self.assertTrue(tr.is_closing()) self.assertFalse(self.protocol.connection_lost.called) diff --git a/Lib/test/test_binascii.py b/Lib/test/test_binascii.py index 4e678875..8367afe0 100644 --- a/Lib/test/test_binascii.py +++ b/Lib/test/test_binascii.py @@ -178,6 +178,8 @@ class BinASCIITest(unittest.TestCase): self.assertEqual(binascii.unhexlify(self.type2test(t)), u) def test_qp(self): + binascii.a2b_qp(data=b"", header=False) # Keyword arguments allowed + # A test for SF bug 534347 (segfaults without the proper fix) try: binascii.a2b_qp(b"", **{1:1}) @@ -185,6 +187,7 @@ class BinASCIITest(unittest.TestCase): pass else: self.fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") + self.assertEqual(binascii.a2b_qp(b"= "), b"= ") self.assertEqual(binascii.a2b_qp(b"=="), b"=") self.assertEqual(binascii.a2b_qp(b"=AX"), b"=AX") diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py index a6533904..2eef9fc6 100644 --- a/Lib/test/test_buffer.py +++ b/Lib/test/test_buffer.py @@ -841,6 +841,11 @@ class TestBufferProtocol(unittest.TestCase): # test tobytes() self.assertEqual(result.tobytes(), b) + # test hex() + m = memoryview(result) + h = "".join("%02x" % c for c in b) + self.assertEqual(m.hex(), h) + # lst := expected multi-dimensional logical representation # flatten(lst) := elements in C-order ff = fmt if fmt else 'B' diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py index cdbb2cb9..5deeb72b 100644 --- 
a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1134,82 +1134,6 @@ class BuiltinTest(unittest.TestCase): sys.stdout = savestdout fp.close() - @unittest.skipUnless(pty, "the pty and signal modules must be available") - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = pty.fork() - except (OSError, AttributeError) as e: - os.close(r) - os.close(w) - self.skipTest("pty.fork() raised {}".format(e)) - if pid == 0: - # Child - try: - # Make sure we don't get stuck if there's a problem - signal.alarm(2) - os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') - with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) - except: - traceback.print_exc() - finally: - # We don't want to return to unittest... - os._exit(0) - # Parent - os.close(w) - os.write(fd, terminal_input + b"\r\n") - # Get results from the pipe - with open(r, "r") as rpipe: - lines = [] - while True: - line = rpipe.readline().strip() - if line == "": - # The other end was closed => the child exited - break - lines.append(line) - # Check the result was got and corresponds to the user's terminal input - if len(lines) != 2: - # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) - os.close(fd) - # Check we did exercise the GNU readline path - self.assertIn(lines[0], {'tty = True', 'tty = False'}) - if lines[0] != 'tty = True': - self.skipTest("standard IO in should have been a tty") - input_result = eval(lines[1]) # ascii() -> eval() roundtrip - if stdio_encoding: - expected = terminal_input.decode(stdio_encoding, 'surrogateescape') - else: - expected = terminal_input.decode(sys.stdin.encoding) # what else? - self.assertEqual(input_result, expected) - - def test_input_tty(self): - # Test input() functionality when wired to a tty (the code path - # is different and invokes GNU readline if available). - self.check_input_tty("prompt", b"quux") - - def test_input_tty_non_ascii(self): - # Check stdin/stdout encoding is used when invoking GNU readline - self.check_input_tty("prompté", b"quux\xe9", "utf-8") - - def test_input_tty_non_ascii_unicode_errors(self): - # Check stdin/stdout error handler is used when invoking GNU readline - self.check_input_tty("prompté", b"quux\xe9", "ascii") - # test_int(): see test_int.py for tests of built-in function int(). 
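The check_input_tty() machinery removed from BuiltinTest here is reinstated below as the PtyTests.run_child() helper; both rely on the same pty.fork()-plus-read-back pattern. A rough standalone sketch of that pattern, assuming a Unix platform where the pty module is available (names are illustrative, not from the patch):

import os
import pty
import sys

pid, fd = pty.fork()
if pid == 0:
    # Child: stdin/stdout are now the slave side of a pseudo-terminal.
    print("isatty:", sys.stdin.isatty() and sys.stdout.isatty())
    os._exit(0)

# Parent: read the child's terminal output from the master side.
# Linux raises EIO once the slave side has been closed.
output = bytearray()
while True:
    try:
        chunk = os.read(fd, 1024)
    except OSError:      # assume EIO: the child closed its end
        break
    if not chunk:
        break
    output.extend(chunk)
os.close(fd)
os.waitpid(pid, 0)
print(output.decode("ascii", "ignore").strip())   # e.g. "isatty: True"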
def test_repr(self): @@ -1564,6 +1488,119 @@ class BuiltinTest(unittest.TestCase): self.assertRaises(TypeError, tp, 1, 2) self.assertRaises(TypeError, tp, a=1, b=2) +@unittest.skipUnless(pty, "the pty and signal modules must be available") +class PtyTests(unittest.TestCase): + """Tests that use a pseudo terminal to guarantee stdin and stdout are + terminals in the test environment""" + + def run_child(self, child, terminal_input): + r, w = os.pipe() # Pipe test results from child back to parent + try: + pid, fd = pty.fork() + except (OSError, AttributeError) as e: + os.close(r) + os.close(w) + self.skipTest("pty.fork() raised {}".format(e)) + raise + if pid == 0: + # Child + try: + # Make sure we don't get stuck if there's a problem + signal.alarm(2) + os.close(r) + with open(w, "w") as wpipe: + child(wpipe) + except: + traceback.print_exc() + finally: + # We don't want to return to unittest... + os._exit(0) + # Parent + os.close(w) + os.write(fd, terminal_input) + # Get results from the pipe + with open(r, "r") as rpipe: + lines = [] + while True: + line = rpipe.readline().strip() + if line == "": + # The other end was closed => the child exited + break + lines.append(line) + # Check the result was got and corresponds to the user's terminal input + if len(lines) != 2: + # Something went wrong, try to get at stderr + # Beware of Linux raising EIO when the slave is closed + child_output = bytearray() + while True: + try: + chunk = os.read(fd, 3000) + except OSError: # Assume EIO + break + if not chunk: + break + child_output.extend(chunk) + os.close(fd) + child_output = child_output.decode("ascii", "ignore") + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output)) + os.close(fd) + return lines + + def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + def child(wpipe): + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + lines = self.run_child(child, terminal_input + b"\r\n") + # Check we did exercise the GNU readline path + self.assertIn(lines[0], {'tty = True', 'tty = False'}) + if lines[0] != 'tty = True': + self.skipTest("standard IO in should have been a tty") + input_result = eval(lines[1]) # ascii() -> eval() roundtrip + if stdio_encoding: + expected = terminal_input.decode(stdio_encoding, 'surrogateescape') + else: + expected = terminal_input.decode(sys.stdin.encoding) # what else? + self.assertEqual(input_result, expected) + + def test_input_tty(self): + # Test input() functionality when wired to a tty (the code path + # is different and invokes GNU readline if available). 
+ self.check_input_tty("prompt", b"quux") + + def test_input_tty_non_ascii(self): + # Check stdin/stdout encoding is used when invoking GNU readline + self.check_input_tty("prompté", b"quux\xe9", "utf-8") + + def test_input_tty_non_ascii_unicode_errors(self): + # Check stdin/stdout error handler is used when invoking GNU readline + self.check_input_tty("prompté", b"quux\xe9", "ascii") + + def test_input_no_stdout_fileno(self): + # Issue #24402: If stdin is the original terminal but stdout.fileno() + # fails, do not use the original stdout file descriptor + def child(wpipe): + print("stdin.isatty():", sys.stdin.isatty(), file=wpipe) + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + print("captured:", ascii(sys.stdout.getvalue()), file=wpipe) + lines = self.run_child(child, b"quux\r") + expected = ( + "stdin.isatty(): True", + "captured: 'prompt'", + ) + self.assertSequenceEqual(lines, expected) + class TestSorted(unittest.TestCase): def test_basic(self): diff --git a/Lib/test/test_cgi.py b/Lib/test/test_cgi.py index a7a9d02f..ab9f6ab6 100644 --- a/Lib/test/test_cgi.py +++ b/Lib/test/test_cgi.py @@ -326,6 +326,24 @@ Content-Type: text/plain got = getattr(files[x], k) self.assertEqual(got, exp) + def test_fieldstorage_part_content_length(self): + BOUNDARY = "JfISa01" + POSTDATA = """--JfISa01 +Content-Disposition: form-data; name="submit-name" +Content-Length: 5 + +Larry +--JfISa01""" + env = { + 'REQUEST_METHOD': 'POST', + 'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY), + 'CONTENT_LENGTH': str(len(POSTDATA))} + fp = BytesIO(POSTDATA.encode('latin-1')) + fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1") + self.assertEqual(len(fs.list), 1) + self.assertEqual(fs.list[0].name, 'submit-name') + self.assertEqual(fs.list[0].value, 'Larry') + def test_fieldstorage_as_context_manager(self): fp = BytesIO(b'x' * 10) env = {'REQUEST_METHOD': 'PUT'} diff --git a/Lib/test/test_cmd.py b/Lib/test/test_cmd.py index 0c314544..dd8981f8 100644 --- a/Lib/test/test_cmd.py +++ b/Lib/test/test_cmd.py @@ -110,7 +110,7 @@ class samplecmdclass(cmd.Cmd): 5 12 19 6 13 - This is a interactive test, put some commands in the cmdqueue attribute + This is an interactive test, put some commands in the cmdqueue attribute and let it execute This test includes the preloop(), postloop(), default(), emptyline(), parseline(), do_help() functions diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py index eab07c98..b93e0ab0 100644 --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -97,7 +97,7 @@ class ReadTest(MixInCheckStateHandling): self.assertEqual(r.read(), "") self.assertEqual(r.bytebuffer, b"") - # do the check again, this time using a incremental decoder + # do the check again, this time using an incremental decoder d = codecs.getincrementaldecoder(self.encoding)() result = "" for (c, partialresult) in zip(input.encode(self.encoding), partialresults): @@ -903,6 +903,32 @@ class CP65001Test(ReadTest, unittest.TestCase): class UTF7Test(ReadTest, unittest.TestCase): encoding = "utf-7" + def test_ascii(self): + # Set D (directly encoded characters) + set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789' + '\'(),-./:?') + self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii')) + self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d) + # Set O (optional direct characters) + set_o = ' !"#$%&*;<=>@[]^_`{|}' + self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii')) + 
self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o) + # + + self.assertEqual('a+b'.encode(self.encoding), b'a+-b') + self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b') + # White spaces + ws = ' \t\n\r' + self.assertEqual(ws.encode(self.encoding), ws.encode('ascii')) + self.assertEqual(ws.encode('ascii').decode(self.encoding), ws) + # Other ASCII characters + other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) - + set(set_d + set_o + '+' + ws))) + self.assertEqual(other_ascii.encode(self.encoding), + b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU' + b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-') + def test_partial(self): self.check_partial( 'a+-b\x00c\x80d\u0100e\U00010000f', @@ -944,7 +970,9 @@ class UTF7Test(ReadTest, unittest.TestCase): def test_errors(self): tests = [ + (b'\xffb', '\ufffdb'), (b'a\xffb', 'a\ufffdb'), + (b'a\xff\xffb', 'a\ufffd\ufffdb'), (b'a+IK', 'a\ufffd'), (b'a+IK-b', 'a\ufffdb'), (b'a+IK,b', 'a\ufffdb'), @@ -960,6 +988,8 @@ class UTF7Test(ReadTest, unittest.TestCase): (b'a+//,+IKw-b', 'a\ufffd\u20acb'), (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'), (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'), + (b'a+IKw-b\xff', 'a\u20acb\ufffd'), + (b'a+IKw\xffb', 'a\u20ac\ufffdb'), ] for raw, expected in tests: with self.subTest(raw=raw): @@ -971,8 +1001,36 @@ class UTF7Test(ReadTest, unittest.TestCase): self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0') + self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0') + self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-') + self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding), + b'+IKwgrNgB3KA-') + self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') + self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') - test_lone_surrogates = None + def test_lone_surrogates(self): + tests = [ + (b'a+2AE-b', 'a\ud801b'), + (b'a+2AE\xffb', 'a\ufffdb'), + (b'a+2AE', 'a\ufffd'), + (b'a+2AEA-b', 'a\ufffdb'), + (b'a+2AH-b', 'a\ufffdb'), + (b'a+IKzYAQ-b', 'a\u20ac\ud801b'), + (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'), + (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'), + (b'a+IKzYAd-b', 'a\u20ac\ufffdb'), + (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'), + (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'), + (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'), + ] + for raw, expected in tests: + with self.subTest(raw=raw): + self.assertEqual(raw.decode('utf-7', 'replace'), expected) class UTF16ExTest(unittest.TestCase): @@ -2684,6 +2742,14 @@ class TransformCodecTest(unittest.TestCase): info = codecs.lookup(alias) self.assertEqual(info.name, expected_name) + def test_quopri_stateless(self): + # Should encode with quotetabs=True + encoded = codecs.encode(b"space tab\teol \n", "quopri-codec") + self.assertEqual(encoded, b"space=20tab=09eol=20\n") + # But should still support unescaped tabs and spaces + unescaped = b"space tab eol\n" + self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped) + def test_uu_invalid(self): # Missing "begin" line self.assertRaises(ValueError, codecs.decode, b"", "uu-codec") diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py index c2d03eea..b9fdf794 100644 --- 
a/Lib/test/test_collections.py +++ b/Lib/test/test_collections.py @@ -257,7 +257,6 @@ class TestNamedTuple(unittest.TestCase): self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method - self.assertEqual(vars(p), p._asdict()) # verify that vars() works try: p._replace(x=1, error=2) @@ -412,6 +411,17 @@ class TestNamedTuple(unittest.TestCase): globals().pop('NTColor', None) # clean-up after this test + def test_namedtuple_subclass_issue_24931(self): + class Point(namedtuple('_Point', ['x', 'y'])): + pass + + a = Point(3, 4) + self.assertEqual(a._asdict(), OrderedDict([('x', 3), ('y', 4)])) + + a.w = 5 + self.assertEqual(a.__dict__, {'w': 5}) + + ################################################################################ ### Abstract Base Classes ################################################################################ @@ -1631,7 +1641,7 @@ def replaced_module(name, replacement): class OrderedDictTests: def test_init(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict with self.assertRaises(TypeError): OrderedDict([('a', 1), ('b', 2)], None) # too many args pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)] @@ -1655,7 +1665,7 @@ class OrderedDictTests: [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)]) def test_update(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict with self.assertRaises(TypeError): OrderedDict().update([('a', 1), ('b', 2)], None) # too many args pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)] @@ -1701,7 +1711,7 @@ class OrderedDictTests: self.assertRaises(TypeError, OrderedDict.update) def test_fromkeys(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict.fromkeys('abc') self.assertEqual(list(od.items()), [(c, None) for c in 'abc']) od = OrderedDict.fromkeys('abc', value=None) @@ -1710,12 +1720,12 @@ class OrderedDictTests: self.assertEqual(list(od.items()), [(c, 0) for c in 'abc']) def test_abc(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict self.assertIsInstance(OrderedDict(), MutableMapping) self.assertTrue(issubclass(OrderedDict, MutableMapping)) def test_clear(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) @@ -1724,7 +1734,7 @@ class OrderedDictTests: self.assertEqual(len(od), 0) def test_delitem(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) del od['a'] @@ -1734,7 +1744,7 @@ class OrderedDictTests: self.assertEqual(list(od.items()), pairs[:2] + pairs[3:]) def test_setitem(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)]) od['c'] = 10 # existing element od['f'] = 20 # new element @@ -1742,7 +1752,7 @@ class OrderedDictTests: [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)]) def test_iterators(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) @@ -1759,7 +1769,7 @@ class OrderedDictTests: self.assertEqual(list(reversed(od.items())), 
list(reversed(pairs))) def test_detect_deletion_during_iteration(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict.fromkeys('abc') it = iter(od) key = next(it) @@ -1770,7 +1780,7 @@ class OrderedDictTests: next(it) def test_sorted_iterators(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict with self.assertRaises(TypeError): OrderedDict([('a', 1), ('b', 2)], None) pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)] @@ -1783,7 +1793,7 @@ class OrderedDictTests: sorted([t[0] for t in reversed(pairs)])) def test_iterators_empty(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict() empty = [] self.assertEqual(list(od), empty) @@ -1796,7 +1806,7 @@ class OrderedDictTests: self.assertEqual(list(reversed(od.items())), empty) def test_popitem(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) @@ -1807,7 +1817,7 @@ class OrderedDictTests: self.assertEqual(len(od), 0) def test_popitem_last(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [(i, i) for i in range(30)] obj = OrderedDict(pairs) @@ -1818,7 +1828,7 @@ class OrderedDictTests: self.assertEqual(len(obj), 20) def test_pop(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) @@ -1844,7 +1854,7 @@ class OrderedDictTests: m.pop('a') def test_equality(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od1 = OrderedDict(pairs) @@ -1860,7 +1870,7 @@ class OrderedDictTests: self.assertNotEqual(od1, OrderedDict(pairs[:-1])) def test_copying(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # Check that ordered dicts are copyable, deepcopyable, picklable, # and have a repr/eval round-trip pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] @@ -1887,7 +1897,7 @@ class OrderedDictTests: check(OrderedDict(od)) def test_yaml_linkage(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature. # In yaml, lists are native but tuples are not. 
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] @@ -1897,7 +1907,7 @@ class OrderedDictTests: self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) def test_reduce_not_too_fat(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # do not save instance dictionary if not needed pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) @@ -1906,7 +1916,7 @@ class OrderedDictTests: self.assertIsNotNone(od.__reduce__()[2]) def test_pickle_recursive(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict() od[1] = od @@ -1919,7 +1929,7 @@ class OrderedDictTests: self.assertIs(dup[1], dup) def test_repr(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])") @@ -1927,7 +1937,7 @@ class OrderedDictTests: self.assertEqual(repr(OrderedDict()), "OrderedDict()") def test_repr_recursive(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # See issue #9826 od = OrderedDict.fromkeys('abc') od['x'] = od @@ -1935,7 +1945,7 @@ class OrderedDictTests: "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])") def test_setdefault(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) @@ -1955,7 +1965,7 @@ class OrderedDictTests: self.assertEqual(Missing().setdefault(5, 9), 9) def test_reinsert(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # Given insert a, insert b, delete a, re-insert a, # verify that a is now later than b. 
od = OrderedDict() @@ -1967,7 +1977,7 @@ class OrderedDictTests: self.assertEqual(list(od.items()), [('b', 2), ('a', 1)]) def test_move_to_end(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict.fromkeys('abcde') self.assertEqual(list(od), list('abcde')) od.move_to_end('c') @@ -1985,15 +1995,29 @@ class OrderedDictTests: with self.assertRaises(KeyError): od.move_to_end('x', 0) + def test_move_to_end_issue25406(self): + OrderedDict = self.OrderedDict + od = OrderedDict.fromkeys('abc') + od.move_to_end('c', last=False) + self.assertEqual(list(od), list('cab')) + od.move_to_end('a', last=False) + self.assertEqual(list(od), list('acb')) + + od = OrderedDict.fromkeys('abc') + od.move_to_end('a') + self.assertEqual(list(od), list('bca')) + od.move_to_end('c') + self.assertEqual(list(od), list('bac')) + def test_sizeof(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # Wimpy test: Just verify the reported size is larger than a regular dict d = dict(a=1) od = OrderedDict(**d) self.assertGreater(sys.getsizeof(od), sys.getsizeof(d)) def test_override_update(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict # Verify that subclasses can override update() without breaking __init__() class MyOD(OrderedDict): def update(self, *args, **kwds): @@ -2001,19 +2025,32 @@ class OrderedDictTests: items = [('a', 1), ('c', 3), ('b', 2)] self.assertEqual(list(MyOD(items).items()), items) - -class PurePythonOrderedDictTests(OrderedDictTests, unittest.TestCase): - - module = py_coll - - -@unittest.skipUnless(c_coll, 'requires the C version of the collections module') -class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase): - - module = c_coll + def test_highly_nested(self): + # Issue 25395: crashes during garbage collection + OrderedDict = self.OrderedDict + obj = None + for _ in range(1000): + obj = OrderedDict([(None, obj)]) + del obj + support.gc_collect() + + def test_highly_nested_subclass(self): + # Issue 25395: crashes during garbage collection + OrderedDict = self.OrderedDict + deleted = [] + class MyOD(OrderedDict): + def __del__(self): + deleted.append(self.i) + obj = None + for i in range(100): + obj = MyOD([(None, obj)]) + obj.i = i + del obj + support.gc_collect() + self.assertEqual(deleted, list(reversed(range(100)))) def test_delitem_hash_collision(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict class Key: def __init__(self, hash): @@ -2051,25 +2088,8 @@ class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase): del od[colliding] self.assertEqual(list(od.items()), [(key, ...), ('after', ...)]) - def test_key_change_during_iteration(self): - OrderedDict = self.module.OrderedDict - - od = OrderedDict.fromkeys('abcde') - self.assertEqual(list(od), list('abcde')) - with self.assertRaises(RuntimeError): - for i, k in enumerate(od): - od.move_to_end(k) - self.assertLess(i, 5) - with self.assertRaises(RuntimeError): - for k in od: - od['f'] = None - with self.assertRaises(RuntimeError): - for k in od: - del od['c'] - self.assertEqual(list(od), list('bdeaf')) - def test_issue24347(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict class Key: def __hash__(self): @@ -2081,13 +2101,17 @@ class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase): od[key] = i # These should not crash. 
+ with self.assertRaises(KeyError): + list(od.values()) + with self.assertRaises(KeyError): + list(od.items()) with self.assertRaises(KeyError): repr(od) with self.assertRaises(KeyError): od.copy() def test_issue24348(self): - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict class Key: def __hash__(self): @@ -2110,7 +2134,7 @@ class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase): that we will keep the size of the odict the same at each popitem call. This verifies that we handled the dict resize properly. """ - OrderedDict = self.module.OrderedDict + OrderedDict = self.OrderedDict od = OrderedDict() for c0 in '0123456789ABCDEF': @@ -2121,6 +2145,104 @@ class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase): key = c0 + c1 od[key] = key + # Direct use of dict methods + + def test_dict_setitem(self): + OrderedDict = self.OrderedDict + od = OrderedDict() + dict.__setitem__(od, 'spam', 1) + self.assertNotIn('NULL', repr(od)) + + def test_dict_delitem(self): + OrderedDict = self.OrderedDict + od = OrderedDict() + od['spam'] = 1 + od['ham'] = 2 + dict.__delitem__(od, 'spam') + with self.assertRaises(KeyError): + repr(od) + + def test_dict_clear(self): + OrderedDict = self.OrderedDict + od = OrderedDict() + od['spam'] = 1 + od['ham'] = 2 + dict.clear(od) + self.assertNotIn('NULL', repr(od)) + + def test_dict_pop(self): + OrderedDict = self.OrderedDict + od = OrderedDict() + od['spam'] = 1 + od['ham'] = 2 + dict.pop(od, 'spam') + with self.assertRaises(KeyError): + repr(od) + + def test_dict_popitem(self): + OrderedDict = self.OrderedDict + od = OrderedDict() + od['spam'] = 1 + od['ham'] = 2 + dict.popitem(od) + with self.assertRaises(KeyError): + repr(od) + + def test_dict_setdefault(self): + OrderedDict = self.OrderedDict + od = OrderedDict() + dict.setdefault(od, 'spam', 1) + self.assertNotIn('NULL', repr(od)) + + def test_dict_update(self): + od = OrderedDict() + dict.update(od, [('spam', 1)]) + self.assertNotIn('NULL', repr(od)) + + +class PurePythonOrderedDictTests(OrderedDictTests, unittest.TestCase): + + module = py_coll + OrderedDict = py_coll.OrderedDict + + +@unittest.skipUnless(c_coll, 'requires the C version of the collections module') +class CPythonOrderedDictTests(OrderedDictTests, unittest.TestCase): + + module = c_coll + OrderedDict = c_coll.OrderedDict + + def test_key_change_during_iteration(self): + OrderedDict = self.OrderedDict + + od = OrderedDict.fromkeys('abcde') + self.assertEqual(list(od), list('abcde')) + with self.assertRaises(RuntimeError): + for i, k in enumerate(od): + od.move_to_end(k) + self.assertLess(i, 5) + with self.assertRaises(RuntimeError): + for k in od: + od['f'] = None + with self.assertRaises(RuntimeError): + for k in od: + del od['c'] + self.assertEqual(list(od), list('bdeaf')) + + +class PurePythonOrderedDictSubclassTests(PurePythonOrderedDictTests): + + module = py_coll + class OrderedDict(py_coll.OrderedDict): + pass + + +class CPythonOrderedDictSubclassTests(CPythonOrderedDictTests): + + module = c_coll + class OrderedDict(c_coll.OrderedDict): + pass + class PurePythonGeneralMappingTests(mapping_tests.BasicTestMappingProtocol): @@ -2183,6 +2305,8 @@ def test_main(verbose=None): test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs, TestCollectionABCs, TestCounter, TestChainMap, PurePythonOrderedDictTests, CPythonOrderedDictTests, + PurePythonOrderedDictSubclassTests, + CPythonOrderedDictSubclassTests, PurePythonGeneralMappingTests, CPythonGeneralMappingTests, 
PurePythonSubclassMappingTests, CPythonSubclassMappingTests, TestUserObjects, diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py index db821be0..8935c651 100644 --- a/Lib/test/test_compile.py +++ b/Lib/test/test_compile.py @@ -516,6 +516,16 @@ if 1: res = script_helper.run_python_until_end(fn)[0] self.assertIn(b"Non-UTF-8", res.err) + def test_yet_more_evil_still_undecodable(self): + # Issue #25388 + src = b"#\x00\n#\xfd\n" + with tempfile.TemporaryDirectory() as tmpd: + fn = os.path.join(tmpd, "bad.py") + with open(fn, "wb") as fp: + fp.write(src) + res = script_helper.run_python_until_end(fn)[0] + self.assertIn(b"Non-UTF-8", res.err) + @support.cpython_only def test_compiler_recursion_limit(self): # Expected limit is sys.getrecursionlimit() * the scaling factor @@ -542,6 +552,26 @@ if 1: check_limit("a", "[0]") check_limit("a", "*a") + def test_null_terminated(self): + # The source code is null-terminated internally, but bytes-like + # objects are accepted, which could be not terminated. + with self.assertRaisesRegex(ValueError, "cannot contain null"): + compile("123\x00", "", "eval") + with self.assertRaisesRegex(ValueError, "cannot contain null"): + compile(memoryview(b"123\x00"), "", "eval") + code = compile(memoryview(b"123\x00")[1:-1], "", "eval") + self.assertEqual(eval(code), 23) + code = compile(memoryview(b"1234")[1:-1], "", "eval") + self.assertEqual(eval(code), 23) + code = compile(memoryview(b"$23$")[1:-1], "", "eval") + self.assertEqual(eval(code), 23) + + # Also test when eval() and exec() do the compilation step + self.assertEqual(eval(memoryview(b"1234")[1:-1]), 23) + namespace = dict() + exec(memoryview(b"ax = 123")[1:-1], namespace) + self.assertEqual(namespace['x'], 12) + class TestStackSize(unittest.TestCase): # These tests check that the computed stack size for a code object diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py index 9479776a..ef2b6693 100644 --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -2,6 +2,7 @@ import sys import compileall import importlib.util import os +import pathlib import py_compile import shutil import struct @@ -168,6 +169,33 @@ class EncodingTest(unittest.TestCase): class CommandLineTests(unittest.TestCase): """Test compileall's CLI.""" + @classmethod + def setUpClass(cls): + for path in filter(os.path.isdir, sys.path): + directory_created = False + directory = pathlib.Path(path) / '__pycache__' + path = directory / 'test.try' + try: + if not directory.is_dir(): + directory.mkdir() + directory_created = True + with path.open('w') as file: + file.write('# for test_compileall') + except OSError: + sys_path_writable = False + break + finally: + support.unlink(str(path)) + if directory_created: + directory.rmdir() + else: + sys_path_writable = True + cls._sys_path_writable = sys_path_writable + + def _skip_if_sys_path_not_writable(self): + if not self._sys_path_writable: + raise unittest.SkipTest('not all entries on sys.path are writable') + def _get_run_args(self, args): interp_args = ['-S'] if sys.flags.optimize: @@ -194,8 +222,8 @@ class CommandLineTests(unittest.TestCase): self.assertFalse(os.path.exists(path)) def setUp(self): - self.addCleanup(self._cleanup) self.directory = tempfile.mkdtemp() + self.addCleanup(support.rmtree, self.directory) self.pkgdir = os.path.join(self.directory, 'foo') os.mkdir(self.pkgdir) self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__') @@ -203,11 +231,9 @@ class CommandLineTests(unittest.TestCase): self.initfn = 
script_helper.make_script(self.pkgdir, '__init__', '') self.barfn = script_helper.make_script(self.pkgdir, 'bar', '') - def _cleanup(self): - support.rmtree(self.directory) - def test_no_args_compiles_path(self): # Note that -l is implied for the no args case. + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) self.assertCompiled(bazfn) @@ -215,6 +241,7 @@ class CommandLineTests(unittest.TestCase): self.assertNotCompiled(self.barfn) def test_no_args_respects_force_flag(self): + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) pycpath = importlib.util.cache_from_source(bazfn) @@ -231,6 +258,7 @@ class CommandLineTests(unittest.TestCase): self.assertNotEqual(mtime, mtime2) def test_no_args_respects_quiet_flag(self): + self._skip_if_sys_path_not_writable() script_helper.make_script(self.directory, 'baz', '') noisy = self.assertRunOK(PYTHONPATH=self.directory) self.assertIn(b'Listing ', noisy) diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py index 470d2cd1..71a8f3f8 100644 --- a/Lib/test/test_configparser.py +++ b/Lib/test/test_configparser.py @@ -847,7 +847,8 @@ class ConfigParserTestCase(BasicTestCase, unittest.TestCase): "something with lots of interpolation (10 steps)") e = self.get_error(cf, configparser.InterpolationDepthError, "Foo", "bar11") if self.interpolation == configparser._UNSET: - self.assertEqual(e.args, ("bar11", "Foo", "%(with1)s")) + self.assertEqual(e.args, ("bar11", "Foo", + "something %(with11)s lots of interpolation (11 steps)")) elif isinstance(self.interpolation, configparser.LegacyInterpolation): self.assertEqual(e.args, ("bar11", "Foo", "something %(with11)s lots of interpolation (11 steps)")) @@ -861,7 +862,7 @@ class ConfigParserTestCase(BasicTestCase, unittest.TestCase): self.assertEqual(e.option, "name") if self.interpolation == configparser._UNSET: self.assertEqual(e.args, ('name', 'Interpolation Error', - '', 'reference')) + '%(reference)s', 'reference')) elif isinstance(self.interpolation, configparser.LegacyInterpolation): self.assertEqual(e.args, ('name', 'Interpolation Error', '%(reference)s', 'reference')) @@ -1177,7 +1178,7 @@ class ConfigParserTestCaseExtendedInterpolation(BasicTestCase, unittest.TestCase with self.assertRaises(exception_class) as cm: cf['interpolated']['$trying'] self.assertEqual(cm.exception.reference, 'dollars:${sick') - self.assertEqual(cm.exception.args[2], '}') #rawval + self.assertEqual(cm.exception.args[2], '${dollars:${sick}}') #rawval def test_case_sensitivity_basic(self): ini = textwrap.dedent(""" diff --git a/Lib/test/test_contextlib.py b/Lib/test/test_contextlib.py index 78741f5f..30a6377b 100644 --- a/Lib/test/test_contextlib.py +++ b/Lib/test/test_contextlib.py @@ -880,9 +880,11 @@ class TestSuppress(unittest.TestCase): with ignore_exceptions: len(5) with ignore_exceptions: - 1/0 with ignore_exceptions: # Check nested usage len(5) + outer_continued = True + 1/0 + self.assertTrue(outer_continued) if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py index 10de8564..07c1cdf8 100644 --- a/Lib/test/test_coroutines.py +++ b/Lib/test/test_coroutines.py @@ -1,5 +1,7 @@ import contextlib +import copy import inspect +import pickle import sys import types import unittest @@ -1318,11 +1320,41 @@ class CoroutineTest(unittest.TestCase): run_async(foo()) 
self.assertEqual(CNT, 0) + def test_copy(self): + async def func(): pass + coro = func() + with self.assertRaises(TypeError): + copy.copy(coro) + + aw = coro.__await__() + try: + with self.assertRaises(TypeError): + copy.copy(aw) + finally: + aw.close() + + def test_pickle(self): + async def func(): pass + coro = func() + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.assertRaises((TypeError, pickle.PicklingError)): + pickle.dumps(coro, proto) + + aw = coro.__await__() + try: + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.assertRaises((TypeError, pickle.PicklingError)): + pickle.dumps(aw, proto) + finally: + aw.close() + class CoroAsyncIOCompatTest(unittest.TestCase): def test_asyncio_1(self): - import asyncio + # asyncio cannot be imported when Python is compiled without thread + # support + asyncio = support.import_module('asyncio') class MyException(Exception): pass diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py index c74ebae4..d0963901 100644 --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -3798,6 +3798,37 @@ order (MRO) for bases """ else: assert 0, "best_base calculation found wanting" + def test_unsubclassable_types(self): + with self.assertRaises(TypeError): + class X(type(None)): + pass + with self.assertRaises(TypeError): + class X(object, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), object): + pass + class O(object): + pass + with self.assertRaises(TypeError): + class X(O, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), O): + pass + + class X(object): + pass + with self.assertRaises(TypeError): + X.__bases__ = type(None), + with self.assertRaises(TypeError): + X.__bases__ = object, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), object + with self.assertRaises(TypeError): + X.__bases__ = O, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), O def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... @@ -4732,14 +4763,6 @@ class PicklingTests(unittest.TestCase): with self.assertRaises((TypeError, ValueError)): obj.__reduce_ex__(proto) - class C8: - def __getnewargs_ex__(self): - return (args, kwargs) - obj = C8() - for proto in protocols: - if 2 <= proto < 4: - with self.assertRaises(ValueError): - obj.__reduce_ex__(proto) class C9: def __getnewargs_ex__(self): return (args, {}) @@ -5058,10 +5081,6 @@ class PicklingTests(unittest.TestCase): with self.subTest(cls=cls): kwargs = getattr(cls, 'KWARGS', {}) obj = cls(*cls.ARGS, **kwargs) - # XXX: We need to modify the copy module to support PEP 3154's - # reduce protocol 4. 
- if hasattr(cls, '__getnewargs_ex__'): - continue objcopy = deepcopy(obj) self._assert_is_copy(obj, objcopy) # For test classes that supports this, make sure we didn't go diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py index 2488b634..3b424142 100644 --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -603,7 +603,7 @@ class DictTest(unittest.TestCase): # (D) subclass defines __missing__ method returning a value # (E) subclass defines __missing__ method raising RuntimeError # (F) subclass sets __missing__ instance variable (no effect) - # (G) subclass doesn't define __missing__ at a all + # (G) subclass doesn't define __missing__ at all class D(dict): def __missing__(self, key): return 42 diff --git a/Lib/test/test_dictviews.py b/Lib/test/test_dictviews.py index 8d33801c..787ef20c 100644 --- a/Lib/test/test_dictviews.py +++ b/Lib/test/test_dictviews.py @@ -1,3 +1,5 @@ +import copy +import pickle import unittest class DictSetTest(unittest.TestCase): @@ -197,6 +199,22 @@ class DictSetTest(unittest.TestCase): d[42] = d.values() self.assertRaises(RecursionError, repr, d) + def test_copy(self): + d = {1: 10, "a": "ABC"} + self.assertRaises(TypeError, copy.copy, d.keys()) + self.assertRaises(TypeError, copy.copy, d.values()) + self.assertRaises(TypeError, copy.copy, d.items()) + + def test_pickle(self): + d = {1: 10, "a": "ABC"} + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + self.assertRaises((TypeError, pickle.PicklingError), + pickle.dumps, d.keys(), proto) + self.assertRaises((TypeError, pickle.PicklingError), + pickle.dumps, d.values(), proto) + self.assertRaises((TypeError, pickle.PicklingError), + pickle.dumps, d.items(), proto) + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_eintr.py b/Lib/test/test_eintr.py index 111ead36..aabad835 100644 --- a/Lib/test/test_eintr.py +++ b/Lib/test/test_eintr.py @@ -1,5 +1,7 @@ import os import signal +import subprocess +import sys import unittest from test import support @@ -14,7 +16,14 @@ class EINTRTests(unittest.TestCase): # Run the tester in a sub-process, to make sure there is only one # thread (for reliable signal delivery). tester = support.findfile("eintr_tester.py", subdir="eintrdata") - script_helper.assert_python_ok(tester) + + if support.verbose: + args = [sys.executable, tester] + with subprocess.Popen(args) as proc: + exitcode = proc.wait() + self.assertEqual(exitcode, 0) + else: + script_helper.assert_python_ok(tester) if __name__ == "__main__": diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py index d7e3dca1..894b8003 100644 --- a/Lib/test/test_email/test_email.py +++ b/Lib/test/test_email/test_email.py @@ -3052,7 +3052,7 @@ class TestMiscellaneous(TestEmailBase): # issue 1690608. email.utils.formataddr() should be rfc2047 aware. 
name = "H\u00e4ns W\u00fcrst" addr = 'person@dom.ain' - # A object without a header_encode method: + # An object without a header_encode method: bad_charset = object() self.assertRaises(AttributeError, utils.formataddr, (name, addr), bad_charset) @@ -3179,7 +3179,7 @@ Foo self.msgids = [] append = self.msgids.append make_msgid = utils.make_msgid - clock = time.clock + clock = time.monotonic tfin = clock() + 3.0 while clock() < tfin: append(make_msgid(domain='testdomain-string')) diff --git a/Lib/test/test_email/test_utils.py b/Lib/test/test_email/test_utils.py index 1e9cd63e..6dcb3bbe 100644 --- a/Lib/test/test_email/test_utils.py +++ b/Lib/test/test_email/test_utils.py @@ -136,6 +136,9 @@ class LocaltimeTests(unittest.TestCase): t1 = utils.localtime(t0) self.assertEqual(t1.tzname(), 'EET') +# Issue #24836: The timezone files are out of date (pre 2011k) +# on Mac OS X Snow Leopard. +@test.support.requires_mac_ver(10, 7) class FormatDateTests(unittest.TestCase): @test.support.run_with_tz('Europe/Minsk') diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py index 32a66ea9..8a490455 100644 --- a/Lib/test/test_exceptions.py +++ b/Lib/test/test_exceptions.py @@ -233,6 +233,7 @@ class ExceptionTests(unittest.TestCase): self.assertEqual(w.winerror, 3) self.assertEqual(w.strerror, 'foo') self.assertEqual(w.filename, 'bar') + self.assertEqual(w.filename2, None) self.assertEqual(str(w), "[WinError 3] foo: 'bar'") # Unknown win error becomes EINVAL (22) w = OSError(0, 'foo', None, 1001) @@ -240,6 +241,7 @@ class ExceptionTests(unittest.TestCase): self.assertEqual(w.winerror, 1001) self.assertEqual(w.strerror, 'foo') self.assertEqual(w.filename, None) + self.assertEqual(w.filename2, None) self.assertEqual(str(w), "[WinError 1001] foo") # Non-numeric "errno" w = OSError('bar', 'foo') @@ -247,6 +249,7 @@ class ExceptionTests(unittest.TestCase): self.assertEqual(w.winerror, None) self.assertEqual(w.strerror, 'foo') self.assertEqual(w.filename, None) + self.assertEqual(w.filename2, None) @unittest.skipUnless(sys.platform == 'win32', 'test specific to Windows') @@ -271,13 +274,15 @@ class ExceptionTests(unittest.TestCase): (SystemExit, ('foo',), {'args' : ('foo',), 'code' : 'foo'}), (OSError, ('foo',), - {'args' : ('foo',), 'filename' : None, + {'args' : ('foo',), 'filename' : None, 'filename2' : None, 'errno' : None, 'strerror' : None}), (OSError, ('foo', 'bar'), - {'args' : ('foo', 'bar'), 'filename' : None, + {'args' : ('foo', 'bar'), + 'filename' : None, 'filename2' : None, 'errno' : 'foo', 'strerror' : 'bar'}), (OSError, ('foo', 'bar', 'baz'), - {'args' : ('foo', 'bar'), 'filename' : 'baz', + {'args' : ('foo', 'bar'), + 'filename' : 'baz', 'filename2' : None, 'errno' : 'foo', 'strerror' : 'bar'}), (OSError, ('foo', 'bar', 'baz', None, 'quux'), {'args' : ('foo', 'bar'), 'filename' : 'baz', 'filename2': 'quux'}), @@ -287,7 +292,8 @@ class ExceptionTests(unittest.TestCase): 'filename' : 'filenameStr'}), (OSError, (1, 'strErrorStr', 'filenameStr'), {'args' : (1, 'strErrorStr'), 'errno' : 1, - 'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}), + 'strerror' : 'strErrorStr', + 'filename' : 'filenameStr', 'filename2' : None}), (SyntaxError, (), {'msg' : None, 'text' : None, 'filename' : None, 'lineno' : None, 'offset' : None, 'print_file_and_line' : None}), @@ -343,7 +349,8 @@ class ExceptionTests(unittest.TestCase): (WindowsError, (1, 'strErrorStr', 'filenameStr'), {'args' : (1, 'strErrorStr'), 'strerror' : 'strErrorStr', 'winerror' : None, - 'errno' : 1, 'filename' : 'filenameStr'}) 
+ 'errno' : 1, + 'filename' : 'filenameStr', 'filename2' : None}) ) except NameError: pass diff --git a/Lib/test/test_fileinput.py b/Lib/test/test_fileinput.py index 1d089f52..4765a056 100644 --- a/Lib/test/test_fileinput.py +++ b/Lib/test/test_fileinput.py @@ -288,6 +288,21 @@ class FileInputTests(unittest.TestCase): with self.assertRaises(UnicodeDecodeError): # Read to the end of file. list(fi) + self.assertEqual(fi.readline(), '') + self.assertEqual(fi.readline(), '') + + def test_readline_binary_mode(self): + with open(TESTFN, 'wb') as f: + f.write(b'A\nB\r\nC\rD') + self.addCleanup(safe_unlink, TESTFN) + + with FileInput(files=TESTFN, mode='rb') as fi: + self.assertEqual(fi.readline(), b'A\n') + self.assertEqual(fi.readline(), b'B\r\n') + self.assertEqual(fi.readline(), b'C\rD') + # Read to the end of file. + self.assertEqual(fi.readline(), b'') + self.assertEqual(fi.readline(), b'') def test_context_manager(self): try: diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py index 42510905..723beb37 100644 --- a/Lib/test/test_float.py +++ b/Lib/test/test_float.py @@ -31,7 +31,6 @@ class GeneralFloatCases(unittest.TestCase): self.assertEqual(float(3.14), 3.14) self.assertEqual(float(314), 314.0) self.assertEqual(float(" 3.14 "), 3.14) - self.assertEqual(float(b" 3.14 "), 3.14) self.assertRaises(ValueError, float, " 0x3.1 ") self.assertRaises(ValueError, float, " -0x3.p-1 ") self.assertRaises(ValueError, float, " +0x3.p-1 ") @@ -43,7 +42,6 @@ class GeneralFloatCases(unittest.TestCase): self.assertRaises(ValueError, float, "+.inf") self.assertRaises(ValueError, float, ".") self.assertRaises(ValueError, float, "-.") - self.assertRaises(ValueError, float, b"-") self.assertRaises(TypeError, float, {}) self.assertRaisesRegex(TypeError, "not 'dict'", float, {}) # Lone surrogate @@ -57,6 +55,42 @@ class GeneralFloatCases(unittest.TestCase): float(b'.' + b'1'*1000) float('.' + '1'*1000) + def test_non_numeric_input_types(self): + # Test possible non-numeric types for the argument x, including + # subclasses of the explicitly documented accepted types. 
+ class CustomStr(str): pass + class CustomBytes(bytes): pass + class CustomByteArray(bytearray): pass + + factories = [ + bytes, + bytearray, + lambda b: CustomStr(b.decode()), + CustomBytes, + CustomByteArray, + memoryview, + ] + try: + from array import array + except ImportError: + pass + else: + factories.append(lambda b: array('B', b)) + + for f in factories: + x = f(b" 3.14 ") + with self.subTest(type(x)): + self.assertEqual(float(x), 3.14) + with self.assertRaisesRegex(ValueError, "could not convert"): + float(f(b'A' * 0x10)) + + def test_float_memoryview(self): + self.assertEqual(float(memoryview(b'12.3')[1:4]), 2.3) + self.assertEqual(float(memoryview(b'12.3\x00')[1:4]), 2.3) + self.assertEqual(float(memoryview(b'12.3 ')[1:4]), 2.3) + self.assertEqual(float(memoryview(b'12.3A')[1:4]), 2.3) + self.assertEqual(float(memoryview(b'12.34')[1:4]), 2.3) + def test_error_message(self): testlist = ('\xbd', '123\xbd', ' 123 456 ') for s in testlist: diff --git a/Lib/test/test_functools.py b/Lib/test/test_functools.py index ac211c46..d822b2de 100644 --- a/Lib/test/test_functools.py +++ b/Lib/test/test_functools.py @@ -1,5 +1,6 @@ import abc import collections +import copy from itertools import permutations import pickle from random import choice @@ -139,14 +140,23 @@ class TestPartial: def test_nested_optimization(self): partial = self.partial - # Only "true" partial is optimized - if partial.__name__ != 'partial': - return inner = partial(signature, 'asdf') nested = partial(inner, bar=True) flat = partial(signature, 'asdf', bar=True) self.assertEqual(signature(nested), signature(flat)) + def test_nested_partial_with_attribute(self): + # see issue 25137 + partial = self.partial + + def foo(bar): + return bar + + p = partial(foo, 'first') + p2 = partial(p, 'second') + p2.new_attr = 'spam' + self.assertEqual(p2.new_attr, 'spam') + @unittest.skipUnless(c_functools, 'requires the C _functools module') class TestPartialC(TestPartial, unittest.TestCase): @@ -238,6 +248,9 @@ class TestPartialCSubclass(TestPartialC): if c_functools: partial = PartialSubclass + # partial subclasses are not optimized for nested calls + test_nested_optimization = None + class TestPartialMethod(unittest.TestCase): @@ -1239,11 +1252,64 @@ class TestLRU: self.assertEqual(b.f.cache_info(), X.f.cache_info()) self.assertEqual(c.f.cache_info(), X.f.cache_info()) -class TestLRUC(TestLRU, unittest.TestCase): - module = c_functools + def test_pickle(self): + cls = self.__class__ + for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth: + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.subTest(proto=proto, func=f): + f_copy = pickle.loads(pickle.dumps(f, proto)) + self.assertIs(f_copy, f) + + def test_copy(self): + cls = self.__class__ + for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth: + with self.subTest(func=f): + f_copy = copy.copy(f) + self.assertIs(f_copy, f) + + def test_deepcopy(self): + cls = self.__class__ + for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth: + with self.subTest(func=f): + f_copy = copy.deepcopy(f) + self.assertIs(f_copy, f) + + +@py_functools.lru_cache() +def py_cached_func(x, y): + return 3 * x + y + +@c_functools.lru_cache() +def c_cached_func(x, y): + return 3 * x + y + class TestLRUPy(TestLRU, unittest.TestCase): module = py_functools + cached_func = py_cached_func, + + @module.lru_cache() + def cached_meth(self, x, y): + return 3 * x + y + + @staticmethod + @module.lru_cache() + def cached_staticmeth(x, y): + return 3 * x + y + + 
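The lru_cache tests added above register module-level, method and staticmethod caches and assert that copy, deepcopy and pickle all hand back the cached wrapper itself. A minimal sketch of that observable behaviour for a module-level function (illustrative only; the function name and maxsize are assumptions, not from the patch):

import copy
import pickle
from functools import lru_cache

@lru_cache(maxsize=32)
def cached_add(x, y):
    return 3 * x + y

assert cached_add(1, 2) == 5
assert cached_add(1, 2) == 5                  # second call is served from the cache
assert cached_add.cache_info().hits == 1

# Copying or pickling the wrapper returns the very same object,
# which is what test_copy/test_deepcopy/test_pickle above assert.
assert copy.copy(cached_add) is cached_add
assert copy.deepcopy(cached_add) is cached_add
assert pickle.loads(pickle.dumps(cached_add)) is cached_add

Handing back the same wrapper rather than a fresh cache keeps the per-function cache shared across copies, which is what the assertIs checks in the tests rely on.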
+class TestLRUC(TestLRU, unittest.TestCase): + module = c_functools + cached_func = c_cached_func, + + @module.lru_cache() + def cached_meth(self, x, y): + return 3 * x + y + + @staticmethod + @module.lru_cache() + def cached_staticmeth(x, y): + return 3 * x + y class TestSingleDispatch(unittest.TestCase): @@ -1491,6 +1557,24 @@ class TestSingleDispatch(unittest.TestCase): many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable] self.assertEqual(mro(X, abcs=many_abcs), expected) + def test_false_meta(self): + # see issue23572 + class MetaA(type): + def __len__(self): + return 0 + class A(metaclass=MetaA): + pass + class AA(A): + pass + @functools.singledispatch + def fun(a): + return 'base A' + @fun.register(A) + def _(a): + return 'fun A' + aa = AA() + self.assertEqual(fun(aa), 'fun A') + def test_mro_conflicts(self): c = collections @functools.singledispatch diff --git a/Lib/test/test_gdb.py b/Lib/test/test_gdb.py index 03226777..6c4a3489 100644 --- a/Lib/test/test_gdb.py +++ b/Lib/test/test_gdb.py @@ -21,19 +21,34 @@ except ImportError: from test import support from test.support import run_unittest, findfile, python_is_optimized -try: - gdb_version, _ = subprocess.Popen(["gdb", "-nx", "--version"], - stdout=subprocess.PIPE).communicate() -except OSError: - # This is what "no gdb" looks like. There may, however, be other - # errors that manifest this way too. - raise unittest.SkipTest("Couldn't find gdb on the path") -gdb_version_number = re.search(b"^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version) -gdb_major_version = int(gdb_version_number.group(1)) -gdb_minor_version = int(gdb_version_number.group(2)) +def get_gdb_version(): + try: + proc = subprocess.Popen(["gdb", "-nx", "--version"], + stdout=subprocess.PIPE, + universal_newlines=True) + with proc: + version = proc.communicate()[0] + except OSError: + # This is what "no gdb" looks like. There may, however, be other + # errors that manifest this way too. + raise unittest.SkipTest("Couldn't find gdb on the path") + + # Regex to parse: + # 'GNU gdb (GDB; SUSE Linux Enterprise 12) 7.7\n' -> 7.7 + # 'GNU gdb (GDB) Fedora 7.9.1-17.fc22\n' -> 7.9 + # 'GNU gdb 6.1.1 [FreeBSD]\n' -> 6.1 + # 'GNU gdb (GDB) Fedora (7.5.1-37.fc18)\n' -> 7.5 + match = re.search(r"^GNU gdb.*?\b(\d+)\.(\d+)", version) + if match is None: + raise Exception("unable to parse GDB version: %r" % version) + return (version, int(match.group(1)), int(match.group(2))) + +gdb_version, gdb_major_version, gdb_minor_version = get_gdb_version() if gdb_major_version < 7: - raise unittest.SkipTest("gdb versions before 7.0 didn't support python embedding" - " Saw:\n" + gdb_version.decode('ascii', 'replace')) + raise unittest.SkipTest("gdb versions before 7.0 didn't support python " + "embedding. 
Saw %s.%s:\n%s" + % (gdb_major_version, gdb_minor_version, + gdb_version)) if not sysconfig.is_python_build(): raise unittest.SkipTest("test_gdb only works on source builds at the moment.") @@ -59,9 +74,12 @@ def run_gdb(*args, **env_vars): base_cmd = ('gdb', '--batch', '-nx') if (gdb_major_version, gdb_minor_version) >= (7, 4): base_cmd += ('-iex', 'add-auto-load-safe-path ' + checkout_hook_path) - out, err = subprocess.Popen(base_cmd + args, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, - ).communicate() + proc = subprocess.Popen(base_cmd + args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + with proc: + out, err = proc.communicate() return out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace') # Verify that "gdb" was built with the embedded python support enabled: @@ -880,8 +898,8 @@ class PyLocalsTests(DebuggerTests): def test_main(): if support.verbose: - print("GDB version:") - for line in os.fsdecode(gdb_version).splitlines(): + print("GDB version %s.%s:" % (gdb_major_version, gdb_minor_version)) + for line in gdb_version.splitlines(): print(" " * 4 + line) run_unittest(PrettyPrintTests, PyListTests, diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py index 25cc628d..b92d5cee 100644 --- a/Lib/test/test_generators.py +++ b/Lib/test/test_generators.py @@ -1,4 +1,6 @@ +import copy import gc +import pickle import sys import unittest import warnings @@ -111,6 +113,21 @@ class GeneratorTest(unittest.TestCase): self.assertEqual(gen.__qualname__, "GeneratorTest.test_name..") + def test_copy(self): + def f(): + yield 1 + g = f() + with self.assertRaises(TypeError): + copy.copy(g) + + def test_pickle(self): + def f(): + yield 1 + g = f() + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.assertRaises((TypeError, pickle.PicklingError)): + pickle.dumps(g, proto) + class ExceptionTest(unittest.TestCase): # Tests for the issue #23353: check that the currently handled exception diff --git a/Lib/test/test_glob.py b/Lib/test/test_glob.py index 21b01533..dce64f9f 100644 --- a/Lib/test/test_glob.py +++ b/Lib/test/test_glob.py @@ -31,6 +31,7 @@ class GlobTests(unittest.TestCase): self.mktemp('.bb', 'H') self.mktemp('aaa', 'zzzF') self.mktemp('ZZZ') + self.mktemp('EF') self.mktemp('a', 'bcd', 'EF') self.mktemp('a', 'bcd', 'efg', 'ha') if can_symlink(): @@ -200,7 +201,7 @@ class GlobTests(unittest.TestCase): def test_recursive_glob(self): eq = self.assertSequencesEqual_noorder - full = [('ZZZ',), + full = [('EF',), ('ZZZ',), ('a',), ('a', 'D'), ('a', 'bcd'), ('a', 'bcd', 'EF'), @@ -217,8 +218,8 @@ class GlobTests(unittest.TestCase): ('sym3', 'efg', 'ha'), ] eq(self.rglob('**'), self.joins(('',), *full)) - eq(self.rglob('.', '**'), self.joins(('.',''), - *(('.',) + i for i in full))) + eq(self.rglob(os.curdir, '**'), + self.joins((os.curdir, ''), *((os.curdir,) + i for i in full))) dirs = [('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''), ('aaa', ''), ('aab', '')] if can_symlink(): @@ -229,11 +230,11 @@ class GlobTests(unittest.TestCase): ('a', ''), ('a', 'D'), ('a', 'bcd'), ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'), ('a', 'bcd', 'efg', 'ha'))) eq(self.rglob('a**'), self.joins(('a',), ('aaa',), ('aab',))) - expect = [('a', 'bcd', 'EF')] + expect = [('a', 'bcd', 'EF'), ('EF',)] if can_symlink(): expect += [('sym3', 'EF')] eq(self.rglob('**', 'EF'), self.joins(*expect)) - expect = [('a', 'bcd', 'EF'), ('aaa', 'zzzF'), ('aab', 'F')] + expect = [('a', 'bcd', 'EF'), ('aaa', 'zzzF'), ('aab', 'F'), ('EF',)] if can_symlink(): expect += 
[('sym3', 'EF')] eq(self.rglob('**', '*F'), self.joins(*expect)) @@ -242,22 +243,26 @@ class GlobTests(unittest.TestCase): ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'))) eq(self.rglob('a', '**', 'bcd'), self.joins(('a', 'bcd'))) - predir = os.path.abspath(os.curdir) - try: - os.chdir(self.tempdir) + with change_cwd(self.tempdir): join = os.path.join eq(glob.glob('**', recursive=True), [join(*i) for i in full]) eq(glob.glob(join('**', ''), recursive=True), [join(*i) for i in dirs]) + eq(glob.glob(join('**', '*'), recursive=True), + [join(*i) for i in full]) + eq(glob.glob(join(os.curdir, '**'), recursive=True), + [join(os.curdir, '')] + [join(os.curdir, *i) for i in full]) + eq(glob.glob(join(os.curdir, '**', ''), recursive=True), + [join(os.curdir, '')] + [join(os.curdir, *i) for i in dirs]) + eq(glob.glob(join(os.curdir, '**', '*'), recursive=True), + [join(os.curdir, *i) for i in full]) eq(glob.glob(join('**','zz*F'), recursive=True), [join('aaa', 'zzzF')]) eq(glob.glob('**zz*F', recursive=True), []) - expect = [join('a', 'bcd', 'EF')] + expect = [join('a', 'bcd', 'EF'), 'EF'] if can_symlink(): expect += [join('sym3', 'EF')] eq(glob.glob(join('**', 'EF'), recursive=True), expect) - finally: - os.chdir(predir) @skip_unless_symlink diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py index d8408e15..3c51673a 100644 --- a/Lib/test/test_gzip.py +++ b/Lib/test/test_gzip.py @@ -3,6 +3,7 @@ import unittest from test import support +from test.support import bigmemtest, _4G import os import io import struct @@ -116,6 +117,14 @@ class TestGzip(BaseTest): self.assertEqual(f.tell(), nread) self.assertEqual(b''.join(blocks), data1 * 50) + @bigmemtest(size=_4G, memuse=1) + def test_read_large(self, size): + # Read chunk size over UINT_MAX should be supported, despite zlib's + # limitation per low-level call + compressed = gzip.compress(data1, compresslevel=1) + f = gzip.GzipFile(fileobj=io.BytesIO(compressed), mode='rb') + self.assertEqual(f.read(size), data1) + def test_io_on_closed_object(self): # Test that I/O operations on closed GzipFile objects raise a # ValueError, just like the corresponding functions on file objects. 
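The new test_read_large above exercises gzip.GzipFile reads whose requested size exceeds what a single zlib call can handle. A small-scale sketch of the same in-memory round trip, with illustrative data that is not taken from the patch:

import gzip
import io

data = b'example payload\n' * 100
compressed = gzip.compress(data, compresslevel=1)
with gzip.GzipFile(fileobj=io.BytesIO(compressed), mode='rb') as f:
    # Asking for more bytes than the stream holds simply returns everything.
    assert f.read(len(data) * 10) == data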
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py index de8f3e80..11420b2c 100644 --- a/Lib/test/test_htmlparser.py +++ b/Lib/test/test_htmlparser.py @@ -72,9 +72,6 @@ class EventCollectorExtra(EventCollector): class EventCollectorCharrefs(EventCollector): - def get_events(self): - return self.events - def handle_charref(self, data): self.fail('This should never be called with convert_charrefs=True') @@ -633,6 +630,18 @@ text ] self._run_check(html, expected) + def test_convert_charrefs_dropped_text(self): + # #23144: make sure that all the events are triggered when + # convert_charrefs is True, even if we don't call .close() + parser = EventCollector(convert_charrefs=True) + # before the fix, bar & baz was missing + parser.feed("foo link bar & baz") + self.assertEqual( + parser.get_events(), + [('data', 'foo '), ('starttag', 'a', []), ('data', 'link'), + ('endtag', 'a'), ('data', ' bar & baz')] + ) + class AttributesTestCase(TestCaseBase): diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py index 518e38ec..d4c8eabd 100644 --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -425,6 +425,16 @@ print("%%s, %%s, %%s" %% (form.getfirst("spam"), form.getfirst("eggs"), form.getfirst("bacon"))) """ +cgi_file4 = """\ +#!%s +import os + +print("Content-type: text/html") +print() + +print(os.environ["%s"]) +""" + @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0, "This test can't be run reliably as root (issue #13308).") @@ -446,6 +456,7 @@ class CGIHTTPServerTestCase(BaseTestCase): self.file1_path = None self.file2_path = None self.file3_path = None + self.file4_path = None # The shebang line should be pure ASCII: use symlink if possible. # See issue #7668. @@ -484,6 +495,11 @@ class CGIHTTPServerTestCase(BaseTestCase): file3.write(cgi_file1 % self.pythonexe) os.chmod(self.file3_path, 0o777) + self.file4_path = os.path.join(self.cgi_dir, 'file4.py') + with open(self.file4_path, 'w', encoding='utf-8') as file4: + file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING')) + os.chmod(self.file4_path, 0o777) + os.chdir(self.parent_dir) def tearDown(self): @@ -499,6 +515,8 @@ class CGIHTTPServerTestCase(BaseTestCase): os.remove(self.file2_path) if self.file3_path: os.remove(self.file3_path) + if self.file4_path: + os.remove(self.file4_path) os.rmdir(self.cgi_child_dir) os.rmdir(self.cgi_dir) os.rmdir(self.parent_dir) @@ -606,6 +624,19 @@ class CGIHTTPServerTestCase(BaseTestCase): (b'Hello World' + self.linesep, 'text/html', HTTPStatus.OK), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_multiple_question_mark(self): + res = self.request('/cgi-bin/file4.py?a=b?c=d') + self.assertEqual( + (b'a=b?c=d' + self.linesep, 'text/html', HTTPStatus.OK), + (res.read(), res.getheader('Content-type'), res.status)) + + def test_query_with_continuous_slashes(self): + res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//') + self.assertEqual( + (b'k=aa%2F%2Fbb&//q//p//=//a//b//' + self.linesep, + 'text/html', HTTPStatus.OK), + (res.read(), res.getheader('Content-type'), res.status)) + class SocketlessRequestHandler(SimpleHTTPRequestHandler): def __init__(self): diff --git a/Lib/test/test_imaplib.py b/Lib/test/test_imaplib.py index 82486568..07157f52 100644 --- a/Lib/test/test_imaplib.py +++ b/Lib/test/test_imaplib.py @@ -54,7 +54,9 @@ class TestImaplib(unittest.TestCase): '"18-May-2033 05:33:20 +0200"'] @run_with_locale('LC_ALL', 'de_DE', 'fr_FR') - @run_with_tz('STD-1DST') + # DST rules included to 
work around quirk where the Gnu C library may not + # otherwise restore the previous time zone + @run_with_tz('STD-1DST,M3.2.0,M11.1.0') def test_Time2Internaldate(self): expected = '"18-May-2033 05:33:20 +0200"' diff --git a/Lib/test/test_importlib/import_/test_path.py b/Lib/test/test_importlib/import_/test_path.py index 4359dd9e..b32a876f 100644 --- a/Lib/test/test_importlib/import_/test_path.py +++ b/Lib/test/test_importlib/import_/test_path.py @@ -3,7 +3,6 @@ from .. import util importlib = util.import_importlib('importlib') machinery = util.import_importlib('importlib.machinery') -import errno import os import sys import tempfile @@ -160,17 +159,24 @@ class FinderTests: got = self.machinery.PathFinder.find_spec('whatever', [path]) self.assertEqual(got, success_finder.spec) - @unittest.skipIf(sys.platform == 'win32', "cwd can't not exist on Windows") def test_deleted_cwd(self): # Issue #22834 - self.addCleanup(os.chdir, os.getcwd()) + old_dir = os.getcwd() + self.addCleanup(os.chdir, old_dir) + new_dir = tempfile.mkdtemp() try: - with tempfile.TemporaryDirectory() as path: - os.chdir(path) - except OSError as exc: - if exc.errno == errno.EINVAL: - self.skipTest("platform does not allow the deletion of the cwd") + os.chdir(new_dir) + try: + os.rmdir(new_dir) + except OSError: + # EINVAL on Solaris, EBUSY on AIX, ENOTEMPTY on Windows + self.skipTest("platform does not allow " + "the deletion of the cwd") + except: + os.chdir(old_dir) + os.rmdir(new_dir) raise + with util.import_state(path=['']): # Do not want FileNotFoundError raised. self.assertIsNone(self.machinery.PathFinder.find_spec('whatever')) diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py index 955b2adc..69ddb514 100644 --- a/Lib/test/test_inspect.py +++ b/Lib/test/test_inspect.py @@ -393,8 +393,8 @@ class TestRetrievingSourceCode(GetSourceBase): def test_getsource(self): self.assertSourceEqual(git.abuse, 29, 39) - self.assertSourceEqual(mod.StupidGit, 21, 50) - self.assertSourceEqual(mod.lobbest, 70, 71) + self.assertSourceEqual(mod.StupidGit, 21, 51) + self.assertSourceEqual(mod.lobbest, 75, 76) def test_getsourcefile(self): self.assertEqual(normcase(inspect.getsourcefile(mod.spam)), modfile) diff --git a/Lib/test/test_int.py b/Lib/test/test_int.py index cb57f154..3e4b4fc2 100644 --- a/Lib/test/test_int.py +++ b/Lib/test/test_int.py @@ -276,16 +276,40 @@ class IntTestCases(unittest.TestCase): class CustomBytes(bytes): pass class CustomByteArray(bytearray): pass - values = [b'100', - bytearray(b'100'), - CustomStr('100'), - CustomBytes(b'100'), - CustomByteArray(b'100')] - - for x in values: - msg = 'x has type %s' % type(x).__name__ - self.assertEqual(int(x), 100, msg=msg) - self.assertEqual(int(x, 2), 4, msg=msg) + factories = [ + bytes, + bytearray, + lambda b: CustomStr(b.decode()), + CustomBytes, + CustomByteArray, + memoryview, + ] + try: + from array import array + except ImportError: + pass + else: + factories.append(lambda b: array('B', b)) + + for f in factories: + x = f(b'100') + with self.subTest(type(x)): + self.assertEqual(int(x), 100) + if isinstance(x, (str, bytes, bytearray)): + self.assertEqual(int(x, 2), 4) + else: + msg = "can't convert non-string" + with self.assertRaisesRegex(TypeError, msg): + int(x, 2) + with self.assertRaisesRegex(ValueError, 'invalid literal'): + int(f(b'A' * 0x10)) + + def test_int_memoryview(self): + self.assertEqual(int(memoryview(b'123')[1:3]), 23) + self.assertEqual(int(memoryview(b'123\x00')[1:3]), 23) + self.assertEqual(int(memoryview(b'123 ')[1:3]), 23) + 
self.assertEqual(int(memoryview(b'123A')[1:3]), 23) + self.assertEqual(int(memoryview(b'1234')[1:3]), 23) def test_string_float(self): self.assertRaises(ValueError, int, '1.2') diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py index 465d45ae..28f440d9 100644 --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -15,7 +15,7 @@ ################################################################################ # When writing tests for io, it's important to test both the C and Python # implementations. This is usually done by writing a base test that refers to -# the type it is testing as a attribute. Then it provides custom subclasses to +# the type it is testing as an attribute. Then it provides custom subclasses to # test both implementations. This file has lots of examples. ################################################################################ diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py index c217d369..39eb9a13 100644 --- a/Lib/test/test_ipaddress.py +++ b/Lib/test/test_ipaddress.py @@ -1302,7 +1302,7 @@ class IpaddrUnitTest(unittest.TestCase): # test a /24 is summarized properly self.assertEqual(list(summarize(ip1, ip2))[0], ipaddress.ip_network('1.1.1.0/24')) - # test an IPv4 range that isn't on a network byte boundary + # test an IPv4 range that isn't on a network byte boundary ip2 = ipaddress.ip_address('1.1.1.8') self.assertEqual(list(summarize(ip1, ip2)), [ipaddress.ip_network('1.1.1.0/29'), @@ -1315,7 +1315,7 @@ class IpaddrUnitTest(unittest.TestCase): ip1 = ipaddress.ip_address('1::') ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff') - # test a IPv6 is sumamrized properly + # test an IPv6 is summarized properly self.assertEqual(list(summarize(ip1, ip2))[0], ipaddress.ip_network('1::/16')) # test an IPv6 range that isn't on a network byte boundary diff --git a/Lib/test/test_itertools.py b/Lib/test/test_itertools.py index fcd88691..5b3ba7e2 100644 --- a/Lib/test/test_itertools.py +++ b/Lib/test/test_itertools.py @@ -985,6 +985,16 @@ class TestBasicOps(unittest.TestCase): for proto in range(pickle.HIGHEST_PROTOCOL + 1): self.pickletest(proto, product(*args)) + def test_product_issue_25021(self): + # test that indices are properly clamped to the length of the tuples + p = product((1, 2),(3,)) + p.__setstate__((0, 0x1000)) # will access tuple element 1 if not clamped + self.assertEqual(next(p), (2, 3)) + # test that empty tuple in the list will result in an immediate StopIteration + p = product((1, 2), (), (3,)) + p.__setstate__((0, 0, 0x1000)) # will access tuple element 1 if not clamped + self.assertRaises(StopIteration, next, p) + def test_repeat(self): self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py index b6e64ce9..95575bf5 100644 --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -58,14 +58,10 @@ try: except ImportError: threading = None try: - import win32evtlog + import win32evtlog, win32evtlogutil, pywintypes except ImportError: - win32evtlog = None -try: - import win32evtlogutil -except ImportError: - win32evtlogutil = None - win32evtlog = None + win32evtlog = win32evtlogutil = pywintypes = None + try: import zlib except ImportError: @@ -934,7 +930,7 @@ class SMTPHandlerTest(BaseTest): timeout=self.TIMEOUT) self.assertEqual(h.toaddrs, ['you']) self.messages = [] - r = logging.makeLogRecord({'msg': 'Hello'}) + r = logging.makeLogRecord({'msg': 'Hello \u2713'}) self.handled = 
threading.Event() h.handle(r) self.handled.wait(self.TIMEOUT) # 14314: don't wait forever @@ -945,7 +941,7 @@ class SMTPHandlerTest(BaseTest): self.assertEqual(mailfrom, 'me') self.assertEqual(rcpttos, ['you']) self.assertIn('\nSubject: Log\n', data) - self.assertTrue(data.endswith('\n\nHello')) + self.assertTrue(data.endswith('\n\nHello \u2713')) h.close() def process_message(self, *args): @@ -4128,13 +4124,20 @@ for when, exp in (('S', 1), setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover) -@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.') +@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.') class NTEventLogHandlerTest(BaseTest): def test_basic(self): logtype = 'Application' elh = win32evtlog.OpenEventLog(None, logtype) num_recs = win32evtlog.GetNumberOfEventLogRecords(elh) - h = logging.handlers.NTEventLogHandler('test_logging') + + try: + h = logging.handlers.NTEventLogHandler('test_logging') + except pywintypes.error as e: + if e.winerror == 5: # access denied + raise unittest.SkipTest('Insufficient privileges to run test') + raise + r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) h.close() diff --git a/Lib/test/test_long.py b/Lib/test/test_long.py index e0257161..62e69a9e 100644 --- a/Lib/test/test_long.py +++ b/Lib/test/test_long.py @@ -7,15 +7,6 @@ import random import math import array -# Used for lazy formatting of failure messages -class Frm(object): - def __init__(self, format, *args): - self.format = format - self.args = args - - def __str__(self): - return self.format % self.args - # SHIFT should match the value in longintrepr.h for best testing. SHIFT = sys.int_info.bits_per_digit BASE = 2 ** SHIFT @@ -163,17 +154,18 @@ class LongTest(unittest.TestCase): def check_division(self, x, y): eq = self.assertEqual - q, r = divmod(x, y) - q2, r2 = x//y, x%y - pab, pba = x*y, y*x - eq(pab, pba, Frm("multiplication does not commute for %r and %r", x, y)) - eq(q, q2, Frm("divmod returns different quotient than / for %r and %r", x, y)) - eq(r, r2, Frm("divmod returns different mod than %% for %r and %r", x, y)) - eq(x, q*y + r, Frm("x != q*y + r after divmod on x=%r, y=%r", x, y)) - if y > 0: - self.assertTrue(0 <= r < y, Frm("bad mod from divmod on %r and %r", x, y)) - else: - self.assertTrue(y < r <= 0, Frm("bad mod from divmod on %r and %r", x, y)) + with self.subTest(x=x, y=y): + q, r = divmod(x, y) + q2, r2 = x//y, x%y + pab, pba = x*y, y*x + eq(pab, pba, "multiplication does not commute") + eq(q, q2, "divmod returns different quotient than /") + eq(r, r2, "divmod returns different mod than %") + eq(x, q*y + r, "x != q*y + r after divmod") + if y > 0: + self.assertTrue(0 <= r < y, "bad mod from divmod") + else: + self.assertTrue(y < r <= 0, "bad mod from divmod") def test_division(self): digits = list(range(1, MAXDIGITS+1)) + list(range(KARATSUBA_CUTOFF, @@ -228,72 +220,63 @@ class LongTest(unittest.TestCase): for bbits in bits: if bbits < abits: continue - b = (1 << bbits) - 1 - x = a * b - y = ((1 << (abits + bbits)) - - (1 << abits) - - (1 << bbits) + - 1) - self.assertEqual(x, y, - Frm("bad result for a*b: a=%r, b=%r, x=%r, y=%r", a, b, x, y)) + with self.subTest(abits=abits, bbits=bbits): + b = (1 << bbits) - 1 + x = a * b + y = ((1 << (abits + bbits)) - + (1 << abits) - + (1 << bbits) + + 1) + self.assertEqual(x, y) def check_bitop_identities_1(self, x): eq = self.assertEqual - eq(x & 0, 0, Frm("x & 0 != 0 for x=%r", x)) - eq(x 
| 0, x, Frm("x | 0 != x for x=%r", x)) - eq(x ^ 0, x, Frm("x ^ 0 != x for x=%r", x)) - eq(x & -1, x, Frm("x & -1 != x for x=%r", x)) - eq(x | -1, -1, Frm("x | -1 != -1 for x=%r", x)) - eq(x ^ -1, ~x, Frm("x ^ -1 != ~x for x=%r", x)) - eq(x, ~~x, Frm("x != ~~x for x=%r", x)) - eq(x & x, x, Frm("x & x != x for x=%r", x)) - eq(x | x, x, Frm("x | x != x for x=%r", x)) - eq(x ^ x, 0, Frm("x ^ x != 0 for x=%r", x)) - eq(x & ~x, 0, Frm("x & ~x != 0 for x=%r", x)) - eq(x | ~x, -1, Frm("x | ~x != -1 for x=%r", x)) - eq(x ^ ~x, -1, Frm("x ^ ~x != -1 for x=%r", x)) - eq(-x, 1 + ~x, Frm("not -x == 1 + ~x for x=%r", x)) - eq(-x, ~(x-1), Frm("not -x == ~(x-1) forx =%r", x)) + with self.subTest(x=x): + eq(x & 0, 0) + eq(x | 0, x) + eq(x ^ 0, x) + eq(x & -1, x) + eq(x | -1, -1) + eq(x ^ -1, ~x) + eq(x, ~~x) + eq(x & x, x) + eq(x | x, x) + eq(x ^ x, 0) + eq(x & ~x, 0) + eq(x | ~x, -1) + eq(x ^ ~x, -1) + eq(-x, 1 + ~x) + eq(-x, ~(x-1)) for n in range(2*SHIFT): p2 = 2 ** n - eq(x << n >> n, x, - Frm("x << n >> n != x for x=%r, n=%r", (x, n))) - eq(x // p2, x >> n, - Frm("x // p2 != x >> n for x=%r n=%r p2=%r", (x, n, p2))) - eq(x * p2, x << n, - Frm("x * p2 != x << n for x=%r n=%r p2=%r", (x, n, p2))) - eq(x & -p2, x >> n << n, - Frm("not x & -p2 == x >> n << n for x=%r n=%r p2=%r", (x, n, p2))) - eq(x & -p2, x & ~(p2 - 1), - Frm("not x & -p2 == x & ~(p2 - 1) for x=%r n=%r p2=%r", (x, n, p2))) + with self.subTest(x=x, n=n, p2=p2): + eq(x << n >> n, x) + eq(x // p2, x >> n) + eq(x * p2, x << n) + eq(x & -p2, x >> n << n) + eq(x & -p2, x & ~(p2 - 1)) def check_bitop_identities_2(self, x, y): eq = self.assertEqual - eq(x & y, y & x, Frm("x & y != y & x for x=%r, y=%r", (x, y))) - eq(x | y, y | x, Frm("x | y != y | x for x=%r, y=%r", (x, y))) - eq(x ^ y, y ^ x, Frm("x ^ y != y ^ x for x=%r, y=%r", (x, y))) - eq(x ^ y ^ x, y, Frm("x ^ y ^ x != y for x=%r, y=%r", (x, y))) - eq(x & y, ~(~x | ~y), Frm("x & y != ~(~x | ~y) for x=%r, y=%r", (x, y))) - eq(x | y, ~(~x & ~y), Frm("x | y != ~(~x & ~y) for x=%r, y=%r", (x, y))) - eq(x ^ y, (x | y) & ~(x & y), - Frm("x ^ y != (x | y) & ~(x & y) for x=%r, y=%r", (x, y))) - eq(x ^ y, (x & ~y) | (~x & y), - Frm("x ^ y == (x & ~y) | (~x & y) for x=%r, y=%r", (x, y))) - eq(x ^ y, (x | y) & (~x | ~y), - Frm("x ^ y == (x | y) & (~x | ~y) for x=%r, y=%r", (x, y))) + with self.subTest(x=x, y=y): + eq(x & y, y & x) + eq(x | y, y | x) + eq(x ^ y, y ^ x) + eq(x ^ y ^ x, y) + eq(x & y, ~(~x | ~y)) + eq(x | y, ~(~x & ~y)) + eq(x ^ y, (x | y) & ~(x & y)) + eq(x ^ y, (x & ~y) | (~x & y)) + eq(x ^ y, (x | y) & (~x | ~y)) def check_bitop_identities_3(self, x, y, z): eq = self.assertEqual - eq((x & y) & z, x & (y & z), - Frm("(x & y) & z != x & (y & z) for x=%r, y=%r, z=%r", (x, y, z))) - eq((x | y) | z, x | (y | z), - Frm("(x | y) | z != x | (y | z) for x=%r, y=%r, z=%r", (x, y, z))) - eq((x ^ y) ^ z, x ^ (y ^ z), - Frm("(x ^ y) ^ z != x ^ (y ^ z) for x=%r, y=%r, z=%r", (x, y, z))) - eq(x & (y | z), (x & y) | (x & z), - Frm("x & (y | z) != (x & y) | (x & z) for x=%r, y=%r, z=%r", (x, y, z))) - eq(x | (y & z), (x | y) & (x | z), - Frm("x | (y & z) != (x | y) & (x | z) for x=%r, y=%r, z=%r", (x, y, z))) + with self.subTest(x=x, y=y, z=z): + eq((x & y) & z, x & (y & z)) + eq((x | y) | z, x | (y | z)) + eq((x ^ y) ^ z, x ^ (y ^ z)) + eq(x & (y | z), (x & y) | (x & z)) + eq(x | (y & z), (x | y) & (x | z)) def test_bitop_identities(self): for x in special: @@ -324,11 +307,11 @@ class LongTest(unittest.TestCase): def check_format_1(self, x): for base, mapper in (2, bin), (8, oct), (10, str), (10, 
repr), (16, hex): got = mapper(x) - expected = self.slow_format(x, base) - msg = Frm("%s returned %r but expected %r for %r", - mapper.__name__, got, expected, x) - self.assertEqual(got, expected, msg) - self.assertEqual(int(got, 0), x, Frm('int("%s", 0) != %r', got, x)) + with self.subTest(x=x, mapper=mapper.__name__): + expected = self.slow_format(x, base) + self.assertEqual(got, expected) + with self.subTest(got=got): + self.assertEqual(int(got, 0), x) def test_format(self): for x in special: @@ -625,14 +608,15 @@ class LongTest(unittest.TestCase): for y in cases: Ry = Rat(y) Rcmp = (Rx > Ry) - (Rx < Ry) - xycmp = (x > y) - (x < y) - eq(Rcmp, xycmp, Frm("%r %r %d %d", x, y, Rcmp, xycmp)) - eq(x == y, Rcmp == 0, Frm("%r == %r %d", x, y, Rcmp)) - eq(x != y, Rcmp != 0, Frm("%r != %r %d", x, y, Rcmp)) - eq(x < y, Rcmp < 0, Frm("%r < %r %d", x, y, Rcmp)) - eq(x <= y, Rcmp <= 0, Frm("%r <= %r %d", x, y, Rcmp)) - eq(x > y, Rcmp > 0, Frm("%r > %r %d", x, y, Rcmp)) - eq(x >= y, Rcmp >= 0, Frm("%r >= %r %d", x, y, Rcmp)) + with self.subTest(x=x, y=y, Rcmp=Rcmp): + xycmp = (x > y) - (x < y) + eq(Rcmp, xycmp) + eq(x == y, Rcmp == 0) + eq(x != y, Rcmp != 0) + eq(x < y, Rcmp < 0) + eq(x <= y, Rcmp <= 0) + eq(x > y, Rcmp > 0) + eq(x >= y, Rcmp >= 0) def test__format__(self): self.assertEqual(format(123456789, 'd'), '123456789') diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py index da01a84f..d2bab381 100644 --- a/Lib/test/test_memoryview.py +++ b/Lib/test/test_memoryview.py @@ -512,6 +512,13 @@ class OtherTest(unittest.TestCase): m[2:] = memoryview(p6).cast(format)[2:] self.assertEqual(d.value, 0.6) + def test_memoryview_hex(self): + # Issue #9951: memoryview.hex() segfaults with non-contiguous buffers. + x = b'0' * 200000 + m1 = memoryview(x) + m2 = m1[::-1] + self.assertEqual(m2.hex(), '30' * 200000) + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_mmap.py b/Lib/test/test_mmap.py index 3de84e8b..0f25742c 100644 --- a/Lib/test/test_mmap.py +++ b/Lib/test/test_mmap.py @@ -731,7 +731,10 @@ class LargeMmapTests(unittest.TestCase): f.write(tail) f.flush() except (OSError, OverflowError): - f.close() + try: + f.close() + except (OSError, OverflowError): + pass raise unittest.SkipTest("filesystem does not have largefile support") return f diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py index d91f58cd..618c18ab 100644 --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -85,7 +85,7 @@ HAVE_WHEEL_GROUP = sys.platform.startswith('freebsd') and os.getgid() == 0 # Tests creating TESTFN class FileTests(unittest.TestCase): def setUp(self): - if os.path.exists(support.TESTFN): + if os.path.lexists(support.TESTFN): os.unlink(support.TESTFN) tearDown = setUp @@ -209,6 +209,19 @@ class FileTests(unittest.TestCase): with open(TESTFN2, 'r') as f: self.assertEqual(f.read(), "1") + def test_open_keywords(self): + f = os.open(path=__file__, flags=os.O_RDONLY, mode=0o777, + dir_fd=None) + os.close(f) + + def test_symlink_keywords(self): + symlink = support.get_attribute(os, "symlink") + try: + symlink(src='target', dst=support.TESTFN, + target_is_directory=False, dir_fd=None) + except (NotImplementedError, OSError): + pass # No OS support or unprivileged user + # Test attributes on return values from os.*stat* family. 
class StatAttributeTests(unittest.TestCase): @@ -1027,6 +1040,9 @@ class MakedirTests(unittest.TestCase): os.makedirs(path, mode=mode, exist_ok=True) os.umask(old_mask) + # Issue #25583: A drive root could raise PermissionError on Windows + os.makedirs(os.path.abspath('/'), exist_ok=True) + def test_exist_ok_s_isgid_directory(self): path = os.path.join(support.TESTFN, 'dir1') S_ISGID = stat.S_ISGID @@ -1213,13 +1229,15 @@ class URandomTests(unittest.TestCase): self.assertNotEqual(data1, data2) -HAVE_GETENTROPY = (sysconfig.get_config_var('HAVE_GETENTROPY') == 1) -HAVE_GETRANDOM = (sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1) +# os.urandom() doesn't use a file descriptor when it is implemented with the +# getentropy() function, the getrandom() function or the getrandom() syscall +OS_URANDOM_DONT_USE_FD = ( + sysconfig.get_config_var('HAVE_GETENTROPY') == 1 + or sysconfig.get_config_var('HAVE_GETRANDOM') == 1 + or sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1) -@unittest.skipIf(HAVE_GETENTROPY, - "getentropy() does not use a file descriptor") -@unittest.skipIf(HAVE_GETRANDOM, - "getrandom() does not use a file descriptor") +@unittest.skipIf(OS_URANDOM_DONT_USE_FD , + "os.random() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") def test_urandom_failure(self): @@ -1434,7 +1452,7 @@ class TestInvalidFD(unittest.TestCase): except OSError as e: self.assertEqual(e.errno, errno.EBADF) else: - self.fail("%r didn't raise a OSError with a bad file descriptor" + self.fail("%r didn't raise an OSError with a bad file descriptor" % f) @unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()') @@ -1773,7 +1791,7 @@ class Win32KillTests(unittest.TestCase): os.kill(proc.pid, signal.SIGINT) self.fail("subprocess did not stop on {}".format(name)) - @unittest.skip("subprocesses aren't inheriting CTRL+C property") + @unittest.skip("subprocesses aren't inheriting Ctrl+C property") def test_CTRL_C_EVENT(self): from ctypes import wintypes import ctypes @@ -1786,7 +1804,7 @@ class Win32KillTests(unittest.TestCase): SetConsoleCtrlHandler.restype = wintypes.BOOL # Calling this with NULL and FALSE causes the calling process to - # handle CTRL+C, rather than ignore it. This property is inherited + # handle Ctrl+C, rather than ignore it. This property is inherited # by subprocesses. SetConsoleCtrlHandler(NULL, 0) @@ -2065,6 +2083,12 @@ class PidTests(unittest.TestCase): # We are the parent of our subprocess self.assertEqual(int(stdout), os.getpid()) + def test_waitpid(self): + args = [sys.executable, '-c', 'pass'] + pid = os.spawnv(os.P_NOWAIT, args[0], args) + status = os.waitpid(pid, 0) + self.assertEqual(status, (pid, 0)) + # The introduction of this TestCase caused at least two different errors on # *nix buildbots. Temporarily skip this to let the buildbots move along. 
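PidTests.test_waitpid above pairs os.spawnv with os.waitpid; a minimal sketch of the same pattern, assuming a platform that provides os.spawnv (a clean child exit reports status 0):

import os
import sys

# Launch a child that exits immediately, then reap it.
args = [sys.executable, '-c', 'pass']
pid = os.spawnv(os.P_NOWAIT, args[0], args)
assert os.waitpid(pid, 0) == (pid, 0)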
@@ -2313,6 +2337,14 @@ class TestSendfile(unittest.TestCase): os.sendfile(self.sockno, self.fileno, -1, 4096) self.assertEqual(cm.exception.errno, errno.EINVAL) + def test_keywords(self): + # Keyword arguments should be supported + os.sendfile(out=self.sockno, offset=0, count=4096, + **{'in': self.fileno}) + if self.SUPPORT_HEADERS_TRAILERS: + os.sendfile(self.sockno, self.fileno, offset=0, count=4096, + headers=(), trailers=(), flags=0) + # --- headers / trailers tests @requires_headers_trailers diff --git a/Lib/test/test_pdb.py b/Lib/test/test_pdb.py index ec8346c5..35044ad2 100644 --- a/Lib/test/test_pdb.py +++ b/Lib/test/test_pdb.py @@ -1043,6 +1043,18 @@ class PdbTestCase(unittest.TestCase): self.assertNotIn('Error', stdout.decode(), "Got an error running test script under PDB") + def test_issue16180(self): + # A syntax error in the debuggee. + script = "def f: pass\n" + commands = '' + expected = "SyntaxError:" + stdout, stderr = self.run_pdb(script, commands) + self.assertIn(expected, stdout, + '\n\nExpected:\n{}\nGot:\n{}\n' + 'Fail to handle a syntax error in the debuggee.' + .format(expected, stdout)) + + def tearDown(self): support.unlink(support.TESTFN) diff --git a/Lib/test/test_pep277.py b/Lib/test/test_pep277.py index 6c833a8b..98c716b4 100644 --- a/Lib/test/test_pep277.py +++ b/Lib/test/test_pep277.py @@ -158,17 +158,11 @@ class UnicodeFileTests(unittest.TestCase): def test_directory(self): dirname = os.path.join(support.TESTFN, 'Gr\xfc\xdf-\u66e8\u66e9\u66eb') filename = '\xdf-\u66e8\u66e9\u66eb' - oldwd = os.getcwd() - os.mkdir(dirname) - os.chdir(dirname) - try: + with support.temp_cwd(dirname): with open(filename, 'wb') as f: f.write((filename + '\n').encode("utf-8")) os.access(filename,os.R_OK) os.remove(filename) - finally: - os.chdir(oldwd) - os.rmdir(dirname) class UnicodeNFCFileTests(UnicodeFileTests): diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py index ba92de94..0429941d 100644 --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -10,6 +10,7 @@ import sys import unittest from test import support +from test.pickletester import AbstractUnpickleTests from test.pickletester import AbstractPickleTests from test.pickletester import AbstractPickleModuleTests from test.pickletester import AbstractPersistentPicklerTests @@ -28,6 +29,16 @@ class PickleTests(AbstractPickleModuleTests): pass +class PyUnpicklerTests(AbstractUnpickleTests): + + unpickler = pickle._Unpickler + + def loads(self, buf, **kwds): + f = io.BytesIO(buf) + u = self.unpickler(f, **kwds) + return u.load() + + class PyPicklerTests(AbstractPickleTests): pickler = pickle._Pickler @@ -46,7 +57,8 @@ class PyPicklerTests(AbstractPickleTests): return u.load() -class InMemoryPickleTests(AbstractPickleTests, BigmemPickleTests): +class InMemoryPickleTests(AbstractPickleTests, AbstractUnpickleTests, + BigmemPickleTests): pickler = pickle._Pickler unpickler = pickle._Unpickler @@ -105,6 +117,9 @@ class PyChainDispatchTableTests(AbstractDispatchTableTests): if has_c_implementation: + class CUnpicklerTests(PyUnpicklerTests): + unpickler = _pickle.Unpickler + class CPicklerTests(PyPicklerTests): pickler = _pickle.Pickler unpickler = _pickle.Unpickler @@ -365,8 +380,9 @@ class CompatPickleTests(unittest.TestCase): self.assertEqual(mapping('exceptions', name), ('builtins', name)) - import multiprocessing.context - for name, exc in get_exceptions(multiprocessing.context): + def test_multiprocessing_exceptions(self): + module = support.import_module('multiprocessing.context') + for name, exc in 
get_exceptions(module): with self.subTest(name): self.assertEqual(reverse_mapping('multiprocessing.context', name), ('multiprocessing', name)) @@ -375,11 +391,11 @@ class CompatPickleTests(unittest.TestCase): def test_main(): - tests = [PickleTests, PyPicklerTests, PyPersPicklerTests, + tests = [PickleTests, PyUnpicklerTests, PyPicklerTests, PyPersPicklerTests, PyDispatchTableTests, PyChainDispatchTableTests, CompatPickleTests] if has_c_implementation: - tests.extend([CPicklerTests, CPersPicklerTests, + tests.extend([CUnpicklerTests, CPicklerTests, CPersPicklerTests, CDumpPickle_LoadPickle, DumpPickle_CLoadPickle, PyPicklerUnpicklerObjectTests, CPicklerUnpicklerObjectTests, diff --git a/Lib/test/test_popen.py b/Lib/test/test_popen.py index 8958db03..da01a878 100644 --- a/Lib/test/test_popen.py +++ b/Lib/test/test_popen.py @@ -57,5 +57,9 @@ class PopenTest(unittest.TestCase): with os.popen("echo hello") as f: self.assertEqual(list(f), ["hello\n"]) + def test_keywords(self): + with os.popen(cmd="exit 0", mode="w", buffering=-1): + pass + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_posix.py b/Lib/test/test_posix.py index 77e5b0c4..2a59c381 100644 --- a/Lib/test/test_posix.py +++ b/Lib/test/test_posix.py @@ -442,6 +442,14 @@ class PosixTester(unittest.TestCase): else: self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode)) + # Keyword arguments are also supported + support.unlink(support.TESTFN) + try: + posix.mknod(path=support.TESTFN, mode=mode, device=0, + dir_fd=None) + except OSError as e: + self.assertIn(e.errno, (errno.EPERM, errno.EINVAL)) + @unittest.skipUnless(hasattr(posix, 'stat'), 'test needs posix.stat()') @unittest.skipUnless(hasattr(posix, 'makedev'), 'test needs posix.makedev()') def test_makedev(self): diff --git a/Lib/test/test_posixpath.py b/Lib/test/test_posixpath.py index ece3555e..9d204715 100644 --- a/Lib/test/test_posixpath.py +++ b/Lib/test/test_posixpath.py @@ -316,7 +316,6 @@ class PosixPathTest(unittest.TestCase): # Bug #930024, return the path unchanged if we get into an infinite # symlink loop. try: - old_path = abspath('.') os.symlink(ABSTFN, ABSTFN) self.assertEqual(realpath(ABSTFN), ABSTFN) @@ -342,10 +341,9 @@ class PosixPathTest(unittest.TestCase): self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c") # Test using relative path as well. - os.chdir(dirname(ABSTFN)) - self.assertEqual(realpath(basename(ABSTFN)), ABSTFN) + with support.change_cwd(dirname(ABSTFN)): + self.assertEqual(realpath(basename(ABSTFN)), ABSTFN) finally: - os.chdir(old_path) support.unlink(ABSTFN) support.unlink(ABSTFN+"1") support.unlink(ABSTFN+"2") @@ -373,7 +371,6 @@ class PosixPathTest(unittest.TestCase): @skip_if_ABSTFN_contains_backslash def test_realpath_deep_recursion(self): depth = 10 - old_path = abspath('.') try: os.mkdir(ABSTFN) for i in range(depth): @@ -382,10 +379,9 @@ class PosixPathTest(unittest.TestCase): self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN) # Test using relative path as well. - os.chdir(ABSTFN) - self.assertEqual(realpath('%d' % depth), ABSTFN) + with support.change_cwd(ABSTFN): + self.assertEqual(realpath('%d' % depth), ABSTFN) finally: - os.chdir(old_path) for i in range(depth + 1): support.unlink(ABSTFN + '/%d' % i) safe_rmdir(ABSTFN) @@ -399,15 +395,13 @@ class PosixPathTest(unittest.TestCase): # /usr/doc with 'doc' being a symlink to /usr/share/doc. We call # realpath("a"). This should return /usr/share/doc/a/. 
try: - old_path = abspath('.') os.mkdir(ABSTFN) os.mkdir(ABSTFN + "/y") os.symlink(ABSTFN + "/y", ABSTFN + "/k") - os.chdir(ABSTFN + "/k") - self.assertEqual(realpath("a"), ABSTFN + "/y/a") + with support.change_cwd(ABSTFN + "/k"): + self.assertEqual(realpath("a"), ABSTFN + "/y/a") finally: - os.chdir(old_path) support.unlink(ABSTFN + "/k") safe_rmdir(ABSTFN + "/y") safe_rmdir(ABSTFN) @@ -424,7 +418,6 @@ class PosixPathTest(unittest.TestCase): # and a symbolic link 'link-y' pointing to 'y' in directory 'a', # then realpath("link-y/..") should return 'k', not 'a'. try: - old_path = abspath('.') os.mkdir(ABSTFN) os.mkdir(ABSTFN + "/k") os.mkdir(ABSTFN + "/k/y") @@ -433,11 +426,10 @@ class PosixPathTest(unittest.TestCase): # Absolute path. self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k") # Relative path. - os.chdir(dirname(ABSTFN)) - self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."), - ABSTFN + "/k") + with support.change_cwd(dirname(ABSTFN)): + self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."), + ABSTFN + "/k") finally: - os.chdir(old_path) support.unlink(ABSTFN + "/link-y") safe_rmdir(ABSTFN + "/k/y") safe_rmdir(ABSTFN + "/k") @@ -451,17 +443,14 @@ class PosixPathTest(unittest.TestCase): # must be resolved too. try: - old_path = abspath('.') os.mkdir(ABSTFN) os.mkdir(ABSTFN + "/k") os.symlink(ABSTFN, ABSTFN + "link") - os.chdir(dirname(ABSTFN)) - - base = basename(ABSTFN) - self.assertEqual(realpath(base + "link"), ABSTFN) - self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k") + with support.change_cwd(dirname(ABSTFN)): + base = basename(ABSTFN) + self.assertEqual(realpath(base + "link"), ABSTFN) + self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k") finally: - os.chdir(old_path) support.unlink(ABSTFN + "link") safe_rmdir(ABSTFN + "/k") safe_rmdir(ABSTFN) diff --git a/Lib/test/test_pprint.py b/Lib/test/test_pprint.py index 357c5cf0..7ebc2983 100644 --- a/Lib/test/test_pprint.py +++ b/Lib/test/test_pprint.py @@ -961,7 +961,7 @@ deque([('brown', 2), 'quick': 1, 'the': 0}""") - def test_user_dict(self): + def test_user_list(self): d = collections.UserList() self.assertEqual(pprint.pformat(d, width=1), "[]") words = 'the quick brown fox jumped over a lazy dog'.split() diff --git a/Lib/test/test_py_compile.py b/Lib/test/test_py_compile.py index 03cca5dc..4a6caa57 100644 --- a/Lib/test/test_py_compile.py +++ b/Lib/test/test_py_compile.py @@ -63,11 +63,9 @@ class PyCompileTests(unittest.TestCase): self.assertTrue(os.path.exists(self.cache_path)) def test_cwd(self): - cwd = os.getcwd() - os.chdir(self.directory) - py_compile.compile(os.path.basename(self.source_path), - os.path.basename(self.pyc_path)) - os.chdir(cwd) + with support.change_cwd(self.directory): + py_compile.compile(os.path.basename(self.source_path), + os.path.basename(self.pyc_path)) self.assertTrue(os.path.exists(self.pyc_path)) self.assertFalse(os.path.exists(self.cache_path)) diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py index ec5c31ba..8ad5706a 100644 --- a/Lib/test/test_pydoc.py +++ b/Lib/test/test_pydoc.py @@ -396,6 +396,13 @@ class PydocBaseTest(unittest.TestCase): finally: pkgutil.walk_packages = walk_packages + def call_url_handler(self, url, expected_title): + text = pydoc._url_handler(url, "text/html") + result = get_html_title(text) + # Check the title to ensure an unexpected error page was not returned + self.assertEqual(result, expected_title, text) + return text + class PydocDocTest(unittest.TestCase): @@ -704,6 +711,29 @@ class 
PydocImportTest(PydocBaseTest): finally: os.chmod(pkgdir, current_mode) + def test_url_search_package_error(self): + # URL handler search should cope with packages that raise exceptions + pkgdir = os.path.join(TESTFN, "test_error_package") + os.mkdir(pkgdir) + init = os.path.join(pkgdir, "__init__.py") + with open(init, "wt", encoding="ascii") as f: + f.write("""raise ValueError("ouch")\n""") + with self.restrict_walk_packages(path=[TESTFN]): + # Package has to be importable for the error to have any effect + saved_paths = tuple(sys.path) + sys.path.insert(0, TESTFN) + try: + with self.assertRaisesRegex(ValueError, "ouch"): + import test_error_package # Sanity check + + text = self.call_url_handler("search?key=test_error_package", + "Pydoc: Search Results") + found = ('' + 'test_error_package') + self.assertIn(found, text) + finally: + sys.path[:] = saved_paths + @unittest.skip('causes undesireable side-effects (#20128)') def test_modules(self): # See Helper.listmodules(). @@ -880,16 +910,12 @@ class PydocUrlHandlerTest(PydocBaseTest): with self.restrict_walk_packages(): for url, title in requests: - text = pydoc._url_handler(url, "text/html") - result = get_html_title(text) - self.assertEqual(result, title, text) + self.call_url_handler(url, title) path = string.__file__ title = "Pydoc: getfile " + path url = "getfile?key=" + path - text = pydoc._url_handler(url, "text/html") - result = get_html_title(text) - self.assertEqual(result, title) + self.call_url_handler(url, title) class TestHelper(unittest.TestCase): diff --git a/Lib/test/test_pyexpat.py b/Lib/test/test_pyexpat.py index 08e95c6e..550aebf0 100644 --- a/Lib/test/test_pyexpat.py +++ b/Lib/test/test_pyexpat.py @@ -3,6 +3,7 @@ from io import BytesIO import os +import sys import sysconfig import unittest import traceback @@ -16,22 +17,47 @@ from test.support import sortdict class SetAttributeTest(unittest.TestCase): def setUp(self): self.parser = expat.ParserCreate(namespace_separator='!') - self.set_get_pairs = [ - [0, 0], - [1, 1], - [2, 1], - [0, 0], - ] + + def test_buffer_text(self): + self.assertIs(self.parser.buffer_text, False) + for x in 0, 1, 2, 0: + self.parser.buffer_text = x + self.assertIs(self.parser.buffer_text, bool(x)) + + def test_namespace_prefixes(self): + self.assertIs(self.parser.namespace_prefixes, False) + for x in 0, 1, 2, 0: + self.parser.namespace_prefixes = x + self.assertIs(self.parser.namespace_prefixes, bool(x)) def test_ordered_attributes(self): - for x, y in self.set_get_pairs: + self.assertIs(self.parser.ordered_attributes, False) + for x in 0, 1, 2, 0: self.parser.ordered_attributes = x - self.assertEqual(self.parser.ordered_attributes, y) + self.assertIs(self.parser.ordered_attributes, bool(x)) + + def test_specified_attributes(self): + self.assertIs(self.parser.specified_attributes, False) + for x in 0, 1, 2, 0: + self.parser.specified_attributes = x + self.assertIs(self.parser.specified_attributes, bool(x)) def test_specified_attributes(self): - for x, y in self.set_get_pairs: + self.assertIs(self.parser.specified_attributes, False) + for x in 0, 1, 2, 0: self.parser.specified_attributes = x - self.assertEqual(self.parser.specified_attributes, y) + self.assertIs(self.parser.specified_attributes, bool(x)) + + def test_invalid_attributes(self): + with self.assertRaises(AttributeError): + self.parser.returns_unicode = 1 + with self.assertRaises(AttributeError): + self.parser.returns_unicode + + # Issue #25019 + self.assertRaises(TypeError, setattr, self.parser, range(0xF), 0) + 
self.assertRaises(TypeError, self.parser.__setattr__, range(0xF), 0) + self.assertRaises(TypeError, getattr, self.parser, range(0xF)) data = b'''\ @@ -514,11 +540,14 @@ class ChardataBufferTest(unittest.TestCase): def test_wrong_size(self): parser = expat.ParserCreate() parser.buffer_text = 1 - def f(size): - parser.buffer_size = size - - self.assertRaises(ValueError, f, -1) - self.assertRaises(ValueError, f, 0) + with self.assertRaises(ValueError): + parser.buffer_size = -1 + with self.assertRaises(ValueError): + parser.buffer_size = 0 + with self.assertRaises((ValueError, OverflowError)): + parser.buffer_size = sys.maxsize + 1 + with self.assertRaises(TypeError): + parser.buffer_size = 512.0 def test_unchanged_size(self): xml1 = b"" + b'a' * 512 diff --git a/Lib/test/test_rlcompleter.py b/Lib/test/test_rlcompleter.py index d37b620b..2022ed6e 100644 --- a/Lib/test/test_rlcompleter.py +++ b/Lib/test/test_rlcompleter.py @@ -64,6 +64,19 @@ class TestRlcompleter(unittest.TestCase): ['egg.{}('.format(x) for x in dir(str) if x.startswith('s')]) + def test_excessive_getattr(self): + # Ensure getattr() is invoked no more than once per attribute + class Foo: + calls = 0 + @property + def bar(self): + self.calls += 1 + return None + f = Foo() + completer = rlcompleter.Completer(dict(f=f)) + self.assertEqual(completer.complete('f.b', 0), 'f.bar') + self.assertEqual(f.calls, 1) + def test_complete(self): completer = rlcompleter.Completer() self.assertEqual(completer.complete('', 0), '\t') diff --git a/Lib/test/test_shutil.py b/Lib/test/test_shutil.py index 5183c3ca..62036ddb 100644 --- a/Lib/test/test_shutil.py +++ b/Lib/test/test_shutil.py @@ -12,11 +12,7 @@ import errno import functools import subprocess from contextlib import ExitStack -from test import support -from test.support import TESTFN -from os.path import splitdrive -from distutils.spawn import find_executable, spawn -from shutil import (_make_tarball, _make_zipfile, make_archive, +from shutil import (make_archive, register_archive_format, unregister_archive_format, get_archive_formats, Error, unpack_archive, register_unpack_format, RegistryError, @@ -53,7 +49,7 @@ try: import zipfile ZIP_SUPPORT = True except ImportError: - ZIP_SUPPORT = find_executable('zip') + ZIP_SUPPORT = shutil.which('zip') def _fake_rename(*args, **kwargs): # Pretend the destination path is on a different filesystem. 
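shutil.which(), which replaces distutils.spawn.find_executable in the imports above, returns the full path of a program found on PATH, or None. A brief illustration (the path shown is hypothetical):

import shutil

zip_cmd = shutil.which('zip')      # e.g. '/usr/bin/zip', or None if not installed
ZIP_SUPPORT = zip_cmd is not None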
@@ -94,6 +90,18 @@ def read_file(path, binary=False): with open(path, 'rb' if binary else 'r') as fp: return fp.read() +def rlistdir(path): + res = [] + for name in sorted(os.listdir(path)): + p = os.path.join(path, name) + if os.path.isdir(p) and not os.path.islink(p): + res.append(name + '/') + for n in rlistdir(p): + res.append(name + '/' + n) + else: + res.append(name) + return res + class TestShutil(unittest.TestCase): @@ -959,139 +967,143 @@ class TestShutil(unittest.TestCase): @requires_zlib def test_make_tarball(self): # creating something to tar - tmpdir = self.mkdtemp() - write_file((tmpdir, 'file1'), 'xxx') - write_file((tmpdir, 'file2'), 'xxx') - os.mkdir(os.path.join(tmpdir, 'sub')) - write_file((tmpdir, 'sub', 'file3'), 'xxx') + root_dir, base_dir = self._create_files('') tmpdir2 = self.mkdtemp() # force shutil to create the directory os.rmdir(tmpdir2) - unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0], - "source and target should be on same drive") + # working with relative paths + work_dir = os.path.dirname(tmpdir2) + rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive') - base_name = os.path.join(tmpdir2, 'archive') - - # working with relative paths to avoid tar warnings - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - _make_tarball(splitdrive(base_name)[1], '.') - finally: - os.chdir(old_dir) + with support.change_cwd(work_dir): + base_name = os.path.abspath(rel_base_name) + tarball = make_archive(rel_base_name, 'gztar', root_dir, '.') # check if the compressed tarball was created - tarball = base_name + '.tar.gz' - self.assertTrue(os.path.exists(tarball)) + self.assertEqual(tarball, base_name + '.tar.gz') + self.assertTrue(os.path.isfile(tarball)) + self.assertTrue(tarfile.is_tarfile(tarball)) + with tarfile.open(tarball, 'r:gz') as tf: + self.assertCountEqual(tf.getnames(), + ['.', './sub', './sub2', + './file1', './file2', './sub/file3']) # trying an uncompressed one - base_name = os.path.join(tmpdir2, 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - _make_tarball(splitdrive(base_name)[1], '.', compress=None) - finally: - os.chdir(old_dir) - tarball = base_name + '.tar' - self.assertTrue(os.path.exists(tarball)) + with support.change_cwd(work_dir): + tarball = make_archive(rel_base_name, 'tar', root_dir, '.') + self.assertEqual(tarball, base_name + '.tar') + self.assertTrue(os.path.isfile(tarball)) + self.assertTrue(tarfile.is_tarfile(tarball)) + with tarfile.open(tarball, 'r') as tf: + self.assertCountEqual(tf.getnames(), + ['.', './sub', './sub2', + './file1', './file2', './sub/file3']) def _tarinfo(self, path): - tar = tarfile.open(path) - try: + with tarfile.open(path) as tar: names = tar.getnames() names.sort() return tuple(names) - finally: - tar.close() - def _create_files(self): + def _create_files(self, base_dir='dist'): # creating something to tar - tmpdir = self.mkdtemp() - dist = os.path.join(tmpdir, 'dist') - os.mkdir(dist) + root_dir = self.mkdtemp() + dist = os.path.join(root_dir, base_dir) + os.makedirs(dist, exist_ok=True) write_file((dist, 'file1'), 'xxx') write_file((dist, 'file2'), 'xxx') os.mkdir(os.path.join(dist, 'sub')) write_file((dist, 'sub', 'file3'), 'xxx') os.mkdir(os.path.join(dist, 'sub2')) - tmpdir2 = self.mkdtemp() - base_name = os.path.join(tmpdir2, 'archive') - return tmpdir, tmpdir2, base_name + if base_dir: + write_file((root_dir, 'outer'), 'xxx') + return root_dir, base_dir @requires_zlib - @unittest.skipUnless(find_executable('tar') and find_executable('gzip'), + 
@unittest.skipUnless(shutil.which('tar'), 'Need the tar command to run') def test_tarfile_vs_tar(self): - tmpdir, tmpdir2, base_name = self._create_files() - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - _make_tarball(base_name, 'dist') - finally: - os.chdir(old_dir) + root_dir, base_dir = self._create_files() + base_name = os.path.join(self.mkdtemp(), 'archive') + tarball = make_archive(base_name, 'gztar', root_dir, base_dir) # check if the compressed tarball was created - tarball = base_name + '.tar.gz' - self.assertTrue(os.path.exists(tarball)) + self.assertEqual(tarball, base_name + '.tar.gz') + self.assertTrue(os.path.isfile(tarball)) # now create another tarball using `tar` - tarball2 = os.path.join(tmpdir, 'archive2.tar.gz') - tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist'] - gzip_cmd = ['gzip', '-f9', 'archive2.tar'] - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - with captured_stdout() as s: - spawn(tar_cmd) - spawn(gzip_cmd) - finally: - os.chdir(old_dir) + tarball2 = os.path.join(root_dir, 'archive2.tar') + tar_cmd = ['tar', '-cf', 'archive2.tar', base_dir] + subprocess.check_call(tar_cmd, cwd=root_dir, + stdout=subprocess.DEVNULL) - self.assertTrue(os.path.exists(tarball2)) + self.assertTrue(os.path.isfile(tarball2)) # let's compare both tarballs self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2)) # trying an uncompressed one - base_name = os.path.join(tmpdir2, 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - _make_tarball(base_name, 'dist', compress=None) - finally: - os.chdir(old_dir) - tarball = base_name + '.tar' - self.assertTrue(os.path.exists(tarball)) + tarball = make_archive(base_name, 'tar', root_dir, base_dir) + self.assertEqual(tarball, base_name + '.tar') + self.assertTrue(os.path.isfile(tarball)) # now for a dry_run - base_name = os.path.join(tmpdir2, 'archive') - old_dir = os.getcwd() - os.chdir(tmpdir) - try: - _make_tarball(base_name, 'dist', compress=None, dry_run=True) - finally: - os.chdir(old_dir) - tarball = base_name + '.tar' - self.assertTrue(os.path.exists(tarball)) + tarball = make_archive(base_name, 'tar', root_dir, base_dir, + dry_run=True) + self.assertEqual(tarball, base_name + '.tar') + self.assertTrue(os.path.isfile(tarball)) @requires_zlib @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run') def test_make_zipfile(self): - # creating something to tar - tmpdir = self.mkdtemp() - write_file((tmpdir, 'file1'), 'xxx') - write_file((tmpdir, 'file2'), 'xxx') + # creating something to zip + root_dir, base_dir = self._create_files() tmpdir2 = self.mkdtemp() # force shutil to create the directory os.rmdir(tmpdir2) - base_name = os.path.join(tmpdir2, 'archive') - _make_zipfile(base_name, tmpdir) - - # check if the compressed tarball was created - tarball = base_name + '.zip' - self.assertTrue(os.path.exists(tarball)) + # working with relative paths + work_dir = os.path.dirname(tmpdir2) + rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive') + + with support.change_cwd(work_dir): + base_name = os.path.abspath(rel_base_name) + res = make_archive(rel_base_name, 'zip', root_dir, base_dir) + + self.assertEqual(res, base_name + '.zip') + self.assertTrue(os.path.isfile(res)) + self.assertTrue(zipfile.is_zipfile(res)) + with zipfile.ZipFile(res) as zf: + self.assertCountEqual(zf.namelist(), + ['dist/', 'dist/sub/', 'dist/sub2/', + 'dist/file1', 'dist/file2', 'dist/sub/file3']) + @requires_zlib + @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run') + @unittest.skipUnless(shutil.which('zip'), + 'Need 
the zip command to run') + def test_zipfile_vs_zip(self): + root_dir, base_dir = self._create_files() + base_name = os.path.join(self.mkdtemp(), 'archive') + archive = make_archive(base_name, 'zip', root_dir, base_dir) + + # check if ZIP file was created + self.assertEqual(archive, base_name + '.zip') + self.assertTrue(os.path.isfile(archive)) + + # now create another ZIP file using `zip` + archive2 = os.path.join(root_dir, 'archive2.zip') + zip_cmd = ['zip', '-q', '-r', 'archive2.zip', base_dir] + subprocess.check_call(zip_cmd, cwd=root_dir, + stdout=subprocess.DEVNULL) + + self.assertTrue(os.path.isfile(archive2)) + # let's compare both ZIP files + with zipfile.ZipFile(archive) as zf: + names = zf.namelist() + with zipfile.ZipFile(archive2) as zf: + names2 = zf.namelist() + self.assertEqual(sorted(names), sorted(names2)) def test_make_archive(self): tmpdir = self.mkdtemp() @@ -1108,40 +1120,37 @@ class TestShutil(unittest.TestCase): else: group = owner = 'root' - base_dir, root_dir, base_name = self._create_files() - base_name = os.path.join(self.mkdtemp() , 'archive') + root_dir, base_dir = self._create_files() + base_name = os.path.join(self.mkdtemp(), 'archive') res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner, group=group) - self.assertTrue(os.path.exists(res)) + self.assertTrue(os.path.isfile(res)) res = make_archive(base_name, 'zip', root_dir, base_dir) - self.assertTrue(os.path.exists(res)) + self.assertTrue(os.path.isfile(res)) res = make_archive(base_name, 'tar', root_dir, base_dir, owner=owner, group=group) - self.assertTrue(os.path.exists(res)) + self.assertTrue(os.path.isfile(res)) res = make_archive(base_name, 'tar', root_dir, base_dir, owner='kjhkjhkjg', group='oihohoh') - self.assertTrue(os.path.exists(res)) + self.assertTrue(os.path.isfile(res)) @requires_zlib @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support") def test_tarfile_root_owner(self): - tmpdir, tmpdir2, base_name = self._create_files() - old_dir = os.getcwd() - os.chdir(tmpdir) + root_dir, base_dir = self._create_files() + base_name = os.path.join(self.mkdtemp(), 'archive') group = grp.getgrgid(0)[0] owner = pwd.getpwuid(0)[0] - try: - archive_name = _make_tarball(base_name, 'dist', compress=None, - owner=owner, group=group) - finally: - os.chdir(old_dir) + with support.change_cwd(root_dir): + archive_name = make_archive(base_name, 'gztar', root_dir, 'dist', + owner=owner, group=group) # check if the compressed tarball was created - self.assertTrue(os.path.exists(archive_name)) + self.assertTrue(os.path.isfile(archive_name)) # now checks the rights archive = tarfile.open(archive_name) @@ -1198,18 +1207,6 @@ class TestShutil(unittest.TestCase): formats = [name for name, params in get_archive_formats()] self.assertNotIn('xxx', formats) - def _compare_dirs(self, dir1, dir2): - # check that dir1 and dir2 are equivalent, - # return the diff - diff = [] - for root, dirs, files in os.walk(dir1): - for file_ in files: - path = os.path.join(root, file_) - target_path = os.path.join(dir2, os.path.split(path)[-1]) - if not os.path.exists(target_path): - diff.append(file_) - return diff - @requires_zlib def test_unpack_archive(self): formats = ['tar', 'gztar', 'zip'] @@ -1218,22 +1215,22 @@ class TestShutil(unittest.TestCase): if LZMA_SUPPORTED: formats.append('xztar') + root_dir, base_dir = self._create_files() + expected = rlistdir(root_dir) + expected.remove('outer') for format in formats: - tmpdir = self.mkdtemp() - base_dir, root_dir, base_name = self._create_files() - tmpdir2 = 
self.mkdtemp() + base_name = os.path.join(self.mkdtemp(), 'archive') filename = make_archive(base_name, format, root_dir, base_dir) # let's try to unpack it now + tmpdir2 = self.mkdtemp() unpack_archive(filename, tmpdir2) - diff = self._compare_dirs(tmpdir, tmpdir2) - self.assertEqual(diff, []) + self.assertEqual(rlistdir(tmpdir2), expected) # and again, this time with the format specified tmpdir3 = self.mkdtemp() unpack_archive(filename, tmpdir3, format=format) - diff = self._compare_dirs(tmpdir, tmpdir3) - self.assertEqual(diff, []) + self.assertEqual(rlistdir(tmpdir3), expected) self.assertRaises(shutil.ReadError, unpack_archive, TESTFN) self.assertRaises(ValueError, unpack_archive, TESTFN, format='xxx') diff --git a/Lib/test/test_smtpd.py b/Lib/test/test_smtpd.py index 1aa55d21..88dbfdf6 100644 --- a/Lib/test/test_smtpd.py +++ b/Lib/test/test_smtpd.py @@ -313,6 +313,12 @@ class SMTPDChannelTest(unittest.TestCase): DummyDispatcherBroken, BrokenDummyServer, (support.HOST, 0), ('b', 0), decode_data=True) + def test_decode_data_and_enable_SMTPUTF8_raises(self): + self.assertRaises( + ValueError, smtpd.SMTPChannel, + self.server, self.channel.conn, self.channel.addr, + enable_SMTPUTF8=True, decode_data=True) + def test_server_accept(self): self.server.handle_accept() diff --git a/Lib/test/test_smtplib.py b/Lib/test/test_smtplib.py index 8e362414..28539f36 100644 --- a/Lib/test/test_smtplib.py +++ b/Lib/test/test_smtplib.py @@ -1,8 +1,10 @@ import asyncore +import base64 import email.mime.text from email.message import EmailMessage from email.base64mime import body_encode as encode_base64 import email.utils +import hmac import socket import smtpd import smtplib @@ -623,20 +625,12 @@ sim_users = {'Mr.A@somewhere.com':'John A', sim_auth = ('Mr.A@somewhere.com', 'somepassword') sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn' 'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=') -sim_auth_credentials = { - 'login': 'TXIuQUBzb21ld2hlcmUuY29t', - 'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=', - 'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ' - 'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'), - } -sim_auth_login_user = 'TXIUQUBZB21LD2HLCMUUY29T' -sim_auth_plain = 'AE1YLKFAC29TZXDOZXJLLMNVBQBZB21LCGFZC3DVCMQ=' - sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'], 'list-2':['Ms.B@xn--fo-fka.com',], } # Simulated SMTP channel & server +class ResponseException(Exception): pass class SimSMTPChannel(smtpd.SMTPChannel): quit_response = None @@ -646,12 +640,109 @@ class SimSMTPChannel(smtpd.SMTPChannel): rcpt_count = 0 rset_count = 0 disconnect = 0 + AUTH = 99 # Add protocol state to enable auth testing. + authenticated_user = None def __init__(self, extra_features, *args, **kw): self._extrafeatures = ''.join( [ "250-{0}\r\n".format(x) for x in extra_features ]) super(SimSMTPChannel, self).__init__(*args, **kw) + # AUTH related stuff. It would be nice if support for this were in smtpd. 
+ def found_terminator(self): + if self.smtp_state == self.AUTH: + line = self._emptystring.join(self.received_lines) + print('Data:', repr(line), file=smtpd.DEBUGSTREAM) + self.received_lines = [] + try: + self.auth_object(line) + except ResponseException as e: + self.smtp_state = self.COMMAND + self.push('%s %s' % (e.smtp_code, e.smtp_error)) + return + super().found_terminator() + + + def smtp_AUTH(self, arg): + if not self.seen_greeting: + self.push('503 Error: send EHLO first') + return + if not self.extended_smtp or 'AUTH' not in self._extrafeatures: + self.push('500 Error: command "AUTH" not recognized') + return + if self.authenticated_user is not None: + self.push( + '503 Bad sequence of commands: already authenticated') + return + args = arg.split() + if len(args) not in [1, 2]: + self.push('501 Syntax: AUTH [initial-response]') + return + auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_') + try: + self.auth_object = getattr(self, auth_object_name) + except AttributeError: + self.push('504 Command parameter not implemented: unsupported ' + ' authentication mechanism {!r}'.format(auth_object_name)) + return + self.smtp_state = self.AUTH + self.auth_object(args[1] if len(args) == 2 else None) + + def _authenticated(self, user, valid): + if valid: + self.authenticated_user = user + self.push('235 Authentication Succeeded') + else: + self.push('535 Authentication credentials invalid') + self.smtp_state = self.COMMAND + + def _decode_base64(self, string): + return base64.decodebytes(string.encode('ascii')).decode('utf-8') + + def _auth_plain(self, arg=None): + if arg is None: + self.push('334 ') + else: + logpass = self._decode_base64(arg) + try: + *_, user, password = logpass.split('\0') + except ValueError as e: + self.push('535 Splitting response {!r} into user and password' + ' failed: {}'.format(logpass, e)) + return + self._authenticated(user, password == sim_auth[1]) + + def _auth_login(self, arg=None): + if arg is None: + # base64 encoded 'Username:' + self.push('334 VXNlcm5hbWU6') + elif not hasattr(self, '_auth_login_user'): + self._auth_login_user = self._decode_base64(arg) + # base64 encoded 'Password:' + self.push('334 UGFzc3dvcmQ6') + else: + password = self._decode_base64(arg) + self._authenticated(self._auth_login_user, password == sim_auth[1]) + del self._auth_login_user + + def _auth_cram_md5(self, arg=None): + if arg is None: + self.push('334 {}'.format(sim_cram_md5_challenge)) + else: + logpass = self._decode_base64(arg) + try: + user, hashed_pass = logpass.split() + except ValueError as e: + self.push('535 Splitting response {!r} into user and password' + 'failed: {}'.format(logpass, e)) + return False + valid_hashed_pass = hmac.HMAC( + sim_auth[1].encode('ascii'), + self._decode_base64(sim_cram_md5_challenge).encode('ascii'), + 'md5').hexdigest() + self._authenticated(user, hashed_pass == valid_hashed_pass) + # end AUTH related stuff. 
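(Editor's aside, not part of the upstream patch: the _auth_cram_md5() handler above expects the client to answer the base64-encoded challenge with base64 of "<username> <hexdigest>", where the digest is an MD5 HMAC of the decoded challenge keyed with the password. A minimal sketch of that client-side computation follows, reusing the credential and challenge values shown above as sim_auth and sim_cram_md5_challenge; the variable names are illustrative only.)

    import base64
    import hmac

    username, password = 'Mr.A@somewhere.com', 'somepassword'   # sim_auth
    challenge_b64 = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                     'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')           # sim_cram_md5_challenge

    # Decode the challenge, HMAC it with the password, and send back
    # base64("<username> <hexdigest>").
    challenge = base64.decodebytes(challenge_b64.encode('ascii'))
    digest = hmac.HMAC(password.encode('ascii'), challenge, 'md5').hexdigest()
    response = base64.encodebytes(
        ('%s %s' % (username, digest)).encode('ascii')).strip()
    # `response` is what the client sends after the server's "334 <challenge>";
    # _auth_cram_md5() above recomputes the same digest and compares.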
+ def smtp_EHLO(self, arg): resp = ('250-testhost\r\n' '250-EXPN\r\n' @@ -683,20 +774,6 @@ class SimSMTPChannel(smtpd.SMTPChannel): else: self.push('550 No access for you!') - def smtp_AUTH(self, arg): - mech = arg.strip().lower() - if mech=='cram-md5': - self.push('334 {}'.format(sim_cram_md5_challenge)) - elif mech not in sim_auth_credentials: - self.push('504 auth type unimplemented') - return - elif mech=='plain': - self.push('334 ') - elif mech=='login': - self.push('334 ') - else: - self.push('550 No access for you!') - def smtp_QUIT(self, arg): if self.quit_response is None: super(SimSMTPChannel, self).smtp_QUIT(arg) @@ -841,63 +918,49 @@ class SMTPSimTests(unittest.TestCase): self.assertEqual(smtp.expn(u), expected_unknown) smtp.quit() - # SimSMTPChannel doesn't fully support AUTH because it requires a - # synchronous read to obtain the credentials...so instead smtpd - # sees the credential sent by smtplib's login method as an unknown command, - # which results in smtplib raising an auth error. Fortunately the error - # message contains the encoded credential, so we can partially check that it - # was generated correctly (partially, because the 'word' is uppercased in - # the error message). - def testAUTH_PLAIN(self): self.serv.add_feature("AUTH PLAIN") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) - try: smtp.login(sim_auth[0], sim_auth[1], initial_response_ok=False) - except smtplib.SMTPAuthenticationError as err: - self.assertIn(sim_auth_plain, str(err)) + resp = smtp.login(sim_auth[0], sim_auth[1]) + self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def testAUTH_LOGIN(self): self.serv.add_feature("AUTH LOGIN") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) - try: smtp.login(sim_auth[0], sim_auth[1]) - except smtplib.SMTPAuthenticationError as err: - self.assertIn(sim_auth_login_user, str(err)) + resp = smtp.login(sim_auth[0], sim_auth[1]) + self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def testAUTH_CRAM_MD5(self): self.serv.add_feature("AUTH CRAM-MD5") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) - - try: smtp.login(sim_auth[0], sim_auth[1]) - except smtplib.SMTPAuthenticationError as err: - self.assertIn(sim_auth_credentials['cram-md5'], str(err)) + resp = smtp.login(sim_auth[0], sim_auth[1]) + self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def testAUTH_multiple(self): # Test that multiple authentication methods are tried. 
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) - try: smtp.login(sim_auth[0], sim_auth[1]) - except smtplib.SMTPAuthenticationError as err: - self.assertIn(sim_auth_login_user, str(err)) + resp = smtp.login(sim_auth[0], sim_auth[1]) + self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def test_auth_function(self): - smtp = smtplib.SMTP(HOST, self.port, - local_hostname='localhost', timeout=15) - self.serv.add_feature("AUTH CRAM-MD5") - smtp.user, smtp.password = sim_auth[0], sim_auth[1] - supported = {'CRAM-MD5': smtp.auth_cram_md5, - 'PLAIN': smtp.auth_plain, - 'LOGIN': smtp.auth_login, - } - for mechanism, method in supported.items(): - try: smtp.auth(mechanism, method, initial_response_ok=False) - except smtplib.SMTPAuthenticationError as err: - self.assertIn(sim_auth_credentials[mechanism.lower()].upper(), - str(err)) - smtp.close() + supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'} + for mechanism in supported: + self.serv.add_feature("AUTH {}".format(mechanism)) + for mechanism in supported: + with self.subTest(mechanism=mechanism): + smtp = smtplib.SMTP(HOST, self.port, + local_hostname='localhost', timeout=15) + smtp.ehlo('foo') + smtp.user, smtp.password = sim_auth[0], sim_auth[1] + method = 'auth_' + mechanism.lower().replace('-', '_') + resp = smtp.auth(mechanism, getattr(smtp, method)) + self.assertEqual(resp, (235, b'Authentication Succeeded')) + smtp.close() def test_quit_resets_greeting(self): smtp = smtplib.SMTP(HOST, self.port, diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py index a089ef15..1e355ea8 100644 --- a/Lib/test/test_socket.py +++ b/Lib/test/test_socket.py @@ -1289,12 +1289,11 @@ class GeneralModuleTests(unittest.TestCase): @unittest.skipUnless(support.is_resource_enabled('network'), 'network is not enabled') def test_idna(self): - # Check for internet access before running test (issue #12804). - try: + # Check for internet access before running test + # (issue #12804, issue #25138). 
+ with support.transient_internet('python.org'): socket.gethostbyname('python.org') - except socket.gaierror as e: - if e.errno == socket.EAI_NODATA: - self.skipTest('internet access required for this test') + # these should all be successful domain = 'испытание.pythontest.net' socket.gethostbyname(domain) @@ -3867,6 +3866,7 @@ class NonBlockingTCPTests(ThreadedTCPSocketTest): read, write, err = select.select([self.serv], [], []) if self.serv in read: conn, addr = self.serv.accept() + self.assertIsNone(conn.gettimeout()) conn.close() else: self.fail("Error trying to do accept after select.") @@ -4549,7 +4549,7 @@ class TestUnixDomain(unittest.TestCase): except OSError as e: if str(e) == "AF_UNIX path too long": self.skipTest( - "Pathname {0!a} is too long to serve as a AF_UNIX path" + "Pathname {0!a} is too long to serve as an AF_UNIX path" .format(path)) else: raise diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py index f314ff41..548feebb 100644 --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -784,12 +784,12 @@ class ContextTests(unittest.TestCase): @skip_if_broken_ubuntu_ssl def test_options(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) - # OP_ALL | OP_NO_SSLv2 is the default value - self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2, - ctx.options) - ctx.options |= ssl.OP_NO_SSLv3 + # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3, ctx.options) + ctx.options |= ssl.OP_NO_TLSv1 + self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1, + ctx.options) if can_clear_options(): ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1 self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3, @@ -2451,17 +2451,17 @@ else: " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) if hasattr(ssl, 'PROTOCOL_SSLv3'): - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3') + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1') if hasattr(ssl, 'PROTOCOL_SSLv3'): - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) if hasattr(ssl, 'PROTOCOL_SSLv3'): - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) @@ -2493,8 +2493,8 @@ else: try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3', - client_options=ssl.OP_NO_SSLv2) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, + False, client_options=ssl.OP_NO_SSLv2) @skip_if_broken_ubuntu_ssl def test_protocol_tlsv1(self): diff --git a/Lib/test/test_string.py b/Lib/test/test_string.py index 5d37e164..0cd2b862 100644 --- a/Lib/test/test_string.py +++ b/Lib/test/test_string.py @@ 
-58,6 +58,8 @@ class ModuleTest(unittest.TestCase): 'foo{1}{num}{1}'.format(None, 'bar', num=6)) self.assertEqual(fmt.format('{:^{}}', 'bar', 6), '{:^{}}'.format('bar', 6)) + self.assertEqual(fmt.format('{:^{}} {}', 'bar', 6, 'X'), + '{:^{}} {}'.format('bar', 6, 'X')) self.assertEqual(fmt.format('{:^{pad}}{}', 'foo', 'bar', pad=6), '{:^{pad}}{}'.format('foo', 'bar', pad=6)) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py index 9c0229ae..e06beed3 100644 --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -37,16 +37,6 @@ else: SETBINARY = '' -try: - mkstemp = tempfile.mkstemp -except AttributeError: - # tempfile.mkstemp is not available - def mkstemp(): - """Replacement for mkstemp, calling mktemp.""" - fname = tempfile.mktemp() - return os.open(fname, os.O_RDWR|os.O_CREAT), fname - - class BaseTestCase(unittest.TestCase): def setUp(self): # Try to minimize the number of children we have so this test @@ -317,11 +307,8 @@ class ProcessTestCase(BaseTestCase): # Normalize an expected cwd (for Tru64 support). # We can't use os.path.realpath since it doesn't expand Tru64 {memb} # strings. See bug #1063571. - original_cwd = os.getcwd() - os.chdir(cwd) - cwd = os.getcwd() - os.chdir(original_cwd) - return cwd + with support.change_cwd(cwd): + return os.getcwd() # For use in the test_cwd* tests below. def _split_python_path(self): @@ -1153,9 +1140,9 @@ class ProcessTestCase(BaseTestCase): def test_handles_closed_on_exception(self): # If CreateProcess exits with an error, ensure the # duplicate output handles are released - ifhandle, ifname = mkstemp() - ofhandle, ofname = mkstemp() - efhandle, efname = mkstemp() + ifhandle, ifname = tempfile.mkstemp() + ofhandle, ofname = tempfile.mkstemp() + efhandle, efname = tempfile.mkstemp() try: subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle, stderr=efhandle) @@ -1527,7 +1514,7 @@ class POSIXProcessTestCase(BaseTestCase): def test_args_string(self): # args is a string - fd, fname = mkstemp() + fd, fname = tempfile.mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!/bin/sh\n") @@ -1572,7 +1559,7 @@ class POSIXProcessTestCase(BaseTestCase): def test_call_string(self): # call() function with string argument on UNIX - fd, fname = mkstemp() + fd, fname = tempfile.mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!/bin/sh\n") @@ -1765,7 +1752,7 @@ class POSIXProcessTestCase(BaseTestCase): def test_remapping_std_fds(self): # open up some temporary files - temps = [mkstemp() for i in range(3)] + temps = [tempfile.mkstemp() for i in range(3)] try: temp_fds = [fd for fd, fname in temps] @@ -1810,7 +1797,7 @@ class POSIXProcessTestCase(BaseTestCase): def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files - temps = [mkstemp() for i in range(3)] + temps = [tempfile.mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them @@ -2325,8 +2312,6 @@ class POSIXProcessTestCase(BaseTestCase): func = lambda: None gc.enable() - executable_list = "exec" # error: must be a sequence - for args, exe_list, cwd, env_list in ( (123, [b"exe"], None, [b"env"]), ([b"arg"], 123, None, [b"env"]), @@ -2344,6 +2329,34 @@ class POSIXProcessTestCase(BaseTestCase): if not gc_enabled: gc.disable() + @support.cpython_only + def test_fork_exec_sorted_fd_sanity_check(self): + # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check. 
+ import _posixsubprocess + gc_enabled = gc.isenabled() + try: + gc.enable() + + for fds_to_keep in ( + (-1, 2, 3, 4, 5), # Negative number. + ('str', 4), # Not an int. + (18, 23, 42, 2**63), # Out of range. + (5, 4), # Not sorted. + (6, 7, 7, 8), # Duplicate. + ): + with self.assertRaises( + ValueError, + msg='fds_to_keep={}'.format(fds_to_keep)) as c: + _posixsubprocess.fork_exec( + [b"false"], [b"false"], + True, fds_to_keep, None, [b"env"], + -1, -1, -1, -1, + 1, 2, 3, 4, + True, True, None) + self.assertIn('fds_to_keep', str(c.exception)) + finally: + if not gc_enabled: + gc.disable() @unittest.skipUnless(mswindows, "Windows specific tests") @@ -2537,7 +2550,7 @@ class CommandsWithSpaces (BaseTestCase): def setUp(self): super().setUp() - f, fname = mkstemp(".py", "te st") + f, fname = tempfile.mkstemp(".py", "te st") self.fname = fname.lower () os.write(f, b"import sys;" b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))" diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py index 83549bc5..2d956532 100644 --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -197,25 +197,64 @@ class SysModuleTest(unittest.TestCase): self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) - @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(), - 'fatal error if run with a trace function') def test_recursionlimit_recovery(self): - # NOTE: this test is slightly fragile in that it depends on the current - # recursion count when executing the test being low enough so as to - # trigger the recursion recovery detection in the _Py_MakeEndRecCheck - # macro (see ceval.h). + if hasattr(sys, 'gettrace') and sys.gettrace(): + self.skipTest('fatal error if run with a trace function') + oldlimit = sys.getrecursionlimit() def f(): f() try: - for i in (50, 1000): - # Issue #5392: stack overflow after hitting recursion limit twice - sys.setrecursionlimit(i) + for depth in (10, 25, 50, 75, 100, 250, 1000): + try: + sys.setrecursionlimit(depth) + except RecursionError: + # Issue #25274: The recursion limit is too low at the + # current recursion depth + continue + + # Issue #5392: test stack overflow after hitting recursion + # limit twice self.assertRaises(RecursionError, f) self.assertRaises(RecursionError, f) finally: sys.setrecursionlimit(oldlimit) + @test.support.cpython_only + def test_setrecursionlimit_recursion_depth(self): + # Issue #25274: Setting a low recursion limit must be blocked if the + # current recursion depth is already higher than the "lower-water + # mark". Otherwise, it may not be possible anymore to + # reset the overflowed flag to 0. 
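(Editor's aside, not part of the upstream patch: the arithmetic behind the new recursion-depth test can be restated as a small helper; low_water_mark() is an illustrative name, and the formula is the one quoted in the test's own comment.)

    def low_water_mark(limit):
        # Formula quoted below as "extracted from
        # _Py_RecursionLimitLowerWaterMark()".
        if limit > 200:
            return limit - 50
        return limit * 3 // 4

    # For example, setrecursionlimit(100) is refused once the current
    # recursion depth reaches 75, while setrecursionlimit(1000) is refused
    # only at depth 950 or more.
    assert low_water_mark(100) == 75
    assert low_water_mark(1000) == 950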
+ + from _testcapi import get_recursion_depth + + def set_recursion_limit_at_depth(depth, limit): + recursion_depth = get_recursion_depth() + if recursion_depth >= depth: + with self.assertRaises(RecursionError) as cm: + sys.setrecursionlimit(limit) + self.assertRegex(str(cm.exception), + "cannot set the recursion limit to [0-9]+ " + "at the recursion depth [0-9]+: " + "the limit is too low") + else: + set_recursion_limit_at_depth(depth, limit) + + oldlimit = sys.getrecursionlimit() + try: + sys.setrecursionlimit(1000) + + for limit in (10, 25, 50, 75, 100, 150, 200): + # formula extracted from _Py_RecursionLimitLowerWaterMark() + if limit > 200: + depth = limit - 50 + else: + depth = limit * 3 // 4 + set_recursion_limit_at_depth(depth, limit) + finally: + sys.setrecursionlimit(oldlimit) + def test_recursionlimit_fatalerror(self): # A fatal error occurs if a second recursion limit is hit when recovering # from a first one. diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py index c0f27a67..0917c3e2 100644 --- a/Lib/test/test_sysconfig.py +++ b/Lib/test/test_sysconfig.py @@ -6,7 +6,7 @@ import shutil from copy import copy from test.support import (run_unittest, TESTFN, unlink, check_warnings, - captured_stdout, skip_unless_symlink) + captured_stdout, skip_unless_symlink, change_cwd) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -361,12 +361,8 @@ class TestSysConfig(unittest.TestCase): # srcdir should be independent of the current working directory # See Issues #15322, #15364. srcdir = sysconfig.get_config_var('srcdir') - cwd = os.getcwd() - try: - os.chdir('..') + with change_cwd(os.pardir): srcdir2 = sysconfig.get_config_var('srcdir') - finally: - os.chdir(cwd) self.assertEqual(srcdir, srcdir2) @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None, diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py index 57cddb2d..1412cae1 100644 --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -1132,10 +1132,8 @@ class WriteTest(WriteTestBase, unittest.TestCase): self.assertEqual(tar.getnames(), [], "added the archive to itself") - cwd = os.getcwd() - os.chdir(TEMPDIR) - tar.add(dstname) - os.chdir(cwd) + with support.change_cwd(TEMPDIR): + tar.add(dstname) self.assertEqual(tar.getnames(), [], "added the archive to itself") finally: @@ -1292,9 +1290,7 @@ class WriteTest(WriteTestBase, unittest.TestCase): def test_cwd(self): # Test adding the current working directory. 
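(Editor's aside, not part of the upstream patch: several hunks in this patch replace the manual save-cwd/os.chdir()/restore dance with test.support.change_cwd(). A minimal sketch of such a context manager is shown below; the real helper has extra behaviour, such as a quiet mode, that is omitted here, and yielding the new directory is an assumption.)

    import contextlib
    import os

    @contextlib.contextmanager
    def change_cwd(path):
        """Temporarily switch the working directory, restoring it on exit."""
        saved_dir = os.getcwd()
        os.chdir(path)
        try:
            yield os.getcwd()
        finally:
            os.chdir(saved_dir)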
- cwd = os.getcwd() - os.chdir(TEMPDIR) - try: + with support.change_cwd(TEMPDIR): tar = tarfile.open(tmpname, self.mode) try: tar.add(".") @@ -1308,8 +1304,6 @@ class WriteTest(WriteTestBase, unittest.TestCase): self.assertTrue(t.name.startswith("./"), t.name) finally: tar.close() - finally: - os.chdir(cwd) def test_open_nonwritable_fileobj(self): for exctype in OSError, EOFError, RuntimeError: diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py index 5be645a3..25f6edec 100644 --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -1,5 +1,6 @@ import unittest import re +import subprocess import sys import os from test import support @@ -242,11 +243,10 @@ class TclTest(unittest.TestCase): with support.EnvironmentVarGuard() as env: env.unset("TCL_LIBRARY") - f = os.popen('%s -c "import tkinter; print(tkinter)"' % (unc_name,)) + stdout = subprocess.check_output( + [unc_name, '-c', 'import tkinter; print(tkinter)']) - self.assertIn('tkinter', f.read()) - # exit code must be zero - self.assertEqual(f.close(), None) + self.assertIn(b'tkinter', stdout) def test_exprstring(self): tcl = self.interp diff --git a/Lib/test/test_textwrap.py b/Lib/test/test_textwrap.py index 707aaaa2..a44184fb 100644 --- a/Lib/test/test_textwrap.py +++ b/Lib/test/test_textwrap.py @@ -748,6 +748,11 @@ def foo(): expect = "hello there\n how are you?" self.assertEqual(expect, dedent(text)) + # test margin is smaller than smallest indent + text = " \thello there\n \thow are you?\n \tI'm fine, thanks" + expect = " \thello there\n \thow are you?\n\tI'm fine, thanks" + self.assertEqual(expect, dedent(text)) + # Test textwrap.indent class IndentTestCase(unittest.TestCase): diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py index 6bcd2124..76b894ee 100644 --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -773,7 +773,7 @@ class TestPyTime_t(unittest.TestCase): (2**23 - 1e-9, 8388607999999999), (2**23, 8388608000000000), - # start loosing precision for value > 2^23 seconds + # start losing precision for value > 2^23 seconds (2**23 + 1e-9, 8388608000000002), # nanoseconds are lost for value > 2^23 seconds @@ -848,7 +848,7 @@ class TestPyTime_t(unittest.TestCase): (4194304000000000, 2**22), (4194304000000001, 2**22 + 1e-9), - # start loosing precision for value > 2^23 seconds + # start losing precision for value > 2^23 seconds (8388608000000002, 2**23 + 1e-9), # nanoseconds are lost for value > 2^23 seconds @@ -946,14 +946,14 @@ class TestPyTime_t(unittest.TestCase): # nanoseconds (1, 0, FLOOR), (1, 1, CEILING), - (-1, 0, FLOOR), - (-1, -1, CEILING), + (-1, -1, FLOOR), + (-1, 0, CEILING), # seconds + nanoseconds (1234 * MS_TO_NS + 1, 1234, FLOOR), (1234 * MS_TO_NS + 1, 1235, CEILING), - (-1234 * MS_TO_NS - 1, -1234, FLOOR), - (-1234 * MS_TO_NS - 1, -1235, CEILING), + (-1234 * MS_TO_NS - 1, -1235, FLOOR), + (-1234 * MS_TO_NS - 1, -1234, CEILING), ): with self.subTest(nanoseconds=ns, milliseconds=ms, round=rnd): self.assertEqual(PyTime_AsMilliseconds(ns, rnd), ms) @@ -983,14 +983,14 @@ class TestPyTime_t(unittest.TestCase): # nanoseconds (1, 0, FLOOR), (1, 1, CEILING), - (-1, 0, FLOOR), - (-1, -1, CEILING), + (-1, -1, FLOOR), + (-1, 0, CEILING), # seconds + nanoseconds (1234 * US_TO_NS + 1, 1234, FLOOR), (1234 * US_TO_NS + 1, 1235, CEILING), - (-1234 * US_TO_NS - 1, -1234, FLOOR), - (-1234 * US_TO_NS - 1, -1235, CEILING), + (-1234 * US_TO_NS - 1, -1235, FLOOR), + (-1234 * US_TO_NS - 1, -1234, CEILING), ): with self.subTest(nanoseconds=ns, milliseconds=ms, round=rnd): self.assertEqual(PyTime_AsMicroseconds(ns, 
rnd), ms) diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py index b7ca0894..3b17ca63 100644 --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -1,22 +1,44 @@ -doctests = """ -Tests for the tokenize module. - -The tests can be really simple. Given a small fragment of source -code, print out a table with tokens. The ENDMARKER is omitted for -brevity. +from test import support +from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, + STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, + open as tokenize_open, Untokenizer) +from io import BytesIO +from unittest import TestCase, mock +import os +import token - >>> import glob - >>> dump_tokens("1 + 1") - ENCODING 'utf-8' (0, 0) (0, 0) +class TokenizeTest(TestCase): + # Tests for the tokenize module. + + # The tests can be really simple. Given a small fragment of source + # code, print out a table with tokens. The ENDMARKER is omitted for + # brevity. + + def check_tokenize(self, s, expected): + # Format the tokens in s in a table format. + # The ENDMARKER is omitted. + result = [] + f = BytesIO(s.encode('utf-8')) + for type, token, start, end, line in tokenize(f.readline): + if type == ENDMARKER: + break + type = tok_name[type] + result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" % + locals()) + self.assertEqual(result, + [" ENCODING 'utf-8' (0, 0) (0, 0)"] + + expected.rstrip().splitlines()) + + def test_basic(self): + self.check_tokenize("1 + 1", """\ NUMBER '1' (1, 0) (1, 1) OP '+' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) - - >>> dump_tokens("if False:\\n" - ... " # NL\\n" - ... " True = False # NEWLINE\\n") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("if False:\n" + " # NL\n" + " True = False # NEWLINE\n", """\ NAME 'if' (1, 0) (1, 2) NAME 'False' (1, 3) (1, 8) OP ':' (1, 8) (1, 9) @@ -30,112 +52,48 @@ brevity. COMMENT '# NEWLINE' (3, 17) (3, 26) NEWLINE '\\n' (3, 26) (3, 27) DEDENT '' (4, 0) (4, 0) - - >>> indent_error_file = \""" - ... def k(x): - ... x += 2 - ... x += 5 - ... \""" - >>> readline = BytesIO(indent_error_file.encode('utf-8')).readline - >>> for tok in tokenize(readline): pass - Traceback (most recent call last): - ... - IndentationError: unindent does not match any outer indentation level - -There are some standard formatting practices that are easy to get right. - - >>> roundtrip("if x == 1:\\n" - ... " print(x)\\n") - True - - >>> roundtrip("# This is a comment\\n# This also") - True - -Some people use different formatting conventions, which makes -untokenize a little trickier. Note that this test involves trailing -whitespace after the colon. Note that we use hex escapes to make the -two trailing blanks apparent in the expected output. - - >>> roundtrip("if x == 1 : \\n" - ... " print(x)\\n") - True - - >>> f = support.findfile("tokenize_tests.txt") - >>> roundtrip(open(f, 'rb')) - True - - >>> roundtrip("if x == 1:\\n" - ... " # A comment by itself.\\n" - ... " print(x) # Comment here, too.\\n" - ... " # Another comment.\\n" - ... "after_if = True\\n") - True - - >>> roundtrip("if (x # The comments need to go in the right place\\n" - ... " == 1):\\n" - ... " print('x==1')\\n") - True - - >>> roundtrip("class Test: # A comment here\\n" - ... " # A comment with weird indent\\n" - ... " after_com = 5\\n" - ... " def x(m): return m*5 # a one liner\\n" - ... " def y(m): # A whitespace after the colon\\n" - ... " return y*4 # 3-space indent\\n") - True - -Some error-handling code - - >>> roundtrip("try: import somemodule\\n" - ... 
"except ImportError: # comment\\n" - ... " print('Can not import' # comment2\\n)" - ... "else: print('Loaded')\\n") - True - -Balancing continuation - - >>> roundtrip("a = (3,4, \\n" - ... "5,6)\\n" - ... "y = [3, 4,\\n" - ... "5]\\n" - ... "z = {'a': 5,\\n" - ... "'b':15, 'c':True}\\n" - ... "x = len(y) + 5 - a[\\n" - ... "3] - a[2]\\n" - ... "+ len(z) - z[\\n" - ... "'b']\\n") - True - -Ordinary integers and binary operators - - >>> dump_tokens("0xff <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + indent_error_file = b"""\ +def k(x): + x += 2 + x += 5 +""" + readline = BytesIO(indent_error_file).readline + with self.assertRaisesRegex(IndentationError, + "unindent does not match any " + "outer indentation level"): + for tok in tokenize(readline): + pass + + def test_int(self): + # Ordinary integers and binary operators + self.check_tokenize("0xff <= 255", """\ NUMBER '0xff' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0b10 <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0b10 <= 255", """\ NUMBER '0b10' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0o123 <= 0O123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0o123 <= 0O123", """\ NUMBER '0o123' (1, 0) (1, 5) OP '<=' (1, 6) (1, 8) NUMBER '0O123' (1, 9) (1, 14) - >>> dump_tokens("1234567 > ~0x15") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("1234567 > ~0x15", """\ NUMBER '1234567' (1, 0) (1, 7) OP '>' (1, 8) (1, 9) OP '~' (1, 10) (1, 11) NUMBER '0x15' (1, 11) (1, 15) - >>> dump_tokens("2134568 != 1231515") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("2134568 != 1231515", """\ NUMBER '2134568' (1, 0) (1, 7) OP '!=' (1, 8) (1, 10) NUMBER '1231515' (1, 11) (1, 18) - >>> dump_tokens("(-124561-1) & 200000000") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("(-124561-1) & 200000000", """\ OP '(' (1, 0) (1, 1) OP '-' (1, 1) (1, 2) NUMBER '124561' (1, 2) (1, 8) @@ -144,93 +102,93 @@ Ordinary integers and binary operators OP ')' (1, 10) (1, 11) OP '&' (1, 12) (1, 13) NUMBER '200000000' (1, 14) (1, 23) - >>> dump_tokens("0xdeadbeef != -1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadbeef != -1", """\ NUMBER '0xdeadbeef' (1, 0) (1, 10) OP '!=' (1, 11) (1, 13) OP '-' (1, 14) (1, 15) NUMBER '1' (1, 15) (1, 16) - >>> dump_tokens("0xdeadc0de & 12345") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadc0de & 12345", """\ NUMBER '0xdeadc0de' (1, 0) (1, 10) OP '&' (1, 11) (1, 12) NUMBER '12345' (1, 13) (1, 18) - >>> dump_tokens("0xFF & 0x15 | 1234") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xFF & 0x15 | 1234", """\ NUMBER '0xFF' (1, 0) (1, 4) OP '&' (1, 5) (1, 6) NUMBER '0x15' (1, 7) (1, 11) OP '|' (1, 12) (1, 13) NUMBER '1234' (1, 14) (1, 18) + """) -Long integers - - >>> dump_tokens("x = 0") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_long(self): + # Long integers + self.check_tokenize("x = 0", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0' (1, 4) (1, 5) - >>> dump_tokens("x = 0xfffffffffff") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 0xfffffffffff", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0xffffffffff (1, 4) (1, 17) - >>> dump_tokens("x = 123141242151251616110") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 123141242151251616110", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '123141242151 (1, 4) (1, 25) - >>> dump_tokens("x = 
-15921590215012591") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = -15921590215012591", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '-' (1, 4) (1, 5) NUMBER '159215902150 (1, 5) (1, 22) + """) -Floating point numbers - - >>> dump_tokens("x = 3.14159") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_float(self): + # Floating point numbers + self.check_tokenize("x = 3.14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14159' (1, 4) (1, 11) - >>> dump_tokens("x = 314159.") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 314159.", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '314159.' (1, 4) (1, 11) - >>> dump_tokens("x = .314159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = .314159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '.314159' (1, 4) (1, 11) - >>> dump_tokens("x = 3e14159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3e14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3e14159' (1, 4) (1, 11) - >>> dump_tokens("x = 3E123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3E123", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3E123' (1, 4) (1, 9) - >>> dump_tokens("x+y = 3e-1230") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x+y = 3e-1230", """\ NAME 'x' (1, 0) (1, 1) OP '+' (1, 1) (1, 2) NAME 'y' (1, 2) (1, 3) OP '=' (1, 4) (1, 5) NUMBER '3e-1230' (1, 6) (1, 13) - >>> dump_tokens("x = 3.14e159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3.14e159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14e159' (1, 4) (1, 12) + """) -String literals - - >>> dump_tokens("x = ''; y = \\\"\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_string(self): + # String literals + self.check_tokenize("x = ''; y = \"\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "''" (1, 4) (1, 6) @@ -238,8 +196,8 @@ String literals NAME 'y' (1, 8) (1, 9) OP '=' (1, 10) (1, 11) STRING '""' (1, 12) (1, 14) - >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = '\"'; y = \"'\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '\\'"\\'' (1, 4) (1, 7) @@ -247,29 +205,29 @@ String literals NAME 'y' (1, 9) (1, 10) OP '=' (1, 11) (1, 12) STRING '"\\'"' (1, 13) (1, 16) - >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"doesn\\'t "' (1, 4) (1, 14) NAME 'shrink' (1, 14) (1, 20) STRING '", does it"' (1, 20) (1, 31) - >>> dump_tokens("x = 'abc' + 'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 'abc' + 'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "'abc'" (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING "'ABC'" (1, 12) (1, 17) - >>> dump_tokens('y = "ABC" + "ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = "ABC" + "ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"ABC"' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING '"ABC"' (1, 12) (1, 17) - >>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = r'abc' + r'ABC' + R'ABC' + R'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "r'abc'" (1, 4) (1, 10) @@ -279,8 +237,8 @@ String literals STRING "R'ABC'" (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING "R'ABC'" (1, 
31) (1, 37) - >>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = r"abc" + r"ABC" + R"ABC" + R"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'r"abc"' (1, 4) (1, 10) @@ -290,30 +248,30 @@ String literals STRING 'R"ABC"' (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING 'R"ABC"' (1, 31) (1, 37) + """) - >>> dump_tokens("u'abc' + U'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("u'abc' + U'abc'", """\ STRING "u'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "U'abc'" (1, 9) (1, 15) - >>> dump_tokens('u"abc" + U"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('u"abc" + U"abc"', """\ STRING 'u"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'U"abc"' (1, 9) (1, 15) + """) - >>> dump_tokens("b'abc' + B'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("b'abc' + B'abc'", """\ STRING "b'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "B'abc'" (1, 9) (1, 15) - >>> dump_tokens('b"abc" + B"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('b"abc" + B"abc"', """\ STRING 'b"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'B"abc"' (1, 9) (1, 15) - >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\ STRING "br'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "bR'abc'" (1, 10) (1, 17) @@ -321,8 +279,8 @@ String literals STRING "Br'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "BR'abc'" (1, 30) (1, 37) - >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\ STRING 'br"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'bR"abc"' (1, 10) (1, 17) @@ -330,8 +288,8 @@ String literals STRING 'Br"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'BR"abc"' (1, 30) (1, 37) - >>> dump_tokens("rb'abc' + rB'abc' + Rb'abc' + RB'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("rb'abc' + rB'abc' + Rb'abc' + RB'abc'", """\ STRING "rb'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "rB'abc'" (1, 10) (1, 17) @@ -339,8 +297,8 @@ String literals STRING "Rb'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "RB'abc'" (1, 30) (1, 37) - >>> dump_tokens('rb"abc" + rB"abc" + Rb"abc" + RB"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('rb"abc" + rB"abc" + Rb"abc" + RB"abc"', """\ STRING 'rb"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'rB"abc"' (1, 10) (1, 17) @@ -348,11 +306,10 @@ String literals STRING 'Rb"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'RB"abc"' (1, 30) (1, 37) + """) -Operators - - >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_function(self): + self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd22' (1, 4) (1, 7) OP '(' (1, 7) (1, 8) @@ -373,8 +330,8 @@ Operators OP ')' (1, 26) (1, 27) OP ':' (1, 27) (1, 28) NAME 'pass' (1, 29) (1, 33) - >>> dump_tokens("def d01v_(a=1, *k, **w): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd01v_' (1, 4) (1, 9) OP '(' (1, 9) (1, 10) @@ -390,12 +347,12 @@ Operators OP ')' (1, 22) (1, 23) OP ':' (1, 23) (1, 24) NAME 'pass' (1, 25) (1, 29) + """) -Comparison - - >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + - ... 
"1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_comparison(self): + # Comparison + self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\ NAME 'if' (1, 0) (1, 2) NUMBER '1' (1, 3) (1, 4) OP '<' (1, 5) (1, 6) @@ -428,11 +385,11 @@ Comparison NUMBER '1' (1, 81) (1, 82) OP ':' (1, 82) (1, 83) NAME 'pass' (1, 84) (1, 88) + """) -Shift - - >>> dump_tokens("x = 1 << 1 >> 5") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_shift(self): + # Shift + self.check_tokenize("x = 1 << 1 >> 5", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -440,11 +397,11 @@ Shift NUMBER '1' (1, 9) (1, 10) OP '>>' (1, 11) (1, 13) NUMBER '5' (1, 14) (1, 15) + """) -Additive - - >>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_additive(self): + # Additive + self.check_tokenize("x = 1 - y + 15 - 1 + 0x124 + z + a[5]", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -463,11 +420,11 @@ Additive OP '[' (1, 34) (1, 35) NUMBER '5' (1, 35) (1, 36) OP ']' (1, 36) (1, 37) + """) -Multiplicative - - >>> dump_tokens("x = 1//1*1/5*12%0x12@42") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_multiplicative(self): + # Multiplicative + self.check_tokenize("x = 1//1*1/5*12%0x12@42", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -483,11 +440,11 @@ Multiplicative NUMBER '0x12' (1, 16) (1, 20) OP '@' (1, 20) (1, 21) NUMBER '42' (1, 21) (1, 23) + """) -Unary - - >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unary(self): + # Unary + self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\ OP '~' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '^' (1, 3) (1, 4) @@ -499,8 +456,8 @@ Unary OP '^' (1, 14) (1, 15) OP '-' (1, 16) (1, 17) NUMBER '1' (1, 17) (1, 18) - >>> dump_tokens("-1*1/1+1*1//1 - ---1**1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\ OP '-' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '*' (1, 2) (1, 3) @@ -520,11 +477,11 @@ Unary NUMBER '1' (1, 19) (1, 20) OP '**' (1, 20) (1, 22) NUMBER '1' (1, 22) (1, 23) + """) -Selector - - >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_selector(self): + # Selector + self.check_tokenize("import sys, time\nx = sys.modules['time'].time()", """\ NAME 'import' (1, 0) (1, 6) NAME 'sys' (1, 7) (1, 10) OP ',' (1, 10) (1, 11) @@ -542,11 +499,11 @@ Selector NAME 'time' (2, 24) (2, 28) OP '(' (2, 28) (2, 29) OP ')' (2, 29) (2, 30) + """) -Methods - - >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_method(self): + # Methods + self.check_tokenize("@staticmethod\ndef foo(x,y): pass", """\ OP '@' (1, 0) (1, 1) NAME 'staticmethod (1, 1) (1, 13) NEWLINE '\\n' (1, 13) (1, 14) @@ -559,52 +516,13 @@ Methods OP ')' (2, 11) (2, 12) OP ':' (2, 12) (2, 13) NAME 'pass' (2, 14) (2, 18) + """) -Backslash means line continuation, except for comments - - >>> roundtrip("x=1+\\\\n" - ... "1\\n" - ... "# This is a comment\\\\n" - ... "# This also\\n") - True - >>> roundtrip("# Comment \\\\nx = 0") - True - -Two string literals on the same line - - >>> roundtrip("'' ''") - True - -Test roundtrip on random python modules. -pass the '-ucpu' option to process the full directory. 
- - >>> import random - >>> tempdir = os.path.dirname(f) or os.curdir - >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py")) - -Tokenize is broken on test_pep3131.py because regular expressions are -broken on the obscure unicode identifiers in it. *sigh* -With roundtrip extended to test the 5-tuple mode of untokenize, -7 more testfiles fail. Remove them also until the failure is diagnosed. - - >>> testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) - >>> for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): - ... testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) - ... - >>> if not support.is_resource_enabled("cpu"): - ... testfiles = random.sample(testfiles, 10) - ... - >>> for testfile in testfiles: - ... if not roundtrip(open(testfile, 'rb')): - ... print("Roundtrip failed for file %s" % testfile) - ... break - ... else: True - True - -Evil tabs - - >>> dump_tokens("def f():\\n\\tif x\\n \\tpass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_tabs(self): + # Evil tabs + self.check_tokenize("def f():\n" + "\tif x\n" + " \tpass", """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -619,11 +537,11 @@ Evil tabs NAME 'pass' (3, 9) (3, 13) DEDENT '' (4, 0) (4, 0) DEDENT '' (4, 0) (4, 0) + """) -Non-ascii identifiers - - >>> dump_tokens("Örter = 'places'\\ngrün = 'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_non_ascii_identifiers(self): + # Non-ascii identifiers + self.check_tokenize("Örter = 'places'\ngrün = 'green'", """\ NAME 'Örter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "'places'" (1, 8) (1, 16) @@ -631,11 +549,11 @@ Non-ascii identifiers NAME 'grün' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "'green'" (2, 7) (2, 14) + """) -Legacy unicode literals: - - >>> dump_tokens("Örter = u'places'\\ngrün = U'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unicode(self): + # Legacy unicode literals: + self.check_tokenize("Örter = u'places'\ngrün = U'green'", """\ NAME 'Örter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "u'places'" (1, 8) (1, 17) @@ -643,17 +561,17 @@ Legacy unicode literals: NAME 'grün' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "U'green'" (2, 7) (2, 15) + """) -Async/await extension: - - >>> dump_tokens("async = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_async(self): + # Async/await extension: + self.check_tokenize("async = 1", """\ NAME 'async' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' (1, 8) (1, 9) + """) - >>> dump_tokens("a = (async = 1)") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("a = (async = 1)", """\ NAME 'a' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '(' (1, 4) (1, 5) @@ -661,15 +579,15 @@ Async/await extension: OP '=' (1, 11) (1, 12) NUMBER '1' (1, 13) (1, 14) OP ')' (1, 14) (1, 15) + """) - >>> dump_tokens("async()") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async()", """\ NAME 'async' (1, 0) (1, 5) OP '(' (1, 5) (1, 6) OP ')' (1, 6) (1, 7) + """) - >>> dump_tokens("class async(Bar):pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async(Bar):pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP '(' (1, 11) (1, 12) @@ -677,28 +595,28 @@ Async/await extension: OP ')' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens("class async:pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async:pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP ':' (1, 11) (1, 12) NAME 'pass' (1, 12) (1, 16) + """) - >>> dump_tokens("await = 1") - ENCODING 'utf-8' (0, 0) (0, 
0) + self.check_tokenize("await = 1", """\ NAME 'await' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' (1, 8) (1, 9) + """) - >>> dump_tokens("foo.async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) + """) - >>> dump_tokens("async for a in b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async for a in b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'for' (1, 6) (1, 9) NAME 'a' (1, 10) (1, 11) @@ -706,9 +624,9 @@ Async/await extension: NAME 'b' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 18) (1, 22) + """) - >>> dump_tokens("async with a as b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async with a as b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'with' (1, 6) (1, 10) NAME 'a' (1, 11) (1, 12) @@ -716,49 +634,49 @@ Async/await extension: NAME 'b' (1, 16) (1, 17) OP ':' (1, 17) (1, 18) NAME 'pass' (1, 19) (1, 23) + """) - >>> dump_tokens("async.foo") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async.foo", """\ NAME 'async' (1, 0) (1, 5) OP '.' (1, 5) (1, 6) NAME 'foo' (1, 6) (1, 9) + """) - >>> dump_tokens("async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async", """\ NAME 'async' (1, 0) (1, 5) + """) - >>> dump_tokens("async\\n#comment\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n#comment\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) COMMENT '#comment' (2, 0) (2, 8) NL '\\n' (2, 8) (2, 9) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\n...\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n...\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) OP '...' (2, 0) (2, 3) NEWLINE '\\n' (2, 3) (2, 4) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) NAME 'await' (2, 0) (2, 5) + """) - >>> dump_tokens("foo.async + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async + 1", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) NUMBER '1' (1, 12) (1, 13) + """) - >>> dump_tokens("async def foo(): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async def foo(): pass", """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -766,15 +684,16 @@ Async/await extension: OP ')' (1, 14) (1, 15) OP ':' (1, 15) (1, 16) NAME 'pass' (1, 17) (1, 21) - - >>> dump_tokens('''async def foo(): - ... def foo(await): - ... await = 1 - ... if 1: - ... await - ... async += 1 - ... ''') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + + self.check_tokenize('''\ +async def foo(): + def foo(await): + await = 1 + if 1: + await +async += 1 +''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -809,10 +728,11 @@ Async/await extension: OP '+=' (6, 6) (6, 8) NUMBER '1' (6, 9) (6, 10) NEWLINE '\\n' (6, 10) (6, 11) + """) - >>> dump_tokens('''async def foo(): - ... 
async for i in 1: pass''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + async for i in 1: pass''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -829,9 +749,9 @@ Async/await extension: OP ':' (2, 18) (2, 19) NAME 'pass' (2, 20) (2, 24) DEDENT '' (3, 0) (3, 0) + """) - >>> dump_tokens('''async def foo(async): await''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''async def foo(async): await''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -840,14 +760,15 @@ Async/await extension: OP ')' (1, 19) (1, 20) OP ':' (1, 20) (1, 21) AWAIT 'await' (1, 22) (1, 27) + """) + + self.check_tokenize('''\ +def f(): - >>> dump_tokens('''def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + def baz(): pass + async def bar(): pass + + await = 2''', """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -876,14 +797,15 @@ Async/await extension: OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) + """) + + self.check_tokenize('''\ +async def f(): + + def baz(): pass + async def bar(): pass - >>> dump_tokens('''async def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + await = 2''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'f' (1, 10) (1, 11) @@ -913,89 +835,10 @@ Async/await extension: OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) -""" - -from test import support -from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, - STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, - open as tokenize_open, Untokenizer) -from io import BytesIO -from unittest import TestCase, mock -import os -import token + """) -def dump_tokens(s): - """Print out the tokens in s in a table format. - The ENDMARKER is omitted. - """ - f = BytesIO(s.encode('utf-8')) - for type, token, start, end, line in tokenize(f.readline): - if type == ENDMARKER: - break - type = tok_name[type] - print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals()) - -def roundtrip(f): - """ - Test roundtrip for `untokenize`. `f` is an open file or a string. - The source code in f is tokenized to both 5- and 2-tuples. - Both sequences are converted back to source code via - tokenize.untokenize(), and the latter tokenized again to 2-tuples. - The test fails if the 3 pair tokenizations do not match. - - When untokenize bugs are fixed, untokenize with 5-tuples should - reproduce code that does not contain a backslash continuation - following spaces. A proper test should test this. - - This function would be more useful for correcting bugs if it reported - the first point of failure, like assertEqual, rather than just - returning False -- or if it were only used in unittests and not - doctest and actually used assertEqual. 
- """ - # Get source code and original tokenizations - if isinstance(f, str): - code = f.encode('utf-8') - else: - code = f.read() - f.close() - readline = iter(code.splitlines(keepends=True)).__next__ - tokens5 = list(tokenize(readline)) - tokens2 = [tok[:2] for tok in tokens5] - # Reproduce tokens2 from pairs - bytes_from2 = untokenize(tokens2) - readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ - tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] - # Reproduce tokens2 from 5-tuples - bytes_from5 = untokenize(tokens5) - readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ - tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] - # Compare 3 versions - return tokens2 == tokens2_from2 == tokens2_from5 - -# This is an example from the docs, set up as a doctest. def decistmt(s): - """Substitute Decimals for floats in a string of statements. - - >>> from decimal import Decimal - >>> s = 'print(+21.3e-5*-.1234/81.7)' - >>> decistmt(s) - "print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))" - - The format of the exponent is inherited from the platform C library. - Known cases are "e-007" (Windows) and "e-07" (not Windows). Since - we're only showing 11 digits, and the 12th isn't close to 5, the - rest of the output should be platform-independent. - - >>> exec(s) #doctest: +ELLIPSIS - -3.2171603427...e-0...7 - - Output from calculations with Decimal should be identical across all - platforms. - - >>> exec(decistmt(s)) - -3.217160342717258261933904529E-7 - """ result = [] g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string for toknum, tokval, _, _, _ in g: @@ -1010,6 +853,28 @@ def decistmt(s): result.append((toknum, tokval)) return untokenize(result).decode('utf-8') +class TestMisc(TestCase): + + def test_decistmt(self): + # Substitute Decimals for floats in a string of statements. + # This is an example from the docs. + + from decimal import Decimal + s = '+21.3e-5*-.1234/81.7' + self.assertEqual(decistmt(s), + "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')") + + # The format of the exponent is inherited from the platform C library. + # Known cases are "e-007" (Windows) and "e-07" (not Windows). Since + # we're only showing 11 digits, and the 12th isn't close to 5, the + # rest of the output should be platform-independent. + self.assertRegex(repr(eval(s)), '-3.2171603427[0-9]*e-0+7') + + # Output from calculations with Decimal should be identical across all + # platforms. 
+ self.assertEqual(eval(decistmt(s)), + Decimal('-3.217160342717258261933904529E-7')) + class TestTokenizerAdheresToPep0263(TestCase): """ @@ -1018,11 +883,11 @@ class TestTokenizerAdheresToPep0263(TestCase): def _testFile(self, filename): path = os.path.join(os.path.dirname(__file__), filename) - return roundtrip(open(path, 'rb')) + TestRoundtrip.check_roundtrip(self, open(path, 'rb')) def test_utf8_coding_cookie_and_no_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_latin1_coding_cookie_and_utf8_bom(self): """ @@ -1037,11 +902,11 @@ class TestTokenizerAdheresToPep0263(TestCase): def test_no_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_utf8_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_bad_coding_cookie(self): self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py') @@ -1340,7 +1205,6 @@ class TestDetectEncoding(TestCase): self.assertTrue(m.closed) - class TestTokenize(TestCase): def test_tokenize(self): @@ -1472,6 +1336,7 @@ class TestTokenize(TestCase): # See http://bugs.python.org/issue16152 self.assertExactTypeEqual('@ ', token.AT) + class UntokenizeTest(TestCase): def test_bad_input_order(self): @@ -1497,7 +1362,7 @@ class UntokenizeTest(TestCase): u.prev_row = 2 u.add_whitespace((4, 4)) self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' ']) - self.assertTrue(roundtrip('a\n b\n c\n \\\n c\n')) + TestRoundtrip.check_roundtrip(self, 'a\n b\n c\n \\\n c\n') def test_iter_compat(self): u = Untokenizer() @@ -1514,6 +1379,131 @@ class UntokenizeTest(TestCase): class TestRoundtrip(TestCase): + + def check_roundtrip(self, f): + """ + Test roundtrip for `untokenize`. `f` is an open file or a string. + The source code in f is tokenized to both 5- and 2-tuples. + Both sequences are converted back to source code via + tokenize.untokenize(), and the latter tokenized again to 2-tuples. + The test fails if the 3 pair tokenizations do not match. + + When untokenize bugs are fixed, untokenize with 5-tuples should + reproduce code that does not contain a backslash continuation + following spaces. A proper test should test this. + """ + # Get source code and original tokenizations + if isinstance(f, str): + code = f.encode('utf-8') + else: + code = f.read() + f.close() + readline = iter(code.splitlines(keepends=True)).__next__ + tokens5 = list(tokenize(readline)) + tokens2 = [tok[:2] for tok in tokens5] + # Reproduce tokens2 from pairs + bytes_from2 = untokenize(tokens2) + readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ + tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] + self.assertEqual(tokens2_from2, tokens2) + # Reproduce tokens2 from 5-tuples + bytes_from5 = untokenize(tokens5) + readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ + tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] + self.assertEqual(tokens2_from5, tokens2) + + def test_roundtrip(self): + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print(x)\n") + self.check_roundtrip("# This is a comment\n" + "# This also") + + # Some people use different formatting conventions, which makes + # untokenize a little trickier. Note that this test involves trailing + # whitespace after the colon. 
Note that we use hex escapes to make the + # two trailing blanks apparent in the expected output. + + self.check_roundtrip("if x == 1 : \n" + " print(x)\n") + fn = support.findfile("tokenize_tests.txt") + with open(fn, 'rb') as f: + self.check_roundtrip(f) + self.check_roundtrip("if x == 1:\n" + " # A comment by itself.\n" + " print(x) # Comment here, too.\n" + " # Another comment.\n" + "after_if = True\n") + self.check_roundtrip("if (x # The comments need to go in the right place\n" + " == 1):\n" + " print('x==1')\n") + self.check_roundtrip("class Test: # A comment here\n" + " # A comment with weird indent\n" + " after_com = 5\n" + " def x(m): return m*5 # a one liner\n" + " def y(m): # A whitespace after the colon\n" + " return y*4 # 3-space indent\n") + + # Some error-handling code + self.check_roundtrip("try: import somemodule\n" + "except ImportError: # comment\n" + " print('Can not import' # comment2\n)" + "else: print('Loaded')\n") + + def test_continuation(self): + # Balancing continuation + self.check_roundtrip("a = (3,4, \n" + "5,6)\n" + "y = [3, 4,\n" + "5]\n" + "z = {'a': 5,\n" + "'b':15, 'c':True}\n" + "x = len(y) + 5 - a[\n" + "3] - a[2]\n" + "+ len(z) - z[\n" + "'b']\n") + + def test_backslash_continuation(self): + # Backslash means line continuation, except for comments + self.check_roundtrip("x=1+\\\n" + "1\n" + "# This is a comment\\\n" + "# This also\n") + self.check_roundtrip("# Comment \\\n" + "x = 0") + + def test_string_concatenation(self): + # Two string literals on the same line + self.check_roundtrip("'' ''") + + def test_random_files(self): + # Test roundtrip on random python modules. + # pass the '-ucpu' option to process the full directory. + + import glob, random + fn = support.findfile("tokenize_tests.txt") + tempdir = os.path.dirname(fn) or os.curdir + testfiles = glob.glob(os.path.join(tempdir, "test*.py")) + + # Tokenize is broken on test_pep3131.py because regular expressions are + # broken on the obscure unicode identifiers in it. *sigh* + # With roundtrip extended to test the 5-tuple mode of untokenize, + # 7 more testfiles fail. Remove them also until the failure is diagnosed. 
+ + testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) + for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): + testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) + + if not support.is_resource_enabled("cpu"): + testfiles = random.sample(testfiles, 10) + + for testfile in testfiles: + with open(testfile, 'rb') as f: + with self.subTest(file=testfile): + self.check_roundtrip(f) + + def roundtrip(self, code): if isinstance(code, str): code = code.encode('utf-8') @@ -1527,19 +1517,8 @@ class TestRoundtrip(TestCase): code = "if False:\n\tx=3\n\tx=3\n" codelines = self.roundtrip(code).split('\n') self.assertEqual(codelines[1], codelines[2]) + self.check_roundtrip(code) -__test__ = {"doctests" : doctests, 'decistmt': decistmt} - -def test_main(): - from test import test_tokenize - support.run_doctest(test_tokenize, True) - support.run_unittest(TestTokenizerAdheresToPep0263) - support.run_unittest(Test_Tokenize) - support.run_unittest(TestDetectEncoding) - support.run_unittest(TestTokenize) - support.run_unittest(UntokenizeTest) - support.run_unittest(TestRoundtrip) - if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py index 641a2df0..b7695d6e 100644 --- a/Lib/test/test_traceback.py +++ b/Lib/test/test_traceback.py @@ -289,6 +289,31 @@ class TracebackFormatTests(unittest.TestCase): self.assertEqual(ststderr.getvalue(), "".join(stfmt)) + def test_print_stack(self): + def prn(): + traceback.print_stack() + with captured_output("stderr") as stderr: + prn() + lineno = prn.__code__.co_firstlineno + self.assertEqual(stderr.getvalue().splitlines()[-4:], [ + ' File "%s", line %d, in test_print_stack' % (__file__, lineno+3), + ' prn()', + ' File "%s", line %d, in prn' % (__file__, lineno+1), + ' traceback.print_stack()', + ]) + + def test_format_stack(self): + def fmt(): + return traceback.format_stack() + result = fmt() + lineno = fmt.__code__.co_firstlineno + self.assertEqual(result[-2:], [ + ' File "%s", line %d, in test_format_stack\n' + ' result = fmt()\n' % (__file__, lineno+2), + ' File "%s", line %d, in fmt\n' + ' return traceback.format_stack()\n' % (__file__, lineno+1), + ]) + cause_message = ( "\nThe above exception was the direct cause " @@ -610,6 +635,16 @@ class MiscTracebackCases(unittest.TestCase): # Local variable dict should now be empty. self.assertEqual(len(inner_frame.f_locals), 0) + def test_extract_stack(self): + def extract(): + return traceback.extract_stack() + result = extract() + lineno = extract.__code__.co_firstlineno + self.assertEqual(result[-2:], [ + (__file__, lineno+2, 'test_extract_stack', 'result = extract()'), + (__file__, lineno+1, 'extract', 'return traceback.extract_stack()'), + ]) + class TestFrame(unittest.TestCase): @@ -617,10 +652,16 @@ class TestFrame(unittest.TestCase): linecache.clearcache() linecache.lazycache("f", globals()) f = traceback.FrameSummary("f", 1, "dummy") - self.assertEqual( - ("f", 1, "dummy", '"""Test cases for traceback module"""'), - tuple(f)) - self.assertEqual(None, f.locals) + self.assertEqual(f, + ("f", 1, "dummy", '"""Test cases for traceback module"""')) + self.assertEqual(tuple(f), + ("f", 1, "dummy", '"""Test cases for traceback module"""')) + self.assertEqual(f, traceback.FrameSummary("f", 1, "dummy")) + self.assertEqual(f, tuple(f)) + # Since tuple.__eq__ doesn't support FrameSummary, the equality + # operator fallbacks to FrameSummary.__eq__. 
+ self.assertEqual(tuple(f), f) + self.assertIsNone(f.locals) def test_lazy_lines(self): linecache.clearcache() diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py index 1461cfbe..060119a1 100644 --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -1,11 +1,7 @@ -from collections import namedtuple +import pickle import re import sys from unittest import TestCase, main -try: - from unittest import mock -except ImportError: - import mock # 3rd party install, for PY3.2. from typing import Any from typing import TypeVar, AnyStr @@ -317,6 +313,10 @@ class UnionTests(TestCase): with self.assertRaises(TypeError): isinstance(42, Union[int, str]) + def test_union_str_pattern(self): + # Shouldn't crash; see http://bugs.python.org/issue25390 + A = Union[str, Pattern] + class TypeVarUnionTests(TestCase): @@ -579,6 +579,36 @@ class GenericTests(TestCase): self.assertEqual(repr(MySimpleMapping), __name__ + '.' + 'MySimpleMapping[~XK, ~XV]') + def test_dict(self): + T = TypeVar('T') + class B(Generic[T]): + pass + b = B() + b.foo = 42 + self.assertEqual(b.__dict__, {'foo': 42}) + class C(B[int]): + pass + c = C() + c.bar = 'abc' + self.assertEqual(c.__dict__, {'bar': 'abc'}) + + def test_pickle(self): + T = TypeVar('T') + class B(Generic[T]): + pass + global C # pickle wants to reference the class by name + class C(B[int]): + pass + c = C() + c.foo = 42 + c.bar = 'abc' + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + z = pickle.dumps(c, proto) + x = pickle.loads(z) + self.assertEqual(x.foo, 42) + self.assertEqual(x.bar, 'abc') + self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'}) + def test_errors(self): with self.assertRaises(TypeError): B = SimpleMapping[XK, Any] @@ -1134,6 +1164,15 @@ class NamedTupleTests(TestCase): assert Emp._fields == ('name', 'id') assert Emp._field_types == dict(name=str, id=int) + def test_pickle(self): + global Emp # pickle wants to reference the class by name + Emp = NamedTuple('Emp', [('name', str), ('id', int)]) + jane = Emp('jane', 37) + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + z = pickle.dumps(jane, proto) + jane2 = pickle.loads(z) + self.assertEqual(jane2, jane) + class IOTests(TestCase): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py index 3fcb590f..d7f37c59 100644 --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -849,7 +849,15 @@ class UnicodeTest(string_tests.CommonTest, @support.cpython_only def test_case_operation_overflow(self): # Issue #22643 - self.assertRaises(OverflowError, ("ü"*(2**32//12 + 1)).upper) + size = 2**32//12 + 1 + try: + s = "ü" * size + except MemoryError: + self.skipTest('no enough memory (%.0f MiB required)' % (size / 2**20)) + try: + self.assertRaises(OverflowError, s.upper) + finally: + del s def test_contains(self): # Testing Unicode contains method @@ -1553,7 +1561,7 @@ class UnicodeTest(string_tests.CommonTest, self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde') # Issue #2242: crash on some Windows/MSVC versions - self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1') + self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '') # Direct encoded characters set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?" 
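A minimal sketch of the behaviour the updated utf-7 assertion above relies on (illustrative only, not part of the patch; assumes the issue #24848 decoder fixes listed in the Misc/NEWS section below):

    # An ill-formed UTF-7 shift sequence is now a decoding error under the
    # default 'strict' handler, while the 'ignore' handler simply drops it.
    try:
        b'+\xc1'.decode('utf-7')
    except UnicodeDecodeError as exc:
        print('strict:', exc.reason)
    print('ignore:', repr(b'+\xc1'.decode('utf-7', 'ignore')))   # expected: ''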
@@ -1995,6 +2003,7 @@ class UnicodeTest(string_tests.CommonTest, self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict') self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x") self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x') + self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x') # Error handling (unknown character names) self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx") @@ -2690,6 +2699,23 @@ class UnicodeTest(string_tests.CommonTest, self.assertTrue(astral >= bmp2) self.assertFalse(astral >= astral2) + @support.cpython_only + def test_pep393_utf8_caching_bug(self): + # Issue #25709: Problem with string concatenation and utf-8 cache + from _testcapi import getargs_s_hash + for k in 0x24, 0xa4, 0x20ac, 0x1f40d: + s = '' + for i in range(5): + # Due to CPython specific optimization the 's' string can be + # resized in-place. + s += chr(k) + # Parsing with the "s#" format code calls indirectly + # PyUnicode_AsUTF8AndSize() which creates the UTF-8 + # encoded string cached in the Unicode object. + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + # Check that the second call returns the same result + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + class StringModuleTest(unittest.TestCase): def test_formatter_parser(self): diff --git a/Lib/test/test_unicode_file.py b/Lib/test/test_unicode_file.py index faa8da3c..e4709a18 100644 --- a/Lib/test/test_unicode_file.py +++ b/Lib/test/test_unicode_file.py @@ -5,7 +5,7 @@ import os, glob, time, shutil import unicodedata import unittest -from test.support import (run_unittest, rmtree, +from test.support import (run_unittest, rmtree, change_cwd, TESTFN_ENCODING, TESTFN_UNICODE, TESTFN_UNENCODABLE, create_empty_file) if not os.path.supports_unicode_filenames: @@ -82,13 +82,11 @@ class TestUnicodeFiles(unittest.TestCase): self.assertFalse(os.path.exists(filename2 + '.new')) def _do_directory(self, make_name, chdir_name): - cwd = os.getcwd() if os.path.isdir(make_name): rmtree(make_name) os.mkdir(make_name) try: - os.chdir(chdir_name) - try: + with change_cwd(chdir_name): cwd_result = os.getcwd() name_result = make_name @@ -96,8 +94,6 @@ class TestUnicodeFiles(unittest.TestCase): name_result = unicodedata.normalize("NFD", name_result) self.assertEqual(os.path.basename(cwd_result),name_result) - finally: - os.chdir(cwd) finally: os.rmdir(make_name) diff --git a/Lib/test/test_unicodedata.py b/Lib/test/test_unicodedata.py index 0f33d19f..6ecc9136 100644 --- a/Lib/test/test_unicodedata.py +++ b/Lib/test/test_unicodedata.py @@ -9,8 +9,7 @@ import sys import unittest import hashlib -import subprocess -import test.support +from test.support import script_helper encoding = 'utf-8' errors = 'surrogatepass' @@ -234,16 +233,12 @@ class UnicodeMiscTest(UnicodeDatabaseTest): code = "import sys;" \ "sys.modules['unicodedata'] = None;" \ """eval("'\\\\N{SOFT HYPHEN}'")""" - args = [sys.executable, "-c", code] - # We use a subprocess because the unicodedata module may already have - # been loaded in this process. - popen = subprocess.Popen(args, stderr=subprocess.PIPE) - popen.wait() - self.assertEqual(popen.returncode, 1) + # We use a separate process because the unicodedata module may already + # have been loaded in this process. 
+ result = script_helper.assert_python_failure("-c", code) error = "SyntaxError: (unicode error) \\N escapes not supported " \ "(can't load unicodedata module)" - self.assertIn(error, popen.stderr.read().decode("ascii")) - popen.stderr.close() + self.assertIn(error, result.err.decode("ascii")) def test_decimal_numeric_consistent(self): # Test that decimal and numeric are consistent, diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py index 58ca2a5c..57eeeaa6 100644 --- a/Lib/test/test_urllib.py +++ b/Lib/test/test_urllib.py @@ -527,7 +527,7 @@ class urlretrieve_FileTests(unittest.TestCase): result = urllib.request.urlretrieve("file:%s" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, - "did not get a email.message.Message instance " + "did not get an email.message.Message instance " "as second returned value") def test_copy(self): diff --git a/Lib/test/test_userdict.py b/Lib/test/test_userdict.py index c83cc55f..12889438 100644 --- a/Lib/test/test_userdict.py +++ b/Lib/test/test_userdict.py @@ -29,7 +29,8 @@ class UserDictTest(mapping_tests.TestHashMappingProtocol): self.assertEqual(collections.UserDict(one=1, two=2), d2) # item sequence constructor self.assertEqual(collections.UserDict([('one',1), ('two',2)]), d2) - self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]), d2) + with self.assertWarnsRegex(PendingDeprecationWarning, "'dict'"): + self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]), d2) # both together self.assertEqual(collections.UserDict([('one',1), ('two',2)], two=3, three=5), d3) @@ -139,6 +140,30 @@ class UserDictTest(mapping_tests.TestHashMappingProtocol): self.assertEqual(t.popitem(), ("x", 42)) self.assertRaises(KeyError, t.popitem) + def test_init(self): + for kw in 'self', 'other', 'iterable': + self.assertEqual(list(collections.UserDict(**{kw: 42}).items()), + [(kw, 42)]) + self.assertEqual(list(collections.UserDict({}, dict=42).items()), + [('dict', 42)]) + self.assertEqual(list(collections.UserDict({}, dict=None).items()), + [('dict', None)]) + with self.assertWarnsRegex(PendingDeprecationWarning, "'dict'"): + self.assertEqual(list(collections.UserDict(dict={'a': 42}).items()), + [('a', 42)]) + self.assertRaises(TypeError, collections.UserDict, 42) + self.assertRaises(TypeError, collections.UserDict, (), ()) + self.assertRaises(TypeError, collections.UserDict.__init__) + + def test_update(self): + for kw in 'self', 'dict', 'other', 'iterable': + d = collections.UserDict() + d.update(**{kw: 42}) + self.assertEqual(list(d.items()), [(kw, 42)]) + self.assertRaises(TypeError, collections.UserDict().update, 42) + self.assertRaises(TypeError, collections.UserDict().update, {}, {}) + self.assertRaises(TypeError, collections.UserDict.update) + def test_missing(self): # Make sure UserDict doesn't have a __missing__ method self.assertEqual(hasattr(collections.UserDict, "__missing__"), False) @@ -146,7 +171,7 @@ class UserDictTest(mapping_tests.TestHashMappingProtocol): # (D) subclass defines __missing__ method returning a value # (E) subclass defines __missing__ method raising RuntimeError # (F) subclass sets __missing__ instance variable (no effect) - # (G) subclass doesn't define __missing__ at a all + # (G) subclass doesn't define __missing__ at all class D(collections.UserDict): def __missing__(self, key): return 42 diff --git a/Lib/test/test_warnings/__init__.py b/Lib/test/test_warnings/__init__.py index 991a249f..cea9c574 100644 --- 
a/Lib/test/test_warnings/__init__.py +++ b/Lib/test/test_warnings/__init__.py @@ -44,6 +44,7 @@ class BaseTest: """Basic bookkeeping required for testing.""" def setUp(self): + self.old_unittest_module = unittest.case.warnings # The __warningregistry__ needs to be in a pristine state for tests # to work properly. if '__warningregistry__' in globals(): @@ -55,10 +56,15 @@ class BaseTest: # The 'warnings' module must be explicitly set so that the proper # interaction between _warnings and 'warnings' can be controlled. sys.modules['warnings'] = self.module + # Ensure that unittest.TestCase.assertWarns() uses the same warnings + # module than warnings.catch_warnings(). Otherwise, + # warnings.catch_warnings() will be unable to remove the added filter. + unittest.case.warnings = self.module super(BaseTest, self).setUp() def tearDown(self): sys.modules['warnings'] = original_warnings + unittest.case.warnings = self.old_unittest_module super(BaseTest, self).tearDown() class PublicAPITests(BaseTest): diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index afd0b62f..f37f1e9e 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -947,7 +947,7 @@ class SubclassableWeakrefTestCase(TestBase): class WeakMethodTestCase(unittest.TestCase): def _subclass(self): - """Return a Object subclass overriding `some_method`.""" + """Return an Object subclass overriding `some_method`.""" class C(Object): def some_method(self): return 6 @@ -1421,6 +1421,18 @@ class MappingTestCase(TestBase): dict2 = weakref.WeakValueDictionary(dict) self.assertEqual(dict[364], o) + def test_make_weak_valued_dict_misc(self): + # errors + self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__) + self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {}) + self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ()) + # special keyword arguments + o = Object(3) + for kw in 'self', 'dict', 'other', 'iterable': + d = weakref.WeakValueDictionary(**{kw: o}) + self.assertEqual(list(d.keys()), [kw]) + self.assertEqual(d[kw], o) + def make_weak_valued_dict(self): dict = weakref.WeakValueDictionary() objects = list(map(Object, range(self.COUNT))) @@ -1501,6 +1513,19 @@ class MappingTestCase(TestBase): def test_weak_valued_dict_update(self): self.check_update(weakref.WeakValueDictionary, {1: C(), 'a': C(), C(): C()}) + # errors + self.assertRaises(TypeError, weakref.WeakValueDictionary.update) + d = weakref.WeakValueDictionary() + self.assertRaises(TypeError, d.update, {}, {}) + self.assertRaises(TypeError, d.update, (), ()) + self.assertEqual(list(d.keys()), []) + # special keyword arguments + o = Object(3) + for kw in 'self', 'dict', 'other', 'iterable': + d = weakref.WeakValueDictionary() + d.update(**{kw: o}) + self.assertEqual(list(d.keys()), [kw]) + self.assertEqual(d[kw], o) def test_weak_keyed_dict_update(self): self.check_update(weakref.WeakKeyDictionary, diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py index 112a1b9d..8cca595a 100644 --- a/Lib/test/test_wsgiref.py +++ b/Lib/test/test_wsgiref.py @@ -1,11 +1,10 @@ -from __future__ import nested_scopes # Backward compat for 2.1 from unittest import TestCase from wsgiref.util import setup_testing_defaults from wsgiref.headers import Headers from wsgiref.handlers import BaseHandler, BaseCGIHandler from wsgiref import util from wsgiref.validate import validator -from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app +from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from 
wsgiref.simple_server import make_server from io import StringIO, BytesIO, BufferedReader from socketserver import BaseServer @@ -14,8 +13,8 @@ from platform import python_implementation import os import re import sys +import unittest -from test import support class MockServer(WSGIServer): """Non-socket HTTP server""" diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index f83db7f6..d09b969e 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -5,6 +5,7 @@ # For this purpose, the module-level "ET" symbol is temporarily # monkey-patched when running the "test_xml_etree_c" test suite. +import copy import html import io import operator @@ -2082,6 +2083,19 @@ class ElementIterTest(unittest.TestCase): self.assertEqual(self._ilist(doc), all_tags) self.assertEqual(self._ilist(doc, '*'), all_tags) + def test_copy(self): + a = ET.Element('a') + it = a.iter() + with self.assertRaises(TypeError): + copy.copy(it) + + def test_pickle(self): + a = ET.Element('a') + it = a.iter() + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.assertRaises((TypeError, pickle.PicklingError)): + pickle.dumps(it, proto) + class TreeBuilderTest(unittest.TestCase): sample1 = ('abc') - self.assertEqual(serialize(elem, encoding="utf-8"), - b'abc') - self.assertEqual(serialize(elem, encoding="us-ascii"), - b'abc') + for enc in ("utf-8", "us-ascii"): + with self.subTest(enc): + self.assertEqual(serialize(elem, encoding=enc), + b'abc') + self.assertEqual(serialize(elem, encoding=enc.upper()), + b'abc') for enc in ("iso-8859-1", "utf-16", "utf-32"): - self.assertEqual(serialize(elem, encoding=enc), - ("\n" - "abc" % enc).encode(enc)) + with self.subTest(enc): + self.assertEqual(serialize(elem, encoding=enc), + ("\n" + "abc" % enc).encode(enc)) + upper = enc.upper() + self.assertEqual(serialize(elem, encoding=upper), + ("\n" + "abc" % upper).encode(enc)) elem = ET.Element("tag") elem.text = "<&\"\'>" diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py index 67e85704..2c10821a 100644 --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -684,6 +684,13 @@ class PyZipFileTests(unittest.TestCase): if not os.access(path, os.W_OK, effective_ids=os.access in os.supports_effective_ids): self.skipTest('requires write access to the installed location') + filename = os.path.join(path, 'test_zipfile.try') + try: + fd = os.open(filename, os.O_WRONLY | os.O_CREAT) + os.close(fd) + except Exception: + self.skipTest('requires write access to the installed location') + unlink(filename) def test_write_pyfile(self): self.requiresWriteAccess(os.path.dirname(__file__)) diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py index bd935ede..88c415bc 100644 --- a/Lib/test/test_zlib.py +++ b/Lib/test/test_zlib.py @@ -1,6 +1,7 @@ import unittest from test import support import binascii +import pickle import random import sys from test.support import bigmemtest, _1G, _4G @@ -121,11 +122,17 @@ class ExceptionTestCase(unittest.TestCase): self.assertRaises(ValueError, zlib.decompressobj().flush, 0) self.assertRaises(ValueError, zlib.decompressobj().flush, -1) + @support.cpython_only + def test_overflow(self): + with self.assertRaisesRegex(OverflowError, 'int too large'): + zlib.decompress(b'', 15, sys.maxsize + 1) + with self.assertRaisesRegex(OverflowError, 'int too large'): + zlib.decompressobj().flush(sys.maxsize + 1) + class BaseCompressTestCase(object): def check_big_compress_buffer(self, size, compress_func): _1M = 1024 * 1024 - fmt = "%%0%dx" % (2 * _1M) # Generate 10MB 
worth of random, and expand it by repeating it. # The assumption is that zlib's memory is not big enough to exploit # such spread out redundancy. @@ -195,6 +202,18 @@ class CompressTestCase(BaseCompressTestCase, unittest.TestCase): finally: data = None + @bigmemtest(size=_4G, memuse=1) + def test_large_bufsize(self, size): + # Test decompress(bufsize) parameter greater than the internal limit + data = HAMLET_SCENE * 10 + compressed = zlib.compress(data, 1) + self.assertEqual(zlib.decompress(compressed, 15, size), data) + + def test_custom_bufsize(self): + data = HAMLET_SCENE * 10 + compressed = zlib.compress(data, 1) + self.assertEqual(zlib.decompress(compressed, 15, CustomInt()), data) + class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase): # Test compression object @@ -222,9 +241,9 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase): level = 2 method = zlib.DEFLATED wbits = -12 - memlevel = 9 + memLevel = 9 strategy = zlib.Z_FILTERED - co = zlib.compressobj(level, method, wbits, memlevel, strategy) + co = zlib.compressobj(level, method, wbits, memLevel, strategy) x1 = co.compress(HAMLET_SCENE) x2 = co.flush() dco = zlib.decompressobj(wbits) @@ -232,6 +251,10 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase): y2 = dco.flush() self.assertEqual(HAMLET_SCENE, y1 + y2) + # keyword arguments should also be supported + zlib.compressobj(level=level, method=method, wbits=wbits, + memLevel=memLevel, strategy=strategy, zdict=b"") + def test_compressincremental(self): # compress object in steps, decompress object as one-shot data = HAMLET_SCENE * 128 @@ -359,6 +382,21 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase): self.assertRaises(ValueError, dco.decompress, b"", -1) self.assertEqual(b'', dco.unconsumed_tail) + def test_maxlen_large(self): + # Sizes up to sys.maxsize should be accepted, although zlib is + # internally limited to expressing sizes with unsigned int + data = HAMLET_SCENE * 10 + self.assertGreater(len(data), zlib.DEF_BUF_SIZE) + compressed = zlib.compress(data, 1) + dco = zlib.decompressobj() + self.assertEqual(dco.decompress(compressed, sys.maxsize), data) + + def test_maxlen_custom(self): + data = HAMLET_SCENE * 10 + compressed = zlib.compress(data, 1) + dco = zlib.decompressobj() + self.assertEqual(dco.decompress(compressed, CustomInt()), data[:100]) + def test_clear_unconsumed_tail(self): # Issue #12050: calling decompress() without providing max_length # should clear the unconsumed_tail attribute. 
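The new zlib tests above exercise the issue #25626 change described in Misc/NEWS below: length arguments are accepted up to sys.maxsize and capped internally to UINT_MAX, and objects providing __int__ (the CustomInt helper added near the end of this test file) are accepted as well. A rough illustrative sketch of that behaviour, assuming Python 3.5.1 semantics; the MyLen helper below is a stand-in for this sketch, not part of the patch:

    import sys
    import zlib

    data = b'spam and eggs ' * 1000
    compressed = zlib.compress(data, 1)

    # max_length may now be any value up to sys.maxsize without overflowing.
    dco = zlib.decompressobj()
    print(dco.decompress(compressed, sys.maxsize) == data)    # expected: True

    class MyLen:
        def __int__(self):          # objects with __int__ work as lengths too
            return 100

    dco = zlib.decompressobj()
    print(len(dco.decompress(compressed, MyLen())))           # expected: 100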
@@ -532,6 +570,22 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase): data = zlib.compress(input2) self.assertEqual(dco.flush(), input1[1:]) + @bigmemtest(size=_4G, memuse=1) + def test_flush_large_length(self, size): + # Test flush(length) parameter greater than internal limit UINT_MAX + input = HAMLET_SCENE * 10 + data = zlib.compress(input, 1) + dco = zlib.decompressobj() + dco.decompress(data, 1) + self.assertEqual(dco.flush(size), input[1:]) + + def test_flush_custom_length(self): + input = HAMLET_SCENE * 10 + data = zlib.compress(input, 1) + dco = zlib.decompressobj() + dco.decompress(data, 1) + self.assertEqual(dco.flush(CustomInt()), input[1:]) + @requires_Compress_copy def test_compresscopy(self): # Test copying a compression object @@ -596,6 +650,16 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase): d.flush() self.assertRaises(ValueError, d.copy) + def test_compresspickle(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.assertRaises((TypeError, pickle.PicklingError)): + pickle.dumps(zlib.compressobj(zlib.Z_BEST_COMPRESSION), proto) + + def test_decompresspickle(self): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + with self.assertRaises((TypeError, pickle.PicklingError)): + pickle.dumps(zlib.decompressobj(), proto) + # Memory use of the following functions takes into account overallocation @bigmemtest(size=_1G + 1024 * 1024, memuse=3) @@ -710,5 +774,10 @@ LAERTES """ +class CustomInt: + def __int__(self): + return 100 + + if __name__ == "__main__": unittest.main() diff --git a/Lib/textwrap.py b/Lib/textwrap.py index 3ad3e18e..05e03067 100644 --- a/Lib/textwrap.py +++ b/Lib/textwrap.py @@ -444,11 +444,15 @@ def dedent(text): elif margin.startswith(indent): margin = indent - # Current line and previous winner have no common whitespace: - # there is no margin. + # Find the largest common whitespace between current line and previous + # winner. else: - margin = "" - break + for i, (x, y) in enumerate(zip(margin, indent)): + if x != y: + margin = margin[:i] + break + else: + margin = margin[:len(indent)] # sanity check (testing/debugging only) if 0 and margin: diff --git a/Lib/threading.py b/Lib/threading.py index 24cc911c..828019d4 100644 --- a/Lib/threading.py +++ b/Lib/threading.py @@ -499,7 +499,7 @@ class Event: def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() - self._cond.__init__() + self._cond.__init__(Lock()) def is_set(self): """Return true if and only if the internal flag is true.""" @@ -514,12 +514,9 @@ class Event: that call wait() once the flag is true will not block at all. """ - self._cond.acquire() - try: + with self._cond: self._flag = True self._cond.notify_all() - finally: - self._cond.release() def clear(self): """Reset the internal flag to false. @@ -528,11 +525,8 @@ class Event: set the internal flag to true again. """ - self._cond.acquire() - try: + with self._cond: self._flag = False - finally: - self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. @@ -549,14 +543,11 @@ class Event: True except if a timeout is given and the operation times out. """ - self._cond.acquire() - try: + with self._cond: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled - finally: - self._cond.release() # A barrier class. 
Inspired in part by the pthread_barrier_* api and diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py index ea747ac5..12085a9b 100644 --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -1123,7 +1123,7 @@ class Misc: return self._bind(('bind', className), sequence, func, add, 0) def unbind_class(self, className, sequence): - """Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE + """Unbind for all widgets with bindtag CLASSNAME for event SEQUENCE all functions.""" self.tk.call('bind', className , sequence, '') def mainloop(self, n=0): diff --git a/Lib/tkinter/test/support.py b/Lib/tkinter/test/support.py index 52df1040..dd155fad 100644 --- a/Lib/tkinter/test/support.py +++ b/Lib/tkinter/test/support.py @@ -23,7 +23,7 @@ class AbstractTkTest: def tearDownClass(cls): cls.root.update_idletasks() cls.root.destroy() - cls.root = None + del cls.root tkinter._default_root = None tkinter._support_default_root = cls._old_support_default_root diff --git a/Lib/tkinter/test/test_tkinter/test_font.py b/Lib/tkinter/test/test_tkinter/test_font.py index 25b59132..c094c61c 100644 --- a/Lib/tkinter/test/test_tkinter/test_font.py +++ b/Lib/tkinter/test/test_tkinter/test_font.py @@ -12,7 +12,7 @@ class FontTest(AbstractTkTest, unittest.TestCase): @classmethod def setUpClass(cls): - AbstractTkTest.setUpClass() + AbstractTkTest.setUpClass.__func__(cls) try: cls.font = font.Font(root=cls.root, name=fontname, exists=True) except tkinter.TclError: diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py index b9c57ad7..244fb3dd 100644 --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -289,7 +289,7 @@ def _val_or_dict(tk, options, *args): """Format options then call Tk command with args and options and return the appropriate result. - If no option is specified, a dict is returned. If a option is + If no option is specified, a dict is returned. If an option is specified with the None value, the value for that option is returned. Otherwise, the function just sets the passed options and the caller shouldn't be expecting a return value anyway.""" diff --git a/Lib/traceback.py b/Lib/traceback.py index 02edeb62..9b69da0e 100644 --- a/Lib/traceback.py +++ b/Lib/traceback.py @@ -181,11 +181,15 @@ def print_stack(f=None, limit=None, file=None): stack frame at which to start. The optional 'limit' and 'file' arguments have the same meaning as for print_exception(). """ + if f is None: + f = sys._getframe().f_back print_list(extract_stack(f, limit=limit), file=file) def format_stack(f=None, limit=None): """Shorthand for 'format_list(extract_stack(f, limit))'.""" + if f is None: + f = sys._getframe().f_back return format_list(extract_stack(f, limit=limit)) @@ -198,6 +202,8 @@ def extract_stack(f=None, limit=None): line number, function name, text), and the entries are in order from oldest to newest stack frame. 
""" + if f is None: + f = sys._getframe().f_back stack = StackSummary.extract(walk_stack(f), limit=limit) stack.reverse() return stack @@ -251,10 +257,14 @@ class FrameSummary: dict((k, repr(v)) for k, v in locals.items()) if locals else None def __eq__(self, other): - return (self.filename == other.filename and - self.lineno == other.lineno and - self.name == other.name and - self.locals == other.locals) + if isinstance(other, FrameSummary): + return (self.filename == other.filename and + self.lineno == other.lineno and + self.name == other.name and + self.locals == other.locals) + if isinstance(other, tuple): + return (self.filename, self.lineno, self.name, self.line) == other + return NotImplemented def __getitem__(self, pos): return (self.filename, self.lineno, self.name, self.line)[pos] diff --git a/Lib/typing.py b/Lib/typing.py index 1a4982ea..1757f138 100644 --- a/Lib/typing.py +++ b/Lib/typing.py @@ -487,6 +487,9 @@ class UnionMeta(TypingMeta): return Any if isinstance(t1, TypeVar): continue + if isinstance(t1, _TypeAlias): + # _TypeAlias is not a real class. + continue if any(issubclass(t1, t2) for t2 in all_params - {t1} if not isinstance(t2, TypeVar)): all_params.remove(t1) @@ -978,7 +981,7 @@ class GenericMeta(TypingMeta, abc.ABCMeta): "Cannot substitute %s for %s in %s" % (_type_repr(new), _type_repr(old), self)) - return self.__class__(self.__name__, self.__bases__, + return self.__class__(self.__name__, (self,) + self.__bases__, dict(self.__dict__), parameters=params, origin=self, @@ -1476,6 +1479,11 @@ def NamedTuple(typename, fields): fields = [(n, t) for n, t in fields] cls = collections.namedtuple(typename, [n for n, t in fields]) cls._field_types = dict(fields) + # Set the module to the caller's module (otherwise it'd be 'typing'). + try: + cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass return cls diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py index 7701ad3a..ac8d67dd 100644 --- a/Lib/unittest/case.py +++ b/Lib/unittest/case.py @@ -583,8 +583,11 @@ class TestCase(object): finally: result.stopTest(self) return - expecting_failure = getattr(testMethod, - "__unittest_expecting_failure__", False) + expecting_failure_method = getattr(testMethod, + "__unittest_expecting_failure__", False) + expecting_failure_class = getattr(self, + "__unittest_expecting_failure__", False) + expecting_failure = expecting_failure_class or expecting_failure_method outcome = _Outcome(result) try: self._outcome = outcome @@ -1279,8 +1282,10 @@ class TestCase(object): assert expected_regex, "expected_regex must not be empty." 
expected_regex = re.compile(expected_regex) if not expected_regex.search(text): - msg = msg or "Regex didn't match" - msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text) + standardMsg = "Regex didn't match: %r not found in %r" % ( + expected_regex.pattern, text) + # _formatMessage ensures the longMessage option is respected + msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) def assertNotRegex(self, text, unexpected_regex, msg=None): @@ -1289,11 +1294,12 @@ class TestCase(object): unexpected_regex = re.compile(unexpected_regex) match = unexpected_regex.search(text) if match: - msg = msg or "Regex matched" - msg = '%s: %r matches %r in %r' % (msg, - text[match.start():match.end()], - unexpected_regex.pattern, - text) + standardMsg = 'Regex matched: %r matches %r in %r' % ( + text[match.start() : match.end()], + unexpected_regex.pattern, + text) + # _formatMessage ensures the longMessage option is respected + msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) @@ -1315,6 +1321,7 @@ class TestCase(object): failIf = _deprecate(assertFalse) assertRaisesRegexp = _deprecate(assertRaisesRegex) assertRegexpMatches = _deprecate(assertRegex) + assertNotRegexpMatches = _deprecate(assertNotRegex) diff --git a/Lib/unittest/main.py b/Lib/unittest/main.py index b209a3ae..09fefe11 100644 --- a/Lib/unittest/main.py +++ b/Lib/unittest/main.py @@ -171,7 +171,7 @@ class TestProgram(object): if self.catchbreak is None: parser.add_argument('-c', '--catch', dest='catchbreak', action='store_true', - help='Catch ctrl-C and display results so far') + help='Catch Ctrl-C and display results so far') self.catchbreak = False if self.buffer is None: parser.add_argument('-b', '--buffer', dest='buffer', diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py index 4c586399..99ce1e2b 100644 --- a/Lib/unittest/mock.py +++ b/Lib/unittest/mock.py @@ -2005,8 +2005,7 @@ class _Call(tuple): else: other_args = () other_kwargs = value - else: - # len 2 + elif len_other == 2: # could be (name, args) or (name, kwargs) or (args, kwargs) first, second = other if isinstance(first, str): @@ -2017,6 +2016,8 @@ class _Call(tuple): other_args, other_kwargs = (), second else: other_args, other_kwargs = first, second + else: + return False if self_name and other_name != self_name: return False diff --git a/Lib/unittest/test/test_assertions.py b/Lib/unittest/test/test_assertions.py index c349a957..e6e2bc2c 100644 --- a/Lib/unittest/test/test_assertions.py +++ b/Lib/unittest/test/test_assertions.py @@ -133,7 +133,6 @@ class Test_Assertions(unittest.TestCase): try: self.assertNotRegex('Ala ma kota', r'k.t', 'Message') except self.failureException as e: - self.assertIn("'kot'", e.args[0]) self.assertIn('Message', e.args[0]) else: self.fail('assertNotRegex should have failed.') @@ -329,6 +328,20 @@ class TestLongMessage(unittest.TestCase): "^unexpectedly identical: None$", "^unexpectedly identical: None : oops$"]) + def testAssertRegex(self): + self.assertMessages('assertRegex', ('foo', 'bar'), + ["^Regex didn't match:", + "^oops$", + "^Regex didn't match:", + "^Regex didn't match: (.*) : oops$"]) + + def testAssertNotRegex(self): + self.assertMessages('assertNotRegex', ('foo', 'foo'), + ["^Regex matched:", + "^oops$", + "^Regex matched:", + "^Regex matched: (.*) : oops$"]) + def assertMessagesCM(self, methodName, args, func, errors): """ diff --git a/Lib/unittest/test/test_discovery.py b/Lib/unittest/test/test_discovery.py index 8991f385..55921feb 100644 --- 
a/Lib/unittest/test/test_discovery.py +++ b/Lib/unittest/test/test_discovery.py @@ -255,12 +255,12 @@ class TestDiscovery(unittest.TestCase): self.addCleanup(sys.path.remove, abspath('/foo')) # Test data: we expect the following: - # a listdir to find our package, and a isfile and isdir check on it. + # a listdir to find our package, and isfile and isdir checks on it. # a module-from-name call to turn that into a module # followed by load_tests. # then our load_tests will call discover() which is messy # but that finally chains into find_tests again for the child dir - - # which is why we don't have a infinite loop. + # which is why we don't have an infinite loop. # We expect to see: # the module load tests for both package and plain module called, # and the plain module result nested by the package module load_tests diff --git a/Lib/unittest/test/test_skipping.py b/Lib/unittest/test/test_skipping.py index 807510f1..71f7b70e 100644 --- a/Lib/unittest/test/test_skipping.py +++ b/Lib/unittest/test/test_skipping.py @@ -120,6 +120,39 @@ class Test_TestSkipping(unittest.TestCase): self.assertEqual(result.expectedFailures[0][0], test) self.assertTrue(result.wasSuccessful()) + def test_expected_failure_with_wrapped_class(self): + @unittest.expectedFailure + class Foo(unittest.TestCase): + def test_1(self): + self.assertTrue(False) + + events = [] + result = LoggingResult(events) + test = Foo("test_1") + test.run(result) + self.assertEqual(events, + ['startTest', 'addExpectedFailure', 'stopTest']) + self.assertEqual(result.expectedFailures[0][0], test) + self.assertTrue(result.wasSuccessful()) + + def test_expected_failure_with_wrapped_subclass(self): + class Foo(unittest.TestCase): + def test_1(self): + self.assertTrue(False) + + @unittest.expectedFailure + class Bar(Foo): + pass + + events = [] + result = LoggingResult(events) + test = Bar("test_1") + test.run(result) + self.assertEqual(events, + ['startTest', 'addExpectedFailure', 'stopTest']) + self.assertEqual(result.expectedFailures[0][0], test) + self.assertTrue(result.wasSuccessful()) + def test_expected_failure_subtests(self): # A failure in any subtest counts as the expected failure of the # whole test. diff --git a/Lib/unittest/test/testmock/testmock.py b/Lib/unittest/test/testmock/testmock.py index 97d844f9..2a6069f6 100644 --- a/Lib/unittest/test/testmock/testmock.py +++ b/Lib/unittest/test/testmock/testmock.py @@ -300,6 +300,9 @@ class MockTest(unittest.TestCase): self.assertEqual(mock.call_args, ((sentinel.Arg,), {"kw": sentinel.Kwarg})) + # Comparing call_args to a long sequence should not raise + # an exception. See issue 24857. + self.assertFalse(mock.call_args == "a long sequence") def test_assert_called_with(self): mock = Mock() diff --git a/Lib/uuid.py b/Lib/uuid.py index a6643ed4..5b24e2c2 100644 --- a/Lib/uuid.py +++ b/Lib/uuid.py @@ -44,6 +44,8 @@ Typical usage: UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') """ +import os + __author__ = 'Ka-Ping Yee ' RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ @@ -456,7 +458,7 @@ def _netbios_getnode(): # If ctypes is available, use it to find system routines for UUID generation. # XXX This makes the module non-thread-safe! 
-_uuid_generate_random = _uuid_generate_time = _UuidCreate = None +_uuid_generate_time = _UuidCreate = None try: import ctypes, ctypes.util import sys @@ -471,12 +473,9 @@ try: lib = ctypes.CDLL(ctypes.util.find_library(libname)) except Exception: continue - if hasattr(lib, 'uuid_generate_random'): - _uuid_generate_random = lib.uuid_generate_random if hasattr(lib, 'uuid_generate_time'): _uuid_generate_time = lib.uuid_generate_time - if _uuid_generate_random is not None: - break # found everything we were looking for + break del _libnames # The uuid_generate_* functions are broken on MacOS X 10.5, as noted @@ -489,7 +488,7 @@ try: if sys.platform == 'darwin': import os if int(os.uname().release.split('.')[0]) >= 9: - _uuid_generate_random = _uuid_generate_time = None + _uuid_generate_time = None # On Windows prior to 2000, UuidCreate gives a UUID containing the # hardware address. On Windows 2000 and later, UuidCreate makes a @@ -600,20 +599,7 @@ def uuid3(namespace, name): def uuid4(): """Generate a random UUID.""" - - # When the system provides a version-4 UUID generator, use it. - if _uuid_generate_random: - _buffer = ctypes.create_string_buffer(16) - _uuid_generate_random(_buffer) - return UUID(bytes=bytes_(_buffer.raw)) - - # Otherwise, get randomness from urandom or the 'random' module. - try: - import os - return UUID(bytes=os.urandom(16), version=4) - except Exception: - import random - return UUID(int=random.getrandbits(128), version=4) + return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" diff --git a/Lib/weakref.py b/Lib/weakref.py index a4ecadc5..2968fb9f 100644 --- a/Lib/weakref.py +++ b/Lib/weakref.py @@ -98,7 +98,13 @@ class WeakValueDictionary(collections.MutableMapping): # objects are unwrapped on the way out, and we always wrap on the # way in). 
- def __init__(self, *args, **kw): + def __init__(*args, **kw): + if not args: + raise TypeError("descriptor '__init__' of 'WeakValueDictionary' " + "object needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) def remove(wr, selfref=ref(self)): self = selfref() if self is not None: @@ -252,7 +258,14 @@ class WeakValueDictionary(collections.MutableMapping): else: return wr() - def update(self, dict=None, **kwargs): + def update(*args, **kwargs): + if not args: + raise TypeError("descriptor 'update' of 'WeakValueDictionary' " + "object needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + dict = args[0] if args else None if self._pending_removals: self._commit_removals() d = self.data diff --git a/Lib/xml/etree/ElementTree.py b/Lib/xml/etree/ElementTree.py index 4c109a2f..bb32a8f0 100644 --- a/Lib/xml/etree/ElementTree.py +++ b/Lib/xml/etree/ElementTree.py @@ -752,14 +752,13 @@ class ElementTree: encoding = "utf-8" else: encoding = "us-ascii" - else: - encoding = encoding.lower() - with _get_writer(file_or_filename, encoding) as write: + enc_lower = encoding.lower() + with _get_writer(file_or_filename, enc_lower) as write: if method == "xml" and (xml_declaration or (xml_declaration is None and - encoding not in ("utf-8", "us-ascii", "unicode"))): + enc_lower not in ("utf-8", "us-ascii", "unicode"))): declared_encoding = encoding - if encoding == "unicode": + if enc_lower == "unicode": # Retrieve the default encoding for the xml declaration import locale declared_encoding = locale.getpreferredencoding() diff --git a/Lib/xmlrpc/client.py b/Lib/xmlrpc/client.py index acb81421..25e684f0 100644 --- a/Lib/xmlrpc/client.py +++ b/Lib/xmlrpc/client.py @@ -823,7 +823,7 @@ class MultiCallIterator: raise ValueError("unexpected type in multicall result") class MultiCall: - """server -> a object used to boxcar method calls + """server -> an object used to boxcar method calls server should be a ServerProxy object. @@ -1170,7 +1170,7 @@ class Transport: ## # Create parser. # - # @return A 2-tuple containing a parser and a unmarshaller. + # @return A 2-tuple containing a parser and an unmarshaller. 
def getparser(self): # get parser and unmarshaller diff --git a/Lib/zipfile.py b/Lib/zipfile.py index 85bdaa99..2bd1007e 100644 --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -13,8 +13,11 @@ import stat import shutil import struct import binascii -import threading +try: + import threading +except ImportError: + import dummy_threading as threading try: import zlib # We may need its compression method diff --git a/Makefile.pre.in b/Makefile.pre.in index ce2c0aa2..823def3c 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -43,6 +43,11 @@ LDVERSION= @LDVERSION@ HGVERSION= @HGVERSION@ HGTAG= @HGTAG@ HGBRANCH= @HGBRANCH@ +PGO_PROF_GEN_FLAG=@PGO_PROF_GEN_FLAG@ +PGO_PROF_USE_FLAG=@PGO_PROF_USE_FLAG@ +LLVM_PROF_MERGER=@LLVM_PROF_MERGER@ +LLVM_PROF_FILE=@LLVM_PROF_FILE@ +LLVM_PROF_ERR=@LLVM_PROF_ERR@ GNULD= @GNULD@ @@ -226,8 +231,7 @@ TCLTK_INCLUDES= @TCLTK_INCLUDES@ TCLTK_LIBS= @TCLTK_LIBS@ # The task to run while instrument when building the profile-opt target -PROFILE_TASK= $(srcdir)/Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck -#PROFILE_TASK= $(srcdir)/Lib/test/regrtest.py +PROFILE_TASK=-m test.regrtest --pgo # report files for gcov / lcov coverage report COVERAGE_INFO= $(abs_builddir)/coverage.info @@ -477,27 +481,38 @@ LIBRARY_OBJS= \ all: build_all build_all: $(BUILDPYTHON) oldsharedmods sharedmods gdbhooks Programs/_testembed python-config -# Compile a binary with gcc profile guided optimization. +# Compile a binary with profile guided optimization. profile-opt: + @if [ $(LLVM_PROF_ERR) == yes ]; then \ + echo "Error: Cannot perform PGO build because llvm-profdata was not found in PATH" ;\ + echo "Please add it to PATH and run ./configure again" ;\ + exit 1;\ + fi @echo "Building with support for profile generation:" $(MAKE) clean + $(MAKE) profile-removal $(MAKE) build_all_generate_profile - @echo "Running benchmark to generate profile data:" $(MAKE) profile-removal + @echo "Running code to generate profile data (this can take a while):" $(MAKE) run_profile_task + $(MAKE) build_all_merge_profile @echo "Rebuilding with profile guided optimizations:" $(MAKE) clean $(MAKE) build_all_use_profile + $(MAKE) profile-removal build_all_generate_profile: - $(MAKE) all CFLAGS_NODIST="$(CFLAGS) -fprofile-generate" LDFLAGS="-fprofile-generate" LIBS="$(LIBS) -lgcov" + $(MAKE) all CFLAGS_NODIST="$(CFLAGS) $(PGO_PROF_GEN_FLAG)" LDFLAGS="$(LDFLAGS) $(PGO_PROF_GEN_FLAG)" LIBS="$(LIBS)" run_profile_task: : # FIXME: can't run for a cross build - $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) + $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) || true + +build_all_merge_profile: + $(LLVM_PROF_MERGER) build_all_use_profile: - $(MAKE) all CFLAGS_NODIST="$(CFLAGS) -fprofile-use -fprofile-correction" + $(MAKE) all CFLAGS_NODIST="$(CFLAGS) $(PGO_PROF_USE_FLAG)" # Compile and run with gcov .PHONY=coverage coverage-lcov coverage-report @@ -1568,9 +1583,11 @@ clean: pycremoval -rm -f pybuilddir.txt -rm -f Lib/lib2to3/*Grammar*.pickle -rm -f Programs/_testembed Programs/_freeze_importlib + -rm -rf build profile-removal: find . -name '*.gc??' -exec rm -f {} ';' + find . -name '*.profclang?' -exec rm -f {} ';' rm -f $(COVERAGE_INFO) rm -rf $(COVERAGE_REPORT) diff --git a/Misc/ACKS b/Misc/ACKS index c2386e81..1102ff3b 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -103,6 +103,7 @@ Mike Bayer Samuel L. 
Bayer Donald Beaudry David Beazley +John Beck Ingolf Becker Neal Becker Robin Becker @@ -589,6 +590,7 @@ Richie Hindle Konrad Hinsen David Hobley Tim Hochberg +Benjamin Hodgson Joerg-Cyril Hoehle Gregor Hoffleit Chris Hoffman @@ -622,6 +624,7 @@ Ken Howard Brad Howes Mike Hoy Ben Hoyt +Chiu-Hsiang Hsu Chih-Hao Huang Christian Hudon Lawrence Hudson @@ -785,6 +788,7 @@ Andrew Kuchling Dave Kuhlman Jon Kuhn Toshio Kuratomi +Ilia Kurenkov Vladimir Kushnir Erno Kuusela Ross Lagerwall @@ -794,6 +798,7 @@ Thomas Lamb Valerie Lambert Jean-Baptiste "Jiba" Lamy Ronan Lamy +Peter Landry Torsten Landschoff Łukasz Langa Tino Lange @@ -808,6 +813,7 @@ Simon Law Julia Lawall Chris Lawrence Mark Lawrence +Chris Laws Brian Leair Mathieu Leduc-Hamel Amandine Lee @@ -827,6 +833,7 @@ Joerg Lehmann Robert Lehmann Petri Lehtinen Luke Kenneth Casson Leighton +John Leitch Tshepang Lekhonkhobe Marc-André Lemburg Mateusz Lenik @@ -840,6 +847,7 @@ Mark Levitt Ivan Levkivskyi William Lewis Akira Li +Robert Li Xuanji Li Robert van Liere Ross Light @@ -911,12 +919,14 @@ Nick Mathewson Simon Mathieu Laura Matson Graham Matthews +mattip Martin Matusiak Dieter Maurer Daniel May Madison May Lucas Maystre Arnaud Mazin +Pam McA'Nulty Matt McClure Jack McCracken Rebecca McCreary @@ -949,6 +959,7 @@ Steven Miale Trent Mick Jason Michalski Franck Michea +Vincent Michel Tom Middleton Thomas Miedema Stan Mihai @@ -1004,6 +1015,7 @@ Tony Nelson Trent Nelson Chad Netzer Max Neunhöffer +Anthon van der Neut George Neville-Neil Hieu Nguyen Johannes Nicolai @@ -1061,6 +1073,7 @@ Jan Palus Yongzhi Pan Martin Panter Mathias Panzenböck +Marco Paolini M. Papillon Peter Parente Alexandre Parenteau @@ -1071,6 +1084,7 @@ Heikki Partanen Harri Pasanen Gaël Pasgrimaud Ashish Nitin Patil +Alecsandru Patrascu Randy Pausch Samuele Pedroni Justin Peel @@ -1121,6 +1135,7 @@ Martin Pool Iustin Pop Claudiu Popa John Popplewell +Matheus Vieira Portela Davin Potts Guillaume Pratte Florian Preinstorfer @@ -1182,6 +1197,7 @@ Vlad Riscutia Wes Rishel Daniel Riti Juan M. Bello Rivas +Mohd Sanad Zaki Rizvi Davide Rizzo Anthony Roach Carl Robben @@ -1229,6 +1245,7 @@ Rusty Russell Nick Russo James Rutherford Chris Ryland +Bernt Røskar Brenna Constantina S. Patrick Sabin Sébastien Sablé @@ -1302,6 +1319,7 @@ Alexander Shigin Pete Shinners Michael Shiplett John W. Shipman +Shiyao Ma Alex Shkop Joel Shprentz Yue Shuaijie @@ -1319,9 +1337,11 @@ Adam Simpkins Ravi Sinha Janne Sinkkonen Ng Pheng Siong +Yann Sionneau George Sipe J. 
Sipprell Kragen Sitaker +Ville Skyttä Michael Sloan Nick Sloan Václav Å milauer @@ -1334,6 +1354,7 @@ Ryan Smith-Roberts Rafal Smotrzyk Eric Snow Dirk Soede +Nir Soffer Paul Sokolovsky Evgeny Sologubov Cody Somerville @@ -1374,6 +1395,7 @@ Pal Subbiah Nathan Sullivan Mark Summerfield Reuben Sumner +Eryk Sun Marek Å uppa Hisao Suzuki Kalle Svensson @@ -1387,6 +1409,7 @@ Amir Szekely Maciej Szulik Arfrever Frehtes Taifersar Arahesis Hideaki Takahashi +Takase Arihiro Indra Talip Neil Tallim Geoff Talvola @@ -1414,6 +1437,7 @@ Eric Tiedemann July Tikhonov Tracy Tims Oren Tirosh +Tim Tisdall Jason Tishler Christian Tismer Jim Tittsler @@ -1457,6 +1481,7 @@ Lukas Vacek Ville Vainio Andi Vajda Case Van Horsen +John Mark Vandenberg Kyle VanderBeek Andrew Vant Atul Varma @@ -1514,6 +1539,7 @@ Bob Weiner Edward Welbourne Cliff Wells Rickard Westman +Joseph Weston Jeff Wheeler Christopher White David White @@ -1562,6 +1588,7 @@ Daniel Wozniak Wei Wu Heiko Wundram Doug Wyatt +Xiang Zhang Robert Xiao Florent Xicluna Hirokazu Yamamoto diff --git a/Misc/HISTORY b/Misc/HISTORY index 0462f4e4..5e5bc605 100644 --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -1331,7 +1331,7 @@ Core and Builtins import as they are meant for use by importlib. - Issue #14474: Save and restore exception state in thread.start_new_thread() - while writing error message if the thread leaves a unhandled exception. + while writing error message if the thread leaves an unhandled exception. - Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. @@ -1968,8 +1968,8 @@ Core and Builtins change also applies to bytes.splitlines and bytearray.splitlines. - Issue #7732: Don't open a directory as a file anymore while importing a - module. Ignore the direcotry if its name matchs the module name (e.g. - "__init__.py") and raise a ImportError instead. + module. Ignore the directory if its name matches the module name (e.g. + "__init__.py") and raise an ImportError instead. - Issue #13021: Missing decref on an error path. Thanks to Suman Saha for finding the bug and providing a patch. @@ -2021,7 +2021,7 @@ Core and Builtins - Issue #10271: Allow warnings.showwarning() be any callable. -- Issue #11627: Fix segfault when __new__ on a exception returns a +- Issue #11627: Fix segfault when __new__ on an exception returns a non-exception class. - Issue #12149: Update the method cache after a type's dictionary gets @@ -2050,7 +2050,7 @@ Core and Builtins with other data interleaved between marshalled objects. - Issue #12356: When required positional or keyword-only arguments are not - given, produce a informative error message which includes the name(s) of the + given, produce an informative error message which includes the name(s) of the missing arguments. - Issue #12370: Fix super with no arguments when __class__ is overriden in the @@ -2651,7 +2651,7 @@ Library - Issue #12529: fix cgi.parse_header issue on strings with double-quotes and semicolons together. Patch by Ben Darnell and Petri Lehtinen. -- Issue #13227: functools.lru_cache() now has a option to distinguish +- Issue #13227: functools.lru_cache() now has an option to distinguish calls with different argument types. - Issue #6090: zipfile raises a ValueError when a document with a timestamp @@ -2914,7 +2914,7 @@ Library - Issue #12502: asyncore: fix polling loop with AF_UNIX sockets. -- Issue #4376: ctypes now supports nested structures in a endian different than +- Issue #4376: ctypes now supports nested structures in an endian different than the parent structure. 
Patch by Vlad Riscutia. - Raise ValueError when attempting to set the _CHUNK_SIZE attribute of a @@ -2935,7 +2935,7 @@ Library Linux for example, to have the same behaviour on all platforms. - Issue #12451: pydoc: html_getfile() now uses tokenize.open() to support - Python scripts using a encoding different than UTF-8 (read the coding cookie + Python scripts using an encoding different than UTF-8 (read the coding cookie of the script). - Issue #12493: subprocess: Popen.communicate() now also handles EINTR errors @@ -5113,7 +5113,7 @@ Library ``MaybeEncodingError`` exception. - Issue #9244: The ``apply_async()`` and ``map_async()`` methods of - ``multiprocessing.Pool`` now accepts a ``error_callback`` argument. This can + ``multiprocessing.Pool`` now accepts an ``error_callback`` argument. This can be a callback with the signature ``callback(exc)``, which will be called if the target raises an exception. @@ -5210,7 +5210,7 @@ Library - Close file objects in modulefinder in a timely manner. -- Close a io.TextIOWrapper object in email.parser in a timely manner. +- Close an io.TextIOWrapper object in email.parser in a timely manner. - Close a file object in distutils.sysconfig in a timely manner. @@ -6751,7 +6751,7 @@ Library - Issue #7895: platform.mac_ver() no longer crashes after calling os.fork(). -- Issue #9323: Fixed a bug in trace.py that resulted in loosing the name of the +- Issue #9323: Fixed a bug in trace.py that resulted in losing the name of the script being traced. Patch by Eli Bendersky. - Issue #9282: Fixed --listfuncs option of trace.py. Thanks Eli Bendersky for @@ -7308,7 +7308,7 @@ Library - Issue #7610: Reworked implementation of the internal ``zipfile.ZipExtFile`` class used to represent files stored inside an archive. The new implementation is significantly faster and can be wrapped in - a ``io.BufferedReader`` object for more speedups. It also solves an + an ``io.BufferedReader`` object for more speedups. It also solves an issue where interleaved calls to `read()` and `readline()` give wrong results. Patch by Nir Aides. @@ -8683,7 +8683,7 @@ Library - Issue #5624: Fix the _winreg module name still used in several modules. -- Issue #5628: Fix io.TextIOWrapper.read() with a unreadable buffer. +- Issue #5628: Fix io.TextIOWrapper.read() with an unreadable buffer. - Issue #5619: Multiprocessing children disobey the debug flag and causes popups on windows buildbots. Patch applied to work around this issue. @@ -9649,7 +9649,7 @@ Library - Issue #4307: The named tuple that ``inspect.getfullargspec()`` returns now uses ``kwonlydefaults`` instead of ``kwdefaults``. -- Issue #4298: Fix a segfault when pickle.loads is passed a ill-formed input. +- Issue #4298: Fix a segfault when pickle.loads is passed ill-formed input. - Issue #4283: Fix a left-over "iteritems" call in distutils. @@ -11305,7 +11305,7 @@ Core and builtins ----------------- - Bug #1441486: The literal representation of -(sys.maxint - 1) - again evaluates to a int object, not a long. + again evaluates to an int object, not a long. - Bug #1501934: The scope of global variables that are locally assigned using augmented assignment is now correctly determined. @@ -12412,7 +12412,7 @@ Extension Modules - Bug #1194181: bz2.BZ2File didn't handle mode 'U' correctly. -- Patch #1212117: os.stat().st_flags is now accessible as a attribute +- Patch #1212117: os.stat().st_flags is now accessible as an attribute if available on the platform. 
- Patch #1103951: Expose O_SHLOCK and O_EXLOCK in the posix module if @@ -15212,7 +15212,7 @@ Core and builtins interpreter executions, would fail. - "%c" % u"a" now returns a unicode string instead of raising a - TypeError. u"%c" % 0xffffffff now raises a OverflowError instead + TypeError. u"%c" % 0xffffffff now raises an OverflowError instead of a ValueError to be consistent with "%c" % 256. See SF patch #710127. Extension modules @@ -23105,7 +23105,7 @@ no longer use the default root. - The interfaces for the bind*() and unbind() widget methods have been redesigned; the bind*() methods now return the name of the Tcl command -created for the callback, and this can be passed as a optional +created for the callback, and this can be passed as an optional argument to unbind() in order to delete the command (normally, such commands are automatically unbound when the widget is destroyed, but for some applications this isn't enough). diff --git a/Misc/NEWS b/Misc/NEWS index 564933e1..0addd80a 100644 --- a/Misc/NEWS +++ b/Misc/NEWS @@ -2,6 +2,512 @@ Python News +++++++++++ +What's New in Python 3.5.1 final? +================================= + +Release date: 2015-12-06 + +Core and Builtins +----------------- + +- Issue #25709: Fixed problem with in-place string concatenation and + utf-8 cache. + +Windows +------- + +- Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect + logic for launcher detection. + + +What's New in Python 3.5.1 release candidate 1? +=============================================== + +Release date: 2015-11-22 + +Core and Builtins +----------------- + +- Issue #25630: Fix a possible segfault during argument parsing in functions + that accept filesystem paths. + +- Issue #23564: Fixed a partially broken sanity check in the _posixsubprocess + internals regarding how fds_to_pass were passed to the child. The bug had + no actual impact as subprocess.py already avoided it. + +- Issue #25388: Fixed tokenizer crash when processing undecodable source code + with a null byte. + +- Issue #25462: The hash of the key now is calculated only once in most + operations in C implementation of OrderedDict. + +- Issue #22995: Default implementation of __reduce__ and __reduce_ex__ now + rejects builtin types with not defined __new__. + +- Issue #25555: Fix parser and AST: fill lineno and col_offset of "arg" node + when compiling AST from Python objects. + +- Issue #24802: Avoid buffer overreads when int(), float(), compile(), exec() + and eval() are passed bytes-like objects. These objects are not + necessarily terminated by a null byte, but the functions assumed they were. + +- Issue #24726: Fixed a crash and leaking NULL in repr() of OrderedDict that + was mutated by direct calls of dict methods. + +- Issue #25449: Iterating OrderedDict with keys with unstable hash now raises + KeyError in C implementations as well as in Python implementation. + +- Issue #25395: Fixed crash when highly nested OrderedDict structures were + garbage collected. + +- Issue #25274: sys.setrecursionlimit() now raises a RecursionError if the new + recursion limit is too low depending at the current recursion depth. Modify + also the "lower-water mark" formula to make it monotonic. This mark is used + to decide when the overflowed flag of the thread state is reset. + +- Issue #24402: Fix input() to prompt to the redirected stdout when + sys.stdout.fileno() fails. + +- Issue #24806: Prevent builtin types that are not allowed to be subclassed from + being subclassed through multiple inheritance. 
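A minimal sketch of the check described in the Issue #25274 entry above, assuming the snippet runs with at least one frame already on the stack (always true in practice): asking for a limit below the current recursion depth now fails cleanly with RecursionError instead of leaving the interpreter unusable::

    import sys

    original = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(1)   # far below any realistic running stack depth
    except RecursionError as exc:
        print("refused:", exc)
    finally:
        sys.setrecursionlimit(original)   # restore whatever limit was in effect
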
+ +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. + +- Issue #25280: Import trace messages emitted in verbose (-v) mode are no + longer formatted twice. + +- Issue #25003: On Solaris 11.3 or newer, os.urandom() now uses the + getrandom() function instead of the getentropy() function. The getentropy() + function is blocking to generate very good quality entropy, os.urandom() + doesn't need such high-quality entropy. + +- Issue #25182: The stdprinter (used as sys.stderr before the io module is + imported at startup) now uses the backslashreplace error handler. + +- Issue #25131: Make the line number and column offset of set/dict literals and + comprehensions correspond to the opening brace. + +- Issue #25150: Hide the private _Py_atomic_xxx symbols from the public + Python.h header to fix a compilation error with OpenMP. PyThreadState_GET() + becomes an alias to PyThreadState_Get() to avoid ABI incompatibilies. + +Library +------- + +- Issue #25626: Change three zlib functions to accept sizes that fit in + Py_ssize_t, but internally cap those sizes to UINT_MAX. This resolves a + regression in 3.5 where GzipFile.read() failed to read chunks larger than 2 + or 4 GiB. The change affects the zlib.Decompress.decompress() max_length + parameter, the zlib.decompress() bufsize parameter, and the + zlib.Decompress.flush() length parameter. + +- Issue #25583: Avoid incorrect errors raised by os.makedirs(exist_ok=True) + when the OS gives priority to errors such as EACCES over EEXIST. + +- Issue #25593: Change semantics of EventLoop.stop() in asyncio. + +- Issue #6973: When we know a subprocess.Popen process has died, do + not allow the send_signal(), terminate(), or kill() methods to do + anything as they could potentially signal a different process. + +- Issue #25590: In the Readline completer, only call getattr() once per + attribute. + +- Issue #25498: Fix a crash when garbage-collecting ctypes objects created + by wrapping a memoryview. This was a regression made in 3.5a1. Based + on patch by Eryksun. + +- Issue #25584: Added "escape" to the __all__ list in the glob module. + +- Issue #25584: Fixed recursive glob() with patterns starting with '\*\*'. + +- Issue #25446: Fix regression in smtplib's AUTH LOGIN support. + +- Issue #18010: Fix the pydoc web server's module search function to handle + exceptions from importing packages. + +- Issue #25554: Got rid of circular references in regular expression parsing. + +- Issue #25510: fileinput.FileInput.readline() now returns b'' instead of '' + at the end if the FileInput was opened with binary mode. + Patch by Ryosuke Ito. + +- Issue #25503: Fixed inspect.getdoc() for inherited docstrings of properties. + Original patch by John Mark Vandenberg. + +- Issue #25515: Always use os.urandom as a source of randomness in uuid.uuid4. + +- Issue #21827: Fixed textwrap.dedent() for the case when largest common + whitespace is a substring of smallest leading whitespace. + Based on patch by Robert Li. + +- Issue #25447: The lru_cache() wrapper objects now can be copied and pickled + (by returning the original object unchanged). + +- Issue #25390: typing: Don't crash on Union[str, Pattern]. + +- Issue #25441: asyncio: Raise error from drain() when socket is closed. + +- Issue #25410: Cleaned up and fixed minor bugs in C implementation of + OrderedDict. + +- Issue #25411: Improved Unicode support in SMTPHandler through better use of + the email package. Thanks to user simon04 for the patch. 
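A quick sketch of the Issue #25447 entry above, assuming a module-level function so that pickling by qualified name can locate it again; copying and pickling both hand back the original wrapper rather than trying to duplicate the cache::

    import copy
    import pickle
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def square(x):
        return x * x

    assert copy.copy(square) is square        # shallow copy returns the wrapper itself
    assert copy.deepcopy(square) is square    # deep copy does too
    restored = pickle.loads(pickle.dumps(square))
    assert restored is square                 # pickled by reference, not by value
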
+ +- Issue #25407: Remove mentions of the formatter module being removed in + Python 3.6. + +- Issue #25406: Fixed a bug in C implementation of OrderedDict.move_to_end() + that caused segmentation fault or hang in iterating after moving several + items to the start of ordered dict. + +- Issue #25364: zipfile now works in threads disabled builds. + +- Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both + decode_data and enable_SMTPUTF8 are set to true. + +- Issue #25316: distutils raises OSError instead of DistutilsPlatformError + when MSVC is not installed. + +- Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in + pickletools.opcodes. + +- Issue #23972: Updates asyncio datagram create method allowing reuseport + and reuseaddr socket options to be set prior to binding the socket. + Mirroring the existing asyncio create_server method the reuseaddr option + for datagram sockets defaults to True if the O/S is 'posix' (except if the + platform is Cygwin). Patch by Chris Laws. + +- Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you + submit a coroutine to a loop from another thread, returning a + concurrent.futures.Future. By Vincent Michel. + +- Issue #25232: Fix CGIRequestHandler to split the query from the URL at the + first question mark (?) rather than the last. Patch from Xiang Zhang. + +- Issue #24657: Prevent CGIRequestHandler from collapsing slashes in the + query part of the URL as if it were a path. Patch from Xiang Zhang. + +- Issue #24483: C implementation of functools.lru_cache() now calculates key's + hash only once. + +- Issue #22958: Constructor and update method of weakref.WeakValueDictionary + now accept the self and the dict keyword arguments. + +- Issue #22609: Constructor of collections.UserDict now accepts the self keyword + argument. + +- Issue #25111: Fixed comparison of traceback.FrameSummary. + +- Issue #25262. Added support for BINBYTES8 opcode in Python implementation of + unpickler. Highest 32 bits of 64-bit size for BINUNICODE8 and BINBYTES8 + opcodes no longer silently ignored on 32-bit platforms in C implementation. + +- Issue #25034: Fix string.Formatter problem with auto-numbering and + nested format_specs. Patch by Anthon van der Neut. + +- Issue #25233: Rewrite the guts of asyncio.Queue and + asyncio.Semaphore to be more understandable and correct. + +- Issue #25203: Failed readline.set_completer_delims() no longer left the + module in inconsistent state. + +- Issue #23600: Default implementation of tzinfo.fromutc() was returning + wrong results in some cases. + +- Issue #23329: Allow the ssl module to be built with older versions of + LibreSSL. + +- Prevent overflow in _Unpickler_Read. + +- Issue #25047: The XML encoding declaration written by Element Tree now + respects the letter case given by the user. This restores the ability to + write encoding names in uppercase like "UTF-8", which worked in Python 2. + +- Issue #25135: Make deque_clear() safer by emptying the deque before clearing. + This helps avoid possible reentrancy issues. + +- Issue #19143: platform module now reads Windows version from kernel32.dll to + avoid compatibility shims. + +- Issue #25092: Fix datetime.strftime() failure when errno was already set to + EINVAL. + +- Issue #23517: Fix rounding in fromtimestamp() and utcfromtimestamp() methods + of datetime.datetime: microseconds are now rounded to nearest with ties + going to nearest even integer (ROUND_HALF_EVEN), instead of being rounding + towards minus infinity (ROUND_FLOOR). 
It's important that these methods use + the same rounding mode than datetime.timedelta to keep the property: + (datetime(1970,1,1) + timedelta(seconds=t)) == datetime.utcfromtimestamp(t). + It also the rounding mode used by round(float) for example. + +- Issue #25155: Fix datetime.datetime.now() and datetime.datetime.utcnow() on + Windows to support date after year 2038. It was a regression introduced in + Python 3.5.0. + +- Issue #25108: Omitted internal frames in traceback functions print_stack(), + format_stack(), and extract_stack() called without arguments. + +- Issue #25118: Fix a regression of Python 3.5.0 in os.waitpid() on Windows. + +- Issue #24684: socket.socket.getaddrinfo() now calls + PyUnicode_AsEncodedString() instead of calling the encode() method of the + host, to handle correctly custom string with an encode() method which doesn't + return a byte string. The encoder of the IDNA codec is now called directly + instead of calling the encode() method of the string. + +- Issue #25060: Correctly compute stack usage of the BUILD_MAP opcode. + +- Issue #24857: Comparing call_args to a long sequence now correctly returns a + boolean result instead of raising an exception. Patch by A Kaptur. + +- Issue #23144: Make sure that HTMLParser.feed() returns all the data, even + when convert_charrefs is True. + +- Issue #24982: shutil.make_archive() with the "zip" format now adds entries + for directories (including empty directories) in ZIP file. + +- Issue #25019: Fixed a crash caused by setting non-string key of expat parser. + Based on patch by John Leitch. + +- Issue #16180: Exit pdb if file has syntax error, instead of trapping user + in an infinite loop. Patch by Xavier de Gaye. + +- Issue #24891: Fix a race condition at Python startup if the file descriptor + of stdin (0), stdout (1) or stderr (2) is closed while Python is creating + sys.stdin, sys.stdout and sys.stderr objects. These attributes are now set + to None if the creation of the object failed, instead of raising an OSError + exception. Initial patch written by Marco Paolini. + +- Issue #24992: Fix error handling and a race condition (related to garbage + collection) in collections.OrderedDict constructor. + +- Issue #24881: Fixed setting binary mode in Python implementation of FileIO + on Windows and Cygwin. Patch from Akira Li. + +- Issue #25578: Fix (another) memory leak in SSLSocket.getpeercer(). + +- Issue #25530: Disable the vulnerable SSLv3 protocol by default when creating + ssl.SSLContext. + +- Issue #25569: Fix memory leak in SSLSocket.getpeercert(). + +- Issue #25471: Sockets returned from accept() shouldn't appear to be + nonblocking. + +- Issue #25319: When threading.Event is reinitialized, the underlying condition + should use a regular lock rather than a recursive lock. + +- Issue #21112: Fix regression in unittest.expectedFailure on subclasses. + Patch from Berker Peksag. + +- Issue #24764: cgi.FieldStorage.read_multi() now ignores the Content-Length + header in part headers. Patch written by Peter Landry and reviewed by Pierre + Quentel. + +- Issue #24913: Fix overrun error in deque.index(). + Found by John Leitch and Bryce Darling. + +- Issue #24774: Fix docstring in http.server.test. Patch from Chiu-Hsiang Hsu. + +- Issue #21159: Improve message in configparser.InterpolationMissingOptionError. + Patch from Łukasz Langa. + +- Issue #20362: Honour TestCase.longMessage correctly in assertRegex. + Patch from Ilia Kurenkov. 
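The invariant quoted in the Issue #23517 entry above can be exercised directly; a small sketch over a few arbitrary timestamps (the exact values are not significant, since both conversion paths now round microseconds half-to-even)::

    from datetime import datetime, timedelta

    for t in (0.0, 1.25, 123456.654321, 1446165600.123456):
        via_timedelta = datetime(1970, 1, 1) + timedelta(seconds=t)
        via_utcfromtimestamp = datetime.utcfromtimestamp(t)
        assert via_timedelta == via_utcfromtimestamp, t
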
+ +- Issue #23572: Fixed functools.singledispatch on classes with falsy + metaclasses. Patch by Ethan Furman. + +- asyncio: ensure_future() now accepts awaitable objects. + +IDLE +---- + +- Issue 15348: Stop the debugger engine (normally in a user process) + before closing the debugger window (running in the IDLE process). + This prevents the RuntimeErrors that were being caught and ignored. + +- Issue #24455: Prevent IDLE from hanging when a) closing the shell while the + debugger is active (15347); b) closing the debugger with the [X] button + (15348); and c) activating the debugger when already active (24455). + The patch by Mark Roseman does this by making two changes. + 1. Suspend and resume the gui.interaction method with the tcl vwait + mechanism intended for this purpose (instead of root.mainloop & .quit). + 2. In gui.run, allow any existing interaction to terminate first. + +- Change 'The program' to 'Your program' in an IDLE 'kill program?' message + to make it clearer that the program referred to is the currently running + user program, not IDLE itself. + +- Issue #24750: Improve the appearance of the IDLE editor window status bar. + Patch by Mark Roseman. + +- Issue #25313: Change the handling of new built-in text color themes to better + address the compatibility problem introduced by the addition of IDLE Dark. + Consistently use the revised idleConf.CurrentTheme everywhere in idlelib. + +- Issue #24782: Extension configuration is now a tab in the IDLE Preferences + dialog rather than a separate dialog. The former tabs are now a sorted + list. Patch by Mark Roseman. + +- Issue #22726: Re-activate the config dialog help button with some content + about the other buttons and the new IDLE Dark theme. + +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + To use it with IDLEs released before November 2015, hit the + 'Save as New Custom Theme' button and enter a new name, + such as 'Custom Dark'. The custom theme will work with any IDLE + release, and can be modified. + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc chapter. + 'IDLE' now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + +- Issue #24972: Inactive selection background now matches active selection + background, as configured by users, on all systems. Found items are now + always highlighted on Windows. Initial patch by Mark Roseman. + +- Issue #24570: Idle: make calltip and completion boxes appear on Macs + affected by a tk regression. Initial patch by Mark Roseman. + +- Issue #24988: Idle ScrolledList context menus (used in debugger) + now work on Mac Aqua. Patch by Mark Roseman. + +- Issue #24801: Make right-click for context menu work on Mac Aqua. + Patch by Mark Roseman. + +- Issue #25173: Associate tkinter messageboxes with a specific widget. + For Mac OSX, make them a 'sheet'. Patch by Mark Roseman. + +- Issue #25198: Enhance the initial html viewer now used for Idle Help. + * Properly indent fixed-pitch text (patch by Mark Roseman). + * Give code snippet a very Sphinx-like light blueish-gray background. + * Re-use initial width and height set by users for shell and editor. 
+ * When the Table of Contents (TOC) menu is used, put the section header + at the top of the screen. + +- Issue #25225: Condense and rewrite Idle doc section on text colors. + +- Issue #21995: Explain some differences between IDLE and console Python. + +- Issue #22820: Explain need for *print* when running file from Idle editor. + +- Issue #25224: Doc: augment Idle feature list and no-subprocess section. + +- Issue #25219: Update doc for Idle command line options. + Some were missing and notes were not correct. + +- Issue #24861: Most of idlelib is private and subject to change. + Use idleib.idle.* to start Idle. See idlelib.__init__.__doc__. + +- Issue #25199: Idle: add synchronization comments for future maintainers. + +- Issue #16893: Replace help.txt with help.html for Idle doc display. + The new idlelib/help.html is rstripped Doc/build/html/library/idle.html. + It looks better than help.txt and will better document Idle as released. + The tkinter html viewer that works for this file was written by Mark Roseman. + The now unused EditorWindow.HelpDialog class and helt.txt file are deprecated. + +- Issue #24199: Deprecate unused idlelib.idlever with possible removal in 3.6. + +- Issue #24790: Remove extraneous code (which also create 2 & 3 conflicts). + +Documentation +------------- + +- Issue #12067: Rewrite Comparisons section in the Expressions chapter of the + language reference. Some of the details of comparing mixed types were + incorrect or ambiguous. NotImplemented is only relevant at a lower level + than the Expressions chapter. Added details of comparing range() objects, + and default behaviour and consistency suggestions for user-defined classes. + Patch from Andy Maier. + +- Issue #24952: Clarify the default size argument of stack_size() in + the "threading" and "_thread" modules. Patch from Mattip. + +- Issue #23725: Overhaul tempfile docs. Note deprecated status of mktemp. + Patch from Zbigniew Jędrzejewski-Szmek. + +- Issue #24808: Update the types of some PyTypeObject fields. Patch by + Joseph Weston. + +- Issue #22812: Fix unittest discovery examples. + Patch from Pam McA'Nulty. + +Tests +----- + +- Issue #25449: Added tests for OrderedDict subclasses. + +- Issue #25099: Make test_compileall not fail when an entry on sys.path cannot + be written to (commonly seen in administrative installs on Windows). + +- Issue #23919: Prevents assert dialogs appearing in the test suite. + +- ``PCbuild\rt.bat`` now accepts an unlimited number of arguments to pass along + to regrtest.py. Previously there was a limit of 9. + +Build +----- + +- Issue #24915: Add LLVM support for PGO builds and use the test suite to + generate the profile data. Initial patch by Alecsandru Patrascu of Intel. + +- Issue #24910: Windows MSIs now have unique display names. + +- Issue #24986: It is now possible to build Python on Windows without errors + when external libraries are not available. + +Windows +------- + +- Issue #25450: Updates shortcuts to start Python in installation directory. + +- Issue #25164: Changes default all-users install directory to match per-user + directory. + +- Issue #25143: Improves installer error messages for unsupported platforms. + +- Issue #25163: Display correct directory in installer when using non-default + settings. + +- Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build + +- Issue #25089: Adds logging to installer for case where launcher is not + selected on upgrade. 
+ +- Issue #25165: Windows uninstallation should not remove launcher if other + versions remain + +- Issue #25112: py.exe launcher is missing icons + +- Issue #25102: Windows installer does not precompile for -O or -OO. + +- Issue #25081: Makes Back button in installer go back to upgrade page when + upgrading. + +- Issue #25091: Increases font size of the installer. + +- Issue #25126: Clarifies that the non-web installer will download some + components. + +- Issue #25213: Restores requestedExecutionLevel to manifest to disable + UAC virtualization. + +- Issue #25022: Removed very outdated PC/example_nt/ directory. + +Tools/Demos +----------- + +- Issue #25440: Fix output of python-config --extension-suffix. + + What's New in Python 3.5.0 final? ================================= @@ -144,10 +650,7 @@ Library - Issue #17527: Add PATCH to wsgiref.validator. Patch from Luca Sbardella. -- Issue #23812: Fix asyncio.Queue.get() to avoid loosing items on cancellation. - Patch by Gustavo J. A. M. Carneiro. - -- Issue #24791: Fix grammar regression for call syntax: ``g(*a or b)``. +- Issue #24791: Fix grammar regression for call syntax: 'g(\*a or b)'. IDLE ---- @@ -747,7 +1250,7 @@ Library - Issue #23703: Fix a regression in urljoin() introduced in 901e4e52b20a. Patch by Demian Brecht. -- Issue #4254: Adds _curses.update_lines_cols() Patch by Arnon Yaari +- Issue #4254: Adds _curses.update_lines_cols(). Patch by Arnon Yaari - Issue #19933: Provide default argument for ndigits in round. Patch by Vajrasky Kok. @@ -1239,7 +1742,7 @@ Build C API ----- -- Issue #20204: Deprecation warning is now raised for builtin type without the +- Issue #20204: Deprecation warning is now raised for builtin types without the __module__ attribute. Windows @@ -1383,7 +1886,7 @@ Core and Builtins type) can now be weakref'ed. Patch by Wei Wu. - Issue #22077: Improve index error messages for bytearrays, bytes, lists, - and tuples by adding 'or slices'. Added ', not ' for bytearrays. Original patch by Claudiu Popa. - Issue #20179: Apply Argument Clinic to bytes and bytearray. @@ -1407,7 +1910,7 @@ Core and Builtins engine friendly) error messages when "exec" and "print" are used as statements. -- Issue #21642: If the conditional if-else expression, allow an integer written +- Issue #21642: In the conditional if-else expression, allow an integer written with no space between itself and the ``else`` keyword (e.g. ``True if 42else False``) to be valid syntax. @@ -1468,7 +1971,7 @@ Core and Builtins and does not require to carry long the spark.py parser-generator library; spark.py was removed from the source base. -- Issue #12546: Allow \x00 to be used as a fill character when using str, int, +- Issue #12546: Allow ``\x00`` to be used as a fill character when using str, int, float, and complex __format__ methods. - Issue #20480: Add ipaddress.reverse_pointer. Patch by Leon Weber. @@ -1599,7 +2102,7 @@ Library - Issue #23132: Improve performance and introspection support of comparison methods created by functool.total_ordering. -- Issue #19776: Add a expanduser() method on Path objects. +- Issue #19776: Add an expanduser() method on Path objects. - Issue #23112: Fix SimpleHTTPServer to correctly carry the query string and fragment when it redirects to add a trailing slash. @@ -2092,7 +2595,7 @@ Library - Issue #20170: Convert posixmodule to use Argument Clinic. 
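For the Issue #19776 entry above, a minimal illustration of the pathlib method it adds; the expanded prefix depends on the current user's home directory, so the path in the comment is only an example::

    from pathlib import Path

    p = Path("~/projects/notes.txt")
    print(p.expanduser())                  # e.g. /home/alice/projects/notes.txt on POSIX
    print(Path("no/tilde").expanduser())   # paths without a leading ~ come back unchanged
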
-- Issue #21539: Add a *exists_ok* argument to `Pathlib.mkdir()` to mimic +- Issue #21539: Add an *exists_ok* argument to `Pathlib.mkdir()` to mimic `mkdir -p` and `os.makedirs()` functionality. When true, ignore FileExistsErrors. Patch by Berker Peksag. @@ -2148,7 +2651,7 @@ Library Patch by Tom Flanagan. - Issue #19884: readline: Disable the meta modifier key if stdout is not - a terminal to not write the ANSI sequence "\033[1034h" into stdout. This + a terminal to not write the ANSI sequence ``"\033[1034h"`` into stdout. This sequence is used on some terminal (ex: TERM=xterm-256color") to enable support of 8 bit characters. @@ -2310,7 +2813,7 @@ Library - Issue #3015: _tkinter.create() now creates tkapp object with wantobject=1 by default. -- Issue #10203: sqlite3.Row now truly supports sequence protocol. In particulr +- Issue #10203: sqlite3.Row now truly supports sequence protocol. In particular it supports reverse() and negative indices. Original patch by Claudiu Popa. - Issue #18807: If copying (no symlinks) specified for a venv, then the python @@ -2351,7 +2854,7 @@ Library - Issue #21486: Optimize parsing of netmasks in ipaddress.IPv4Network and ipaddress.IPv6Network. -- Issue #13916: Disallowed the surrogatepass error handler for non UTF-* +- Issue #13916: Disallowed the surrogatepass error handler for non UTF-\* encodings. - Issue #20998: Fixed re.fullmatch() of repeated single character pattern @@ -2681,7 +3184,7 @@ IDLE Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name @@ -4524,7 +5027,7 @@ Build upgrade pip by default, using the bundled pip provided by the new ensurepip module. A new configure option, --with-ensurepip[=upgrade|install|no], is available to override the default ensurepip "--upgrade" option. The option - can also be set with "make [alt]install ENSUREPIP=[upgrade|install\no]". + can also be set with "make [alt]install ENSUREPIP=[upgrade|install|no]". - Issue #19551: PEP 453 - the OS X installer now installs pip by default. @@ -4693,7 +5196,7 @@ Library - Issue #10712: 2to3 has a new "asserts" fixer that replaces deprecated names of unittest methods (e.g. failUnlessEqual -> assertEqual). -- Issue #18037: 2to3 now escapes '\u' and '\U' in native strings. +- Issue #18037: 2to3 now escapes ``'\u'`` and ``'\U'`` in native strings. - Issue #17839: base64.decodebytes and base64.encodebytes now accept any object that exports a 1 dimensional array of bytes (this means the same @@ -5360,7 +5863,7 @@ Core and Builtins - Issue #5308: Raise ValueError when marshalling too large object (a sequence with size >= 2**31), instead of producing illegal marshal data. -- Issue #12983: Bytes literals with invalid \x escape now raise a SyntaxError +- Issue #12983: Bytes literals with invalid ``\x`` escape now raise a SyntaxError and a full traceback including line number. - Issue #16967: In function definition, evaluate positional defaults before @@ -5731,7 +6234,7 @@ Library loaded X.509 certs, X.509 CA certs and CRLs. - Issue #18167: cgi.FieldStorage no longer fails to handle multipart/form-data - when \r\n appears at end of 65535 bytes without other newlines. 
+ when ``\r\n`` appears at end of 65535 bytes without other newlines. - Issue #18076: Introduce importlib.util.decode_source(). @@ -5963,7 +6466,7 @@ Library - Issue #17016: Get rid of possible pointer wraparounds and integer overflows in the re module. Patch by Nickolai Zeldovich. -- Issue #16658: add missing return to HTTPConnection.send() +- Issue #16658: add missing return to HTTPConnection.send(). Patch by Jeff Knupp. - Issue #9556: the logging package now allows specifying a time-of-day for a @@ -6344,8 +6847,9 @@ Library - Issue #16900: Issue a ResourceWarning when an ssl socket is left unclosed. -- Issue #13899: \A, \Z, and \B now correctly match the A, Z, and B literals - when used inside character classes (e.g. '[\A]'). Patch by Matthew Barnett. +- Issue #13899: ``\A``, ``\Z``, and ``\B`` now correctly match the A, Z, + and B literals when used inside character classes (e.g. ``'[\A]'``). + Patch by Matthew Barnett. - Issue #15545: Fix regression in sqlite3's iterdump method where it was failing if the connection used a row factory (such as sqlite3.Row) that @@ -6560,7 +7064,7 @@ Library - Issue #16431: Use the type information when constructing a Decimal subtype from a Decimal argument. -- Issue #15641: Clean up deprecated classes from importlib +- Issue #15641: Clean up deprecated classes from importlib. Patch by Taras Lyapun. - Issue #16350: zlib.decompressobj().decompress() now accumulates data from @@ -6829,7 +7333,7 @@ IDLE - Issue #16511: Use default IDLE width and height if config param is not valid. Patch Serhiy Storchaka. -- Issue #1207589: Add Cut/Copy/Paste items to IDLE right click Context Menu +- Issue #1207589: Add Cut/Copy/Paste items to IDLE right click Context Menu. Patch by Todd Rovito. - Issue #16123: IDLE - deprecate running without a subprocess. @@ -7342,7 +7846,7 @@ Tools/Demos - Issue #16549: Make json.tool work again on Python 3 and add tests. Initial patch by Berker Peksag and Serhiy Storchaka. -- Issue #13301: use ast.literal_eval() instead of eval() in Tools/i18n/msgfmt.py +- Issue #13301: use ast.literal_eval() instead of eval() in Tools/i18n/msgfmt.py. Patch by Serhiy Storchaka. Windows diff --git a/Misc/Porting b/Misc/Porting index c283cb27..c43b1129 100644 --- a/Misc/Porting +++ b/Misc/Porting @@ -1,41 +1 @@ -Q. I want to port Python to a new platform. How do I begin? - -A. I guess the two things to start with is to familiarize yourself -with are the development system for your target platform and the -generic build process for Python. Make sure you can compile and run a -simple hello-world program on your target platform. Make sure you can -compile and run the Python interpreter on a platform to which it has -already been ported (preferably Unix, but Mac or Windows will do, -too). - -I also would never start something like this without at least -medium-level understanding of your target platform (i.e. how it is -generally used, how to write platform specific apps etc.) and Python -(or else you'll never know how to test the results). - -The build process for Python, in particular the Makefiles in the -source distribution, will give you a hint on which files to compile -for Python. Not all source files are relevant -- some are platform -specific, others are only used in emergencies (e.g. getopt.c). The -Makefiles tell the story. - -You'll also need a pyconfig.h file tailored for your platform. You can -start with pyconfig.h.in, read the comments and turn on definitions that -apply to your platform. 
- -And you'll need a config.c file, which lists the built-in modules you -support. Start with Modules/config.c.in. - -Finally, you'll run into some things that aren't supported on your -target platform. Forget about the posix module for now -- simply take -it out of the config.c file. - -Bang on it until you get a >>> prompt. (You may have to disable the -importing of "site.py" by passing the -S option.) - -Then bang on it until it executes very simple Python statements. - -Now bang on it some more. At some point you'll want to use the os -module; this is the time to start thinking about what to do with the -posix module. It's okay to simply #ifdef out those functions that -cause problems; the remaining ones will be quite useful. +This document is moved to https://docs.python.org/devguide/faq.html#how-do-i-port-python-to-a-new-platform diff --git a/Misc/python-config.sh.in b/Misc/python-config.sh.in index 64c81e5f..30c69278 100644 --- a/Misc/python-config.sh.in +++ b/Misc/python-config.sh.in @@ -49,7 +49,7 @@ PY_ENABLE_SHARED="@PY_ENABLE_SHARED@" LDVERSION="@LDVERSION@" LIBDEST=${prefix}/lib/python${VERSION} LIBPL=$(echo "@LIBPL@" | sed "s#$prefix_build#$prefix_real#") -SO="@SO@" +SO="@EXT_SUFFIX@" PYTHONFRAMEWORK="@PYTHONFRAMEWORK@" INCDIR="-I$includedir/python${VERSION}${ABIFLAGS}" PLATINCDIR="-I$includedir/python${VERSION}${ABIFLAGS}" diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c index 3856d83f..214872b3 100644 --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -419,9 +419,11 @@ deque_extend(dequeobject *deque, PyObject *iterable) deque->rightblock->data[deque->rightindex] = item; deque_trim_left(deque); } - Py_DECREF(it); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { + Py_DECREF(it); return NULL; + } + Py_DECREF(it); Py_RETURN_NONE; } @@ -480,9 +482,11 @@ deque_extendleft(dequeobject *deque, PyObject *iterable) deque->leftblock->data[deque->leftindex] = item; deque_trim_right(deque); } - Py_DECREF(it); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { + Py_DECREF(it); return NULL; + } + Py_DECREF(it); Py_RETURN_NONE; } @@ -497,8 +501,8 @@ deque_inplace_concat(dequeobject *deque, PyObject *other) result = deque_extend(deque, other); if (result == NULL) return result; - Py_DECREF(result); Py_INCREF(deque); + Py_DECREF(result); return (PyObject *)deque; } @@ -625,7 +629,7 @@ volume rotations should take care not to penalize the common case. Conceptually, a rotate by one is equivalent to a pop on one side and an append on the other. However, a pop/append pair is unnecessarily slow -because it requires a incref/decref pair for an object located randomly +because it requires an incref/decref pair for an object located randomly in memory. It is better to just move the object pointer from one block to the next without changing the reference count. @@ -1034,16 +1038,75 @@ PyDoc_STRVAR(remove_doc, static void deque_clear(dequeobject *deque) { + block *b; + block *prevblock; + block *leftblock; + Py_ssize_t leftindex; + Py_ssize_t n; PyObject *item; + if (Py_SIZE(deque) == 0) + return; + + /* During the process of clearing a deque, decrefs can cause the + deque to mutate. To avoid fatal confusion, we have to make the + deque empty before clearing the blocks and never refer to + anything via deque->ref while clearing. (This is the same + technique used for clearing lists, sets, and dicts.) + + Making the deque empty requires allocating a new empty block. 
In + the unlikely event that memory is full, we fall back to an + alternate method that doesn't require a new block. Repeating + pops in a while-loop is slower, possibly re-entrant (and a clever + adversary could cause it to never terminate). + */ + + b = newblock(0); + if (b == NULL) { + PyErr_Clear(); + goto alternate_method; + } + + /* Remember the old size, leftblock, and leftindex */ + leftblock = deque->leftblock; + leftindex = deque->leftindex; + n = Py_SIZE(deque); + + /* Set the deque to be empty using the newly allocated block */ + MARK_END(b->leftlink); + MARK_END(b->rightlink); + Py_SIZE(deque) = 0; + deque->leftblock = b; + deque->rightblock = b; + deque->leftindex = CENTER + 1; + deque->rightindex = CENTER; + deque->state++; + + /* Now the old size, leftblock, and leftindex are disconnected from + the empty deque and we can use them to decref the pointers. + */ + while (n--) { + item = leftblock->data[leftindex]; + Py_DECREF(item); + leftindex++; + if (leftindex == BLOCKLEN && n) { + CHECK_NOT_END(leftblock->rightlink); + prevblock = leftblock; + leftblock = leftblock->rightlink; + leftindex = 0; + freeblock(prevblock); + } + } + CHECK_END(leftblock->rightlink); + freeblock(leftblock); + return; + + alternate_method: while (Py_SIZE(deque)) { item = deque_pop(deque, NULL); assert (item != NULL); Py_DECREF(item); } - assert(deque->leftblock == deque->rightblock); - assert(deque->leftindex - 1 == deque->rightindex); - assert(Py_SIZE(deque) == 0); } static int @@ -1260,8 +1323,8 @@ deque_repr(PyObject *deque) aslist, ((dequeobject *)deque)->maxlen); else result = PyUnicode_FromFormat("deque(%R)", aslist); - Py_DECREF(aslist); Py_ReprLeave(deque); + Py_DECREF(aslist); return result; } @@ -1363,7 +1426,8 @@ deque_init(dequeobject *deque, PyObject *args, PyObject *kwdargs) } } deque->maxlen = maxlen; - deque_clear(deque); + if (Py_SIZE(deque) > 0) + deque_clear(deque); if (iterable != NULL) { PyObject *rv = deque_extend(deque, iterable); if (rv == NULL) diff --git a/Modules/_csv.c b/Modules/_csv.c index a614191a..af901e2d 100644 --- a/Modules/_csv.c +++ b/Modules/_csv.c @@ -1562,7 +1562,7 @@ PyDoc_STRVAR(csv_reader_doc, "provided by the dialect.\n" "\n" "The returned object is an iterator. 
Each iteration returns a row\n" -"of the CSV file (which can span multiple input lines):\n"); +"of the CSV file (which can span multiple input lines).\n"); PyDoc_STRVAR(csv_writer_doc, " csv_writer = csv.writer(fileobj [, dialect='excel']\n" diff --git a/Modules/_ctypes/_ctypes.c b/Modules/_ctypes/_ctypes.c index b9fd82e8..00eb700f 100644 --- a/Modules/_ctypes/_ctypes.c +++ b/Modules/_ctypes/_ctypes.c @@ -463,45 +463,65 @@ KeepRef(CDataObject *target, Py_ssize_t index, PyObject *keep); static PyObject * CDataType_from_buffer(PyObject *type, PyObject *args) { - Py_buffer buffer; + PyObject *obj; + PyObject *mv; + PyObject *result; + Py_buffer *buffer; Py_ssize_t offset = 0; - PyObject *result, *mv; + StgDictObject *dict = PyType_stgdict(type); assert (dict); - if (!PyArg_ParseTuple(args, "w*|n:from_buffer", &buffer, &offset)) + if (!PyArg_ParseTuple(args, "O|n:from_buffer", &obj, &offset)) return NULL; + mv = PyMemoryView_FromObject(obj); + if (mv == NULL) + return NULL; + + buffer = PyMemoryView_GET_BUFFER(mv); + + if (buffer->readonly) { + PyErr_SetString(PyExc_TypeError, + "underlying buffer is not writable"); + Py_DECREF(mv); + return NULL; + } + + if (!PyBuffer_IsContiguous(buffer, 'C')) { + PyErr_SetString(PyExc_TypeError, + "underlying buffer is not C contiguous"); + Py_DECREF(mv); + return NULL; + } + if (offset < 0) { PyErr_SetString(PyExc_ValueError, "offset cannot be negative"); - PyBuffer_Release(&buffer); + Py_DECREF(mv); return NULL; } - if (dict->size > buffer.len - offset) { + + if (dict->size > buffer->len - offset) { PyErr_Format(PyExc_ValueError, - "Buffer size too small (%zd instead of at least %zd bytes)", - buffer.len, dict->size + offset); - PyBuffer_Release(&buffer); + "Buffer size too small " + "(%zd instead of at least %zd bytes)", + buffer->len, dict->size + offset); + Py_DECREF(mv); return NULL; } - result = PyCData_AtAddress(type, (char *)buffer.buf + offset); + result = PyCData_AtAddress(type, (char *)buffer->buf + offset); if (result == NULL) { - PyBuffer_Release(&buffer); + Py_DECREF(mv); return NULL; } - mv = PyMemoryView_FromBuffer(&buffer); - if (mv == NULL) { - PyBuffer_Release(&buffer); + if (-1 == KeepRef((CDataObject *)result, -1, mv)) { + Py_DECREF(result); return NULL; } - /* Hack the memoryview so that it will release the buffer. */ - ((PyMemoryViewObject *)mv)->mbuf->master.obj = buffer.obj; - ((PyMemoryViewObject *)mv)->view.obj = buffer.obj; - if (-1 == KeepRef((CDataObject *)result, -1, mv)) - result = NULL; + return result; } diff --git a/Modules/_ctypes/libffi/src/x86/darwin64.S b/Modules/_ctypes/libffi/src/x86/darwin64.S index 2f7394ef..0c72ed29 100644 --- a/Modules/_ctypes/libffi/src/x86/darwin64.S +++ b/Modules/_ctypes/libffi/src/x86/darwin64.S @@ -350,7 +350,7 @@ LASFDE1: .set L$set$3,LUW1-LUW0 .long L$set$3 - /* New stack frame based off rbp. This is a itty bit of unwind + /* New stack frame based off rbp. This is an itty bit of unwind trickery in that the CFA *has* changed. There is no easy way to describe it correctly on entry to the function. Fortunately, it doesn't matter too much since at all points we can correctly diff --git a/Modules/_ctypes/libffi/src/x86/unix64.S b/Modules/_ctypes/libffi/src/x86/unix64.S index dcd6bc71..45a0ed74 100644 --- a/Modules/_ctypes/libffi/src/x86/unix64.S +++ b/Modules/_ctypes/libffi/src/x86/unix64.S @@ -366,7 +366,7 @@ ffi_closure_unix64: .byte 0x4 /* DW_CFA_advance_loc4 */ .long .LUW1-.LUW0 - /* New stack frame based off rbp. This is a itty bit of unwind + /* New stack frame based off rbp. 
This is an itty bit of unwind trickery in that the CFA *has* changed. There is no easy way to describe it correctly on entry to the function. Fortunately, it doesn't matter too much since at all points we can correctly diff --git a/Modules/_ctypes/libffi_osx/x86/darwin64.S b/Modules/_ctypes/libffi_osx/x86/darwin64.S index 165d4693..1286d33f 100644 --- a/Modules/_ctypes/libffi_osx/x86/darwin64.S +++ b/Modules/_ctypes/libffi_osx/x86/darwin64.S @@ -351,7 +351,7 @@ LASFDE1: .set L$set$3,LUW1-LUW0 .long L$set$3 - /* New stack frame based off rbp. This is a itty bit of unwind + /* New stack frame based off rbp. This is an itty bit of unwind trickery in that the CFA *has* changed. There is no easy way to describe it correctly on entry to the function. Fortunately, it doesn't matter too much since at all points we can correctly diff --git a/Modules/_datetimemodule.c b/Modules/_datetimemodule.c index 7e4be5ba..e3de537a 100644 --- a/Modules/_datetimemodule.c +++ b/Modules/_datetimemodule.c @@ -3046,7 +3046,7 @@ tzinfo_fromutc(PyDateTime_TZInfo *self, PyObject *dt) goto Fail; if (dst == Py_None) goto Inconsistent; - if (delta_bool(delta) != 0) { + if (delta_bool((PyDateTime_Delta *)dst) != 0) { PyObject *temp = result; result = add_datetime_timedelta((PyDateTime_DateTime *)result, (PyDateTime_Delta *)dst, 1); @@ -4083,6 +4083,44 @@ datetime_from_timet_and_us(PyObject *cls, TM_FUNC f, time_t timet, int us, tzinfo); } +static time_t +_PyTime_DoubleToTimet(double x) +{ + time_t result; + double diff; + + result = (time_t)x; + /* How much info did we lose? time_t may be an integral or + * floating type, and we don't know which. If it's integral, + * we don't know whether C truncates, rounds, returns the floor, + * etc. If we lost a second or more, the C rounding is + * unreasonable, or the input just doesn't fit in a time_t; + * call it an error regardless. Note that the original cast to + * time_t can cause a C error too, but nothing we can do to + * worm around that. + */ + diff = x - (double)result; + if (diff <= -1.0 || diff >= 1.0) { + PyErr_SetString(PyExc_OverflowError, + "timestamp out of range for platform time_t"); + result = (time_t)-1; + } + return result; +} + +/* Round a double to the nearest long. |x| must be small enough to fit + * in a C long; this is not checked. + */ +static double +_PyTime_RoundHalfEven(double x) +{ + double rounded = round(x); + if (fabs(x-rounded) == 0.5) + /* halfway case: round to even */ + rounded = 2.0*round(x/2.0); + return rounded; +} + /* Internal helper. * Build datetime from a Python timestamp. Pass localtime or gmtime for f, * to control the interpretation of the timestamp. Since a double doesn't @@ -4091,18 +4129,32 @@ datetime_from_timet_and_us(PyObject *cls, TM_FUNC f, time_t timet, int us, * to get that much precision (e.g., C time() isn't good enough). 
*/ static PyObject * -datetime_from_timestamp(PyObject *cls, TM_FUNC f, PyObject *timestamp, +datetime_from_timestamp(PyObject *cls, TM_FUNC f, double timestamp, PyObject *tzinfo) { time_t timet; - long us; + double fraction; + int us; - if (_PyTime_ObjectToTimeval(timestamp, - &timet, &us, _PyTime_ROUND_FLOOR) == -1) + timet = _PyTime_DoubleToTimet(timestamp); + if (timet == (time_t)-1 && PyErr_Occurred()) return NULL; - assert(0 <= us && us <= 999999); - - return datetime_from_timet_and_us(cls, f, timet, (int)us, tzinfo); + fraction = timestamp - (double)timet; + us = (int)_PyTime_RoundHalfEven(fraction * 1e6); + if (us < 0) { + /* Truncation towards zero is not what we wanted + for negative numbers (Python's mod semantics) */ + timet -= 1; + us += 1000000; + } + /* If timestamp is less than one microsecond smaller than a + * full second, round up. Otherwise, ValueErrors are raised + * for some floats. */ + if (us == 1000000) { + timet += 1; + us = 0; + } + return datetime_from_timet_and_us(cls, f, timet, us, tzinfo); } /* Internal helper. @@ -4113,13 +4165,14 @@ static PyObject * datetime_best_possible(PyObject *cls, TM_FUNC f, PyObject *tzinfo) { _PyTime_t ts = _PyTime_GetSystemClock(); - struct timeval tv; + time_t secs; + int us; - if (_PyTime_AsTimeval(ts, &tv, _PyTime_ROUND_FLOOR) < 0) + if (_PyTime_AsTimevalTime_t(ts, &secs, &us, _PyTime_ROUND_FLOOR) < 0) return NULL; - assert(0 <= tv.tv_usec && tv.tv_usec <= 999999); + assert(0 <= us && us <= 999999); - return datetime_from_timet_and_us(cls, f, tv.tv_sec, tv.tv_usec, tzinfo); + return datetime_from_timet_and_us(cls, f, secs, us, tzinfo); } /*[clinic input] @@ -4174,11 +4227,11 @@ static PyObject * datetime_fromtimestamp(PyObject *cls, PyObject *args, PyObject *kw) { PyObject *self; - PyObject *timestamp; + double timestamp; PyObject *tzinfo = Py_None; static char *keywords[] = {"timestamp", "tz", NULL}; - if (! PyArg_ParseTupleAndKeywords(args, kw, "O|O:fromtimestamp", + if (! 
PyArg_ParseTupleAndKeywords(args, kw, "d|O:fromtimestamp", keywords, ×tamp, &tzinfo)) return NULL; if (check_tzinfo_subclass(tzinfo) < 0) @@ -4202,10 +4255,10 @@ datetime_fromtimestamp(PyObject *cls, PyObject *args, PyObject *kw) static PyObject * datetime_utcfromtimestamp(PyObject *cls, PyObject *args) { - PyObject *timestamp; + double timestamp; PyObject *result = NULL; - if (PyArg_ParseTuple(args, "O:utcfromtimestamp", ×tamp)) + if (PyArg_ParseTuple(args, "d:utcfromtimestamp", ×tamp)) result = datetime_from_timestamp(cls, gmtime, timestamp, Py_None); return result; diff --git a/Modules/_decimal/libmpdec/mpdecimal.c b/Modules/_decimal/libmpdec/mpdecimal.c index 21d22227..593f9f5e 100644 --- a/Modules/_decimal/libmpdec/mpdecimal.c +++ b/Modules/_decimal/libmpdec/mpdecimal.c @@ -43,6 +43,7 @@ #ifdef PPRO #if defined(_MSC_VER) #include + #pragma float_control(precise, on) #pragma fenv_access(on) #elif !defined(__OpenBSD__) && !defined(__NetBSD__) /* C99 */ diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c index dc64cfee..fadc0a9c 100644 --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -601,6 +601,7 @@ struct lru_cache_object; typedef struct lru_list_elem { PyObject_HEAD struct lru_list_elem *prev, *next; /* borrowed links */ + Py_hash_t hash; PyObject *key, *result; } lru_list_elem; @@ -762,10 +763,14 @@ static PyObject * infinite_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds) { PyObject *result; + Py_hash_t hash; PyObject *key = lru_cache_make_key(args, kwds, self->typed); if (!key) return NULL; - result = PyDict_GetItemWithError(self->cache, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + result = _PyDict_GetItem_KnownHash(self->cache, key, hash); if (result) { Py_INCREF(result); self->hits++; @@ -781,7 +786,7 @@ infinite_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwd Py_DECREF(key); return NULL; } - if (PyDict_SetItem(self->cache, key, result) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, result, hash) < 0) { Py_DECREF(result); Py_DECREF(key); return NULL; @@ -813,11 +818,15 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds { lru_list_elem *link; PyObject *key, *result; + Py_hash_t hash; key = lru_cache_make_key(args, kwds, self->typed); if (!key) return NULL; - link = (lru_list_elem *)PyDict_GetItemWithError(self->cache, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + link = (lru_list_elem *)_PyDict_GetItem_KnownHash(self->cache, key, hash); if (link) { lru_cache_extricate_link(link); lru_cache_append_link(self, link); @@ -845,7 +854,8 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds /* Remove it from the cache. The cache dict holds one reference to the link, and the linked list holds yet one reference to it. 
*/ - if (PyDict_DelItem(self->cache, link->key) < 0) { + if (_PyDict_DelItem_KnownHash(self->cache, link->key, + link->hash) < 0) { lru_cache_append_link(self, link); Py_DECREF(key); Py_DECREF(result); @@ -859,9 +869,11 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds oldkey = link->key; oldresult = link->result; + link->hash = hash; link->key = key; link->result = result; - if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link, + hash) < 0) { Py_DECREF(link); Py_DECREF(oldkey); Py_DECREF(oldresult); @@ -881,10 +893,12 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds return NULL; } + link->hash = hash; link->key = key; link->result = result; _PyObject_GC_TRACK(link); - if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link, + hash) < 0) { Py_DECREF(link); return NULL; } @@ -1033,6 +1047,12 @@ lru_cache_cache_clear(lru_cache_object *self, PyObject *unused) Py_RETURN_NONE; } +static PyObject * +lru_cache_reduce(PyObject *self, PyObject *unused) +{ + return PyObject_GetAttrString(self, "__qualname__"); +} + static int lru_cache_tp_traverse(lru_cache_object *self, visitproc visit, void *arg) { @@ -1083,6 +1103,7 @@ cache_info_type: namedtuple class with the fields:\n\ static PyMethodDef lru_cache_methods[] = { {"cache_info", (PyCFunction)lru_cache_cache_info, METH_NOARGS}, {"cache_clear", (PyCFunction)lru_cache_cache_clear, METH_NOARGS}, + {"__reduce__", (PyCFunction)lru_cache_reduce, METH_NOARGS}, {NULL} }; diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c index a157fbb1..44765acf 100644 --- a/Modules/_hashopenssl.c +++ b/Modules/_hashopenssl.c @@ -486,8 +486,8 @@ PKCS5_PBKDF2_HMAC_fast(const char *pass, int passlen, HMAC_CTX_cleanup(&hctx_tpl); return 0; } - while(tkeylen) { - if(tkeylen > mdlen) + while (tkeylen) { + if (tkeylen > mdlen) cplen = mdlen; else cplen = tkeylen; @@ -684,7 +684,7 @@ _openssl_hash_name_mapper(const OBJ_NAME *openssl_obj_name, void *arg) if (openssl_obj_name == NULL) return; /* Ignore aliased names, they pollute the list and OpenSSL appears to - * have a its own definition of alias as the resulting list still + * have its own definition of alias as the resulting list still * contains duplicate and alternate names for several algorithms. */ if (openssl_obj_name->alias) return; diff --git a/Modules/_io/_iomodule.c b/Modules/_io/_iomodule.c index 528bcd4a..ec681702 100644 --- a/Modules/_io/_iomodule.c +++ b/Modules/_io/_iomodule.c @@ -75,7 +75,7 @@ PyDoc_STRVAR(module_doc, "Another IOBase subclass, TextIOBase, deals with the encoding and decoding\n" "of streams into text. TextIOWrapper, which extends it, is a buffered text\n" "interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO\n" -"is a in-memory stream for text.\n" +"is an in-memory stream for text.\n" "\n" "Argument names are not part of the specification, and only the arguments\n" "of open() are intended to be used as keyword arguments.\n" diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h index 9d5205ec..0c6eae26 100644 --- a/Modules/_io/_iomodule.h +++ b/Modules/_io/_iomodule.h @@ -52,7 +52,12 @@ extern PyObject *_PyIncrementalNewlineDecoder_decode( which can be safely put aside until another search. NOTE: for performance reasons, `end` must point to a NUL character ('\0'). - Otherwise, the function will scan further and return garbage. 
*/ + Otherwise, the function will scan further and return garbage. + + There are three modes, in order of priority: + * translated: Only find \n (assume newlines already translated) + * universal: Use universal newlines algorithm + * Otherwise, the line ending is specified by readnl, a str object */ extern Py_ssize_t _PyIO_find_line_ending( int translated, int universal, PyObject *readnl, int kind, char *start, char *end, Py_ssize_t *consumed); diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c index 8315ab8e..73018a53 100644 --- a/Modules/_io/stringio.c +++ b/Modules/_io/stringio.c @@ -696,10 +696,8 @@ _io_StringIO___init___impl(stringio *self, PyObject *value, char *newline = "\n"; Py_ssize_t value_len; - /* Parse the newline argument. This used to be done with the 'z' - specifier, however this allowed any object with the buffer interface to - be converted. Thus we have to parse it manually since we only want to - allow unicode objects or None. */ + /* Parse the newline argument. We only want to allow unicode objects or + None. */ if (newline_obj == Py_None) { newline = NULL; } @@ -750,7 +748,7 @@ _io_StringIO___init___impl(stringio *self, PyObject *value, /* If newline == "", we don't translate anything. If newline == "\n" or newline == None, we translate to "\n", which is a no-op. - (for newline == None, TextIOWrapper translates to os.sepline, but it + (for newline == None, TextIOWrapper translates to os.linesep, but it is pointless for StringIO) */ if (newline != NULL && newline[0] == '\r') { diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c index c962c0ba..b232b024 100644 --- a/Modules/_io/textio.c +++ b/Modules/_io/textio.c @@ -905,8 +905,8 @@ _io_TextIOWrapper___init___impl(textio *self, PyObject *buffer, if (self->encoding == NULL) { catch_ImportError: /* - Importing locale can raise a ImportError because of - _functools, and locale.getpreferredencoding can raise a + Importing locale can raise an ImportError because of + _functools, and locale.getpreferredencoding can raise an ImportError if _locale is not available. These will happen during module building. */ diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 3ad9a976..5f385000 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -1193,6 +1193,13 @@ _Unpickler_Read(UnpicklerObject *self, char **s, Py_ssize_t n) { Py_ssize_t num_read; + *s = NULL; + if (self->next_read_idx > PY_SSIZE_T_MAX - n) { + PickleState *st = _Pickle_GetGlobalState(); + PyErr_SetString(st->UnpicklingError, + "read would overflow (invalid bytecode)"); + return -1; + } if (self->next_read_idx + n <= self->input_len) { *s = self->input_buffer + self->next_read_idx; self->next_read_idx += n; @@ -4093,7 +4100,7 @@ version of Python needed to read the pickle produced. The *file* argument must have a write() method that accepts a single bytes argument. It can thus be a file object opened for binary -writing, a io.BytesIO instance, or any other custom object that meets +writing, an io.BytesIO instance, or any other custom object that meets this interface. 
If *fix_imports* is True and protocol is less than 3, pickle will try @@ -4104,7 +4111,7 @@ to map the new Python 3 names to the old module names used in Python static int _pickle_Pickler___init___impl(PicklerObject *self, PyObject *file, PyObject *protocol, int fix_imports) -/*[clinic end generated code: output=b5f31078dab17fb0 input=b8cdeb7e3f5ee674]*/ +/*[clinic end generated code: output=b5f31078dab17fb0 input=4faabdbc763c2389]*/ { _Py_IDENTIFIER(persistent_id); _Py_IDENTIFIER(dispatch_table); @@ -4599,10 +4606,20 @@ static Py_ssize_t calc_binsize(char *bytes, int nbytes) { unsigned char *s = (unsigned char *)bytes; - Py_ssize_t i; + int i; size_t x = 0; - for (i = 0; i < nbytes && (size_t)i < sizeof(size_t); i++) { + if (nbytes > (int)sizeof(size_t)) { + /* Check for integer overflow. BINBYTES8 and BINUNICODE8 opcodes + * have 64-bit size that can't be represented on 32-bit platform. + */ + for (i = (int)sizeof(size_t); i < nbytes; i++) { + if (s[i]) + return -1; + } + nbytes = (int)sizeof(size_t); + } + for (i = 0; i < nbytes; i++) { x |= (size_t) s[i] << (8 * i); } @@ -6497,7 +6514,7 @@ representation are ignored. The argument *file* must have two methods, a read() method that takes an integer argument, and a readline() method that requires no arguments. Both methods should return bytes. Thus *file* can be a -binary file object opened for reading, a io.BytesIO object, or any +binary file object opened for reading, an io.BytesIO object, or any other custom object that meets this interface. Optional keyword arguments are *fix_imports*, *encoding* and *errors*, @@ -6514,7 +6531,7 @@ static int _pickle_Unpickler___init___impl(UnpicklerObject *self, PyObject *file, int fix_imports, const char *encoding, const char *errors) -/*[clinic end generated code: output=e2c8ce748edc57b0 input=30b4dc9e976b890c]*/ +/*[clinic end generated code: output=e2c8ce748edc57b0 input=04ece661aa884837]*/ { _Py_IDENTIFIER(persistent_load); @@ -6933,7 +6950,7 @@ version of Python needed to read the pickle produced. The *file* argument must have a write() method that accepts a single bytes argument. It can thus be a file object opened for binary -writing, a io.BytesIO instance, or any other custom object that meets +writing, an io.BytesIO instance, or any other custom object that meets this interface. If *fix_imports* is True and protocol is less than 3, pickle will try @@ -6944,7 +6961,7 @@ to map the new Python 3 names to the old module names used in Python static PyObject * _pickle_dump_impl(PyModuleDef *module, PyObject *obj, PyObject *file, PyObject *protocol, int fix_imports) -/*[clinic end generated code: output=0de7dff89c406816 input=e9e5fdd48de92eae]*/ +/*[clinic end generated code: output=0de7dff89c406816 input=830f8a64cef6f042]*/ { PicklerObject *pickler = _Pickler_New(); @@ -7043,7 +7060,7 @@ representation are ignored. The argument *file* must have two methods, a read() method that takes an integer argument, and a readline() method that requires no arguments. Both methods should return bytes. Thus *file* can be a -binary file object opened for reading, a io.BytesIO object, or any +binary file object opened for reading, an io.BytesIO object, or any other custom object that meets this interface. Optional keyword arguments are *fix_imports*, *encoding* and *errors*, @@ -7059,7 +7076,7 @@ string instances as bytes objects. 
static PyObject * _pickle_load_impl(PyModuleDef *module, PyObject *file, int fix_imports, const char *encoding, const char *errors) -/*[clinic end generated code: output=798f1c57cb2b4eb1 input=da97372e38e510a6]*/ +/*[clinic end generated code: output=798f1c57cb2b4eb1 input=2df7c7a1e6742204]*/ { PyObject *result; UnpicklerObject *unpickler = _Unpickler_New(); diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c index a327fc56..2cdc3817 100644 --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -109,10 +109,11 @@ _sanity_check_python_fd_sequence(PyObject *fd_sequence) for (seq_idx = 0; seq_idx < seq_len; ++seq_idx) { PyObject* py_fd = PySequence_Fast_GET_ITEM(fd_sequence, seq_idx); long iter_fd = PyLong_AsLong(py_fd); - if (iter_fd < 0 || iter_fd < prev_fd || iter_fd > INT_MAX) { + if (iter_fd < 0 || iter_fd <= prev_fd || iter_fd > INT_MAX) { /* Negative, overflow, not a Long, unsorted, too big for a fd. */ return 1; } + prev_fd = iter_fd; } return 0; } @@ -549,7 +550,9 @@ subprocess_fork_exec(PyObject* self, PyObject *args) int need_to_reenable_gc = 0; char *const *exec_array, *const *argv = NULL, *const *envp = NULL; Py_ssize_t arg_num; +#ifdef WITH_THREAD int import_lock_held = 0; +#endif if (!PyArg_ParseTuple( args, "OOpOOOiiiiiiiiiiO:fork_exec", @@ -644,8 +647,10 @@ subprocess_fork_exec(PyObject* self, PyObject *args) preexec_fn_args_tuple = PyTuple_New(0); if (!preexec_fn_args_tuple) goto cleanup; +#ifdef WITH_THREAD _PyImport_AcquireLock(); import_lock_held = 1; +#endif } if (cwd_obj != Py_None) { @@ -688,12 +693,14 @@ subprocess_fork_exec(PyObject* self, PyObject *args) /* Capture the errno exception before errno can be clobbered. */ PyErr_SetFromErrno(PyExc_OSError); } - if (preexec_fn != Py_None && - _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { +#ifdef WITH_THREAD + if (preexec_fn != Py_None + && _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); } import_lock_held = 0; +#endif /* Parent process */ if (envp) @@ -716,8 +723,10 @@ subprocess_fork_exec(PyObject* self, PyObject *args) return PyLong_FromPid(pid); cleanup: +#ifdef WITH_THREAD if (import_lock_held) _PyImport_ReleaseLock(); +#endif if (envp) _Py_FreeCharPArray(envp); if (argv) diff --git a/Modules/_ssl.c b/Modules/_ssl.c index ca3549c6..67402fe0 100644 --- a/Modules/_ssl.c +++ b/Modules/_ssl.c @@ -109,8 +109,7 @@ struct py_ssl_library_code { # define HAVE_SNI 0 #endif -/* ALPN added in OpenSSL 1.0.2 */ -#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(OPENSSL_NO_TLSEXT) +#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation # define HAVE_ALPN #endif @@ -1018,7 +1017,10 @@ _get_aia_uri(X509 *certificate, int nid) { AUTHORITY_INFO_ACCESS *info; info = X509_get_ext_d2i(certificate, NID_info_access, NULL, NULL); - if ((info == NULL) || (sk_ACCESS_DESCRIPTION_num(info) == 0)) { + if (info == NULL) + return Py_None; + if (sk_ACCESS_DESCRIPTION_num(info) == 0) { + AUTHORITY_INFO_ACCESS_free(info); return Py_None; } @@ -1068,25 +1070,23 @@ _get_aia_uri(X509 *certificate, int nid) { static PyObject * _get_crl_dp(X509 *certificate) { STACK_OF(DIST_POINT) *dps; - int i, j, result; - PyObject *lst; + int i, j; + PyObject *lst, *res = NULL; #if OPENSSL_VERSION_NUMBER < 0x10001000L - dps = X509_get_ext_d2i(certificate, NID_crl_distribution_points, - NULL, NULL); + dps = X509_get_ext_d2i(certificate, NID_crl_distribution_points, NULL, NULL); #else /* Calls x509v3_cache_extensions and 
sets up crldp */ X509_check_ca(certificate); dps = certificate->crldp; #endif - if (dps == NULL) { + if (dps == NULL) return Py_None; - } - if ((lst = PyList_New(0)) == NULL) { - return NULL; - } + lst = PyList_New(0); + if (lst == NULL) + goto done; for (i=0; i < sk_DIST_POINT_num(dps); i++) { DIST_POINT *dp; @@ -1099,6 +1099,7 @@ _get_crl_dp(X509 *certificate) { GENERAL_NAME *gn; ASN1_IA5STRING *uri; PyObject *ouri; + int err; gn = sk_GENERAL_NAME_value(gns, j); if (gn->type != GEN_URI) { @@ -1107,28 +1108,25 @@ _get_crl_dp(X509 *certificate) { uri = gn->d.uniformResourceIdentifier; ouri = PyUnicode_FromStringAndSize((char *)uri->data, uri->length); - if (ouri == NULL) { - Py_DECREF(lst); - return NULL; - } - result = PyList_Append(lst, ouri); + if (ouri == NULL) + goto done; + + err = PyList_Append(lst, ouri); Py_DECREF(ouri); - if (result < 0) { - Py_DECREF(lst); - return NULL; - } + if (err < 0) + goto done; } } - /* convert to tuple or None */ - if (PyList_Size(lst) == 0) { - Py_DECREF(lst); - return Py_None; - } else { - PyObject *tup; - tup = PyList_AsTuple(lst); - Py_DECREF(lst); - return tup; - } + + /* Convert to tuple. */ + res = (PyList_GET_SIZE(lst) > 0) ? PyList_AsTuple(lst) : Py_None; + + done: + Py_XDECREF(lst); +#if OPENSSL_VERSION_NUMBER < 0x10001000L + sk_DIST_POINT_free(dps); +#endif + return res; } static PyObject * @@ -2281,6 +2279,8 @@ _ssl__SSLContext_impl(PyTypeObject *type, int proto_version) options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS; if (proto_version != PY_SSL_VERSION_SSL2) options |= SSL_OP_NO_SSLv2; + if (proto_version != PY_SSL_VERSION_SSL3) + options |= SSL_OP_NO_SSLv3; SSL_CTX_set_options(self->ctx, options); #ifndef OPENSSL_NO_ECDH @@ -2997,7 +2997,7 @@ _ssl__SSLContext_load_verify_locations_impl(PySSLContext *self, cadata_ascii = PyUnicode_AsASCIIString(cadata); if (cadata_ascii == NULL) { PyErr_SetString(PyExc_TypeError, - "cadata should be a ASCII string or a " + "cadata should be an ASCII string or a " "bytes-like object"); goto error; } @@ -3274,7 +3274,7 @@ _servername_callback(SSL *s, int *al, void *args) ssl = SSL_get_app_data(s); assert(PySSLSocket_Check(ssl)); - /* The servername callback expects a argument that represents the current + /* The servername callback expects an argument that represents the current * SSL connection and that has a .context attribute that can be changed to * identify the requested hostname. 
Since the official API is the Python * level API we want to pass the callback a Python level object rather than @@ -3970,7 +3970,7 @@ _ssl_get_default_verify_paths_impl(PyModuleDef *module) else if ((target = PyUnicode_DecodeFSDefault(tmp)) == NULL) { \ target = PyBytes_FromString(tmp); } \ if (!target) goto error; \ - } + } CONVERT(X509_get_default_cert_file_env(), ofile_env); CONVERT(X509_get_default_cert_file(), ofile); diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index ba0a24bd..9a036482 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -3518,6 +3518,15 @@ test_PyTime_AsMicroseconds(PyObject *self, PyObject *args) return _PyTime_AsNanosecondsObject(ms); } +static PyObject* +get_recursion_depth(PyObject *self, PyObject *args) +{ + PyThreadState *tstate = PyThreadState_GET(); + + /* substract one to ignore the frame of the get_recursion_depth() call */ + return PyLong_FromLong(tstate->recursion_depth - 1); +} + static PyMethodDef TestMethods[] = { {"raise_exception", raise_exception, METH_VARARGS}, @@ -3694,6 +3703,7 @@ static PyMethodDef TestMethods[] = { #endif {"PyTime_AsMilliseconds", test_PyTime_AsMilliseconds, METH_VARARGS}, {"PyTime_AsMicroseconds", test_PyTime_AsMicroseconds, METH_VARARGS}, + {"get_recursion_depth", get_recursion_depth, METH_NOARGS}, {NULL, NULL} /* sentinel */ }; diff --git a/Modules/atexitmodule.c b/Modules/atexitmodule.c index 739c1883..3cdf2d7e 100644 --- a/Modules/atexitmodule.c +++ b/Modules/atexitmodule.c @@ -257,7 +257,7 @@ atexit_free(PyObject *m) PyDoc_STRVAR(atexit_unregister__doc__, "unregister(func) -> None\n\ \n\ -Unregister a exit function which was previously registered using\n\ +Unregister an exit function which was previously registered using\n\ atexit.register\n\ \n\ func - function to be unregistered"); diff --git a/Modules/audioop.c b/Modules/audioop.c index bbc458f9..3b05aec0 100644 --- a/Modules/audioop.c +++ b/Modules/audioop.c @@ -112,7 +112,7 @@ static PyInt16 _st_ulaw2linear16[256] = { /* * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data - * stored in a unsigned char. This function should only be called with + * stored in an unsigned char. This function should only be called with * the data shifted such that it only contains information in the lower * 14-bits. * @@ -218,7 +218,7 @@ static PyInt16 _st_alaw2linear16[256] = { /* * linear2alaw() accepts an 13-bit signed integer and encodes it as A-law data - * stored in a unsigned char. This function should only be called with + * stored in an unsigned char. This function should only be called with * the data shifted such that it only contains information in the lower * 13-bits. * diff --git a/Modules/clinic/_pickle.c.h b/Modules/clinic/_pickle.c.h index b698ce81..a9f0c424 100644 --- a/Modules/clinic/_pickle.c.h +++ b/Modules/clinic/_pickle.c.h @@ -77,7 +77,7 @@ PyDoc_STRVAR(_pickle_Pickler___init____doc__, "\n" "The *file* argument must have a write() method that accepts a single\n" "bytes argument. It can thus be a file object opened for binary\n" -"writing, a io.BytesIO instance, or any other custom object that meets\n" +"writing, an io.BytesIO instance, or any other custom object that meets\n" "this interface.\n" "\n" "If *fix_imports* is True and protocol is less than 3, pickle will try\n" @@ -260,7 +260,7 @@ PyDoc_STRVAR(_pickle_Unpickler___init____doc__, "The argument *file* must have two methods, a read() method that takes\n" "an integer argument, and a readline() method that requires no\n" "arguments. 
Both methods should return bytes. Thus *file* can be a\n" -"binary file object opened for reading, a io.BytesIO object, or any\n" +"binary file object opened for reading, an io.BytesIO object, or any\n" "other custom object that meets this interface.\n" "\n" "Optional keyword arguments are *fix_imports*, *encoding* and *errors*,\n" @@ -369,7 +369,7 @@ PyDoc_STRVAR(_pickle_dump__doc__, "\n" "The *file* argument must have a write() method that accepts a single\n" "bytes argument. It can thus be a file object opened for binary\n" -"writing, a io.BytesIO instance, or any other custom object that meets\n" +"writing, an io.BytesIO instance, or any other custom object that meets\n" "this interface.\n" "\n" "If *fix_imports* is True and protocol is less than 3, pickle will try\n" @@ -462,7 +462,7 @@ PyDoc_STRVAR(_pickle_load__doc__, "The argument *file* must have two methods, a read() method that takes\n" "an integer argument, and a readline() method that requires no\n" "arguments. Both methods should return bytes. Thus *file* can be a\n" -"binary file object opened for reading, a io.BytesIO object, or any\n" +"binary file object opened for reading, an io.BytesIO object, or any\n" "other custom object that meets this interface.\n" "\n" "Optional keyword arguments are *fix_imports*, *encoding* and *errors*,\n" @@ -545,4 +545,4 @@ _pickle_loads(PyModuleDef *module, PyObject *args, PyObject *kwargs) exit: return return_value; } -/*[clinic end generated code: output=06f3a5233298448e input=a9049054013a1b77]*/ +/*[clinic end generated code: output=aecd61660d1cf31d input=a9049054013a1b77]*/ diff --git a/Modules/clinic/posixmodule.c.h b/Modules/clinic/posixmodule.c.h index 98eeae48..9ef702a2 100644 --- a/Modules/clinic/posixmodule.c.h +++ b/Modules/clinic/posixmodule.c.h @@ -1487,10 +1487,10 @@ PyDoc_STRVAR(os_utime__doc__, "\n" "If times is not None, it must be a tuple (atime, mtime);\n" " atime and mtime should be expressed as float seconds since the epoch.\n" -"If ns is not None, it must be a tuple (atime_ns, mtime_ns);\n" +"If ns is specified, it must be a tuple (atime_ns, mtime_ns);\n" " atime_ns and mtime_ns should be expressed as integer nanoseconds\n" " since the epoch.\n" -"If both times and ns are None, utime uses the current time.\n" +"If times is None and ns is unspecified, utime uses the current time.\n" "Specifying tuples for both times and ns is an error.\n" "\n" "If dir_fd is not None, it should be a file descriptor open to a directory,\n" @@ -5788,4 +5788,4 @@ exit: #ifndef OS_SET_HANDLE_INHERITABLE_METHODDEF #define OS_SET_HANDLE_INHERITABLE_METHODDEF #endif /* !defined(OS_SET_HANDLE_INHERITABLE_METHODDEF) */ -/*[clinic end generated code: output=f3f92b2d2e2c3fe3 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=95824c52fd034654 input=a9049054013a1b77]*/ diff --git a/Modules/clinic/zlibmodule.c.h b/Modules/clinic/zlibmodule.c.h index 35661a5a..c5cdf427 100644 --- a/Modules/clinic/zlibmodule.c.h +++ b/Modules/clinic/zlibmodule.c.h @@ -68,7 +68,7 @@ zlib_decompress(PyModuleDef *module, PyObject *args) unsigned int bufsize = DEF_BUF_SIZE; if (!PyArg_ParseTuple(args, "y*|iO&:decompress", - &data, &wbits, uint_converter, &bufsize)) + &data, &wbits, capped_uint_converter, &bufsize)) goto exit; return_value = zlib_decompress_impl(module, &data, wbits, bufsize); @@ -242,7 +242,7 @@ zlib_Decompress_decompress(compobject *self, PyObject *args) unsigned int max_length = 0; if (!PyArg_ParseTuple(args, "y*|O&:decompress", - &data, uint_converter, &max_length)) + &data, 
capped_uint_converter, &max_length)) goto exit; return_value = zlib_Decompress_decompress_impl(self, &data, max_length); @@ -353,7 +353,7 @@ zlib_Decompress_flush(compobject *self, PyObject *args) unsigned int length = DEF_BUF_SIZE; if (!PyArg_ParseTuple(args, "|O&:flush", - uint_converter, &length)) + capped_uint_converter, &length)) goto exit; return_value = zlib_Decompress_flush_impl(self, length); @@ -438,4 +438,4 @@ exit: #ifndef ZLIB_COMPRESS_COPY_METHODDEF #define ZLIB_COMPRESS_COPY_METHODDEF #endif /* !defined(ZLIB_COMPRESS_COPY_METHODDEF) */ -/*[clinic end generated code: output=56ed1147bbbb4788 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=7734aec079550bc8 input=a9049054013a1b77]*/ diff --git a/Modules/expat/pyexpatns.h b/Modules/expat/pyexpatns.h index 2f2f4f95..999c5c73 100644 --- a/Modules/expat/pyexpatns.h +++ b/Modules/expat/pyexpatns.h @@ -21,7 +21,7 @@ * * * The Solution: - * Prefix all a exported symbols with "PyExpat_". This is similar to + * Prefix all exported symbols with "PyExpat_". This is similar to * what Mozilla does for some common libs: * http://lxr.mozilla.org/seamonkey/source/modules/libimg/png/mozpngconf.h#115 * diff --git a/Modules/hashlib.h b/Modules/hashlib.h index 51c68f6a..35804536 100644 --- a/Modules/hashlib.h +++ b/Modules/hashlib.h @@ -30,7 +30,7 @@ /* * Helper code to synchronize access to the hash object when the GIL is * released around a CPU consuming hashlib operation. All code paths that - * access a mutable part of obj must be enclosed in a ENTER_HASHLIB / + * access a mutable part of obj must be enclosed in an ENTER_HASHLIB / * LEAVE_HASHLIB block or explicitly acquire and release the lock inside * a PY_BEGIN / END_ALLOW_THREADS block if they wish to release the GIL for * an operation. 
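For context on the ENTER_HASHLIB / LEAVE_HASHLIB contract described in the hashlib.h comment above, a minimal sketch of a typical consumer follows; the EVPobject-style struct, its lock field, and the do_update() helper are assumed names used only for illustration and are not taken from this patch:

/*
 * Sketch only: the object layout ("lock" field) and do_update() are
 * assumptions for illustration.  The point is the pairing of
 * ENTER_HASHLIB / LEAVE_HASHLIB for short operations versus an explicit
 * lock acquire inside Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS
 * when the GIL is released for a CPU-heavy update.
 */
static void
hash_update_sketch(EVPobject *self, const char *buf, Py_ssize_t len)
{
    if (self->lock != NULL && len >= HASHLIB_GIL_MINSIZE) {
        /* CPU-heavy case: release the GIL, serialize on the object lock. */
        Py_BEGIN_ALLOW_THREADS
        PyThread_acquire_lock(self->lock, 1);
        do_update(self, buf, len);              /* assumed helper */
        PyThread_release_lock(self->lock);
        Py_END_ALLOW_THREADS
    }
    else {
        /* Short case: keep the GIL, still guard the mutable hash state. */
        ENTER_HASHLIB(self);
        do_update(self, buf, len);
        LEAVE_HASHLIB(self);
    }
}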
diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c index ac31e2f9..dfafeab5 100644 --- a/Modules/itertoolsmodule.c +++ b/Modules/itertoolsmodule.c @@ -1725,7 +1725,7 @@ PyDoc_STRVAR(starmap_doc, "starmap(function, sequence) --> starmap object\n\ \n\ Return an iterator whose values are returned from the function evaluated\n\ -with a argument tuple taken from the given sequence."); +with an argument tuple taken from the given sequence."); static PyTypeObject starmap_type = { PyVarObject_HEAD_INIT(NULL, 0) @@ -2236,13 +2236,21 @@ product_setstate(productobject *lz, PyObject *state) { PyObject* indexObject = PyTuple_GET_ITEM(state, i); Py_ssize_t index = PyLong_AsSsize_t(indexObject); + PyObject* pool; + Py_ssize_t poolsize; if (index < 0 && PyErr_Occurred()) return NULL; /* not an integer */ + pool = PyTuple_GET_ITEM(lz->pools, i); + poolsize = PyTuple_GET_SIZE(pool); + if (poolsize == 0) { + lz->stopped = 1; + Py_RETURN_NONE; + } /* clamp the index */ if (index < 0) index = 0; - else if (index > n-1) - index = n-1; + else if (index > poolsize-1) + index = poolsize-1; lz->indices[i] = index; } diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index ec8c526c..854a7491 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -4560,9 +4560,7 @@ typedef struct { } \ -#define UTIME_HAVE_DIR_FD (defined(HAVE_FUTIMESAT) || defined(HAVE_UTIMENSAT)) - -#if UTIME_HAVE_DIR_FD +#if defined(HAVE_FUTIMESAT) || defined(HAVE_UTIMENSAT) static int utime_dir_fd(utime_t *ut, int dir_fd, char *path, int follow_symlinks) @@ -4588,9 +4586,7 @@ utime_dir_fd(utime_t *ut, int dir_fd, char *path, int follow_symlinks) #define FUTIMENSAT_DIR_FD_CONVERTER dir_fd_unavailable #endif -#define UTIME_HAVE_FD (defined(HAVE_FUTIMES) || defined(HAVE_FUTIMENS)) - -#if UTIME_HAVE_FD +#if defined(HAVE_FUTIMES) || defined(HAVE_FUTIMENS) static int utime_fd(utime_t *ut, int fd) @@ -4683,7 +4679,7 @@ os.utime dir_fd: dir_fd(requires='futimensat') = None follow_symlinks: bool=True -# "utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True)\n\ +# "utime(path, times=None, *[, ns], dir_fd=None, follow_symlinks=True)\n\ Set the access and modified time of path. @@ -4693,10 +4689,10 @@ On some platforms, path may also be specified as an open file descriptor. If times is not None, it must be a tuple (atime, mtime); atime and mtime should be expressed as float seconds since the epoch. -If ns is not None, it must be a tuple (atime_ns, mtime_ns); +If ns is specified, it must be a tuple (atime_ns, mtime_ns); atime_ns and mtime_ns should be expressed as integer nanoseconds since the epoch. -If both times and ns are None, utime uses the current time. +If times is None and ns is unspecified, utime uses the current time. Specifying tuples for both times and ns is an error. If dir_fd is not None, it should be a file descriptor open to a directory, @@ -4714,7 +4710,7 @@ dir_fd and follow_symlinks may not be available on your platform. 
static PyObject * os_utime_impl(PyModuleDef *module, path_t *path, PyObject *times, PyObject *ns, int dir_fd, int follow_symlinks) -/*[clinic end generated code: output=31f3434e560ba2f0 input=1f18c17d5941aa82]*/ +/*[clinic end generated code: output=31f3434e560ba2f0 input=081cdc54ca685385]*/ { #ifdef MS_WINDOWS HANDLE hFile; @@ -4835,13 +4831,13 @@ os_utime_impl(PyModuleDef *module, path_t *path, PyObject *times, else #endif -#if UTIME_HAVE_DIR_FD +#if defined(HAVE_FUTIMESAT) || defined(HAVE_UTIMENSAT) if ((dir_fd != DEFAULT_DIR_FD) || (!follow_symlinks)) result = utime_dir_fd(&utime, dir_fd, path->narrow, follow_symlinks); else #endif -#if UTIME_HAVE_FD +#if defined(HAVE_FUTIMES) || defined(HAVE_FUTIMENS) if (path->fd != -1) result = utime_fd(&utime, path->fd); else @@ -7025,7 +7021,7 @@ os_waitpid_impl(PyModuleDef *module, Py_intptr_t pid, int options) res = _cwait(&status, pid, options); Py_END_ALLOW_THREADS } while (res < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); - if (res != 0) + if (res < 0) return (!async_err) ? posix_error() : NULL; /* shift the status left a byte so this is more like the POSIX waitpid */ @@ -7735,7 +7731,7 @@ os_open_impl(PyModuleDef *module, path_t *path, int flags, int mode, } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); _Py_END_SUPPRESS_IPH - if (fd == -1) { + if (fd < 0) { if (!async_err) PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, path->object); return -1; @@ -8239,10 +8235,10 @@ os_write_impl(PyModuleDef *module, int fd, Py_buffer *data) #ifdef HAVE_SENDFILE PyDoc_STRVAR(posix_sendfile__doc__, -"sendfile(out, in, offset, nbytes) -> byteswritten\n\ -sendfile(out, in, offset, nbytes, headers=None, trailers=None, flags=0)\n\ +"sendfile(out, in, offset, count) -> byteswritten\n\ +sendfile(out, in, offset, count[, headers][, trailers], flags=0)\n\ -> byteswritten\n\ -Copy nbytes bytes from file descriptor in to file descriptor out."); +Copy count bytes from file descriptor in to file descriptor out."); /* AC 3.5: don't bother converting, has optional group*/ static PyObject * @@ -8262,6 +8258,7 @@ posix_sendfile(PyObject *self, PyObject *args, PyObject *kwdict) off_t sbytes; struct sf_hdtr sf; int flags = 0; + /* Beware that "in" clashes with Python's own "in" operator keyword */ static char *keywords[] = {"out", "in", "offset", "count", "headers", "trailers", "flags", NULL}; @@ -8281,7 +8278,7 @@ posix_sendfile(PyObject *self, PyObject *args, PyObject *kwdict) if (headers != NULL) { if (!PySequence_Check(headers)) { PyErr_SetString(PyExc_TypeError, - "sendfile() headers must be a sequence or None"); + "sendfile() headers must be a sequence"); return NULL; } else { Py_ssize_t i = 0; /* Avoid uninitialized warning */ @@ -8298,7 +8295,7 @@ posix_sendfile(PyObject *self, PyObject *args, PyObject *kwdict) if (trailers != NULL) { if (!PySequence_Check(trailers)) { PyErr_SetString(PyExc_TypeError, - "sendfile() trailers must be a sequence or None"); + "sendfile() trailers must be a sequence"); return NULL; } else { Py_ssize_t i = 0; /* Avoid uninitialized warning */ diff --git a/Modules/pyexpat.c b/Modules/pyexpat.c index a43887e0..9a6da737 100644 --- a/Modules/pyexpat.c +++ b/Modules/pyexpat.c @@ -1378,11 +1378,16 @@ static int xmlparse_setattro(xmlparseobject *self, PyObject *name, PyObject *v) { /* Set attribute 'name' to value 'v'. 
v==NULL means delete */ + if (!PyUnicode_Check(name)) { + PyErr_Format(PyExc_TypeError, + "attribute name must be string, not '%.200s'", + name->ob_type->tp_name); + return -1; + } if (v == NULL) { PyErr_SetString(PyExc_RuntimeError, "Cannot delete attribute"); return -1; } - assert(PyUnicode_Check(name)); if (PyUnicode_CompareWithASCIIString(name, "buffer_text") == 0) { int b = PyObject_IsTrue(v); if (b < 0) @@ -1435,17 +1440,18 @@ xmlparse_setattro(xmlparseobject *self, PyObject *name, PyObject *v) return -1; } - new_buffer_size=PyLong_AS_LONG(v); + new_buffer_size = PyLong_AsLong(v); + if (new_buffer_size <= 0) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_ValueError, "buffer_size must be greater than zero"); + return -1; + } + /* trivial case -- no change */ if (new_buffer_size == self->buffer_size) { return 0; } - if (new_buffer_size <= 0) { - PyErr_SetString(PyExc_ValueError, "buffer_size must be greater than zero"); - return -1; - } - /* check maximum */ if (new_buffer_size > INT_MAX) { char errmsg[100]; diff --git a/Modules/readline.c b/Modules/readline.c index f6b52a07..a6581b0b 100644 --- a/Modules/readline.c +++ b/Modules/readline.c @@ -464,10 +464,11 @@ set_completer_delims(PyObject *self, PyObject *args) /* Keep a reference to the allocated memory in the module state in case some other module modifies rl_completer_word_break_characters (see issue #17289). */ - free(completer_word_break_characters); - completer_word_break_characters = strdup(break_chars); - if (completer_word_break_characters) { - rl_completer_word_break_characters = completer_word_break_characters; + break_chars = strdup(break_chars); + if (break_chars) { + free(completer_word_break_characters); + completer_word_break_characters = break_chars; + rl_completer_word_break_characters = break_chars; Py_RETURN_NONE; } else @@ -1235,7 +1236,7 @@ call_readline(FILE *sys_stdin, FILE *sys_stdout, const char *prompt) return NULL; } - /* We got an EOF, return a empty string. */ + /* We got an EOF, return an empty string. */ if (p == NULL) { p = PyMem_RawMalloc(1); if (p != NULL) diff --git a/Modules/selectmodule.c b/Modules/selectmodule.c index 44ff3dd1..b3ac8073 100644 --- a/Modules/selectmodule.c +++ b/Modules/selectmodule.c @@ -2363,7 +2363,7 @@ arguments; each contains the subset of the corresponding file descriptors\n\ that are ready.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file\n\ +On Windows, only sockets are supported; on Unix, all file\n\ descriptors can be used."); static PyMethodDef select_methods[] = { @@ -2381,7 +2381,7 @@ PyDoc_STRVAR(module_doc, "This module supports asynchronous I/O on multiple file descriptors.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file descriptors."); +On Windows, only sockets are supported; on Unix, all file descriptors."); static struct PyModuleDef selectmodule = { diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index ee249070..bae9634e 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -719,8 +719,10 @@ sock_call_ex(PySocketSockObject *s, int deadline_initialized = 0; int res; +#ifdef WITH_THREAD /* sock_call() must be called with the GIL held. 
*/ assert(PyGILState_Check()); +#endif /* outer loop to retry select() when select() is interrupted by a signal or to retry select()+sock_func() on false positive (see above) */ @@ -5513,9 +5515,7 @@ socket_getaddrinfo(PyObject *self, PyObject *args, PyObject* kwargs) if (hobj == Py_None) { hptr = NULL; } else if (PyUnicode_Check(hobj)) { - _Py_IDENTIFIER(encode); - - idna = _PyObject_CallMethodId(hobj, &PyId_encode, "s", "idna"); + idna = PyUnicode_AsEncodedString(hobj, "idna", NULL); if (!idna) return NULL; assert(PyBytes_Check(idna)); diff --git a/Modules/timemodule.c b/Modules/timemodule.c index 9de3c843..d2caacdc 100644 --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -653,6 +653,9 @@ time_strftime(PyObject *self, PyObject *args) PyErr_NoMemory(); break; } +#if defined _MSC_VER && _MSC_VER >= 1400 && defined(__STDC_SECURE_LIB__) + errno = 0; +#endif _Py_BEGIN_SUPPRESS_IPH buflen = format_time(outbuf, i, fmt, &buf); _Py_END_SUPPRESS_IPH diff --git a/Modules/zlibmodule.c b/Modules/zlibmodule.c index 1997b40d..37307be3 100644 --- a/Modules/zlibmodule.c +++ b/Modules/zlibmodule.c @@ -226,42 +226,42 @@ zlib_compress_impl(PyModuleDef *module, Py_buffer *bytes, int level) /*[python input] -class uint_converter(CConverter): +class capped_uint_converter(CConverter): type = 'unsigned int' - converter = 'uint_converter' + converter = 'capped_uint_converter' c_ignored_default = "0" [python start generated code]*/ -/*[python end generated code: output=da39a3ee5e6b4b0d input=22263855f7a3ebfd]*/ +/*[python end generated code: output=da39a3ee5e6b4b0d input=35521e4e733823c7]*/ static int -uint_converter(PyObject *obj, void *ptr) +capped_uint_converter(PyObject *obj, void *ptr) { - long val; - unsigned long uval; + PyObject *long_obj; + Py_ssize_t val; - val = PyLong_AsLong(obj); - if (val == -1 && PyErr_Occurred()) { - uval = PyLong_AsUnsignedLong(obj); - if (uval == (unsigned long)-1 && PyErr_Occurred()) - return 0; + long_obj = (PyObject *)_PyLong_FromNbInt(obj); + if (long_obj == NULL) { + return 0; } - else { - if (val < 0) { - PyErr_SetString(PyExc_ValueError, - "value must be positive"); - return 0; - } - uval = (unsigned long)val; + val = PyLong_AsSsize_t(long_obj); + Py_DECREF(long_obj); + if (val == -1 && PyErr_Occurred()) { + return 0; } - - if (uval > UINT_MAX) { - PyErr_SetString(PyExc_OverflowError, - "Python int too large for C unsigned int"); + if (val < 0) { + PyErr_SetString(PyExc_ValueError, + "value must be positive"); return 0; } - *(unsigned int *)ptr = Py_SAFE_DOWNCAST(uval, unsigned long, unsigned int); + if ((size_t)val > UINT_MAX) { + *(unsigned int *)ptr = UINT_MAX; + } + else { + *(unsigned int *)ptr = Py_SAFE_DOWNCAST(val, Py_ssize_t, + unsigned int); + } return 1; } @@ -272,7 +272,7 @@ zlib.decompress Compressed data. wbits: int(c_default="MAX_WBITS") = MAX_WBITS The window buffer size. - bufsize: uint(c_default="DEF_BUF_SIZE") = DEF_BUF_SIZE + bufsize: capped_uint(c_default="DEF_BUF_SIZE") = DEF_BUF_SIZE The initial output buffer size. / @@ -282,7 +282,7 @@ Returns a bytes object containing the uncompressed data. static PyObject * zlib_decompress_impl(PyModuleDef *module, Py_buffer *data, int wbits, unsigned int bufsize) -/*[clinic end generated code: output=444d0987f3429574 input=0f4b9abb7103f50e]*/ +/*[clinic end generated code: output=444d0987f3429574 input=da095118b3243b27]*/ { PyObject *result_str = NULL; Byte *input; @@ -691,7 +691,7 @@ zlib.Decompress.decompress data: Py_buffer The binary data to decompress. 
- max_length: uint = 0 + max_length: capped_uint = 0 The maximum allowable length of the decompressed data. Unconsumed input data will be stored in the unconsumed_tail attribute. @@ -707,7 +707,7 @@ Call the flush() method to clear these buffers. static PyObject * zlib_Decompress_decompress_impl(compobject *self, Py_buffer *data, unsigned int max_length) -/*[clinic end generated code: output=b82e2a2c19f5fe7b input=02cfc047377cec86]*/ +/*[clinic end generated code: output=b82e2a2c19f5fe7b input=68b6508ab07c2cf0]*/ { int err; unsigned int old_length, length = DEF_BUF_SIZE; @@ -1048,7 +1048,7 @@ error: /*[clinic input] zlib.Decompress.flush - length: uint(c_default="DEF_BUF_SIZE") = zlib.DEF_BUF_SIZE + length: capped_uint(c_default="DEF_BUF_SIZE") = zlib.DEF_BUF_SIZE the initial size of the output buffer. / @@ -1057,7 +1057,7 @@ Return a bytes object containing any remaining decompressed data. static PyObject * zlib_Decompress_flush_impl(compobject *self, unsigned int length) -/*[clinic end generated code: output=db6fb753ab698e22 input=1580956505978993]*/ +/*[clinic end generated code: output=db6fb753ab698e22 input=1bb961eb21b62aa0]*/ { int err; unsigned int new_length; diff --git a/Objects/abstract.c b/Objects/abstract.c index 31cc0a8b..a0362e74 100644 --- a/Objects/abstract.c +++ b/Objects/abstract.c @@ -1288,7 +1288,7 @@ PyNumber_Long(PyObject *o) if (truncated == NULL || PyLong_Check(truncated)) return truncated; /* __trunc__ is specified to return an Integral type, - but int() needs to return a int. */ + but int() needs to return an int. */ m = truncated->ob_type->tp_as_number; if (m == NULL || m->nb_int == NULL) { PyErr_Format( @@ -1309,12 +1309,30 @@ PyNumber_Long(PyObject *o) /* The below check is done in PyLong_FromUnicode(). */ return PyLong_FromUnicodeObject(o, 10); - if (PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) == 0) { + if (PyBytes_Check(o)) /* need to do extra error checking that PyLong_FromString() * doesn't do. In particular int('9\x005') must raise an * exception, not truncate at the null. */ - PyObject *result = _PyLong_FromBytes(view.buf, view.len, 10); + return _PyLong_FromBytes(PyBytes_AS_STRING(o), + PyBytes_GET_SIZE(o), 10); + + if (PyByteArray_Check(o)) + return _PyLong_FromBytes(PyByteArray_AS_STRING(o), + PyByteArray_GET_SIZE(o), 10); + + if (PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) == 0) { + PyObject *result, *bytes; + + /* Copy to NUL-terminated buffer. 
*/ + bytes = PyBytes_FromStringAndSize((const char *)view.buf, view.len); + if (bytes == NULL) { + PyBuffer_Release(&view); + return NULL; + } + result = _PyLong_FromBytes(PyBytes_AS_STRING(bytes), + PyBytes_GET_SIZE(bytes), 10); + Py_DECREF(bytes); PyBuffer_Release(&view); return result; } @@ -2131,7 +2149,7 @@ PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) /* PyObject_Call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); call = func->ob_type->tp_call; diff --git a/Objects/complexobject.c b/Objects/complexobject.c index dc1212e4..a5bfb667 100644 --- a/Objects/complexobject.c +++ b/Objects/complexobject.c @@ -767,7 +767,6 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v) int got_bracket=0; PyObject *s_buffer = NULL; Py_ssize_t len; - Py_buffer view = {NULL, NULL}; if (PyUnicode_Check(v)) { s_buffer = _PyUnicode_TransformDecimalAndSpaceToASCII(v); @@ -777,10 +776,6 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v) if (s == NULL) goto error; } - else if (PyObject_GetBuffer(v, &view, PyBUF_SIMPLE) == 0) { - s = (const char *)view.buf; - len = view.len; - } else { PyErr_Format(PyExc_TypeError, "complex() argument must be a string or a number, not '%.200s'", @@ -895,7 +890,6 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v) if (s-start != len) goto parse_error; - PyBuffer_Release(&view); Py_XDECREF(s_buffer); return complex_subtype_from_doubles(type, x, y); @@ -903,7 +897,6 @@ complex_subtype_from_string(PyTypeObject *type, PyObject *v) PyErr_SetString(PyExc_ValueError, "complex() arg is a malformed string"); error: - PyBuffer_Release(&view); Py_XDECREF(s_buffer); return NULL; } diff --git a/Objects/dictobject.c b/Objects/dictobject.c index 5a7de9e5..624ae9b8 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -1242,6 +1242,7 @@ _PyDict_SetItem_KnownHash(PyObject *op, PyObject *key, PyObject *value, } assert(key); assert(value); + assert(hash != -1); mp = (PyDictObject *)op; /* insertdict() handles any resizing that might be necessary */ @@ -1290,6 +1291,42 @@ PyDict_DelItem(PyObject *op, PyObject *key) return 0; } +int +_PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) +{ + PyDictObject *mp; + PyDictKeyEntry *ep; + PyObject *old_key, *old_value; + PyObject **value_addr; + + if (!PyDict_Check(op)) { + PyErr_BadInternalCall(); + return -1; + } + assert(key); + assert(hash != -1); + mp = (PyDictObject *)op; + ep = (mp->ma_keys->dk_lookup)(mp, key, hash, &value_addr); + if (ep == NULL) + return -1; + if (*value_addr == NULL) { + _PyErr_SetKeyError(key); + return -1; + } + old_value = *value_addr; + *value_addr = NULL; + mp->ma_used--; + if (!_PyDict_HasSplitTable(mp)) { + ENSURE_ALLOWS_DELETIONS(mp); + old_key = ep->me_key; + Py_INCREF(dummy); + ep->me_key = dummy; + Py_DECREF(old_key); + } + Py_DECREF(old_value); + return 0; +} + void PyDict_Clear(PyObject *op) { diff --git a/Objects/exceptions.c b/Objects/exceptions.c index a2759978..f5a1a2b2 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -874,7 +874,7 @@ OSError_init(PyOSErrorObject *self, PyObject *args, PyObject *kwds); static int oserror_use_init(PyTypeObject *type) { - /* When __init__ is defined in a OSError subclass, we want any + /* When __init__ is defined in an OSError subclass, we want any extraneous argument to __new__ to be ignored. 
The only reasonable solution, given __new__ takes a variable number of arguments, is to defer arg parsing and initialization to __init__. diff --git a/Objects/fileobject.c b/Objects/fileobject.c index 1b184100..234d07e5 100644 --- a/Objects/fileobject.c +++ b/Objects/fileobject.c @@ -372,8 +372,11 @@ PyFile_NewStdPrinter(int fd) static PyObject * stdprinter_write(PyStdPrinter_Object *self, PyObject *args) { + PyObject *unicode; + PyObject *bytes = NULL; char *str; Py_ssize_t n; + int err; if (self->fd < 0) { /* fd might be invalid on Windows @@ -383,13 +386,30 @@ stdprinter_write(PyStdPrinter_Object *self, PyObject *args) Py_RETURN_NONE; } - /* encode Unicode to UTF-8 */ - if (!PyArg_ParseTuple(args, "s", &str)) + if (!PyArg_ParseTuple(args, "U", &unicode)) return NULL; - n = _Py_write(self->fd, str, strlen(str)); + /* encode Unicode to UTF-8 */ + str = PyUnicode_AsUTF8AndSize(unicode, &n); + if (str == NULL) { + PyErr_Clear(); + bytes = _PyUnicode_AsUTF8String(unicode, "backslashreplace"); + if (bytes == NULL) + return NULL; + if (PyBytes_AsStringAndSize(bytes, &str, &n) < 0) { + Py_DECREF(bytes); + return NULL; + } + } + + n = _Py_write(self->fd, str, n); + /* save errno, it can be modified indirectly by Py_XDECREF() */ + err = errno; + + Py_XDECREF(bytes); + if (n == -1) { - if (errno == EAGAIN) { + if (err == EAGAIN) { PyErr_Clear(); Py_RETURN_NONE; } diff --git a/Objects/floatobject.c b/Objects/floatobject.c index d6819814..b8d6f2b5 100644 --- a/Objects/floatobject.c +++ b/Objects/floatobject.c @@ -144,9 +144,24 @@ PyFloat_FromString(PyObject *v) return NULL; } } + else if (PyBytes_Check(v)) { + s = PyBytes_AS_STRING(v); + len = PyBytes_GET_SIZE(v); + } + else if (PyByteArray_Check(v)) { + s = PyByteArray_AS_STRING(v); + len = PyByteArray_GET_SIZE(v); + } else if (PyObject_GetBuffer(v, &view, PyBUF_SIMPLE) == 0) { s = (const char *)view.buf; len = view.len; + /* Copy to NUL-terminated buffer. 
*/ + s_buffer = PyBytes_FromStringAndSize(s, len); + if (s_buffer == NULL) { + PyBuffer_Release(&view); + return NULL; + } + s = PyBytes_AS_STRING(s_buffer); } else { PyErr_Format(PyExc_TypeError, diff --git a/Objects/longobject.c b/Objects/longobject.c index 40da0b15..5f224557 100644 --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -4495,11 +4495,13 @@ _PyLong_GCD(PyObject *aarg, PyObject *barg) simple: assert(Py_REFCNT(a) > 0); assert(Py_REFCNT(b) > 0); -#if LONG_MAX >> 2*PyLong_SHIFT +/* Issue #24999: use two shifts instead of ">> 2*PyLong_SHIFT" to avoid + undefined behaviour when LONG_MAX type is smaller than 60 bits */ +#if LONG_MAX >> PyLong_SHIFT >> PyLong_SHIFT /* a fits into a long, so b must too */ x = PyLong_AsLong((PyObject *)a); y = PyLong_AsLong((PyObject *)b); -#elif defined(PY_LONG_LONG) && PY_LLONG_MAX >> 2*PyLong_SHIFT +#elif defined(PY_LONG_LONG) && PY_LLONG_MAX >> PyLong_SHIFT >> PyLong_SHIFT x = PyLong_AsLongLong((PyObject *)a); y = PyLong_AsLongLong((PyObject *)b); #else @@ -4516,9 +4518,9 @@ simple: y = x % y; x = t; } -#if LONG_MAX >> 2*PyLong_SHIFT +#if LONG_MAX >> PyLong_SHIFT >> PyLong_SHIFT return PyLong_FromLong(x); -#elif defined(PY_LONG_LONG) && PY_LLONG_MAX >> 2*PyLong_SHIFT +#elif defined(PY_LONG_LONG) && PY_LLONG_MAX >> PyLong_SHIFT >> PyLong_SHIFT return PyLong_FromLongLong(x); #else # error "_PyLong_GCD" diff --git a/Objects/memoryobject.c b/Objects/memoryobject.c index 74cad7dc..10162cb9 100644 --- a/Objects/memoryobject.c +++ b/Objects/memoryobject.c @@ -2156,8 +2156,23 @@ static PyObject * memory_hex(PyMemoryViewObject *self, PyObject *dummy) { Py_buffer *src = VIEW_ADDR(self); + PyObject *bytes; + PyObject *ret; + CHECK_RELEASED(self); - return _Py_strhex(src->buf, src->len); + + if (MV_C_CONTIGUOUS(self->flags)) { + return _Py_strhex(src->buf, src->len); + } + + bytes = memory_tobytes(self, dummy); + if (bytes == NULL) + return NULL; + + ret = _Py_strhex(PyBytes_AS_STRING(bytes), Py_SIZE(bytes)); + Py_DECREF(bytes); + + return ret; } static PyObject * @@ -2227,7 +2242,7 @@ ptr_from_tuple(Py_buffer *view, PyObject *tup) PyExc_IndexError); if (index == -1 && PyErr_Occurred()) return NULL; - ptr = lookup_dimension(view, ptr, dim, index); + ptr = lookup_dimension(view, ptr, (int)dim, index); if (ptr == NULL) return NULL; } diff --git a/Objects/methodobject.c b/Objects/methodobject.c index 18177229..946357f2 100644 --- a/Objects/methodobject.c +++ b/Objects/methodobject.c @@ -89,7 +89,7 @@ PyCFunction_Call(PyObject *func, PyObject *args, PyObject *kwds) /* PyCFunction_Call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); flags = PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST); diff --git a/Objects/object.c b/Objects/object.c index 11b7bff4..6fc4df16 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -475,7 +475,7 @@ PyObject_Repr(PyObject *v) #ifdef Py_DEBUG /* PyObject_Repr() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/Objects/odictobject.c b/Objects/odictobject.c index 7dbaa890..5ad88bca 100644 --- a/Objects/odictobject.c +++ b/Objects/odictobject.c @@ -88,17 +88,16 @@ For adding nodes: * _odict_add_head(od, node) * _odict_add_tail(od, node) -* _odict_add_new_node(od, key) +* _odict_add_new_node(od, 
key, hash) For removing nodes: -* _odict_pop_node(od, node, key) -* _odict_clear_node(od, node) +* _odict_clear_node(od, node, key, hash) * _odict_clear_nodes(od, clear_each) Others: -* _odict_initialize(od) +* _odict_find_node_hash(od, key, hash) * _odict_find_node(od, key) * _odict_keys_equal(od1, od2) @@ -483,10 +482,12 @@ struct _odictobject { PyDictObject od_dict; /* the underlying dict */ _ODictNode *od_first; /* first node in the linked list, if any */ _ODictNode *od_last; /* last node in the linked list, if any */ - /* od_fast_nodes and od_resize_sentinel are managed by _odict_resize() + /* od_fast_nodes, od_fast_nodes_size and od_resize_sentinel are managed + * by _odict_resize(). * Note that we rely on implementation details of dict for both. */ _ODictNode **od_fast_nodes; /* hash table that mirrors the dict table */ - Py_uintptr_t od_resize_sentinel; /* changes if odict should be resized */ + Py_ssize_t od_fast_nodes_size; + void *od_resize_sentinel; /* changes if odict should be resized */ size_t od_state; /* incremented whenever the LL changes */ PyObject *od_inst_dict; /* OrderedDict().__dict__ */ @@ -532,7 +533,7 @@ _odict_free_fast_nodes(PyODictObject *od) { /* Return the index into the hash table, regardless of a valid node. */ static Py_ssize_t -_odict_get_index_hash(PyODictObject *od, PyObject *key, Py_hash_t hash) +_odict_get_index_raw(PyODictObject *od, PyObject *key, Py_hash_t hash) { PyObject **value_addr = NULL; PyDictKeyEntry *ep; @@ -563,7 +564,7 @@ _odict_resize(PyODictObject *od) { /* Copy the current nodes into the table. */ _odict_FOREACH(od, node) { - i = _odict_get_index_hash(od, _odictnode_KEY(node), + i = _odict_get_index_raw(od, _odictnode_KEY(node), _odictnode_HASH(node)); if (i < 0) { PyMem_FREE(fast_nodes); @@ -575,51 +576,57 @@ _odict_resize(PyODictObject *od) { /* Replace the old fast nodes table. */ _odict_free_fast_nodes(od); od->od_fast_nodes = fast_nodes; - od->od_resize_sentinel = (Py_uintptr_t)(((PyDictObject *)od)->ma_keys); + od->od_fast_nodes_size = size; + od->od_resize_sentinel = ((PyDictObject *)od)->ma_keys; return 0; } /* Return the index into the hash table, regardless of a valid node. */ static Py_ssize_t -_odict_get_index(PyODictObject *od, PyObject *key) +_odict_get_index(PyODictObject *od, PyObject *key, Py_hash_t hash) { - Py_hash_t hash; PyDictKeysObject *keys; assert(key != NULL); - hash = PyObject_Hash(key); - if (hash == -1) - return -1; keys = ((PyDictObject *)od)->ma_keys; /* Ensure od_fast_nodes and dk_entries are in sync. */ - if (od->od_resize_sentinel != (Py_uintptr_t)keys) { + if (od->od_resize_sentinel != keys || + od->od_fast_nodes_size != keys->dk_size) { int resize_res = _odict_resize(od); if (resize_res < 0) return -1; } - return _odict_get_index_hash(od, key, hash); + return _odict_get_index_raw(od, key, hash); } -static int -_odict_initialize(PyODictObject *od) +/* Returns NULL if there was some error or the key was not found. */ +static _ODictNode * +_odict_find_node_hash(PyODictObject *od, PyObject *key, Py_hash_t hash) { - od->od_state = 0; - _odict_FIRST(od) = NULL; - _odict_LAST(od) = NULL; - return _odict_resize((PyODictObject *)od); + Py_ssize_t index; + + if (_odict_EMPTY(od)) + return NULL; + index = _odict_get_index(od, key, hash); + if (index < 0) + return NULL; + return od->od_fast_nodes[index]; } -/* Returns NULL if there was some error or the key was not found. 
*/ static _ODictNode * _odict_find_node(PyODictObject *od, PyObject *key) { Py_ssize_t index; + Py_hash_t hash; if (_odict_EMPTY(od)) return NULL; - index = _odict_get_index(od, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + index = _odict_get_index(od, key, hash); if (index < 0) return NULL; return od->od_fast_nodes[index]; @@ -628,54 +635,38 @@ _odict_find_node(PyODictObject *od, PyObject *key) static void _odict_add_head(PyODictObject *od, _ODictNode *node) { - if (_odict_FIRST(od) == NULL) { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = NULL; - _odict_FIRST(od) = node; + _odictnode_PREV(node) = NULL; + _odictnode_NEXT(node) = _odict_FIRST(od); + if (_odict_FIRST(od) == NULL) _odict_LAST(od) = node; - } - else { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = _odict_FIRST(od); - _odict_FIRST(od) = node; + else _odictnode_PREV(_odict_FIRST(od)) = node; - } + _odict_FIRST(od) = node; od->od_state++; } static void _odict_add_tail(PyODictObject *od, _ODictNode *node) { - if (_odict_LAST(od) == NULL) { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = NULL; + _odictnode_PREV(node) = _odict_LAST(od); + _odictnode_NEXT(node) = NULL; + if (_odict_LAST(od) == NULL) _odict_FIRST(od) = node; - _odict_LAST(od) = node; - } - else { - _odictnode_PREV(node) = _odict_LAST(od); - _odictnode_NEXT(node) = NULL; + else _odictnode_NEXT(_odict_LAST(od)) = node; - _odict_LAST(od) = node; - } - + _odict_LAST(od) = node; od->od_state++; } /* adds the node to the end of the list */ static int -_odict_add_new_node(PyODictObject *od, PyObject *key) +_odict_add_new_node(PyODictObject *od, PyObject *key, Py_hash_t hash) { - Py_hash_t hash; Py_ssize_t i; _ODictNode *node; Py_INCREF(key); - hash = PyObject_Hash(key); - if (hash == -1) - return -1; - - i = _odict_get_index(od, key); + i = _odict_get_index(od, key, hash); if (i < 0) { if (!PyErr_Occurred()) PyErr_SetObject(PyExc_KeyError, key); @@ -729,22 +720,10 @@ _odict_remove_node(PyODictObject *od, _ODictNode *node) od->od_state++; } -static _ODictNode * -_odict_pop_node(PyODictObject *od, _ODictNode *node, PyObject *key) -{ - if (node == NULL) { - node = _odict_find_node(od, key); - if (node == NULL) - return NULL; - } - _odict_remove_node(od, node); - return node; -} - /* If someone calls PyDict_DelItem() directly on an OrderedDict, we'll get all sorts of problems here. In PyODict_DelItem we make sure to call _odict_clear_node first. - + This matters in the case of colliding keys. Suppose we add 3 keys: [A, B, C], where the hash of C collides with A and the next possible index in the hash table is occupied by B. If we remove B then for C @@ -758,7 +737,8 @@ _odict_pop_node(PyODictObject *od, _ODictNode *node, PyObject *key) we modify od_fast_nodes. */ static int -_odict_clear_node(PyODictObject *od, _ODictNode *node, PyObject *key) +_odict_clear_node(PyODictObject *od, _ODictNode *node, PyObject *key, + Py_hash_t hash) { Py_ssize_t i; @@ -768,7 +748,7 @@ _odict_clear_node(PyODictObject *od, _ODictNode *node, PyObject *key) return 0; } - i = _odict_get_index(od, key); + i = _odict_get_index(od, key, hash); if (i < 0) return PyErr_Occurred() ? 
-1 : 0; @@ -983,7 +963,7 @@ odict_sizeof(PyODictObject *od) return NULL; res += temp; - res += sizeof(_ODictNode) * _odict_FAST_SIZE(od); /* od_fast_nodes */ + res += sizeof(_ODictNode *) * _odict_FAST_SIZE(od); /* od_fast_nodes */ if (!_odict_EMPTY(od)) { res += sizeof(_ODictNode) * PyODict_SIZE(od); /* linked-list */ } @@ -998,81 +978,45 @@ static PyObject * odict_reduce(register PyODictObject *od) { _Py_IDENTIFIER(__dict__); - _Py_IDENTIFIER(__class__); - PyObject *vars = NULL, *ns = NULL, *result = NULL, *cls = NULL; - PyObject *items_iter = NULL, *items = NULL, *args = NULL; + _Py_IDENTIFIER(items); + PyObject *dict = NULL, *result = NULL; + PyObject *items_iter, *items, *args = NULL; /* capture any instance state */ - vars = _PyObject_GetAttrId((PyObject *)od, &PyId___dict__); - if (vars == NULL) + dict = _PyObject_GetAttrId((PyObject *)od, &PyId___dict__); + if (dict == NULL) goto Done; else { - PyObject *empty, *od_vars, *iterator, *key; - int ns_len; - /* od.__dict__ isn't necessarily a dict... */ - ns = PyObject_CallMethod((PyObject *)vars, "copy", NULL); - if (ns == NULL) + Py_ssize_t dict_len = PyObject_Length(dict); + if (dict_len == -1) goto Done; - empty = PyODict_New(); - if (empty == NULL) - goto Done; - od_vars = _PyObject_GetAttrId((PyObject *)empty, &PyId___dict__); - Py_DECREF(empty); - if (od_vars == NULL) - goto Done; - iterator = PyObject_GetIter(od_vars); - Py_DECREF(od_vars); - if (iterator == NULL) - goto Done; - - while ( (key = PyIter_Next(iterator)) ) { - if (PyMapping_HasKey(ns, key) && PyMapping_DelItem(ns, key) != 0) { - Py_DECREF(iterator); - Py_DECREF(key); - goto Done; - } - Py_DECREF(key); - } - Py_DECREF(iterator); - if (PyErr_Occurred()) - goto Done; - - ns_len = PyObject_Length(ns); - if (ns_len == -1) - goto Done; - if (!ns_len) { - /* nothing novel to pickle in od.__dict__ */ - Py_CLEAR(ns); + if (!dict_len) { + /* nothing to pickle in od.__dict__ */ + Py_CLEAR(dict); } } /* build the result */ - cls = _PyObject_GetAttrId((PyObject *)od, &PyId___class__); - if (cls == NULL) - goto Done; - args = PyTuple_New(0); if (args == NULL) goto Done; - items = PyObject_CallMethod((PyObject *)od, "items", NULL); + items = _PyObject_CallMethodIdObjArgs((PyObject *)od, &PyId_items, NULL); if (items == NULL) goto Done; items_iter = PyObject_GetIter(items); + Py_DECREF(items); if (items_iter == NULL) goto Done; - result = PyTuple_Pack(5, cls, args, ns ? ns : Py_None, Py_None, items_iter); + result = PyTuple_Pack(5, Py_TYPE(od), args, dict ? dict : Py_None, Py_None, items_iter); + Py_DECREF(items_iter); Done: - Py_XDECREF(vars); - Py_XDECREF(ns); - Py_XDECREF(cls); + Py_XDECREF(dict); Py_XDECREF(args); - Py_XDECREF(items); - Py_XDECREF(items_iter); return result; } @@ -1157,7 +1101,8 @@ odict_pop(PyObject *od, PyObject *args, PyObject *kwargs) } static PyObject * -_odict_popkey(PyObject *od, PyObject *key, PyObject *failobj) +_odict_popkey_hash(PyObject *od, PyObject *key, PyObject *failobj, + Py_hash_t hash) { _ODictNode *node; PyObject *value = NULL; @@ -1165,13 +1110,13 @@ _odict_popkey(PyObject *od, PyObject *key, PyObject *failobj) /* Pop the node first to avoid a possible dict resize (due to eval loop reentrancy) and complications due to hash collision resolution. 
*/ - node = _odict_find_node((PyODictObject *)od, key); + node = _odict_find_node_hash((PyODictObject *)od, key, hash); if (node == NULL) { if (PyErr_Occurred()) return NULL; } else { - int res = _odict_clear_node((PyODictObject *)od, node, key); + int res = _odict_clear_node((PyODictObject *)od, node, key, hash); if (res < 0) { return NULL; } @@ -1180,8 +1125,14 @@ _odict_popkey(PyObject *od, PyObject *key, PyObject *failobj) /* Now delete the value from the dict. */ if (PyODict_CheckExact(od)) { if (node != NULL) { - /* We could do PyDict_GetItem() and PyDict_DelItem() directly... */ - value = _PyDict_Pop((PyDictObject *)od, key, failobj); + value = _PyDict_GetItem_KnownHash(od, key, hash); /* borrowed */ + if (value != NULL) { + Py_INCREF(value); + if (_PyDict_DelItem_KnownHash(od, key, hash) < 0) { + Py_DECREF(value); + return NULL; + } + } } } else { @@ -1212,6 +1163,16 @@ _odict_popkey(PyObject *od, PyObject *key, PyObject *failobj) return value; } +static PyObject * +_odict_popkey(PyObject *od, PyObject *key, PyObject *failobj) +{ + Py_hash_t hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + + return _odict_popkey_hash(od, key, failobj, hash); +} + /* popitem() */ PyDoc_STRVAR(odict_popitem__doc__, @@ -1224,38 +1185,27 @@ static PyObject * odict_popitem(PyObject *od, PyObject *args, PyObject *kwargs) { static char *kwlist[] = {"last", 0}; - PyObject *key, *value, *item = NULL, *last = NULL; + PyObject *key, *value, *item = NULL; _ODictNode *node; - int pos = -1; - - if (_odict_EMPTY((PyODictObject *)od)) { - PyErr_SetString(PyExc_KeyError, "dictionary is empty"); - return NULL; - } + int last = 1; /* pull the item */ /* borrowed */ - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:popitem", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|p:popitem", kwlist, &last)) { return NULL; } - if (last != NULL) { - int is_true; - is_true = PyObject_IsTrue(last); - if (is_true == -1) - return NULL; - pos = is_true ? -1 : 0; + if (_odict_EMPTY(od)) { + PyErr_SetString(PyExc_KeyError, "dictionary is empty"); + return NULL; } - if (pos == 0) - node = _odict_FIRST((PyODictObject *)od); - else - node = _odict_LAST((PyODictObject *)od); + node = last ? 
_odict_LAST(od) : _odict_FIRST(od); key = _odictnode_KEY(node); Py_INCREF(key); - value = _odict_popkey(od, key, NULL); + value = _odict_popkey_hash(od, key, NULL, _odictnode_HASH(node)); if (value == NULL) return NULL; item = PyTuple_Pack(2, key, value); @@ -1314,6 +1264,10 @@ odict_clear(register PyODictObject *od) /* copy() */ +/* forward */ +static int _PyODict_SetItem_KnownHash(PyObject *, PyObject *, PyObject *, + Py_hash_t); + PyDoc_STRVAR(odict_copy__doc__, "od.copy() -> a shallow copy of od"); static PyObject * @@ -1338,7 +1292,8 @@ odict_copy(register PyODictObject *od) PyErr_SetObject(PyExc_KeyError, key); goto fail; } - if (PyODict_SetItem((PyObject *)od_copy, key, value) != 0) + if (_PyODict_SetItem_KnownHash((PyObject *)od_copy, key, value, + _odictnode_HASH(node)) != 0) goto fail; } } @@ -1394,58 +1349,39 @@ static PyObject * odict_move_to_end(PyODictObject *od, PyObject *args, PyObject *kwargs) { static char *kwlist[] = {"key", "last", 0}; - PyObject *key, *last = NULL; - Py_ssize_t pos = -1; + PyObject *key; + int last = 1; + _ODictNode *node; - /* both borrowed */ - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:move_to_end", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|p:move_to_end", kwlist, &key, &last)) { return NULL; } + if (_odict_EMPTY(od)) { PyErr_SetObject(PyExc_KeyError, key); return NULL; } - if (last != NULL) { - int is_true; - is_true = PyObject_IsTrue(last); - if (is_true == -1) - return NULL; - pos = is_true ? -1 : 0; - } - if (pos == 0) { - /* Only move if not already the first one. */ - PyObject *first_key = _odictnode_KEY(_odict_FIRST(od)); - int not_equal = PyObject_RichCompareBool(key, first_key, Py_NE); - if (not_equal == -1) + node = last ? _odict_LAST(od) : _odict_FIRST(od); + if (key != _odictnode_KEY(node)) { + node = _odict_find_node(od, key); + if (node == NULL) { + if (!PyErr_Occurred()) + PyErr_SetObject(PyExc_KeyError, key); return NULL; - if (not_equal) { - _ODictNode *node = _odict_pop_node(od, NULL, key); - if (node != NULL) { - _odict_add_head(od, node); - } - else { - if (!PyErr_Occurred()) - PyErr_SetObject(PyExc_KeyError, key); - return NULL; - } } - } - else if (pos == -1) { - /* Only move if not already the last one. */ - PyObject *last_key = _odictnode_KEY(_odict_LAST(od)); - int not_equal = PyObject_RichCompareBool(key, last_key, Py_NE); - if (not_equal == -1) - return NULL; - if (not_equal) { - _ODictNode *node = _odict_pop_node(od, NULL, key); - if (node != NULL) { + if (last) { + /* Only move if not already the last one. */ + if (node != _odict_LAST(od)) { + _odict_remove_node(od, node); _odict_add_tail(od, node); } - else { - if (!PyErr_Occurred()) - PyErr_SetObject(PyExc_KeyError, key); - return NULL; + } + else { + /* Only move if not already the first one. */ + if (node != _odict_FIRST(od)) { + _odict_remove_node(od, node); + _odict_add_head(od, node); } } } @@ -1531,17 +1467,28 @@ static PyMemberDef odict_members[] = { static void odict_dealloc(PyODictObject *self) { + PyThreadState *tstate = PyThreadState_GET(); + PyObject_GC_UnTrack(self); - Py_TRASHCAN_SAFE_BEGIN(self); + Py_TRASHCAN_SAFE_BEGIN(self) + Py_XDECREF(self->od_inst_dict); if (self->od_weakreflist != NULL) PyObject_ClearWeakRefs((PyObject *)self); _odict_clear_nodes(self); - Py_TRASHCAN_SAFE_END(self); - /* must be last */ + /* Call the base tp_dealloc(). 
Since it too uses the trashcan mechanism, + * temporarily decrement trash_delete_nesting to prevent triggering it + * and putting the partially deallocated object on the trashcan's + * to-be-deleted-later list. + */ + --tstate->trash_delete_nesting; + assert(_tstate->trash_delete_nesting < PyTrash_UNWIND_LEVEL); PyDict_Type.tp_dealloc((PyObject *)self); + ++tstate->trash_delete_nesting; + + Py_TRASHCAN_SAFE_END(self) }; /* tp_repr */ @@ -1550,24 +1497,26 @@ static PyObject * odict_repr(PyODictObject *self) { int i; - const char *formatstr; - _Py_IDENTIFIER(__class__); - _Py_IDENTIFIER(__name__); - Py_ssize_t count = -1; - PyObject *pieces = NULL, *result = NULL, *cls = NULL; - PyObject *classname = NULL, *format = NULL, *args = NULL; + _Py_IDENTIFIER(items); + PyObject *pieces = NULL, *result = NULL; + const char *classname; + + classname = strrchr(Py_TYPE(self)->tp_name, '.'); + if (classname == NULL) + classname = Py_TYPE(self)->tp_name; + else + classname++; + + if (PyODict_SIZE(self) == 0) + return PyUnicode_FromFormat("%s()", classname); i = Py_ReprEnter((PyObject *)self); if (i != 0) { return i > 0 ? PyUnicode_FromString("...") : NULL; } - if (PyODict_SIZE(self) == 0) { - /* "OrderedDict()" */ - goto Finish; - } - if (PyODict_CheckExact(self)) { + Py_ssize_t count = 0; _ODictNode *node; pieces = PyList_New(PyODict_SIZE(self)); if (pieces == NULL) @@ -1586,49 +1535,35 @@ odict_repr(PyODictObject *self) if (pair == NULL) goto Done; - PyList_SET_ITEM(pieces, ++count, pair); /* steals reference */ + if (count < PyList_GET_SIZE(pieces)) + PyList_SET_ITEM(pieces, count, pair); /* steals reference */ + else { + if (PyList_Append(pieces, pair) < 0) { + Py_DECREF(pair); + goto Done; + } + Py_DECREF(pair); + } + count++; } + if (count < PyList_GET_SIZE(pieces)) + PyList_GET_SIZE(pieces) = count; } else { - PyObject *items = PyObject_CallMethod((PyObject *)self, "items", NULL); + PyObject *items = _PyObject_CallMethodIdObjArgs((PyObject *)self, + &PyId_items, NULL); if (items == NULL) goto Done; pieces = PySequence_List(items); Py_DECREF(items); - if(pieces == NULL) + if (pieces == NULL) goto Done; } -Finish: - cls = _PyObject_GetAttrId((PyObject *)self, &PyId___class__); - if (cls == NULL) - goto Done; - classname = _PyObject_GetAttrId(cls, &PyId___name__); - if (classname == NULL) - goto Done; + result = PyUnicode_FromFormat("%s(%R)", classname, pieces); - if (pieces == NULL) { - formatstr = "%s()"; - args = PyTuple_Pack(1, classname); - } - else { - formatstr = "%s(%r)"; - args = PyTuple_Pack(2, classname, pieces); - } - if (args == NULL) - goto Done; - - format = PyUnicode_InternFromString(formatstr); - if (format == NULL) - goto Done; - - result = PyUnicode_Format(format, args); Done: Py_XDECREF(pieces); - Py_XDECREF(cls); - Py_XDECREF(classname); - Py_XDECREF(format); - Py_XDECREF(args); Py_ReprLeave((PyObject *)self); return result; }; @@ -1739,14 +1674,28 @@ odict_init(PyObject *self, PyObject *args, PyObject *kwds) static PyObject * odict_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { - PyObject *od = PyDict_Type.tp_new(type, args, kwds); - if (od != NULL) { - if (_odict_initialize((PyODictObject *)od) < 0) - return NULL; - ((PyODictObject *)od)->od_inst_dict = PyDict_New(); - ((PyODictObject *)od)->od_weakreflist = NULL; + PyObject *dict; + PyODictObject *od; + + dict = PyDict_New(); + if (dict == NULL) + return NULL; + + od = (PyODictObject *)PyDict_Type.tp_new(type, args, kwds); + if (od == NULL) { + Py_DECREF(dict); + return NULL; + } + + od->od_inst_dict = dict; + /* 
type constructor fills the memory with zeros (see + PyType_GenericAlloc()), there is no need to set them to zero again */ + if (_odict_resize(od) < 0) { + Py_DECREF(od); + return NULL; } - return od; + + return (PyObject*)od; } /* PyODict_Type */ @@ -1803,22 +1752,44 @@ PyODict_New(void) { return odict_new(&PyODict_Type, NULL, NULL); }; -int -PyODict_SetItem(PyObject *od, PyObject *key, PyObject *value) { - int res = PyDict_SetItem(od, key, value); +static int +_PyODict_SetItem_KnownHash(PyObject *od, PyObject *key, PyObject *value, + Py_hash_t hash) +{ + int res = _PyDict_SetItem_KnownHash(od, key, value, hash); if (res == 0) { - res = _odict_add_new_node((PyODictObject *)od, key); - /* XXX Revert setting the value on the dict? */ + res = _odict_add_new_node((PyODictObject *)od, key, hash); + if (res < 0) { + /* Revert setting the value on the dict */ + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + (void) _PyDict_DelItem_KnownHash(od, key, hash); + _PyErr_ChainExceptions(exc, val, tb); + } } return res; }; int -PyODict_DelItem(PyObject *od, PyObject *key) { - int res = _odict_clear_node((PyODictObject *)od, NULL, key); +PyODict_SetItem(PyObject *od, PyObject *key, PyObject *value) +{ + Py_hash_t hash = PyObject_Hash(key); + if (hash == -1) + return -1; + return _PyODict_SetItem_KnownHash(od, key, value, hash); +}; + +int +PyODict_DelItem(PyObject *od, PyObject *key) +{ + int res; + Py_hash_t hash = PyObject_Hash(key); + if (hash == -1) + return -1; + res = _odict_clear_node((PyODictObject *)od, NULL, key, hash); if (res < 0) return -1; - return PyDict_DelItem(od, key); + return _PyDict_DelItem_KnownHash(od, key, hash); }; @@ -1887,6 +1858,8 @@ odictiter_nextkey(odictiterobject *di) /* Get the key. */ node = _odict_find_node(di->di_odict, di->di_current); if (node == NULL) { + if (!PyErr_Occurred()) + PyErr_SetObject(PyExc_KeyError, di->di_current); /* Must have been deleted. */ Py_CLEAR(di->di_current); return NULL; @@ -1914,7 +1887,7 @@ done: static PyObject * odictiter_iternext(odictiterobject *di) { - PyObject *value; + PyObject *result, *value; PyObject *key = odictiter_nextkey(di); /* new reference */ if (key == NULL) @@ -1925,53 +1898,44 @@ odictiter_iternext(odictiterobject *di) return key; } - /* Handle the items case. */ - if (di->kind & _odict_ITER_KEYS) { - PyObject *result = di->di_result; - - value = PyODict_GetItem((PyObject *)di->di_odict, key); /* borrowed */ - if (value == NULL) { - if (!PyErr_Occurred()) - PyErr_SetObject(PyExc_KeyError, key); - Py_DECREF(key); - goto done; - } - Py_INCREF(value); + value = PyODict_GetItem((PyObject *)di->di_odict, key); /* borrowed */ + if (value == NULL) { + if (!PyErr_Occurred()) + PyErr_SetObject(PyExc_KeyError, key); + Py_DECREF(key); + goto done; + } + Py_INCREF(value); - if (result->ob_refcnt == 1) { - /* not in use so we can reuse it - * (the common case during iteration) */ - Py_INCREF(result); - Py_DECREF(PyTuple_GET_ITEM(result, 0)); /* borrowed */ - Py_DECREF(PyTuple_GET_ITEM(result, 1)); /* borrowed */ - } - else { - result = PyTuple_New(2); - if (result == NULL) { - Py_DECREF(key); - Py_DECREF(value); - goto done; - } - } + /* Handle the values case. */ + if (!(di->kind & _odict_ITER_KEYS)) { + Py_DECREF(key); + return value; + } - PyTuple_SET_ITEM(result, 0, key); /* steals reference */ - PyTuple_SET_ITEM(result, 1, value); /* steals reference */ + /* Handle the items case. 
*/ + result = di->di_result; - return result; + if (Py_REFCNT(result) == 1) { + /* not in use so we can reuse it + * (the common case during iteration) */ + Py_INCREF(result); + Py_DECREF(PyTuple_GET_ITEM(result, 0)); /* borrowed */ + Py_DECREF(PyTuple_GET_ITEM(result, 1)); /* borrowed */ } - /* Handle the values case. */ else { - value = PyODict_GetItem((PyObject *)di->di_odict, key); - Py_DECREF(key); - if (value == NULL) { - if (!PyErr_Occurred()) - PyErr_SetObject(PyExc_KeyError, key); + result = PyTuple_New(2); + if (result == NULL) { + Py_DECREF(key); + Py_DECREF(value); goto done; } - Py_INCREF(value); - return value; } + PyTuple_SET_ITEM(result, 0, key); /* steals reference */ + PyTuple_SET_ITEM(result, 1, value); /* steals reference */ + return result; + done: Py_CLEAR(di->di_current); Py_CLEAR(di->di_odict); @@ -2402,25 +2366,30 @@ static PyObject * mutablemapping_update(PyObject *self, PyObject *args, PyObject *kwargs) { int res = 0; - Py_ssize_t len = (args != NULL) ? PyObject_Size(args) : 0; + Py_ssize_t len; + _Py_IDENTIFIER(items); + _Py_IDENTIFIER(keys); /* first handle args, if any */ - if (len < 0) /* PyObject_Size raised an exception. */ - return NULL; - + assert(args == NULL || PyTuple_Check(args)); + len = (args != NULL) ? PyTuple_GET_SIZE(args) : 0; if (len > 1) { char *msg = "update() takes at most 1 positional argument (%d given)"; PyErr_Format(PyExc_TypeError, msg, len); return NULL; } - if (len == 1) { + if (len) { PyObject *other = PyTuple_GET_ITEM(args, 0); /* borrowed reference */ - if (other == NULL) - return NULL; + assert(other != NULL); Py_INCREF(other); - if (PyObject_HasAttrString(other, "items")) { /* never fails */ - PyObject *items = PyMapping_Items(other); + if (PyDict_CheckExact(other) || + _PyObject_HasAttrId(other, &PyId_items)) { /* never fails */ + PyObject *items; + if (PyDict_CheckExact(other)) + items = PyDict_Items(other); + else + items = _PyObject_CallMethodId(other, &PyId_items, NULL); Py_DECREF(other); if (items == NULL) return NULL; @@ -2429,9 +2398,9 @@ mutablemapping_update(PyObject *self, PyObject *args, PyObject *kwargs) if (res == -1) return NULL; } - else if (PyObject_HasAttrString(other, "keys")) { /* never fails */ + else if (_PyObject_HasAttrId(other, &PyId_keys)) { /* never fails */ PyObject *keys, *iterator, *key; - keys = PyObject_CallMethod(other, "keys", NULL); + keys = _PyObject_CallMethodIdObjArgs(other, &PyId_keys, NULL); if (keys == NULL) { Py_DECREF(other); return NULL; @@ -2467,16 +2436,10 @@ mutablemapping_update(PyObject *self, PyObject *args, PyObject *kwargs) } /* now handle kwargs */ - len = (kwargs != NULL) ? PyObject_Size(kwargs) : 0; - if (len < 0) /* PyObject_Size raised an exception. */ - return NULL; + assert(kwargs == NULL || PyDict_Check(kwargs)); + len = (kwargs != NULL) ? 
PyDict_Size(kwargs) : 0; if (len > 0) { - PyObject *items; - if (!PyMapping_Check(kwargs)) { - PyErr_SetString(PyExc_TypeError, "expected mapping for kwargs"); - return NULL; - } - items = PyMapping_Items(kwargs); + PyObject *items = PyDict_Items(kwargs); if (items == NULL) return NULL; res = mutablemapping_add_pairs(self, items); diff --git a/Objects/typeobject.c b/Objects/typeobject.c index bb83530d..f87d58fe 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -901,7 +901,7 @@ type_call(PyTypeObject *type, PyObject *args, PyObject *kwds) #ifdef Py_DEBUG /* type_call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif @@ -1965,6 +1965,12 @@ best_base(PyObject *bases) if (PyType_Ready(base_i) < 0) return NULL; } + if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) { + PyErr_Format(PyExc_TypeError, + "type '%.100s' is not an acceptable base type", + base_i->tp_name); + return NULL; + } candidate = solid_base(base_i); if (winner == NULL) { winner = candidate; @@ -2345,12 +2351,6 @@ type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds) if (base == NULL) { goto error; } - if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) { - PyErr_Format(PyExc_TypeError, - "type '%.100s' is not an acceptable base type", - base->tp_name); - goto error; - } dict = PyDict_Copy(orig_dict); if (dict == NULL) @@ -4100,6 +4100,12 @@ reduce_newobj(PyObject *obj, int proto) PyObject *newobj, *newargs, *state, *listitems, *dictitems; PyObject *result; + if (Py_TYPE(obj)->tp_new == NULL) { + PyErr_Format(PyExc_TypeError, + "can't pickle %s objects", + Py_TYPE(obj)->tp_name); + return NULL; + } if (_PyObject_GetNewArguments(obj, &args, &kwargs) < 0) return NULL; diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c index 9223c991..0b78301f 100644 --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -722,6 +722,11 @@ resize_compact(PyObject *unicode, Py_ssize_t length) } new_size = (struct_size + (length + 1) * char_size); + if (_PyUnicode_HAS_UTF8_MEMORY(unicode)) { + PyObject_DEL(_PyUnicode_UTF8(unicode)); + _PyUnicode_UTF8(unicode) = NULL; + _PyUnicode_UTF8_LENGTH(unicode) = 0; + } _Py_DEC_REFTOTAL; _Py_ForgetReference(unicode); @@ -1701,7 +1706,7 @@ PyUnicode_Resize(PyObject **p_unicode, Py_ssize_t length) return unicode_resize(p_unicode, length); } -/* Copy a ASCII or latin1 char* string into a Python Unicode string. +/* Copy an ASCII or latin1 char* string into a Python Unicode string. 
WARNING: The function doesn't copy the terminating null character and doesn't check the maximum character (may write a latin1 character in an @@ -3614,6 +3619,7 @@ PyUnicode_FSConverter(PyObject* arg, void* addr) void *data; if (arg == NULL) { Py_DECREF(*(PyObject**)addr); + *(PyObject**)addr = NULL; return 1; } if (PyBytes_Check(arg)) { @@ -4330,31 +4336,31 @@ PyUnicode_DecodeUTF7Stateful(const char *s, } else { /* now leaving a base-64 section */ inShift = 0; - s++; - if (surrogate) { - if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) - goto onError; - surrogate = 0; - } if (base64bits > 0) { /* left-over bits */ if (base64bits >= 6) { /* We've seen at least one base-64 character */ + s++; errmsg = "partial character in shift sequence"; goto utf7Error; } else { /* Some bits remain; they should be zero */ if (base64buffer != 0) { + s++; errmsg = "non-zero padding bits in shift sequence"; goto utf7Error; } } } - if (ch != '-') { + if (surrogate && DECODE_DIRECT(ch)) { + if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) + goto onError; + } + surrogate = 0; + if (ch == '-') { /* '-' is absorbed; other terminating characters are preserved */ - if (_PyUnicodeWriter_WriteCharInline(&writer, ch) < 0) - goto onError; + s++; } } } @@ -4368,6 +4374,7 @@ PyUnicode_DecodeUTF7Stateful(const char *s, } else { /* begin base64-encoded section */ inShift = 1; + surrogate = 0; shiftOutStart = writer.pos; base64bits = 0; base64buffer = 0; @@ -4399,6 +4406,7 @@ utf7Error: if (inShift && !consumed) { /* in shift sequence, no more to follow */ /* if we're in an inconsistent state, that's an error */ + inShift = 0; if (surrogate || (base64bits >= 6) || (base64bits > 0 && base64buffer != 0)) { @@ -7189,7 +7197,7 @@ error: } /* - * Encode a Unicode string to a Windows code page into a byte string using a + * Encode a Unicode string to a Windows code page into a byte string using an * error handler. 
* * Returns consumed characters if succeed, or raise an OSError and returns @@ -13291,6 +13299,7 @@ _PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer, if (maxchar > writer->maxchar || writer->readonly) { /* resize + widen */ + maxchar = Py_MAX(maxchar, writer->maxchar); newbuffer = PyUnicode_New(newlen, maxchar); if (newbuffer == NULL) return -1; diff --git a/PC/example_nt/example.c b/PC/example_nt/example.c deleted file mode 100644 index 669d11b2..00000000 --- a/PC/example_nt/example.c +++ /dev/null @@ -1,32 +0,0 @@ -#include "Python.h" - -static PyObject * -ex_foo(PyObject *self, PyObject *args) -{ - printf("Hello, world\n"); - Py_INCREF(Py_None); - return Py_None; -} - -static PyMethodDef example_methods[] = { - {"foo", ex_foo, METH_VARARGS, "foo() doc string"}, - {NULL, NULL} -}; - -static struct PyModuleDef examplemodule = { - PyModuleDef_HEAD_INIT, - "example", - "example module doc string", - -1, - example_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyMODINIT_FUNC -PyInit_example(void) -{ - return PyModule_Create(&examplemodule); -} diff --git a/PC/example_nt/example.sln b/PC/example_nt/example.sln deleted file mode 100644 index d8a3119f..00000000 --- a/PC/example_nt/example.sln +++ /dev/null @@ -1,21 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 8.00 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "example", "example.vcproj", "{A0608D6F-84ED-44AE-A2A6-A3CC7F4A4030}" - ProjectSection(ProjectDependencies) = postProject - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfiguration) = preSolution - Debug = Debug - Release = Release - EndGlobalSection - GlobalSection(ProjectConfiguration) = postSolution - {A0608D6F-84ED-44AE-A2A6-A3CC7F4A4030}.Debug.ActiveCfg = Debug|Win32 - {A0608D6F-84ED-44AE-A2A6-A3CC7F4A4030}.Debug.Build.0 = Debug|Win32 - {A0608D6F-84ED-44AE-A2A6-A3CC7F4A4030}.Release.ActiveCfg = Release|Win32 - {A0608D6F-84ED-44AE-A2A6-A3CC7F4A4030}.Release.Build.0 = Release|Win32 - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - EndGlobalSection - GlobalSection(ExtensibilityAddIns) = postSolution - EndGlobalSection -EndGlobal diff --git a/PC/example_nt/example.vcproj b/PC/example_nt/example.vcproj deleted file mode 100644 index d82f76e6..00000000 --- a/PC/example_nt/example.vcproj +++ /dev/null @@ -1,189 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/PC/example_nt/readme.txt b/PC/example_nt/readme.txt deleted file mode 100644 index d8efa74e..00000000 --- a/PC/example_nt/readme.txt +++ /dev/null @@ -1,183 +0,0 @@ -Example Python extension for Windows NT -======================================= - -This directory contains everything needed (except for the Python -distribution!) to build a Python extension module using Microsoft VC++. -Notice that you need to use the same compiler version that was used to build -Python itself. - -The simplest way to build this example is to use the distutils script -'setup.py'. To do this, simply execute: - - % python setup.py install - -after everything builds and installs, you can test it: - - % python -c "import example; example.foo()" - Hello, world - -See setup.py for more details. alternatively, see below for instructions on -how to build inside the Visual Studio environment. - -Visual Studio Build Instructions -================================ - -These are instructions how to build an extension using Visual C++. 
The -instructions and project files have not been updated to the latest VC -version. In general, it is recommended you use the 'setup.py' instructions -above. - -It has been tested with VC++ 7.1 on Python 2.4. You can also use earlier -versions of VC to build Python extensions, but the sample VC project file -(example.dsw in this directory) is in VC 7.1 format. - -COPY THIS DIRECTORY! --------------------- -This "example_nt" directory is a subdirectory of the PC directory, in order -to keep all the PC-specific files under the same directory. However, the -example_nt directory can't actually be used from this location. You first -need to copy or move it up one level, so that example_nt is a direct -sibling of the PC\ and Include\ directories. Do all your work from within -this new location -- sorry, but you'll be sorry if you don't. - -OPEN THE PROJECT ----------------- -From VC 7.1, use the - File -> Open Solution... -dialog (*not* the "File -> Open..." dialog!). Navigate to and select the -file "example.sln", in the *copy* of the example_nt directory you made -above. -Click Open. - -BUILD THE EXAMPLE DLL ---------------------- -In order to check that everything is set up right, try building: - -1. Select a configuration. This step is optional. Do - Build -> Configuration Manager... -> Active Solution Configuration - and select either "Release" or "Debug". - If you skip this step, you'll use the Debug configuration by default. - -2. Build the DLL. Do - Build -> Build Solution - This creates all intermediate and result files in a subdirectory which - is called either Debug or Release, depending on which configuration you - picked in the preceding step. - -TESTING THE DEBUG-MODE DLL --------------------------- -Once the Debug build has succeeded, bring up a DOS box, and cd to -example_nt\Debug. You should now be able to repeat the following session -("C>" is the DOS prompt, ">>>" is the Python prompt) (note that various -debug output from Python may not match this screen dump exactly): - - C>..\..\PCbuild\python_d - Adding parser accelerators ... - Done. - Python 2.2c1+ (#28, Dec 14 2001, 18:06:39) [MSC 32 bit (Intel)] on win32 - Type "help", "copyright", "credits" or "license" for more information. - >>> import example - [7052 refs] - >>> example.foo() - Hello, world - [7052 refs] - >>> - -TESTING THE RELEASE-MODE DLL ----------------------------- -Once the Release build has succeeded, bring up a DOS box, and cd to -example_nt\Release. You should now be able to repeat the following session -("C>" is the DOS prompt, ">>>" is the Python prompt): - - C>..\..\PCbuild\python - Python 2.2c1+ (#28, Dec 14 2001, 18:06:04) [MSC 32 bit (Intel)] on win32 - Type "help", "copyright", "credits" or "license" for more information. - >>> import example - >>> example.foo() - Hello, world - >>> - -Congratulations! You've successfully built your first Python extension -module. - -CREATING YOUR OWN PROJECT -------------------------- -Choose a name ("spam" is always a winner :-) and create a directory for -it. Copy your C sources into it. Note that the module source file name -does not necessarily have to match the module name, but the "init" function -name should match the module name -- i.e. you can only import a module -"spam" if its init function is called "initspam()", and it should call -Py_InitModule with the string "spam" as its first argument (use the minimal -example.c in this directory as a guide). By convention, it lives in a file -called "spam.c" or "spammodule.c". 
The output file should be called -"spam.dll" or "spam.pyd" (the latter is supported to avoid confusion with a -system library "spam.dll" to which your module could be a Python interface) -in Release mode, or spam_d.dll or spam_d.pyd in Debug mode. - -Now your options are: - -1) Copy example.sln and example.vcproj, rename them to spam.*, and edit them -by hand. - -or - -2) Create a brand new project; instructions are below. - -In either case, copy example_nt\example.def to spam\spam.def, and edit the -new spam.def so its second line contains the string "initspam". If you -created a new project yourself, add the file spam.def to the project now. -(This is an annoying little file with only two lines. An alternative -approach is to forget about the .def file, and add the option -"/export:initspam" somewhere to the Link settings, by manually editing the -"Project -> Properties -> Linker -> Command Line -> Additional Options" -box). - -You are now all set to build your extension, unless it requires other -external libraries, include files, etc. See Python's Extending and -Embedding manual for instructions on how to write an extension. - - -CREATING A BRAND NEW PROJECT ----------------------------- -Use the - File -> New -> Project... -dialog to create a new Project Workspace. Select "Visual C++ Projects/Win32/ -Win32 Project", enter the name ("spam"), and make sure the "Location" is -set to parent of the spam directory you have created (which should be a direct -subdirectory of the Python build tree, a sibling of Include and PC). -In "Application Settings", select "DLL", and "Empty Project". Click OK. - -You should now create the file spam.def as instructed in the previous -section. Add the source files (including the .def file) to the project, -using "Project", "Add Existing Item". - -Now open the - Project -> spam properties... -dialog. (Impressive, isn't it? :-) You only need to change a few -settings. Make sure "All Configurations" is selected from the "Settings -for:" dropdown list. Select the "C/C++" tab. Choose the "General" -category in the popup menu at the top. Type the following text in the -entry box labeled "Addditional Include Directories:" - - ..\Include,..\PC - -Then, choose the "General" category in the "Linker" tab, and enter - ..\PCbuild -in the "Additional library Directories" box. - -Now you need to add some mode-specific settings (select "Accept" -when asked to confirm your changes): - -Select "Release" in the "Configuration" dropdown list. Click the -"Link" tab, choose the "Input" Category, and append "python24.lib" to the -list in the "Additional Dependencies" box. - -Select "Debug" in the "Settings for:" dropdown list, and append -"python24_d.lib" to the list in the Additional Dependencies" box. Then -click on the C/C++ tab, select "Code Generation", and select -"Multi-threaded Debug DLL" from the "Runtime library" dropdown list. - -Select "Release" again from the "Settings for:" dropdown list. -Select "Multi-threaded DLL" from the "Use run-time library:" dropdown list. - -That's all . diff --git a/PC/example_nt/setup.py b/PC/example_nt/setup.py deleted file mode 100644 index 0443bc70..00000000 --- a/PC/example_nt/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -# This is an example of a distutils 'setup' script for the example_nt -# sample. This provides a simpler way of building your extension -# and means you can avoid keeping MSVC solution files etc in source-control. -# It also means it should magically build with all compilers supported by -# python. 
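# As a sketch (assuming the same distutils machinery and the example.c module
# shown above), the extension can also be built in place and smoke-tested
# without installing it:
#
#     python setup.py build_ext --inplace
#     python -c "import example; example.foo()"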
- -# USAGE: you probably want 'setup.py install' - but execute 'setup.py --help' -# for all the details. - -# NOTE: This is *not* a sample for distutils - it is just the smallest -# script that can build this. See distutils docs for more info. - -from distutils.core import setup, Extension - -example_mod = Extension('example', sources = ['example.c']) - - -setup(name = "example", - version = "1.0", - description = "A sample extension module", - ext_modules = [example_mod], -) diff --git a/PC/pyconfig.h b/PC/pyconfig.h index 324a1301..d44173ae 100644 --- a/PC/pyconfig.h +++ b/PC/pyconfig.h @@ -147,7 +147,11 @@ WIN32 is still required for the locale module. #define MS_WINI64 #define PYD_PLATFORM_TAG "win_ia64" #elif defined(_M_X64) || defined(_M_AMD64) +#if defined(__INTEL_COMPILER) +#define COMPILER ("[ICC v." _Py_STRINGIZE(__INTEL_COMPILER) " 64 bit (amd64) with MSC v." _Py_STRINGIZE(_MSC_VER) " CRT]") +#else #define COMPILER _Py_PASTE_VERSION("64 bit (AMD64)") +#endif /* __INTEL_COMPILER */ #define MS_WINX64 #define PYD_PLATFORM_TAG "win_amd64" #else @@ -194,7 +198,11 @@ typedef _W64 int ssize_t; #if defined(MS_WIN32) && !defined(MS_WIN64) #if defined(_M_IX86) +#if defined(__INTEL_COMPILER) +#define COMPILER ("[ICC v." _Py_STRINGIZE(__INTEL_COMPILER) " 32 bit (Intel) with MSC v." _Py_STRINGIZE(_MSC_VER) " CRT]") +#else #define COMPILER _Py_PASTE_VERSION("32 bit (Intel)") +#endif /* __INTEL_COMPILER */ #define PYD_PLATFORM_TAG "win32" #elif defined(_M_ARM) #define COMPILER _Py_PASTE_VERSION("32 bit (ARM)") diff --git a/PC/pylauncher.rc b/PC/pylauncher.rc index 634b8d6d..a8db0a84 100644 --- a/PC/pylauncher.rc +++ b/PC/pylauncher.rc @@ -1,51 +1,50 @@ #include -#define MS_WINDOWS -#include "..\Include\modsupport.h" -#include "..\Include\patchlevel.h" -#ifdef _DEBUG -# include "pythonnt_rc_d.h" -#else -# include "pythonnt_rc.h" -#endif +#include "python_ver_rc.h" + +// Include the manifest file that indicates we support all +// current versions of Windows. 
+#include +1 RT_MANIFEST "python.manifest" -#define PYTHON_VERSION PY_VERSION "\0" -#define PYVERSION64 PY_MAJOR_VERSION, PY_MINOR_VERSION, FIELD3, PYTHON_API_VERSION +1 ICON DISCARDABLE "launcher.ico" +2 ICON DISCARDABLE "py.ico" +3 ICON DISCARDABLE "pyc.ico" + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// VS_VERSION_INFO VERSIONINFO FILEVERSION PYVERSION64 PRODUCTVERSION PYVERSION64 - FILEFLAGSMASK 0x17L + FILEFLAGSMASK 0x3fL #ifdef _DEBUG - FILEFLAGS 0x1L + FILEFLAGS VS_FF_DEBUG #else FILEFLAGS 0x0L #endif - FILEOS 0x4L - FILETYPE 0x1L + FILEOS VOS__WINDOWS32 + FILETYPE VFT_APP FILESUBTYPE 0x0L BEGIN BLOCK "StringFileInfo" BEGIN - BLOCK "080904b0" + BLOCK "000004b0" BEGIN - VALUE "Comments", "Python Launcher for Windows" - VALUE "CompanyName", "Python Software Foundation" - VALUE "FileDescription", "Python Launcher for Windows (Console)" + VALUE "CompanyName", PYTHON_COMPANY "\0" + VALUE "FileDescription", "Python\0" VALUE "FileVersion", PYTHON_VERSION - VALUE "InternalName", "py" - VALUE "LegalCopyright", "Copyright (C) 2011-2014 Python Software Foundation" - VALUE "OriginalFilename", "py" - VALUE "ProductName", "Python Launcher for Windows" + VALUE "InternalName", "Python Launcher\0" + VALUE "LegalCopyright", PYTHON_COPYRIGHT "\0" + VALUE "OriginalFilename", "py" PYTHON_DEBUG_EXT ".exe\0" + VALUE "ProductName", "Python\0" VALUE "ProductVersion", PYTHON_VERSION END END BLOCK "VarFileInfo" BEGIN - VALUE "Translation", 0x809, 1200 + VALUE "Translation", 0x0, 1200 END -END - -IDI_ICON1 ICON "launcher.ico" - - +END \ No newline at end of file diff --git a/PC/python.manifest b/PC/python.manifest index 3ac2630e..9b7b2f8e 100644 --- a/PC/python.manifest +++ b/PC/python.manifest @@ -1,5 +1,12 @@ + + + + + + + diff --git a/PC/readme.txt b/PC/readme.txt index 8639dc33..0a96d269 100644 --- a/PC/readme.txt +++ b/PC/readme.txt @@ -71,8 +71,6 @@ getpathp.c Default sys.path calculations (for all PC platforms). dllbase_nt.txt A (manually maintained) list of base addresses for various DLLs, to avoid run-time relocation. -example_nt A subdirectory showing how to build an extension as a - DLL. Note for Windows 3.x and DOS users ================================== diff --git a/PC/testpy.py b/PC/testpy.py index 36f92009..4ef3d4f7 100644 --- a/PC/testpy.py +++ b/PC/testpy.py @@ -18,8 +18,6 @@ except: a PC, you should add the dos_8x3 directory to your PYTHONPATH.""") sys.exit(1) -import os - for dir in sys.path: file = os.path.join(dir, "os.py") if os.path.isfile(file): diff --git a/PCbuild/build.bat b/PCbuild/build.bat index 0fb01d77..cfbc4a29 100644 --- a/PCbuild/build.bat +++ b/PCbuild/build.bat @@ -1,19 +1,47 @@ @echo off -rem A batch program to build or rebuild a particular configuration, -rem just for convenience. - -rem Arguments: -rem -c Set the configuration (default: Release) -rem -p Set the platform (x64 or Win32, default: Win32) -rem -r Target Rebuild instead of Build -rem -t Set the target manually (Build, Rebuild, Clean, or CleanAll) -rem -d Set the configuration to Debug -rem -e Pull in external libraries using get_externals.bat -rem -m Enable parallel build (enabled by default) -rem -M Disable parallel build -rem -v Increased output messages -rem -k Attempt to kill any running Pythons before building (usually unnecessary) +goto Run +:Usage +echo.%~nx0 [flags and arguments] [quoted MSBuild options] +echo. +echo.Build CPython from the command line. 
Requires the appropriate +echo.version(s) of Microsoft Visual Studio to be installed (see readme.txt). +echo.Also requires Subversion (svn.exe) to be on PATH if the '-e' flag is +echo.given. +echo. +echo.After the flags recognized by this script, up to 9 arguments to be passed +echo.directly to MSBuild may be passed. If the argument contains an '=', the +echo.entire argument must be quoted (e.g. `%~nx0 "/p:PlatformToolset=v100"`) +echo. +echo.Available flags: +echo. -h Display this help message +echo. -V Display version information for the current build +echo. -r Target Rebuild instead of Build +echo. -d Set the configuration to Debug +echo. -e Build external libraries fetched by get_externals.bat +echo. Extension modules that depend on external libraries will not attempt +echo. to build if this flag is not present +echo. -m Enable parallel build (enabled by default) +echo. -M Disable parallel build +echo. -v Increased output messages +echo. -k Attempt to kill any running Pythons before building (usually done +echo. automatically by the pythoncore project) +echo. +echo.Available flags to avoid building certain modules. +echo.These flags have no effect if '-e' is not given: +echo. --no-ssl Do not attempt to build _ssl +echo. --no-tkinter Do not attempt to build Tkinter +echo. +echo.Available arguments: +echo. -c Release ^| Debug ^| PGInstrument ^| PGUpdate +echo. Set the configuration (default: Release) +echo. -p x64 ^| Win32 +echo. Set the platform (default: Win32) +echo. -t Build ^| Rebuild ^| Clean ^| CleanAll +echo. Set the target manually +echo. --test-marker Enable the test marker within the build. +exit /b 127 +:Run setlocal set platf=Win32 set vs_platf=x86 @@ -25,17 +53,30 @@ set verbose=/nologo /v:m set kill= :CheckOpts +if "%~1"=="-h" goto Usage if "%~1"=="-c" (set conf=%2) & shift & shift & goto CheckOpts if "%~1"=="-p" (set platf=%2) & shift & shift & goto CheckOpts if "%~1"=="-r" (set target=Rebuild) & shift & goto CheckOpts if "%~1"=="-t" (set target=%2) & shift & shift & goto CheckOpts if "%~1"=="-d" (set conf=Debug) & shift & goto CheckOpts -if "%~1"=="-e" call "%dir%get_externals.bat" & shift & goto CheckOpts if "%~1"=="-m" (set parallel=/m) & shift & goto CheckOpts if "%~1"=="-M" (set parallel=) & shift & goto CheckOpts if "%~1"=="-v" (set verbose=/v:n) & shift & goto CheckOpts if "%~1"=="-k" (set kill=true) & shift & goto CheckOpts +if "%~1"=="--test-marker" (set UseTestMarker=true) & shift & goto CheckOpts if "%~1"=="-V" shift & goto Version +rem These use the actual property names used by MSBuild. 
We could just let +rem them in through the environment, but we specify them on the command line +rem anyway for visibility so set defaults after this +if "%~1"=="-e" (set IncludeExternals=true) & shift & goto CheckOpts +if "%~1"=="--no-ssl" (set IncludeSSL=false) & shift & goto CheckOpts +if "%~1"=="--no-tkinter" (set IncludeTkinter=false) & shift & goto CheckOpts + +if "%IncludeExternals%"=="" set IncludeExternals=false +if "%IncludeSSL%"=="" set IncludeSSL=true +if "%IncludeTkinter%"=="" set IncludeTkinter=true + +if "%IncludeExternals%"=="true" call "%dir%get_externals.bat" if "%platf%"=="x64" (set vs_platf=x86_amd64) @@ -43,14 +84,19 @@ rem Setup the environment call "%dir%env.bat" %vs_platf% >nul if "%kill%"=="true" ( - msbuild /v:m /nologo /target:KillPython "%pcbuild%\pythoncore.vcxproj" /p:Configuration=%conf% /p:Platform=%platf% /p:KillPython=true + msbuild /v:m /nologo /target:KillPython "%dir%\pythoncore.vcxproj" /p:Configuration=%conf% /p:Platform=%platf% /p:KillPython=true ) rem Call on MSBuild to do the work, echo the command. rem Passing %1-9 is not the preferred option, but argument parsing in rem batch is, shall we say, "lackluster" echo on -msbuild "%dir%pcbuild.proj" /t:%target% %parallel% %verbose% /p:Configuration=%conf% /p:Platform=%platf% %1 %2 %3 %4 %5 %6 %7 %8 %9 +msbuild "%dir%pcbuild.proj" /t:%target% %parallel% %verbose%^ + /p:Configuration=%conf% /p:Platform=%platf%^ + /p:IncludeExternals=%IncludeExternals%^ + /p:IncludeSSL=%IncludeSSL% /p:IncludeTkinter=%IncludeTkinter%^ + /p:UseTestMarker=%UseTestMarker%^ + %1 %2 %3 %4 %5 %6 %7 %8 %9 @goto :eof diff --git a/PCbuild/get_externals.bat b/PCbuild/get_externals.bat index f9b740fa..0b3c08b6 100644 --- a/PCbuild/get_externals.bat +++ b/PCbuild/get_externals.bat @@ -51,16 +51,17 @@ if ERRORLEVEL 9009 ( echo.Fetching external libraries... -for %%e in ( - bzip2-1.0.6 - nasm-2.11.06 - openssl-1.0.2d - tcl-core-8.6.4.2 - tk-8.6.4.2 - tix-8.4.3.6 - sqlite-3.8.11.0 - xz-5.0.5 - ) do ( +set libraries= +set libraries=%libraries% bzip2-1.0.6 +if NOT "%IncludeSSL%"=="false" set libraries=%libraries% nasm-2.11.06 +if NOT "%IncludeSSL%"=="false" set libraries=%libraries% openssl-1.0.2d +set libraries=%libraries% sqlite-3.8.11.0 +if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tcl-core-8.6.4.2 +if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tk-8.6.4.2 +if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tix-8.4.3.6 +set libraries=%libraries% xz-5.0.5 + +for %%e in (%libraries%) do ( if exist %%e ( echo.%%e already exists, skipping. 
) else ( diff --git a/PCbuild/pcbuild.proj b/PCbuild/pcbuild.proj index 813831e3..36621c91 100644 --- a/PCbuild/pcbuild.proj +++ b/PCbuild/pcbuild.proj @@ -5,8 +5,10 @@ Win32 Release true + true true true + true @@ -25,7 +27,7 @@ @@ -40,10 +42,14 @@ - + + + - - + + + + diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props index c1303e1a..25cdfcc9 100644 --- a/PCbuild/pyproject.props +++ b/PCbuild/pyproject.props @@ -43,6 +43,7 @@ Default true true + NoExtensions Disabled @@ -127,8 +128,8 @@ foreach (System.Diagnostics.Process p in System.Diagnostics.Process.GetProcesses - - + + _d + + -test + + + -32 + $(BuildPath)python$(PyDebugExt).exe @@ -129,17 +135,17 @@ $([msbuild]::Multiply($(MicroVersionNumber), 1000)) )) )) + $([msbuild]::Add($(Field3Value), 9000)) python$(MajorVersionNumber)$(MinorVersionNumber)$(PyDebugExt) - .cp$(MajorVersionNumber)$(MinorVersionNumber)-win32 - .cp$(MajorVersionNumber)$(MinorVersionNumber)-win_amd64 + .cp$(MajorVersionNumber)$(MinorVersionNumber)-win32 + .cp$(MajorVersionNumber)$(MinorVersionNumber)-win_amd64 - $(MajorVersionNumber).$(MinorVersionNumber) - $(SysWinVer)-32 + $(MajorVersionNumber).$(MinorVersionNumber)$(PyArchExt)$(PyTestExt) @@ -148,5 +154,7 @@ + + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 3b275bb0..837b7369 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -435,6 +435,9 @@ Modules + + Include + @@ -968,6 +971,9 @@ PC + + Objects + diff --git a/PCbuild/readme.txt b/PCbuild/readme.txt index 68cdb0ff..09a996f3 100644 --- a/PCbuild/readme.txt +++ b/PCbuild/readme.txt @@ -41,8 +41,7 @@ The solution currently supports two platforms. The Win32 platform is used to build standard x86-compatible 32-bit binaries, output into the win32 sub-directory. The x64 platform is used for building 64-bit AMD64 (aka x86_64 or EM64T) binaries, output into the amd64 sub-directory. -The Itanium (IA-64) platform is no longer supported. See the "Building -for AMD64" section below for more information about 64-bit builds. +The Itanium (IA-64) platform is no longer supported. Four configuration options are supported by the solution: Debug @@ -76,29 +75,7 @@ officially supported. By default, build.bat will build Python in Release configuration for the 32-bit Win32 platform. It accepts several arguments to change -this behavior: - - -c Set the configuration (see above) - -d Shortcut for "-c Debug" - -p Set the platform to build for ("Win32" or "x64") - -r Rebuild instead of just building - -t Set the target (Build, Rebuild, Clean or CleanAll) - -e Use get_externals.bat to fetch external sources - -M Don't build in parallel - -v Increased output messages - -Up to 9 MSBuild switches can also be passed, though they must be passed -after specifying any of the above switches. For example, use: - - build.bat -e -d /fl - -to do a debug build with externals fetched as needed and write detailed -build logs to a file. If the MSBuild switch requires an equal sign -("="), the entire switch must be quoted: - - build.bat -e -d "/p:ExternalsDir=P:\cpython-externals" - -There may also be other situations where quotes are necessary. +this behavior, try `build.bat -h` to learn more. 
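For example, a 64-bit Debug build that also fetches and builds the external
libraries can be requested with something like

  build.bat -e -d -p x64

and any MSBuild option that contains an "=" must be passed as a single quoted
argument, for example build.bat -e -d "/p:PlatformToolset=v100".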
C Runtime @@ -129,8 +106,6 @@ pythoncore .dll and .lib python .exe -make_buildinfo, make_versioninfo - helpers to provide necessary information to the build process These sub-projects provide extra executables that are useful for running CPython in different ways: @@ -152,9 +127,6 @@ categories: _freeze_importlib _freeze_importlib.exe, used to regenerate Python\importlib.h after changes have been made to Lib\importlib\_bootstrap.py -bdist_wininst - ..\Lib\distutils\command\wininst-14.0[-amd64].exe, the base - executable used by the distutils bdist_wininst command python3dll python3.dll, the PEP 384 Stable ABI dll xxlimited @@ -274,14 +246,8 @@ as the values of certain properties in order for the build solution to find them. This is an advanced topic and not necessarily fully supported. - -Building for AMD64 ------------------- - -The build process for AMD64 / x64 is very similar to standard builds, -you just have to set x64 as platform. In addition, the HOST_PYTHON -environment variable must point to a Python interpreter (at least 2.4), -to support cross-compilation from Win32. +The get_externals.bat script is called automatically by build.bat when +you pass the '-e' option to it. Profile Guided Optimization @@ -298,7 +264,7 @@ It creates the PGI files, runs the unit test suite or PyBench with the PGI python, and finally creates the optimized files. See - http://msdn.microsoft.com/en-us/library/e7k32f4k(VS.100).aspx + http://msdn.microsoft.com/en-us/library/e7k32f4k(VS.140).aspx for more on this topic. @@ -332,11 +298,3 @@ project, with some projects overriding certain specific values. The GUI doesn't always reflect the correct settings and may confuse the user with false information, especially for settings that automatically adapt for diffirent configurations. - - -Your Own Extension DLLs ------------------------ - -If you want to create your own extension module DLL (.pyd), there's an -example with easy-to-follow instructions in ..\PC\example\; read the -file readme.txt there first. diff --git a/PCbuild/rt.bat b/PCbuild/rt.bat index 039c8105..2d93b80a 100644 --- a/PCbuild/rt.bat +++ b/PCbuild/rt.bat @@ -32,15 +32,17 @@ set prefix=%pcbuild%win32\ set suffix= set qmode= set dashO= +set regrtestargs= :CheckOpts if "%1"=="-O" (set dashO=-O) & shift & goto CheckOpts if "%1"=="-q" (set qmode=yes) & shift & goto CheckOpts if "%1"=="-d" (set suffix=_d) & shift & goto CheckOpts if "%1"=="-x64" (set prefix=%pcbuild%amd64\) & shift & goto CheckOpts +if NOT "%1"=="" (set regrtestargs=%regrtestargs% %1) & shift & goto CheckOpts set exe=%prefix%python%suffix%.exe -set cmd="%exe%" %dashO% -Wd -E -bb "%pcbuild%..\lib\test\regrtest.py" %1 %2 %3 %4 %5 %6 %7 %8 %9 +set cmd="%exe%" %dashO% -Wd -E -bb "%pcbuild%..\lib\test\regrtest.py" %regrtestargs% if defined qmode goto Qmode echo Deleting .pyc/.pyo files ... 
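With the rt.bat change above, any switch the script does not recognize is
collected into %regrtestargs% and handed straight on to regrtest, so a single
test can be run with something like

  rt.bat -d -x64 -v test_grammar

where -d and -x64 are rt.bat's own switches and -v plus the test name are
simply passed through to Lib\test\regrtest.py.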
diff --git a/Parser/asdl_c.py b/Parser/asdl_c.py index a5e35d93..f38c2535 100755 --- a/Parser/asdl_c.py +++ b/Parser/asdl_c.py @@ -275,7 +275,9 @@ class PrototypeVisitor(EmitVisitor): def visitProduct(self, prod, name): self.emit_function(name, get_c_type(name), - self.get_args(prod.fields), [], union=False) + self.get_args(prod.fields), + self.get_args(prod.attributes), + union=False) class FunctionVisitor(PrototypeVisitor): @@ -329,7 +331,8 @@ class FunctionVisitor(PrototypeVisitor): self.emit(s, depth, reflow) for argtype, argname, opt in args: emit("p->%s = %s;" % (argname, argname), 1) - assert not attrs + for argtype, argname, opt in attrs: + emit("p->%s = %s;" % (argname, argname), 1) class PickleVisitor(EmitVisitor): @@ -452,10 +455,15 @@ class Obj2ModVisitor(PickleVisitor): self.emit("PyObject* tmp = NULL;", 1) for f in prod.fields: self.visitFieldDeclaration(f, name, prod=prod, depth=1) + for a in prod.attributes: + self.visitFieldDeclaration(a, name, prod=prod, depth=1) self.emit("", 0) for f in prod.fields: self.visitField(f, name, prod=prod, depth=1) + for a in prod.attributes: + self.visitField(a, name, prod=prod, depth=1) args = [f.name for f in prod.fields] + args.extend([a.name for a in prod.attributes]) self.emit("*out = %s(%s);" % (name, self.buildArgs(args)), 1) self.emit("return 0;", 1) self.emit("failed:", 0) @@ -1289,7 +1297,6 @@ def main(srcfile, dump_module=False): f.close() if __name__ == "__main__": - import sys import getopt INC_DIR = '' diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c index 04baeaf3..6a528db6 100644 --- a/Parser/tokenizer.c +++ b/Parser/tokenizer.c @@ -196,7 +196,8 @@ error_ret(struct tok_state *tok) /* XXX */ tok->decoding_erred = 1; if (tok->fp != NULL && tok->buf != NULL) /* see PyTokenizer_Free */ PyMem_FREE(tok->buf); - tok->buf = NULL; + tok->buf = tok->cur = tok->end = tok->inp = tok->start = NULL; + tok->done = E_DECODE; return NULL; /* as if it were EOF */ } @@ -952,11 +953,6 @@ tok_nextc(struct tok_state *tok) } buflen = PyBytes_GET_SIZE(u); buf = PyBytes_AS_STRING(u); - if (!buf) { - Py_DECREF(u); - tok->done = E_DECODE; - return EOF; - } newtok = PyMem_MALLOC(buflen+1); strcpy(newtok, buf); Py_DECREF(u); @@ -998,7 +994,6 @@ tok_nextc(struct tok_state *tok) if (tok->buf != NULL) PyMem_FREE(tok->buf); tok->buf = newtok; - tok->line_start = tok->buf; tok->cur = tok->buf; tok->line_start = tok->buf; tok->inp = strchr(tok->buf, '\0'); @@ -1021,7 +1016,8 @@ tok_nextc(struct tok_state *tok) } if (decoding_fgets(tok->buf, (int)(tok->end - tok->buf), tok) == NULL) { - tok->done = E_EOF; + if (!tok->decoding_erred) + tok->done = E_EOF; done = 1; } else { @@ -1055,6 +1051,8 @@ tok_nextc(struct tok_state *tok) return EOF; } tok->buf = newbuf; + tok->cur = tok->buf + cur; + tok->line_start = tok->cur; tok->inp = tok->buf + curvalid; tok->end = tok->buf + newsize; tok->start = curstart < 0 ? 
NULL : diff --git a/Python/Python-ast.c b/Python/Python-ast.c index 8a2dc7cc..edfcbad1 100644 --- a/Python/Python-ast.c +++ b/Python/Python-ast.c @@ -2372,7 +2372,8 @@ arguments(asdl_seq * args, arg_ty vararg, asdl_seq * kwonlyargs, asdl_seq * } arg_ty -arg(identifier arg, expr_ty annotation, PyArena *arena) +arg(identifier arg, expr_ty annotation, int lineno, int col_offset, PyArena + *arena) { arg_ty p; if (!arg) { @@ -2385,6 +2386,8 @@ arg(identifier arg, expr_ty annotation, PyArena *arena) return NULL; p->arg = arg; p->annotation = annotation; + p->lineno = lineno; + p->col_offset = col_offset; return p; } @@ -7086,6 +7089,8 @@ obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena) PyObject* tmp = NULL; identifier arg; expr_ty annotation; + int lineno; + int col_offset; if (_PyObject_HasAttrId(obj, &PyId_arg)) { int res; @@ -7108,7 +7113,29 @@ obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena) } else { annotation = NULL; } - *out = arg(arg, annotation, arena); + if (_PyObject_HasAttrId(obj, &PyId_lineno)) { + int res; + tmp = _PyObject_GetAttrId(obj, &PyId_lineno); + if (tmp == NULL) goto failed; + res = obj2ast_int(tmp, &lineno, arena); + if (res != 0) goto failed; + Py_CLEAR(tmp); + } else { + PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from arg"); + return 1; + } + if (_PyObject_HasAttrId(obj, &PyId_col_offset)) { + int res; + tmp = _PyObject_GetAttrId(obj, &PyId_col_offset); + if (tmp == NULL) goto failed; + res = obj2ast_int(tmp, &col_offset, arena); + if (res != 0) goto failed; + Py_CLEAR(tmp); + } else { + PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from arg"); + return 1; + } + *out = arg(arg, annotation, lineno, col_offset, arena); return 0; failed: Py_XDECREF(tmp); diff --git a/Python/ast.c b/Python/ast.c index b2f09b9c..7743c31c 100644 --- a/Python/ast.c +++ b/Python/ast.c @@ -1173,11 +1173,9 @@ ast_for_arg(struct compiling *c, const node *n) return NULL; } - ret = arg(name, annotation, c->c_arena); + ret = arg(name, annotation, LINENO(n), n->n_col_offset, c->c_arena); if (!ret) return NULL; - ret->lineno = LINENO(n); - ret->col_offset = n->n_col_offset; return ret; } @@ -1233,11 +1231,10 @@ handle_keywordonly_args(struct compiling *c, const node *n, int start, goto error; if (forbidden_name(c, argname, ch, 0)) goto error; - arg = arg(argname, annotation, c->c_arena); + arg = arg(argname, annotation, LINENO(ch), ch->n_col_offset, + c->c_arena); if (!arg) goto error; - arg->lineno = LINENO(ch); - arg->col_offset = ch->n_col_offset; asdl_seq_SET(kwonlyargs, j++, arg); i += 2; /* the name and the comma */ break; @@ -2101,6 +2098,7 @@ ast_for_atom(struct compiling *c, const node *n) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ + expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ @@ -2112,12 +2110,12 @@ ast_for_atom(struct compiling *c, const node *n) (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ - return ast_for_setdisplay(c, ch); + res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. 
*/ - return ast_for_setcomp(c, ch); + res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { @@ -2127,12 +2125,17 @@ ast_for_atom(struct compiling *c, const node *n) "dict comprehension"); return NULL; } - return ast_for_dictcomp(c, ch); + res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. */ - return ast_for_dictdisplay(c, ch); + res = ast_for_dictdisplay(c, ch); + } + if (res) { + res->lineno = LINENO(n); + res->col_offset = n->n_col_offset; } + return res; } } default: diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c index 2f22209e..a06ef610 100644 --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -599,20 +599,37 @@ builtin_chr_impl(PyModuleDef *module, int i) static const char * -source_as_string(PyObject *cmd, const char *funcname, const char *what, PyCompilerFlags *cf, Py_buffer *view) +source_as_string(PyObject *cmd, const char *funcname, const char *what, PyCompilerFlags *cf, PyObject **cmd_copy) { const char *str; Py_ssize_t size; + Py_buffer view; + *cmd_copy = NULL; if (PyUnicode_Check(cmd)) { cf->cf_flags |= PyCF_IGNORE_COOKIE; str = PyUnicode_AsUTF8AndSize(cmd, &size); if (str == NULL) return NULL; } - else if (PyObject_GetBuffer(cmd, view, PyBUF_SIMPLE) == 0) { - str = (const char *)view->buf; - size = view->len; + else if (PyBytes_Check(cmd)) { + str = PyBytes_AS_STRING(cmd); + size = PyBytes_GET_SIZE(cmd); + } + else if (PyByteArray_Check(cmd)) { + str = PyByteArray_AS_STRING(cmd); + size = PyByteArray_GET_SIZE(cmd); + } + else if (PyObject_GetBuffer(cmd, &view, PyBUF_SIMPLE) == 0) { + /* Copy to NUL-terminated buffer. */ + *cmd_copy = PyBytes_FromStringAndSize( + (const char *)view.buf, view.len); + PyBuffer_Release(&view); + if (*cmd_copy == NULL) { + return NULL; + } + str = PyBytes_AS_STRING(*cmd_copy); + size = PyBytes_GET_SIZE(*cmd_copy); } else { PyErr_Format(PyExc_TypeError, @@ -624,7 +641,7 @@ source_as_string(PyObject *cmd, const char *funcname, const char *what, PyCompil if (strlen(str) != (size_t)size) { PyErr_SetString(PyExc_ValueError, "source code string cannot contain null bytes"); - PyBuffer_Release(view); + Py_CLEAR(*cmd_copy); return NULL; } return str; @@ -660,7 +677,7 @@ builtin_compile_impl(PyModuleDef *module, PyObject *source, int dont_inherit, int optimize) /*[clinic end generated code: output=31881762c1bb90c4 input=9d53e8cfb3c86414]*/ { - Py_buffer view = {NULL, NULL}; + PyObject *source_copy; const char *str; int compile_mode = -1; int is_ast; @@ -732,12 +749,12 @@ builtin_compile_impl(PyModuleDef *module, PyObject *source, goto finally; } - str = source_as_string(source, "compile", "string, bytes or AST", &cf, &view); + str = source_as_string(source, "compile", "string, bytes or AST", &cf, &source_copy); if (str == NULL) goto error; result = Py_CompileStringObject(str, filename, start[compile_mode], &cf, optimize); - PyBuffer_Release(&view); + Py_XDECREF(source_copy); goto finally; error: @@ -812,8 +829,7 @@ builtin_eval_impl(PyModuleDef *module, PyObject *source, PyObject *globals, PyObject *locals) /*[clinic end generated code: output=7284501fb7b4d666 input=11ee718a8640e527]*/ { - PyObject *result, *tmp = NULL; - Py_buffer view = {NULL, NULL}; + PyObject *result, *source_copy; const char *str; PyCompilerFlags cf; @@ -861,7 +877,7 @@ builtin_eval_impl(PyModuleDef *module, PyObject *source, PyObject *globals, } cf.cf_flags = PyCF_SOURCE_IS_UTF8; - str = source_as_string(source, "eval", "string, bytes or code", &cf, &view); + str = source_as_string(source, 
"eval", "string, bytes or code", &cf, &source_copy); if (str == NULL) return NULL; @@ -870,8 +886,7 @@ builtin_eval_impl(PyModuleDef *module, PyObject *source, PyObject *globals, (void)PyEval_MergeCompilerFlags(&cf); result = PyRun_StringFlags(str, Py_eval_input, globals, locals, &cf); - PyBuffer_Release(&view); - Py_XDECREF(tmp); + Py_XDECREF(source_copy); return result; } @@ -942,12 +957,13 @@ builtin_exec_impl(PyModuleDef *module, PyObject *source, PyObject *globals, v = PyEval_EvalCode(source, globals, locals); } else { - Py_buffer view = {NULL, NULL}; + PyObject *source_copy; const char *str; PyCompilerFlags cf; cf.cf_flags = PyCF_SOURCE_IS_UTF8; str = source_as_string(source, "exec", - "string, bytes or code", &cf, &view); + "string, bytes or code", &cf, + &source_copy); if (str == NULL) return NULL; if (PyEval_MergeCompilerFlags(&cf)) @@ -955,7 +971,7 @@ builtin_exec_impl(PyModuleDef *module, PyObject *source, PyObject *globals, locals, &cf); else v = PyRun_String(str, Py_file_input, globals, locals); - PyBuffer_Release(&view); + Py_XDECREF(source_copy); } if (v == NULL) return NULL; @@ -1853,8 +1869,10 @@ builtin_input_impl(PyModuleDef *module, PyObject *prompt) } if (tty) { tmp = _PyObject_CallMethodId(fout, &PyId_fileno, ""); - if (tmp == NULL) + if (tmp == NULL) { PyErr_Clear(); + tty = 0; + } else { fd = PyLong_AsLong(tmp); Py_DECREF(tmp); diff --git a/Python/ceval.c b/Python/ceval.c index 8d2cdc2e..beabfebc 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1214,7 +1214,7 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag) #ifdef Py_DEBUG /* PyEval_EvalFrameEx() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif @@ -2363,6 +2363,10 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag) /* Slow-path if globals or builtins is not a dict */ v = PyObject_GetItem(f->f_globals, name); if (v == NULL) { + if (!PyErr_ExceptionMatches(PyExc_KeyError)) + goto error; + PyErr_Clear(); + v = PyObject_GetItem(f->f_builtins, name); if (v == NULL) { if (PyErr_ExceptionMatches(PyExc_KeyError)) diff --git a/Python/codecs.c b/Python/codecs.c index 596bd808..d90bf737 100644 --- a/Python/codecs.c +++ b/Python/codecs.c @@ -966,7 +966,6 @@ PyObject *PyCodec_BackslashReplaceErrors(PyObject *exc) } static _PyUnicode_Name_CAPI *ucnhash_CAPI = NULL; -static int ucnhash_initialized = 0; PyObject *PyCodec_NameReplaceErrors(PyObject *exc) { @@ -988,17 +987,17 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) return NULL; if (!(object = PyUnicodeEncodeError_GetObject(exc))) return NULL; - if (!ucnhash_initialized) { + if (!ucnhash_CAPI) { /* load the unicode data module */ ucnhash_CAPI = (_PyUnicode_Name_CAPI *)PyCapsule_Import( PyUnicodeData_CAPSULE_NAME, 1); - ucnhash_initialized = 1; + if (!ucnhash_CAPI) + return NULL; } for (i = start, ressize = 0; i < end; ++i) { /* object is guaranteed to be "ready" */ c = PyUnicode_READ_CHAR(object, i); - if (ucnhash_CAPI && - ucnhash_CAPI->getname(NULL, c, buffer, sizeof(buffer), 1)) { + if (ucnhash_CAPI->getname(NULL, c, buffer, sizeof(buffer), 1)) { replsize = 1+1+1+(int)strlen(buffer)+1; } else if (c >= 0x10000) { @@ -1021,8 +1020,7 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) i < end; ++i) { c = PyUnicode_READ_CHAR(object, i); *outp++ = '\\'; - if (ucnhash_CAPI && - ucnhash_CAPI->getname(NULL, c, buffer, sizeof(buffer), 1)) { + if (ucnhash_CAPI->getname(NULL, c, buffer, sizeof(buffer), 1)) { 
*outp++ = 'N'; *outp++ = '{'; strcpy((char *)outp, buffer); diff --git a/Python/compile.c b/Python/compile.c index cfeab0fd..4befaa78 100644 --- a/Python/compile.c +++ b/Python/compile.c @@ -548,7 +548,7 @@ compiler_enter_scope(struct compiler *c, identifier name, return 0; } if (u->u_ste->ste_needs_class_closure) { - /* Cook up a implicit __class__ cell. */ + /* Cook up an implicit __class__ cell. */ _Py_IDENTIFIER(__class__); PyObject *tuple, *name, *zero; int res; @@ -985,7 +985,7 @@ PyCompile_OpcodeStackEffect(int opcode, int oparg) case BUILD_MAP_UNPACK_WITH_CALL: return 1 - (oparg & 0xFF); case BUILD_MAP: - return 1; + return 1 - 2*oparg; case LOAD_ATTR: return 0; case COMPARE_OP: diff --git a/Python/fileutils.c b/Python/fileutils.c index bccd3214..079918c4 100644 --- a/Python/fileutils.c +++ b/Python/fileutils.c @@ -986,8 +986,10 @@ _Py_open_impl(const char *pathname, int flags, int gil_held) int _Py_open(const char *pathname, int flags) { +#ifdef WITH_THREAD /* _Py_open() must be called with the GIL held. */ assert(PyGILState_Check()); +#endif return _Py_open_impl(pathname, flags, 1); } @@ -1080,7 +1082,9 @@ _Py_fopen_obj(PyObject *path, const char *mode) wchar_t wmode[10]; int usize; +#ifdef WITH_THREAD assert(PyGILState_Check()); +#endif if (!PyUnicode_Check(path)) { PyErr_Format(PyExc_TypeError, @@ -1108,7 +1112,9 @@ _Py_fopen_obj(PyObject *path, const char *mode) PyObject *bytes; char *path_bytes; +#ifdef WITH_THREAD assert(PyGILState_Check()); +#endif if (!PyUnicode_FSConverter(path, &bytes)) return NULL; diff --git a/Python/importlib_external.h b/Python/importlib_external.h index aea434c8..b20ce175 100644 --- a/Python/importlib_external.h +++ b/Python/importlib_external.h @@ -635,7 +635,7 @@ const unsigned char _Py_M__importlib_external[] = { 95,102,105,110,100,95,109,111,100,117,108,101,95,115,104,105, 109,143,1,0,0,115,10,0,0,0,0,10,21,1,24,1, 6,1,29,1,114,130,0,0,0,99,4,0,0,0,0,0, - 0,0,11,0,0,0,19,0,0,0,67,0,0,0,115,228, + 0,0,11,0,0,0,19,0,0,0,67,0,0,0,115,240, 1,0,0,105,0,0,125,4,0,124,2,0,100,1,0,107, 9,0,114,31,0,124,2,0,124,4,0,100,2,0,60,110, 6,0,100,3,0,125,2,0,124,3,0,100,1,0,107,9, @@ -643,58 +643,59 @@ const unsigned char _Py_M__importlib_external[] = { 0,100,1,0,100,5,0,133,2,0,25,125,5,0,124,0, 0,100,5,0,100,6,0,133,2,0,25,125,6,0,124,0, 0,100,6,0,100,7,0,133,2,0,25,125,7,0,124,5, - 0,116,0,0,107,3,0,114,165,0,100,8,0,106,1,0, - 124,2,0,124,5,0,131,2,0,125,8,0,116,2,0,124, - 8,0,131,1,0,1,116,3,0,124,8,0,124,4,0,141, - 1,0,130,1,0,110,113,0,116,4,0,124,6,0,131,1, - 0,100,5,0,107,3,0,114,223,0,100,9,0,106,1,0, - 124,2,0,131,1,0,125,8,0,116,2,0,124,8,0,131, - 1,0,1,116,5,0,124,8,0,131,1,0,130,1,0,110, - 55,0,116,4,0,124,7,0,131,1,0,100,5,0,107,3, - 0,114,22,1,100,10,0,106,1,0,124,2,0,131,1,0, - 125,8,0,116,2,0,124,8,0,131,1,0,1,116,5,0, - 124,8,0,131,1,0,130,1,0,124,1,0,100,1,0,107, - 9,0,114,214,1,121,20,0,116,6,0,124,1,0,100,11, - 0,25,131,1,0,125,9,0,87,110,18,0,4,116,7,0, - 107,10,0,114,74,1,1,1,1,89,110,59,0,88,116,8, - 0,124,6,0,131,1,0,124,9,0,107,3,0,114,133,1, - 100,12,0,106,1,0,124,2,0,131,1,0,125,8,0,116, - 2,0,124,8,0,131,1,0,1,116,3,0,124,8,0,124, - 4,0,141,1,0,130,1,0,121,18,0,124,1,0,100,13, - 0,25,100,14,0,64,125,10,0,87,110,18,0,4,116,7, - 0,107,10,0,114,171,1,1,1,1,89,110,43,0,88,116, - 8,0,124,7,0,131,1,0,124,10,0,107,3,0,114,214, - 1,116,3,0,100,12,0,106,1,0,124,2,0,131,1,0, - 124,4,0,141,1,0,130,1,0,124,0,0,100,7,0,100, - 1,0,133,2,0,25,83,41,15,97,122,1,0,0,86,97, - 108,105,100,97,116,101,32,116,104,101,32,104,101,97,100,101, - 
114,32,111,102,32,116,104,101,32,112,97,115,115,101,100,45, - 105,110,32,98,121,116,101,99,111,100,101,32,97,103,97,105, - 110,115,116,32,115,111,117,114,99,101,95,115,116,97,116,115, - 32,40,105,102,10,32,32,32,32,103,105,118,101,110,41,32, - 97,110,100,32,114,101,116,117,114,110,105,110,103,32,116,104, - 101,32,98,121,116,101,99,111,100,101,32,116,104,97,116,32, - 99,97,110,32,98,101,32,99,111,109,112,105,108,101,100,32, - 98,121,32,99,111,109,112,105,108,101,40,41,46,10,10,32, - 32,32,32,65,108,108,32,111,116,104,101,114,32,97,114,103, - 117,109,101,110,116,115,32,97,114,101,32,117,115,101,100,32, - 116,111,32,101,110,104,97,110,99,101,32,101,114,114,111,114, - 32,114,101,112,111,114,116,105,110,103,46,10,10,32,32,32, - 32,73,109,112,111,114,116,69,114,114,111,114,32,105,115,32, - 114,97,105,115,101,100,32,119,104,101,110,32,116,104,101,32, - 109,97,103,105,99,32,110,117,109,98,101,114,32,105,115,32, - 105,110,99,111,114,114,101,99,116,32,111,114,32,116,104,101, - 32,98,121,116,101,99,111,100,101,32,105,115,10,32,32,32, - 32,102,111,117,110,100,32,116,111,32,98,101,32,115,116,97, - 108,101,46,32,69,79,70,69,114,114,111,114,32,105,115,32, - 114,97,105,115,101,100,32,119,104,101,110,32,116,104,101,32, - 100,97,116,97,32,105,115,32,102,111,117,110,100,32,116,111, - 32,98,101,10,32,32,32,32,116,114,117,110,99,97,116,101, - 100,46,10,10,32,32,32,32,78,114,106,0,0,0,122,10, - 60,98,121,116,101,99,111,100,101,62,114,35,0,0,0,114, - 12,0,0,0,233,8,0,0,0,233,12,0,0,0,122,30, - 98,97,100,32,109,97,103,105,99,32,110,117,109,98,101,114, - 32,105,110,32,123,33,114,125,58,32,123,33,114,125,122,43, + 0,116,0,0,107,3,0,114,168,0,100,8,0,106,1,0, + 124,2,0,124,5,0,131,2,0,125,8,0,116,2,0,100, + 9,0,124,8,0,131,2,0,1,116,3,0,124,8,0,124, + 4,0,141,1,0,130,1,0,110,119,0,116,4,0,124,6, + 0,131,1,0,100,5,0,107,3,0,114,229,0,100,10,0, + 106,1,0,124,2,0,131,1,0,125,8,0,116,2,0,100, + 9,0,124,8,0,131,2,0,1,116,5,0,124,8,0,131, + 1,0,130,1,0,110,58,0,116,4,0,124,7,0,131,1, + 0,100,5,0,107,3,0,114,31,1,100,11,0,106,1,0, + 124,2,0,131,1,0,125,8,0,116,2,0,100,9,0,124, + 8,0,131,2,0,1,116,5,0,124,8,0,131,1,0,130, + 1,0,124,1,0,100,1,0,107,9,0,114,226,1,121,20, + 0,116,6,0,124,1,0,100,12,0,25,131,1,0,125,9, + 0,87,110,18,0,4,116,7,0,107,10,0,114,83,1,1, + 1,1,89,110,62,0,88,116,8,0,124,6,0,131,1,0, + 124,9,0,107,3,0,114,145,1,100,13,0,106,1,0,124, + 2,0,131,1,0,125,8,0,116,2,0,100,9,0,124,8, + 0,131,2,0,1,116,3,0,124,8,0,124,4,0,141,1, + 0,130,1,0,121,18,0,124,1,0,100,14,0,25,100,15, + 0,64,125,10,0,87,110,18,0,4,116,7,0,107,10,0, + 114,183,1,1,1,1,89,110,43,0,88,116,8,0,124,7, + 0,131,1,0,124,10,0,107,3,0,114,226,1,116,3,0, + 100,13,0,106,1,0,124,2,0,131,1,0,124,4,0,141, + 1,0,130,1,0,124,0,0,100,7,0,100,1,0,133,2, + 0,25,83,41,16,97,122,1,0,0,86,97,108,105,100,97, + 116,101,32,116,104,101,32,104,101,97,100,101,114,32,111,102, + 32,116,104,101,32,112,97,115,115,101,100,45,105,110,32,98, + 121,116,101,99,111,100,101,32,97,103,97,105,110,115,116,32, + 115,111,117,114,99,101,95,115,116,97,116,115,32,40,105,102, + 10,32,32,32,32,103,105,118,101,110,41,32,97,110,100,32, + 114,101,116,117,114,110,105,110,103,32,116,104,101,32,98,121, + 116,101,99,111,100,101,32,116,104,97,116,32,99,97,110,32, + 98,101,32,99,111,109,112,105,108,101,100,32,98,121,32,99, + 111,109,112,105,108,101,40,41,46,10,10,32,32,32,32,65, + 108,108,32,111,116,104,101,114,32,97,114,103,117,109,101,110, + 116,115,32,97,114,101,32,117,115,101,100,32,116,111,32,101, + 110,104,97,110,99,101,32,101,114,114,111,114,32,114,101,112, + 
111,114,116,105,110,103,46,10,10,32,32,32,32,73,109,112, + 111,114,116,69,114,114,111,114,32,105,115,32,114,97,105,115, + 101,100,32,119,104,101,110,32,116,104,101,32,109,97,103,105, + 99,32,110,117,109,98,101,114,32,105,115,32,105,110,99,111, + 114,114,101,99,116,32,111,114,32,116,104,101,32,98,121,116, + 101,99,111,100,101,32,105,115,10,32,32,32,32,102,111,117, + 110,100,32,116,111,32,98,101,32,115,116,97,108,101,46,32, + 69,79,70,69,114,114,111,114,32,105,115,32,114,97,105,115, + 101,100,32,119,104,101,110,32,116,104,101,32,100,97,116,97, + 32,105,115,32,102,111,117,110,100,32,116,111,32,98,101,10, + 32,32,32,32,116,114,117,110,99,97,116,101,100,46,10,10, + 32,32,32,32,78,114,106,0,0,0,122,10,60,98,121,116, + 101,99,111,100,101,62,114,35,0,0,0,114,12,0,0,0, + 233,8,0,0,0,233,12,0,0,0,122,30,98,97,100,32, + 109,97,103,105,99,32,110,117,109,98,101,114,32,105,110,32, + 123,33,114,125,58,32,123,33,114,125,122,2,123,125,122,43, 114,101,97,99,104,101,100,32,69,79,70,32,119,104,105,108, 101,32,114,101,97,100,105,110,103,32,116,105,109,101,115,116, 97,109,112,32,105,110,32,123,33,114,125,122,48,114,101,97, @@ -719,9 +720,9 @@ const unsigned char _Py_M__importlib_external[] = { 95,118,97,108,105,100,97,116,101,95,98,121,116,101,99,111, 100,101,95,104,101,97,100,101,114,160,1,0,0,115,76,0, 0,0,0,11,6,1,12,1,13,3,6,1,12,1,10,1, - 16,1,16,1,16,1,12,1,18,1,10,1,18,1,18,1, - 15,1,10,1,15,1,18,1,15,1,10,1,12,1,12,1, - 3,1,20,1,13,1,5,2,18,1,15,1,10,1,15,1, + 16,1,16,1,16,1,12,1,18,1,13,1,18,1,18,1, + 15,1,13,1,15,1,18,1,15,1,13,1,12,1,12,1, + 3,1,20,1,13,1,5,2,18,1,15,1,13,1,15,1, 3,1,18,1,13,1,5,2,18,1,15,1,9,1,114,141, 0,0,0,99,4,0,0,0,0,0,0,0,5,0,0,0, 6,0,0,0,67,0,0,0,115,112,0,0,0,116,0,0, @@ -1395,7 +1396,7 @@ const unsigned char _Py_M__importlib_external[] = { 32,83,111,117,114,99,101,76,111,97,100,101,114,32,117,115, 105,110,103,32,116,104,101,32,102,105,108,101,32,115,121,115, 116,101,109,46,99,2,0,0,0,0,0,0,0,3,0,0, - 0,5,0,0,0,67,0,0,0,115,34,0,0,0,116,0, + 0,4,0,0,0,67,0,0,0,115,34,0,0,0,116,0, 0,124,1,0,131,1,0,125,2,0,100,1,0,124,2,0, 106,1,0,100,2,0,124,2,0,106,2,0,105,2,0,83, 41,3,122,33,82,101,116,117,114,110,32,116,104,101,32,109, diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c index a17adf76..4f5efc96 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -1,4 +1,3 @@ - /* Python interpreter top-level routines, including init/exit */ #include "Python.h" @@ -253,13 +252,13 @@ import_init(PyInterpreterState *interp, PyObject *sysmod) interp->importlib = importlib; Py_INCREF(interp->importlib); - /* Install _importlib as __import__ */ + /* Import the _imp module */ impmod = PyInit_imp(); if (impmod == NULL) { - Py_FatalError("Py_Initialize: can't import imp"); + Py_FatalError("Py_Initialize: can't import _imp"); } else if (Py_VerboseFlag) { - PySys_FormatStderr("import imp # builtin\n"); + PySys_FormatStderr("import _imp # builtin\n"); } sys_modules = PyImport_GetModuleDict(); if (Py_VerboseFlag) { @@ -269,6 +268,7 @@ import_init(PyInterpreterState *interp, PyObject *sysmod) Py_FatalError("Py_Initialize: can't save _imp to sys.modules"); } + /* Install importlib as the implementation of import */ value = PyObject_CallMethod(importlib, "_install", "OO", sysmod, impmod); if (value == NULL) { PyErr_Print(); @@ -963,6 +963,23 @@ initsite(void) } } +/* Check if a file descriptor is valid or not. + Return 0 if the file descriptor is invalid, return non-zero otherwise. 
*/ +static int +is_valid_fd(int fd) +{ + int fd2; + if (fd < 0 || !_PyVerify_fd(fd)) + return 0; + _Py_BEGIN_SUPPRESS_IPH + fd2 = dup(fd); + if (fd2 >= 0) + close(fd2); + _Py_END_SUPPRESS_IPH + return fd2 >= 0; +} + +/* returns Py_None if the fd is not valid */ static PyObject* create_stdio(PyObject* io, int fd, int write_mode, char* name, @@ -978,6 +995,9 @@ create_stdio(PyObject* io, _Py_IDENTIFIER(TextIOWrapper); _Py_IDENTIFIER(mode); + if (!is_valid_fd(fd)) + Py_RETURN_NONE; + /* stdin is always opened in buffered mode, first because it shouldn't make a difference in common use cases, second because TextIOWrapper depends on the presence of a read1() method which only exists on @@ -1059,21 +1079,15 @@ error: Py_XDECREF(stream); Py_XDECREF(text); Py_XDECREF(raw); - return NULL; -} -static int -is_valid_fd(int fd) -{ - int dummy_fd; - if (fd < 0 || !_PyVerify_fd(fd)) - return 0; - _Py_BEGIN_SUPPRESS_IPH - dummy_fd = dup(fd); - if (dummy_fd >= 0) - close(dummy_fd); - _Py_END_SUPPRESS_IPH - return dummy_fd >= 0; + if (PyErr_ExceptionMatches(PyExc_OSError) && !is_valid_fd(fd)) { + /* Issue #24891: the file descriptor was closed after the first + is_valid_fd() check was called. Ignore the OSError and set the + stream to None. */ + PyErr_Clear(); + Py_RETURN_NONE; + } + return NULL; } /* Initialize sys.stdin, stdout, stderr and builtins.open */ @@ -1158,30 +1172,18 @@ initstdio(void) * and fileno() may point to an invalid file descriptor. For example * GUI apps don't have valid standard streams by default. */ - if (!is_valid_fd(fd)) { - std = Py_None; - Py_INCREF(std); - } - else { - std = create_stdio(iomod, fd, 0, "", encoding, errors); - if (std == NULL) - goto error; - } /* if (fd < 0) */ + std = create_stdio(iomod, fd, 0, "", encoding, errors); + if (std == NULL) + goto error; PySys_SetObject("__stdin__", std); _PySys_SetObjectId(&PyId_stdin, std); Py_DECREF(std); /* Set sys.stdout */ fd = fileno(stdout); - if (!is_valid_fd(fd)) { - std = Py_None; - Py_INCREF(std); - } - else { - std = create_stdio(iomod, fd, 1, "", encoding, errors); - if (std == NULL) - goto error; - } /* if (fd < 0) */ + std = create_stdio(iomod, fd, 1, "", encoding, errors); + if (std == NULL) + goto error; PySys_SetObject("__stdout__", std); _PySys_SetObjectId(&PyId_stdout, std); Py_DECREF(std); @@ -1189,15 +1191,9 @@ initstdio(void) #if 1 /* Disable this if you have trouble debugging bootstrap stuff */ /* Set sys.stderr, replaces the preliminary stderr */ fd = fileno(stderr); - if (!is_valid_fd(fd)) { - std = Py_None; - Py_INCREF(std); - } - else { - std = create_stdio(iomod, fd, 1, "", encoding, "backslashreplace"); - if (std == NULL) - goto error; - } /* if (fd < 0) */ + std = create_stdio(iomod, fd, 1, "", encoding, "backslashreplace"); + if (std == NULL) + goto error; /* Same as hack above, pre-import stderr's codec to avoid recursion when import.c tries to write to stderr in verbose mode. 
*/ diff --git a/Python/pytime.c b/Python/pytime.c index 02a93749..7f65824b 100644 --- a/Python/pytime.c +++ b/Python/pytime.c @@ -305,17 +305,22 @@ _PyTime_AsNanosecondsObject(_PyTime_t t) } static _PyTime_t -_PyTime_Divide(_PyTime_t t, _PyTime_t k, _PyTime_round_t round) +_PyTime_Divide(const _PyTime_t t, const _PyTime_t k, + const _PyTime_round_t round) { assert(k > 1); if (round == _PyTime_ROUND_CEILING) { if (t >= 0) return (t + k - 1) / k; + else + return t / k; + } + else { + if (t >= 0) + return t / k; else return (t - (k - 1)) / k; } - else - return t / k; } _PyTime_t @@ -331,69 +336,92 @@ _PyTime_AsMicroseconds(_PyTime_t t, _PyTime_round_t round) } static int -_PyTime_AsTimeval_impl(_PyTime_t t, struct timeval *tv, _PyTime_round_t round, - int raise) +_PyTime_AsTimeval_impl(_PyTime_t t, _PyTime_t *p_secs, int *p_us, + _PyTime_round_t round) { _PyTime_t secs, ns; + int usec; int res = 0; secs = t / SEC_TO_NS; ns = t % SEC_TO_NS; - if (ns < 0) { - ns += SEC_TO_NS; - secs -= 1; - } -#ifdef MS_WINDOWS - /* On Windows, timeval.tv_sec is a long (32 bit), - whereas time_t can be 64-bit. */ - assert(sizeof(tv->tv_sec) == sizeof(long)); -#if SIZEOF_TIME_T > SIZEOF_LONG - if (secs > LONG_MAX) { - secs = LONG_MAX; - res = -1; + usec = (int)_PyTime_Divide(ns, US_TO_NS, round); + if (usec < 0) { + usec += SEC_TO_US; + if (secs != _PyTime_MIN) + secs -= 1; + else + res = -1; } - else if (secs < LONG_MIN) { - secs = LONG_MIN; - res = -1; + else if (usec >= SEC_TO_US) { + usec -= SEC_TO_US; + if (secs != _PyTime_MAX) + secs += 1; + else + res = -1; } -#endif + assert(0 <= usec && usec < SEC_TO_US); + + *p_secs = secs; + *p_us = usec; + + return res; +} + +static int +_PyTime_AsTimevalStruct_impl(_PyTime_t t, struct timeval *tv, + _PyTime_round_t round, int raise) +{ + _PyTime_t secs; + int us; + int res; + + res = _PyTime_AsTimeval_impl(t, &secs, &us, round); + +#ifdef MS_WINDOWS tv->tv_sec = (long)secs; #else - /* On OpenBSD 5.4, timeval.tv_sec is a long. - Example: long is 64-bit, whereas time_t is 32-bit. 
*/ tv->tv_sec = secs; - if ((_PyTime_t)tv->tv_sec != secs) - res = -1; #endif + tv->tv_usec = us; - if (round == _PyTime_ROUND_CEILING) - tv->tv_usec = (int)((ns + US_TO_NS - 1) / US_TO_NS); - else - tv->tv_usec = (int)(ns / US_TO_NS); - - if (tv->tv_usec >= SEC_TO_US) { - tv->tv_usec -= SEC_TO_US; - tv->tv_sec += 1; + if (res < 0 || (_PyTime_t)tv->tv_sec != secs) { + if (raise) + error_time_t_overflow(); + return -1; } - - if (res && raise) - _PyTime_overflow(); - - assert(0 <= tv->tv_usec && tv->tv_usec <= 999999); - return res; + return 0; } int _PyTime_AsTimeval(_PyTime_t t, struct timeval *tv, _PyTime_round_t round) { - return _PyTime_AsTimeval_impl(t, tv, round, 1); + return _PyTime_AsTimevalStruct_impl(t, tv, round, 1); } int _PyTime_AsTimeval_noraise(_PyTime_t t, struct timeval *tv, _PyTime_round_t round) { - return _PyTime_AsTimeval_impl(t, tv, round, 0); + return _PyTime_AsTimevalStruct_impl(t, tv, round, 0); +} + +int +_PyTime_AsTimevalTime_t(_PyTime_t t, time_t *p_secs, int *us, + _PyTime_round_t round) +{ + _PyTime_t secs; + int res; + + res = _PyTime_AsTimeval_impl(t, &secs, us, round); + + *p_secs = secs; + + if (res < 0 || (_PyTime_t)*p_secs != secs) { + error_time_t_overflow(); + return -1; + } + return 0; } #if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_KQUEUE) @@ -421,7 +449,7 @@ _PyTime_AsTimespec(_PyTime_t t, struct timespec *ts) #endif static int -pygettimeofday_new(_PyTime_t *tp, _Py_clock_info_t *info, int raise) +pygettimeofday(_PyTime_t *tp, _Py_clock_info_t *info, int raise) { #ifdef MS_WINDOWS FILETIME system_time; @@ -513,7 +541,7 @@ _PyTime_t _PyTime_GetSystemClock(void) { _PyTime_t t; - if (pygettimeofday_new(&t, NULL, 0) < 0) { + if (pygettimeofday(&t, NULL, 0) < 0) { /* should not happen, _PyTime_Init() checked the clock at startup */ assert(0); @@ -526,17 +554,13 @@ _PyTime_GetSystemClock(void) int _PyTime_GetSystemClockWithInfo(_PyTime_t *t, _Py_clock_info_t *info) { - return pygettimeofday_new(t, info, 1); + return pygettimeofday(t, info, 1); } static int -pymonotonic_new(_PyTime_t *tp, _Py_clock_info_t *info, int raise) +pymonotonic(_PyTime_t *tp, _Py_clock_info_t *info, int raise) { -#ifdef Py_DEBUG - static int last_set = 0; - static _PyTime_t last = 0; -#endif #if defined(MS_WINDOWS) ULONGLONG result; @@ -627,12 +651,6 @@ pymonotonic_new(_PyTime_t *tp, _Py_clock_info_t *info, int raise) } if (_PyTime_FromTimespec(tp, &ts, raise) < 0) return -1; -#endif -#ifdef Py_DEBUG - /* monotonic clock cannot go backward */ - assert(!last_set || last <= *tp); - last = *tp; - last_set = 1; #endif return 0; } @@ -641,7 +659,7 @@ _PyTime_t _PyTime_GetMonotonicClock(void) { _PyTime_t t; - if (pymonotonic_new(&t, NULL, 0) < 0) { + if (pymonotonic(&t, NULL, 0) < 0) { /* should not happen, _PyTime_Init() checked that monotonic clock at startup */ assert(0); @@ -655,7 +673,7 @@ _PyTime_GetMonotonicClock(void) int _PyTime_GetMonotonicClockWithInfo(_PyTime_t *tp, _Py_clock_info_t *info) { - return pymonotonic_new(tp, info, 1); + return pymonotonic(tp, info, 1); } int diff --git a/Python/random.c b/Python/random.c index ea09e84a..772bfef0 100644 --- a/Python/random.c +++ b/Python/random.c @@ -6,7 +6,9 @@ # ifdef HAVE_SYS_STAT_H # include # endif -# ifdef HAVE_GETRANDOM_SYSCALL +# ifdef HAVE_GETRANDOM +# include +# elif defined(HAVE_GETRANDOM_SYSCALL) # include # endif #endif @@ -70,7 +72,9 @@ win32_urandom(unsigned char *buffer, Py_ssize_t size, int raise) return 0; } -#elif HAVE_GETENTROPY +#elif defined(HAVE_GETENTROPY) && !defined(sun) +#define PY_GETENTROPY 1 + /* Fill 
buffer with size pseudo-random bytes generated by getentropy(). Return 0 on success, or raise an exception and return -1 on error. @@ -105,16 +109,21 @@ py_getentropy(unsigned char *buffer, Py_ssize_t size, int fatal) return 0; } -#else /* !HAVE_GETENTROPY */ +#else + +/* Issue #25003: Don' use getentropy() on Solaris (available since + * Solaris 11.3), it is blocking whereas os.urandom() should not block. */ +#if defined(HAVE_GETRANDOM) || defined(HAVE_GETRANDOM_SYSCALL) +#define PY_GETRANDOM 1 -#ifdef HAVE_GETRANDOM_SYSCALL static int py_getrandom(void *buffer, Py_ssize_t size, int raise) { - /* is getrandom() supported by the running kernel? - * need Linux kernel 3.17 or later */ + /* Is getrandom() supported by the running kernel? + * Need Linux kernel 3.17 or newer, or Solaris 11.3 or newer */ static int getrandom_works = 1; - /* Use /dev/urandom, block if the kernel has no entropy */ + /* Use non-blocking /dev/urandom device. On Linux at boot, the getrandom() + * syscall blocks until /dev/urandom is initialized with enough entropy. */ const int flags = 0; int n; @@ -124,7 +133,18 @@ py_getrandom(void *buffer, Py_ssize_t size, int raise) while (0 < size) { errno = 0; - /* Use syscall() because the libc doesn't expose getrandom() yet, see: +#ifdef HAVE_GETRANDOM + if (raise) { + Py_BEGIN_ALLOW_THREADS + n = getrandom(buffer, size, flags); + Py_END_ALLOW_THREADS + } + else { + n = getrandom(buffer, size, flags); + } +#else + /* On Linux, use the syscall() function because the GNU libc doesn't + * expose the Linux getrandom() syscall yet. See: * https://sourceware.org/bugzilla/show_bug.cgi?id=17252 */ if (raise) { Py_BEGIN_ALLOW_THREADS @@ -134,6 +154,7 @@ py_getrandom(void *buffer, Py_ssize_t size, int raise) else { n = syscall(SYS_getrandom, buffer, size, flags); } +#endif if (n < 0) { if (errno == ENOSYS) { @@ -182,7 +203,7 @@ dev_urandom_noraise(unsigned char *buffer, Py_ssize_t size) assert (0 < size); -#ifdef HAVE_GETRANDOM_SYSCALL +#ifdef PY_GETRANDOM if (py_getrandom(buffer, size, 0) == 1) return; /* getrandom() is not supported by the running kernel, fall back @@ -218,14 +239,14 @@ dev_urandom_python(char *buffer, Py_ssize_t size) int fd; Py_ssize_t n; struct _Py_stat_struct st; -#ifdef HAVE_GETRANDOM_SYSCALL +#ifdef PY_GETRANDOM int res; #endif if (size <= 0) return 0; -#ifdef HAVE_GETRANDOM_SYSCALL +#ifdef PY_GETRANDOM res = py_getrandom(buffer, size, 1); if (res < 0) return -1; @@ -304,7 +325,7 @@ dev_urandom_close(void) } } -#endif /* HAVE_GETENTROPY */ +#endif /* Fill buffer with pseudo-random bytes generated by a linear congruent generator (LCG): @@ -345,7 +366,7 @@ _PyOS_URandom(void *buffer, Py_ssize_t size) #ifdef MS_WINDOWS return win32_urandom((unsigned char *)buffer, size, 1); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) return py_getentropy(buffer, size, 0); #else return dev_urandom_python((char*)buffer, size); @@ -392,7 +413,7 @@ _PyRandom_Init(void) else { #ifdef MS_WINDOWS (void)win32_urandom(secret, secret_size, 0); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) (void)py_getentropy(secret, secret_size, 1); #else dev_urandom_noraise(secret, secret_size); @@ -408,7 +429,7 @@ _PyRandom_Fini(void) CryptReleaseContext(hCryptProv, 0); hCryptProv = 0; } -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) /* nothing to clean */ #else dev_urandom_close(); diff --git a/Python/sysmodule.c b/Python/sysmodule.c index f600baf1..334f5d0c 100644 --- a/Python/sysmodule.c +++ b/Python/sysmodule.c @@ -632,14 +632,37 @@ processor's time-stamp counter." 
 static PyObject *
 sys_setrecursionlimit(PyObject *self, PyObject *args)
 {
-    int new_limit;
+    int new_limit, mark;
+    PyThreadState *tstate;
+
     if (!PyArg_ParseTuple(args, "i:setrecursionlimit", &new_limit))
         return NULL;
-    if (new_limit <= 0) {
+
+    if (new_limit < 1) {
         PyErr_SetString(PyExc_ValueError,
-                        "recursion limit must be positive");
+                        "recursion limit must be greater or equal than 1");
         return NULL;
     }
+
+    /* Issue #25274: When the recursion depth hits the recursion limit in
+       _Py_CheckRecursiveCall(), the overflowed flag of the thread state is
+       set to 1 and a RecursionError is raised. The overflowed flag is reset
+       to 0 when the recursion depth goes below the low-water mark: see
+       Py_LeaveRecursiveCall().
+
+       Reject too low new limit if the current recursion depth is higher than
+       the new low-water mark. Otherwise it may not be possible anymore to
+       reset the overflowed flag to 0. */
+    mark = _Py_RecursionLimitLowerWaterMark(new_limit);
+    tstate = PyThreadState_GET();
+    if (tstate->recursion_depth >= mark) {
+        PyErr_Format(PyExc_RecursionError,
+                     "cannot set the recursion limit to %i at "
+                     "the recursion depth %i: the limit is too low",
+                     new_limit, tstate->recursion_depth);
+        return NULL;
+    }
+
     Py_SetRecursionLimit(new_limit);
     Py_INCREF(Py_None);
     return Py_None;
diff --git a/Python/thread.c b/Python/thread.c
index 44c071ea..63eeb1e1 100644
--- a/Python/thread.c
+++ b/Python/thread.c
@@ -31,7 +31,7 @@ threads.
 
    This is valid for HP-UX 11.23 running on an ia64 system. If needed, add
-   a check of __ia64 to verify that we're running on a ia64 system instead
+   a check of __ia64 to verify that we're running on an ia64 system instead
    of a pa-risc system.
 */
 
 #ifdef __hpux
diff --git a/README b/README
index d37b96b3..84aaef90 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-This is Python version 3.5.0
+This is Python version 3.5.1
 ============================
 
 Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
@@ -46,6 +46,34 @@ For example:
 (This will fail if you *also* built at the top-level directory. You should do
 a "make clean" at the toplevel first.)
 
+If you need an optimized version of Python, type "make profile-opt" in the
+top-level directory. This will rebuild the interpreter executable using
+Profile Guided Optimization (PGO). For more details, see the section below.
+
+
+Profile Guided Optimization
+---------------------------
+
+PGO takes advantage of recent versions of the GCC or Clang compilers.
+If run, the "profile-opt" rule performs several steps.
+
+First, the entire Python directory is cleaned of temporary files that
+may have resulted from a previous compilation.
+
+Then, an instrumented version of the interpreter is built, using suitable
+compiler flags for each flavour. Note that this is just an intermediate
+step, and the binary produced by it is not suitable for real-life
+workloads, as it has profiling instructions embedded inside.
+
+After this instrumented version of the interpreter is built, the Makefile
+will automatically run a training workload. This is necessary in order to
+profile the interpreter's execution. Note also that any output, both stdout
+and stderr, that may appear at this step is suppressed.
+
+Finally, the last step is to rebuild the interpreter, using the information
+collected in the previous one. The end result will be a Python binary that
+is optimized and suitable for distribution or production installation.
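+
+As a minimal sketch (assuming a GCC or Clang toolchain with profiling
+support; pick whatever configure options you normally use), the optimized
+build boils down to:
+
+    ./configure
+    make profile-opt
+
+The result can then be installed with "make install" or "make altinstall"
+as usual.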
+ What's New ---------- diff --git a/Tools/buildbot/test.bat b/Tools/buildbot/test.bat index 154dfa57..ff7d167e 100644 --- a/Tools/buildbot/test.bat +++ b/Tools/buildbot/test.bat @@ -1,15 +1,19 @@ -@rem Used by the buildbot "test" step. -@setlocal +@echo off +rem Used by the buildbot "test" step. +setlocal -@set here=%~dp0 -@set rt_opts=-q -d +set here=%~dp0 +set rt_opts=-q -d +set regrtest_args= :CheckOpts -@if '%1'=='-x64' (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts -@if '%1'=='-d' (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts -@if '%1'=='-O' (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts -@if '%1'=='-q' (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts -@if '%1'=='+d' (set rt_opts=%rt_opts:-d=%) & shift & goto CheckOpts -@if '%1'=='+q' (set rt_opts=%rt_opts:-q=%) & shift & goto CheckOpts +if "%1"=="-x64" (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts +if "%1"=="-d" (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts +if "%1"=="-O" (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts +if "%1"=="-q" (set rt_opts=%rt_opts% %1) & shift & goto CheckOpts +if "%1"=="+d" (set rt_opts=%rt_opts:-d=%) & shift & goto CheckOpts +if "%1"=="+q" (set rt_opts=%rt_opts:-q=%) & shift & goto CheckOpts +if NOT "%1"=="" (set regrtest_args=%regrtest_args% %1) & shift & goto CheckOpts -call "%here%..\..\PCbuild\rt.bat" %rt_opts% -uall -rwW -n --timeout=3600 %1 %2 %3 %4 %5 %6 %7 %8 %9 +echo on +call "%here%..\..\PCbuild\rt.bat" %rt_opts% -uall -rwW --timeout=3600 %regrtest_args% diff --git a/Tools/freeze/checkextensions_win32.py b/Tools/freeze/checkextensions_win32.py index ee446e75..c9cb576a 100644 --- a/Tools/freeze/checkextensions_win32.py +++ b/Tools/freeze/checkextensions_win32.py @@ -118,7 +118,7 @@ def get_extension_defn(moduleName, mapFileName, prefix): for exc in exclude: if exc in module.sourceFiles: - modules.sourceFiles.remove(exc) + module.sourceFiles.remove(exc) return module diff --git a/Tools/gdb/libpython.py b/Tools/gdb/libpython.py index 2ad22e2b..32341e77 100755 --- a/Tools/gdb/libpython.py +++ b/Tools/gdb/libpython.py @@ -91,7 +91,7 @@ class NullPyObjectPtr(RuntimeError): def safety_limit(val): - # Given a integer value from the process being debugged, limit it to some + # Given an integer value from the process being debugged, limit it to some # safety threshold so that arbitrary breakage within said process doesn't # break the gdb process too much (e.g. sizes of iterations, sizes of lists) return min(val, 1000) @@ -158,7 +158,7 @@ class TruncatedStringIO(object): class PyObjectPtr(object): """ - Class wrapping a gdb.Value that's a either a (PyObject*) within the + Class wrapping a gdb.Value that's either a (PyObject*) within the inferior process, or some subclass pointer e.g. (PyBytesObject*) There will be a subclass for every refined PyObject type that we care diff --git a/Tools/msi/README.txt b/Tools/msi/README.txt index 25dcf32c..7023b611 100644 --- a/Tools/msi/README.txt +++ b/Tools/msi/README.txt @@ -62,10 +62,14 @@ the initial download size by separating them into their own MSIs. Building the Installer ====================== +Before building the installer, download extra build dependencies using +Tools\msi\get_externals.bat. (Note that this is in addition to the +similarly named file in PCBuild.) 
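+
+For example, the dependencies can be fetched up front with (a sketch;
+get_externals.bat checks the files out with svn.exe, which must be on PATH):
+
+    Tools\msi\get_externals.bat
+
+The build.bat script described below also calls get_externals.bat itself,
+so running it separately is optional.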
+ For testing, the installer should be built with the Tools/msi/build.bat script: - build.bat [-x86] [-x64] [--doc] + build.bat [-x86] [-x64] [--doc] [--test-marker] [--pack] This script will build the required configurations of Python and generate an installer layout in PCBuild/(win32|amd64)/en-us. @@ -80,8 +84,13 @@ available, it will simply be excluded from the installer. Ensure also set %HTMLHELP% to the Html Help Compiler (hhc.exe), or put HHC on your PATH or in externals/. -If WiX is not found on your system, it will be automatically downloaded -and extracted to the externals/ directory. +Specify --test-marker to build an installer that works side-by-side with +an official Python release. All registry keys and install locations will +include an extra marker to avoid overwriting files. This marker is +currently an 'x' prefix, but may change at any time. + +Specify --pack to build an installer that does not require all MSIs to +be available alongside. This takes longer, but is easier to share. For an official release, the installer should be built with the @@ -175,6 +184,38 @@ The following properties may be passed when building these projects. When true, rebuilds all of the MSIs making up the layout. Defaults to true. +Uploading the Installer +======================= + +For official releases, the uploadrelease.bat script should be used. + +You will require PuTTY so that plink.exe and pscp.exe can be used, and your +SSH key can be activated in pageant.exe. PuTTY should be either on your path +or in %ProgramFiles(x86)%\PuTTY. + +To include signatures for each uploaded file, you will need gpg2.exe on your +path or have run get_externals.bat. You may also need to "gpg2.exe --import" +your key before running the upload script. + + uploadrelease.bat --host --user [--dry-run] [--no-gpg] + +The host is the URL to the server. This can be provided by the Release +Manager. You should be able to SSH to this address. + +The username is your own username, which you have permission to SSH into +the server containing downloads. + +Use --dry-run to display the generated upload commands without executing +them. Signatures for each file will be generated but not uploaded unless +--no-gpg is also passed. + +Use --no-gpg to suppress signature generation and upload. + +The default target directory (which appears in uploadrelease.proj) is +correct for official Python releases, but may be overridden with +--target for other purposes. This path should generally not include +any version specifier, as that will be added automatically. + Modifying the Installer ======================= @@ -298,9 +339,9 @@ based on whether the install is for all users of the machine or just for the user performing the installation. The default installation location when installing for all users is -"%ProgramFiles%\Python 3.X" for the 64-bit interpreter and -"%ProgramFiles(x86)%\Python 3.X" for the 32-bit interpreter. (Note that -the latter path is equivalent to "%ProgramFiles%\Python 3.X" when +"%ProgramFiles%\Python3X" for the 64-bit interpreter and +"%ProgramFiles(x86)%\Python3X-32" for the 32-bit interpreter. (Note that +the latter path is equivalent to "%ProgramFiles%\Python3X-32" when running a 32-bit version of Windows.) This location requires administrative privileges to install or later modify the installation. @@ -311,6 +352,8 @@ interpreter. Only the current user can access this location. This provides a suitable level of protection against malicious modification of Python's files. 
+(Default installation locations are set in Tools\msi\bundle\bundle.wxs.) + Within this install directory is the following approximate layout: .\python[w].exe The core executable files @@ -487,6 +530,6 @@ Removing Python will clean up all the files and registry keys that were created by the installer, as well as __pycache__ folders that are explicitly handled by the installer. Python packages installed later using a tool like pip will not be removed. Some components may be -installed by other installers (such as the MSVCRT) and these will not be -removed if another product has a dependency on them. +installed by other installers and these will not be removed if another +product has a dependency on them. diff --git a/Tools/msi/build.bat b/Tools/msi/build.bat index 008c9376..a61ace8a 100644 --- a/Tools/msi/build.bat +++ b/Tools/msi/build.bat @@ -6,29 +6,35 @@ set PCBUILD=%D%..\..\PCBuild\ set BUILDX86= set BUILDX64= set BUILDDOC= -set BUILDPX= +set BUILDTEST=--test-marker +set BUILDPACK= +set REBUILD= :CheckOpts if "%~1" EQU "-h" goto Help if "%~1" EQU "-x86" (set BUILDX86=1) && shift && goto CheckOpts if "%~1" EQU "-x64" (set BUILDX64=1) && shift && goto CheckOpts if "%~1" EQU "--doc" (set BUILDDOC=1) && shift && goto CheckOpts -if "%~1" EQU "--test-marker" (set BUILDPX=1) && shift && goto CheckOpts +if "%~1" EQU "--no-test-marker" (set BUILDTEST=) && shift && goto CheckOpts +if "%~1" EQU "--pack" (set BUILDPACK=1) && shift && goto CheckOpts +if "%~1" EQU "-r" (set REBUILD=-r) && shift && goto CheckOpts if not defined BUILDX86 if not defined BUILDX64 (set BUILDX86=1) && (set BUILDX64=1) +call "%D%get_externals.bat" + call "%PCBUILD%env.bat" x86 if defined BUILDX86 ( - call "%PCBUILD%build.bat" -d -e + call "%PCBUILD%build.bat" -d -e %REBUILD% %BUILDTEST% if errorlevel 1 goto :eof - call "%PCBUILD%build.bat" -e + call "%PCBUILD%build.bat" -e %REBUILD% %BUILDTEST% if errorlevel 1 goto :eof ) if defined BUILDX64 ( - call "%PCBUILD%build.bat" -p x64 -d -e + call "%PCBUILD%build.bat" -p x64 -d -e %REBUILD% %BUILDTEST% if errorlevel 1 goto :eof - call "%PCBUILD%build.bat" -p x64 -e + call "%PCBUILD%build.bat" -p x64 -e %REBUILD% %BUILDTEST% if errorlevel 1 goto :eof ) @@ -38,17 +44,21 @@ if defined BUILDDOC ( ) set BUILD_CMD="%D%bundle\snapshot.wixproj" -if defined BUILDPX ( +if defined BUILDTEST ( set BUILD_CMD=%BUILD_CMD% /p:UseTestMarker=true ) +if defined BUILDPACK ( + set BUILD_CMD=%BUILD_CMD% /p:Pack=true +) +if defined REBUILD ( + set BUILD_CMD=%BUILD_CMD% /t:Rebuild +) if defined BUILDX86 ( - "%PCBUILD%win32\python.exe" "%D%get_wix.py" msbuild %BUILD_CMD% if errorlevel 1 goto :eof ) if defined BUILDX64 ( - "%PCBUILD%amd64\python.exe" "%D%get_wix.py" msbuild /p:Platform=x64 %BUILD_CMD% if errorlevel 1 goto :eof ) @@ -56,9 +66,11 @@ if defined BUILDX64 ( exit /B 0 :Help -echo build.bat [-x86] [-x64] [--doc] [-h] [--test-marker] +echo build.bat [-x86] [-x64] [--doc] [-h] [--no-test-marker] [--pack] [-r] echo. echo -x86 Build x86 installers echo -x64 Build x64 installers echo --doc Build CHM documentation -echo --test-marker Build installers with 'x' markers +echo --no-test-marker Build without test markers +echo --pack Embed core MSIs into installer +echo -r Rebuild rather than incremental build diff --git a/Tools/msi/buildrelease.bat b/Tools/msi/buildrelease.bat index d22ba101..fc7cb9fd 100644 --- a/Tools/msi/buildrelease.bat +++ b/Tools/msi/buildrelease.bat @@ -4,25 +4,38 @@ rem This script is intended for building official releases of Python. 
rem To use it to build alternative releases, you should clone this file rem and modify the following three URIs. -rem -rem The first two will ensure that your release can be installed -rem alongside an official Python release, while the second specifies -rem the URL that will be used to download installation files. The -rem files available from this URL *will* conflict with your installer. -rem Trust me, you don't want them, even if it seems like a good idea. -set RELEASE_URI_X86=http://www.python.org/win32 -set RELEASE_URI_X64=http://www.python.org/amd64 -set DOWNLOAD_URL_BASE=https://www.python.org/ftp/python -set DOWNLOAD_URL= +rem These two will ensure that your release can be installed +rem alongside an official Python release, by modifying the GUIDs used +rem for all components. +rem +rem The following substitutions will be applied to the release URI: +rem Variable Description Example +rem {arch} architecture amd64, win32 +set RELEASE_URI=http://www.python.org/{arch} + +rem This is the URL that will be used to download installation files. +rem The files available from the default URL *will* conflict with your +rem installer. Trust me, you don't want them, even if it seems like a +rem good idea. +rem +rem The following substitutions will be applied to the download URL: +rem Variable Description Example +rem {version} version number 3.5.0 +rem {arch} architecture amd64, win32 +rem {releasename} release name a1, b2, rc3 (or blank for final) +rem {msi} MSI filename core.msi +set DOWNLOAD_URL=https://www.python.org/ftp/python/{version}/{arch}{releasename}/{msi} set D=%~dp0 set PCBUILD=%D%..\..\PCBuild\ +set EXTERNALS=%D%..\..\externals\windows-installer\ set BUILDX86= set BUILDX64= set TARGET=Rebuild set TESTTARGETDIR= +set PGO= :CheckOpts @@ -41,15 +54,21 @@ if "%1" EQU "-b" (set TARGET=Build) && shift && goto CheckOpts if "%1" EQU "--build" (set TARGET=Build) && shift && goto CheckOpts if "%1" EQU "-x86" (set BUILDX86=1) && shift && goto CheckOpts if "%1" EQU "-x64" (set BUILDX64=1) && shift && goto CheckOpts +if "%1" EQU "--pgo" (set PGO=%~2) && shift && shift && goto CheckOpts + +if "%1" NEQ "" echo Invalid option: "%1" && exit /B 1 if not defined BUILDX86 if not defined BUILDX64 (set BUILDX86=1) && (set BUILDX64=1) +call "%D%get_externals.bat" + :builddoc if "%SKIPBUILD%" EQU "1" goto skipdoc if "%SKIPDOC%" EQU "1" goto skipdoc if not defined PYTHON where py -q || echo Cannot find py on path and PYTHON is not set. && exit /B 1 if not defined SPHINXBUILD where sphinx-build -q || echo Cannot find sphinx-build on path and SPHINXBUILD is not set. 
&& exit /B 1 + call "%D%..\..\doc\make.bat" htmlhelp if errorlevel 1 goto :eof :skipdoc @@ -58,7 +77,7 @@ where hg /q || echo Cannot find Mercurial on PATH && exit /B 1 where dlltool /q && goto skipdlltoolsearch set _DLLTOOL_PATH= -where /R "%D%..\..\externals" dlltool > "%TEMP%\dlltool.loc" 2> nul && set /P _DLLTOOL_PATH= < "%TEMP%\dlltool.loc" & del "%TEMP%\dlltool.loc" +where /R "%EXTERNALS%\" dlltool > "%TEMP%\dlltool.loc" 2> nul && set /P _DLLTOOL_PATH= < "%TEMP%\dlltool.loc" & del "%TEMP%\dlltool.loc" if not exist "%_DLLTOOL_PATH%" echo Cannot find binutils on PATH or in external && exit /B 1 for %%f in (%_DLLTOOL_PATH%) do set PATH=%PATH%;%%~dpf set _DLLTOOL_PATH= @@ -70,7 +89,7 @@ if defined BUILDX86 ( ) if defined BUILDX64 ( - call :build x64 + call :build x64 "%PGO%" if errorlevel 1 exit /B ) @@ -90,14 +109,19 @@ if "%1" EQU "x86" ( set BUILD_PLAT=Win32 set OUTDIR_PLAT=win32 set OBJDIR_PLAT=x86 - set RELEASE_URI=%RELEASE_URI_X86% -) ELSE ( - call "%PCBUILD%env.bat" x86_amd64 +) else if "%~2" NEQ "" ( + call "%PCBUILD%env.bat" amd64 + set PGO=%~2 + set BUILD=%PCBUILD%amd64-pgo\ + set BUILD_PLAT=x64 + set OUTDIR_PLAT=amd64 + set OBJDIR_PLAT=x64 +) else ( + call "%PCBUILD%env.bat" amd64 set BUILD=%PCBUILD%amd64\ set BUILD_PLAT=x64 set OUTDIR_PLAT=amd64 set OBJDIR_PLAT=x64 - set RELEASE_URI=%RELEASE_URI_X64% ) if exist "%BUILD%en-us" ( @@ -119,17 +143,39 @@ if not "%CERTNAME%" EQU "" ( ) if not "%SKIPBUILD%" EQU "1" ( - call "%PCBUILD%build.bat" -e -p %BUILD_PLAT% -d -t %TARGET% %CERTOPTS% - if errorlevel 1 exit /B - call "%PCBUILD%build.bat" -e -p %BUILD_PLAT% -t %TARGET% %CERTOPTS% - if errorlevel 1 exit /B + @call "%PCBUILD%build.bat" -e -p %BUILD_PLAT% -d -t %TARGET% %CERTOPTS% + @if errorlevel 1 exit /B @rem build.bat turns echo back on, so we disable it again @echo off + + if "%PGO%" EQU "" ( + @call "%PCBUILD%build.bat" -e -p %BUILD_PLAT% -t %TARGET% %CERTOPTS% + ) else ( + @call "%PCBUILD%build.bat" -e -p %BUILD_PLAT% -c PGInstrument -t %TARGET% %CERTOPTS% + @if errorlevel 1 exit /B + + @del "%BUILD%*.pgc" + if "%PGO%" EQU "default" ( + "%BUILD%python.exe" -m test -q --pgo + ) else if "%PGO%" EQU "default2" ( + "%BUILD%python.exe" -m test -r -q --pgo + "%BUILD%python.exe" -m test -r -q --pgo + ) else if "%PGO%" EQU "default10" ( + for /L %%i in (0, 1, 9) do "%BUILD%python.exe" -m test -q -r --pgo + ) else if "%PGO%" EQU "pybench" ( + "%BUILD%python.exe" "%PCBUILD%..\Tools\pybench\pybench.py" + ) else ( + "%BUILD%python.exe" %PGO% + ) + + @call "%PCBUILD%build.bat" -e -p %BUILD_PLAT% -c PGUpdate -t %TARGET% %CERTOPTS% + ) + @if errorlevel 1 exit /B + @echo off ) -"%BUILD%python.exe" "%D%get_wix.py" - set BUILDOPTS=/p:Platform=%1 /p:BuildForRelease=true /p:DownloadUrl=%DOWNLOAD_URL% /p:DownloadUrlBase=%DOWNLOAD_URL_BASE% /p:ReleaseUri=%RELEASE_URI% +if "%PGO%" NEQ "" set BUILDOPTS=%BUILDOPTS% /p:PGOBuildPath=%BUILD% msbuild "%D%bundle\releaselocal.wixproj" /t:Rebuild %BUILDOPTS% %CERTOPTS% /p:RebuildAll=true if errorlevel 1 exit /B msbuild "%D%bundle\releaseweb.wixproj" /t:Rebuild %BUILDOPTS% %CERTOPTS% /p:RebuildAll=false @@ -149,7 +195,8 @@ exit /B 0 :Help echo buildrelease.bat [--out DIR] [-x86] [-x64] [--certificate CERTNAME] [--build] [--skip-build] -echo [--skip-doc] [--download DOWNLOAD URL] [--test TARGETDIR] [-h] +echo [--pgo COMMAND] [--skip-doc] [--download DOWNLOAD URL] [--test TARGETDIR] +echo [-h] echo. 
echo --out (-o) Specify an additional output directory for installers echo -x86 Build x86 installers @@ -157,10 +204,25 @@ echo -x64 Build x64 installers echo --build (-b) Incrementally build Python rather than rebuilding echo --skip-build (-B) Do not build Python (just do the installers) echo --skip-doc (-D) Do not build documentation -echo --download Specify the full download URL for MSIs (should include {2}) +echo --pgo Build x64 installers using PGO +echo --download Specify the full download URL for MSIs echo --test Specify the test directory to run the installer tests echo -h Display this help information echo. echo If no architecture is specified, all architectures will be built. echo If --test is not specified, the installer tests are not run. -echo. \ No newline at end of file +echo. +echo For the --pgo option, any Python command line can be used as well as the +echo following shortcuts: +echo Shortcut Description +echo default Test suite with --pgo +echo default2 2x test suite with --pgo and randomized test order +echo default10 10x test suite with --pgo and randomized test order +echo pybench pybench script +echo. +echo The following substitutions will be applied to the download URL: +echo Variable Description Example +echo {version} version number 3.5.0 +echo {arch} architecture amd64, win32 +echo {releasename} release name a1, b2, rc3 (or blank for final) +echo {msi} MSI filename core.msi diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm index d919bea7..8ff4c3b9 100644 --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -1,136 +1,136 @@ - #(loc.Caption) - Segoe UI - Segoe UI - Segoe UI - Segoe UI - Segoe UI - Segoe UI + #(loc.Caption) + Segoe UI + Segoe UI + Segoe UI + Segoe UI + Segoe UI + Segoe UI - #(loc.HelpHeader) - + #(loc.HelpHeader) + #(loc.HelpText) - + - #(loc.InstallHeader) - + #(loc.InstallHeader) + #(loc.InstallMessage) - - + + - #(loc.ShortPrependPathLabel) - #(loc.ShortInstallLauncherAllUsersLabel) + #(loc.ShortInstallLauncherAllUsersLabel) + #(loc.ShortPrependPathLabel) - + - #(loc.InstallUpgradeHeader) - + #(loc.InstallUpgradeHeader) + #(loc.InstallUpgradeMessage) - - + + - + - #(loc.InstallHeader) - + #(loc.InstallHeader) + - + - + - #(loc.Custom1Header) - + #(loc.Custom1Header) + - #(loc.Include_docLabel) - #(loc.Include_docHelpLabel) + #(loc.Include_docLabel) + #(loc.Include_docHelpLabel) - #(loc.Include_pipLabel) - #(loc.Include_pipHelpLabel) + #(loc.Include_pipLabel) + #(loc.Include_pipHelpLabel) - #(loc.Include_tcltkLabel) - #(loc.Include_tcltkHelpLabel) + #(loc.Include_tcltkLabel) + #(loc.Include_tcltkHelpLabel) - #(loc.Include_testLabel) - #(loc.Include_testHelpLabel) + #(loc.Include_testLabel) + #(loc.Include_testHelpLabel) - #(loc.Include_launcherLabel) - #(loc.InstallLauncherAllUsersLabel) - #(loc.Include_launcherHelpLabel) + #(loc.Include_launcherLabel) + #(loc.InstallLauncherAllUsersLabel) + #(loc.Include_launcherHelpLabel) - - - + + + - #(loc.Custom2Header) - + #(loc.Custom2Header) + - #(loc.InstallAllUsersLabel) - #(loc.AssociateFilesLabel) - #(loc.ShortcutsLabel) - #(loc.PrependPathLabel) - #(loc.PrecompileLabel) - #(loc.Include_symbolsLabel) - #(loc.Include_debugLabel) - - #(loc.CustomLocationLabel) - - - #(loc.CustomLocationHelpLabel) - - - - + #(loc.InstallAllUsersLabel) + #(loc.AssociateFilesLabel) + #(loc.ShortcutsLabel) + #(loc.PrependPathLabel) + #(loc.PrecompileLabel) + #(loc.Include_symbolsLabel) + #(loc.Include_debugLabel) + + #(loc.CustomLocationLabel) + + + #(loc.CustomLocationHelpLabel) + 
+ + + - #(loc.ProgressHeader) - + #(loc.ProgressHeader) + - #(loc.ProgressLabel) - #(loc.OverallProgressPackageText) - - + #(loc.ProgressLabel) + #(loc.OverallProgressPackageText) + + - #(loc.ModifyHeader) - + #(loc.ModifyHeader) + - - - + + + - + - #(loc.SuccessHeader) - + #(loc.SuccessHeader) + #(loc.SuccessRestartText) - - + + - #(loc.FailureHeader) - + #(loc.FailureHeader) + #(loc.FailureHyperlinkLogText) - + #(loc.FailureRestartText) - - + + \ No newline at end of file diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl index 7af907e0..1f3e77c5 100644 --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -42,7 +42,7 @@ Continue? [WixBundleName] <a href="#">license terms</a>. I &agree to the license terms and conditions &Install Now - [DefaultJustForMeTargetDir] + [TargetDir] Includes IDLE, pip and documentation Creates shortcuts and file associations @@ -52,7 +52,7 @@ Creates shortcuts and file associations Use settings preselected by your administrator [SimpleInstallDescription] - &Upgrade Now + Up&grade Now [TargetDir] Replaces your existing installation without changing settings. @@ -86,8 +86,8 @@ Select Customize to review current options. for &all users (requires elevation) Install &launcher for all users (recommended) &Precompile standard library - Install debugging &symbols - Install debu&g binaries (requires VS 2015 or later) + Download debugging &symbols + Download debu&g binaries (requires VS 2015 or later) [ActionLikeInstallation] Progress [ActionLikeInstalling]: @@ -120,4 +120,14 @@ Feel free to email <a href="mailto:python-list@python.org">python-list@pyt You must restart your computer to complete the rollback of the software. &Restart Unable to install [WixBundleName] due to an existing install. Use Programs and Features to modify, repair or remove [WixBundleName]. + + Windows 7 Service Pack 1 and all applicable updates are required to install [WixBundleName]. + +Please <a href="https://www.bing.com/search?q=how%20to%20install%20windows%207%20service%20pack%201">update your machine</a> and then restart the installation. + Windows Vista Service Pack 2 and all applicable updates are required to install [WixBundleName]. + +Please <a href="https://www.bing.com/search?q=how%20to%20install%20windows%20vista%20service%20pack%202">update your machine</a> and then restart the installation. + Windows Vista or later is required to install and use [WixBundleName]. + +Visit <a href="https://www.python.org/">python.org</a> to download Python 3.4. diff --git a/Tools/msi/bundle/SideBar.png b/Tools/msi/bundle/SideBar.png index 9c18fff3..a23ce5e1 100644 Binary files a/Tools/msi/bundle/SideBar.png and b/Tools/msi/bundle/SideBar.png differ diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp index 35ed2fe7..9d94e3b4 100644 --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -293,28 +293,8 @@ class PythonBootstrapperApplication : public CBalBaseBootstrapperApplication { hr = _engine->SetVariableNumeric(L"CompileAll", installAllUsers); ExitOnFailure(hr, L"Failed to update CompileAll"); - hr = BalGetStringVariable(L"TargetDir", &targetDir); - if (FAILED(hr) || !targetDir || !targetDir[0]) { - ReleaseStr(targetDir); - targetDir = nullptr; - - hr = BalGetStringVariable( - installAllUsers ? 
L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", - &defaultDir - ); - BalExitOnFailure(hr, "Failed to get the default install directory"); - - if (!defaultDir || !defaultDir[0]) { - BalLogError(E_INVALIDARG, "Default install directory is blank"); - } - - hr = BalFormatString(defaultDir, &targetDir); - BalExitOnFailure1(hr, "Failed to format '%ls'", defaultDir); - - hr = _engine->SetVariableString(L"TargetDir", targetDir); - BalExitOnFailure(hr, "Failed to set install target directory"); - } - ReleaseStr(targetDir); + hr = EnsureTargetDir(); + ExitOnFailure(hr, L"Failed to set TargetDir"); OnPlan(BOOTSTRAPPER_ACTION_INSTALL); break; @@ -323,6 +303,8 @@ class PythonBootstrapperApplication : public CBalBaseBootstrapperApplication { SavePageSettings(); if (_modifying) { GoToPage(PAGE_MODIFY); + } else if (_upgrading) { + GoToPage(PAGE_UPGRADE); } else { GoToPage(PAGE_INSTALL); } @@ -685,6 +667,15 @@ public: // IBootstrapperApplication if (hr == S_FALSE) { hr = LoadLauncherStateFromKey(_engine, HKEY_LOCAL_MACHINE); } + if (FAILED(hr)) { + BalLog( + BOOTSTRAPPER_LOG_LEVEL_ERROR, + "Failed to load launcher state: error code 0x%08X", + hr + ); + } + + LoadOptionalFeatureStates(_engine); } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { if (_command.action == BOOTSTRAPPER_ACTION_INSTALL) { LOC_STRING *pLocString = nullptr; @@ -1227,6 +1218,8 @@ private: hr = pThis->CreateMainWindow(); BalExitOnFailure(hr, "Failed to create main window."); + pThis->ValidateOperatingSystem(); + if (FAILED(pThis->_hrFinal)) { pThis->SetState(PYBA_STATE_FAILED, hr); ::PostMessageW(pThis->_hWnd, WM_PYBA_SHOW_FAILURE, 0, 0); @@ -2524,6 +2517,7 @@ private: case BOOTSTRAPPER_ACTION_INSTALL: if (_upgradingOldVersion) { _installPage = PAGE_UPGRADE; + _upgrading = TRUE; } else if (SUCCEEDED(BalGetNumericVariable(L"SimpleInstall", &simple)) && simple) { _installPage = PAGE_SIMPLE_INSTALL; } else { @@ -2564,7 +2558,14 @@ private: BOOL WillElevate() { static BAL_CONDITION WILL_ELEVATE_CONDITION = { - L"not WixBundleElevated and (InstallAllUsers or (InstallLauncherAllUsers and Include_launcher))", + L"not WixBundleElevated and (" + /*Elevate when installing for all users*/ + L"InstallAllUsers or" + /*Elevate when installing the launcher for all users and it was not detected*/ + L"(InstallLauncherAllUsers and Include_launcher and not DetectedLauncher) or" + /*Elevate when the launcher was installed for all users and it is being removed*/ + L"(DetectedLauncher and DetectedLauncherAllUsers and not Include_launcher)" + L")", L"" }; BOOL result; @@ -2892,6 +2893,10 @@ private: pEngine->SetVariableNumeric(L"Include_launcher", 0); } else if (res == ERROR_SUCCESS) { pEngine->SetVariableNumeric(L"Include_launcher", 1); + pEngine->SetVariableNumeric(L"DetectedLauncher", 1); + pEngine->SetVariableNumeric(L"InstallLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); + pEngine->SetVariableNumeric(L"DetectedLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 
1 : 0); + pEngine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); } res = RegQueryValueExW(hKey, L"AssociateFiles", nullptr, nullptr, nullptr, nullptr); @@ -2962,6 +2967,69 @@ private: return; } + HRESULT EnsureTargetDir() { + LONGLONG installAllUsers; + LPWSTR targetDir = nullptr, defaultDir = nullptr; + HRESULT hr = BalGetStringVariable(L"TargetDir", &targetDir); + if (FAILED(hr) || !targetDir || !targetDir[0]) { + ReleaseStr(targetDir); + targetDir = nullptr; + + hr = BalGetNumericVariable(L"InstallAllUsers", &installAllUsers); + ExitOnFailure(hr, L"Failed to get install scope"); + + hr = BalGetStringVariable( + installAllUsers ? L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", + &defaultDir + ); + BalExitOnFailure(hr, "Failed to get the default install directory"); + + if (!defaultDir || !defaultDir[0]) { + BalLogError(E_INVALIDARG, "Default install directory is blank"); + } + + hr = BalFormatString(defaultDir, &targetDir); + BalExitOnFailure1(hr, "Failed to format '%ls'", defaultDir); + + hr = _engine->SetVariableString(L"TargetDir", targetDir); + BalExitOnFailure(hr, "Failed to set install target directory"); + } + LExit: + ReleaseStr(defaultDir); + ReleaseStr(targetDir); + return hr; + } + + void ValidateOperatingSystem() { + LOC_STRING *pLocString = nullptr; + + if (IsWindows7SP1OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Target OS is Windows 7 SP1 or later"); + return; + } else if (IsWindows7OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows 7 RTM"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Service Pack 1 is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureWin7MissingSP1)", &pLocString); + } else if (IsWindowsVistaSP2OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Target OS is Windows Vista SP2"); + return; + } else if (IsWindowsVistaOrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows Vista RTM or SP1"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Service Pack 2 is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureVistaMissingSP2)", &pLocString); + } else { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows XP or earlier"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Windows Vista SP2 or later is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureXPOrEarlier)", &pLocString); + } + + if (pLocString && pLocString->wzText) { + BalFormatString(pLocString->wzText, &_failedMessage); + } + + _hrFinal = E_WIXSTDBA_CONDITION_FAILED; + } + public: // // Constructor - initialize member variables. 
@@ -3029,6 +3097,7 @@ public: _suppressDowngradeFailure = FALSE; _suppressRepair = FALSE; _modifying = FALSE; + _upgrading = FALSE; _overridableVariables = nullptr; _taskbarList = nullptr; @@ -3045,7 +3114,7 @@ public: _hBAFModule = nullptr; _baFunction = nullptr; - LoadOptionalFeatureStates(pEngine); + EnsureTargetDir(); } @@ -3113,6 +3182,7 @@ private: BOOL _suppressDowngradeFailure; BOOL _suppressRepair; BOOL _modifying; + BOOL _upgrading; int _crtInstalledToken; diff --git a/Tools/msi/bundle/bootstrap/pch.h b/Tools/msi/bundle/bootstrap/pch.h index 0956413d..6a66fa5a 100644 --- a/Tools/msi/bundle/bootstrap/pch.h +++ b/Tools/msi/bundle/bootstrap/pch.h @@ -23,6 +23,7 @@ #include #include #include +#include #include "dutil.h" #include "memutil.h" diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets index b77646be..aeeff3b7 100644 --- a/Tools/msi/bundle/bundle.targets +++ b/Tools/msi/bundle/bundle.targets @@ -16,8 +16,9 @@ $(OutputPath)en-us\ $(OutputPath) - $(DownloadUrlBase.TrimEnd(`/`))/$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)/$(ArchName)$(ReleaseLevelName)/ - $(DefineConstants);DownloadUrl=$(DownloadUrl){2} + + $(DownloadUrlBase.TrimEnd(`/`))/{version}/{arch}{releasename}/{msi} + $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseLevelName)`).Replace(`{msi}`, `{2}`)) $(DefineConstants);DownloadUrl={2} @@ -87,8 +88,12 @@ - - + diff --git a/Tools/msi/bundle/bundle.wxl b/Tools/msi/bundle/bundle.wxl index 684e0dac..d7a65c48 100644 --- a/Tools/msi/bundle/bundle.wxl +++ b/Tools/msi/bundle/bundle.wxl @@ -2,4 +2,6 @@ C Runtime Update (KB2999226) Precompiling standard library + Precompiling standard library (-O) + Precompiling standard library (-OO) diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs index cc62c621..978efc0a 100644 --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -22,20 +22,26 @@ + + + + + + - + - + - - - + + + + + + \ No newline at end of file diff --git a/Tools/msi/bundle/packagegroups/pip.wxs b/Tools/msi/bundle/packagegroups/pip.wxs new file mode 100644 index 00000000..201a6c44 --- /dev/null +++ b/Tools/msi/bundle/packagegroups/pip.wxs @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Tools/msi/bundle/packagegroups/postinstall.wxs b/Tools/msi/bundle/packagegroups/postinstall.wxs index 7b0f3fc8..11ab6739 100644 --- a/Tools/msi/bundle/packagegroups/postinstall.wxs +++ b/Tools/msi/bundle/packagegroups/postinstall.wxs @@ -2,25 +2,6 @@ - - - - - - - - - - + + + + + + + + $(DefineConstants);CompressMSI=no; + + + $(DefineConstants);CompressMSI=yes; + $(DefineConstants); - CompressMSI=no; CompressPDB=no; CompressMSI_D=no; diff --git a/Tools/msi/common.wxs b/Tools/msi/common.wxs index 9f96bd3f..b9078720 100644 --- a/Tools/msi/common.wxs +++ b/Tools/msi/common.wxs @@ -1,7 +1,7 @@ - + @@ -22,17 +22,19 @@ + + Installed OR NOT MISSING_CORE + Installed OR NOT DOWNGRADE - Installed OR NOT MISSING_CORE Installed OR TARGETDIR OR Suppress_TARGETDIR_Check - UPGRADE + UPGRADE @@ -103,7 +105,7 @@ - + diff --git a/Tools/msi/core/core_d.wxs b/Tools/msi/core/core_d.wxs index bb35f994..07e03976 100644 --- a/Tools/msi/core/core_d.wxs +++ b/Tools/msi/core/core_d.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/core/core_pdb.wxs b/Tools/msi/core/core_pdb.wxs index ba72d956..c2c31789 100644 --- a/Tools/msi/core/core_pdb.wxs +++ 
b/Tools/msi/core/core_pdb.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/dev/dev_d.wxs b/Tools/msi/dev/dev_d.wxs index c3cb2ea8..c467aac5 100644 --- a/Tools/msi/dev/dev_d.wxs +++ b/Tools/msi/dev/dev_d.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/doc/doc.wxs b/Tools/msi/doc/doc.wxs index bbe30a13..8dd0e21a 100644 --- a/Tools/msi/doc/doc.wxs +++ b/Tools/msi/doc/doc.wxs @@ -22,7 +22,8 @@ + Description="!(loc.ShortcutDescription)" + WorkingDirectory="InstallDirectory" /> diff --git a/Tools/msi/exe/exe.wxs b/Tools/msi/exe/exe.wxs index dcbf646d..154cee5c 100644 --- a/Tools/msi/exe/exe.wxs +++ b/Tools/msi/exe/exe.wxs @@ -20,7 +20,8 @@ + Description="!(loc.ShortcutDescription)" + WorkingDirectory="InstallDirectory" /> diff --git a/Tools/msi/exe/exe_d.wxs b/Tools/msi/exe/exe_d.wxs index abcb0126..eedb6bb6 100644 --- a/Tools/msi/exe/exe_d.wxs +++ b/Tools/msi/exe/exe_d.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/exe/exe_pdb.wxs b/Tools/msi/exe/exe_pdb.wxs index 5129ec00..f25094f8 100644 --- a/Tools/msi/exe/exe_pdb.wxs +++ b/Tools/msi/exe/exe_pdb.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/get_externals.bat b/Tools/msi/get_externals.bat new file mode 100644 index 00000000..4ead75e7 --- /dev/null +++ b/Tools/msi/get_externals.bat @@ -0,0 +1,27 @@ +@echo off +setlocal +rem Simple script to fetch source for external tools + +where /Q svn +if ERRORLEVEL 1 ( + echo.svn.exe must be on your PATH to get external tools. + echo.Try TortoiseSVN (http://tortoisesvn.net/^) and be sure to check the + echo.command line tools option. + popd + exit /b 1 +) + +if not exist "%~dp0..\..\externals" mkdir "%~dp0..\..\externals" +pushd "%~dp0..\..\externals" + +if "%SVNROOT%"=="" set SVNROOT=http://svn.python.org/projects/external/ + +if not exist "windows-installer\.svn" ( + echo.Checking out installer dependencies to %CD%\windows-installer + svn co %SVNROOT%windows-installer +) else ( + echo.Updating installer dependencies in %CD%\windows-installer + svn up windows-installer +) + +popd diff --git a/Tools/msi/get_wix.py b/Tools/msi/get_wix.py deleted file mode 100644 index db141567..00000000 --- a/Tools/msi/get_wix.py +++ /dev/null @@ -1,49 +0,0 @@ -''' -Downloads and extracts WiX to a local directory -''' - -__author__ = 'Steve Dower ' - -import io -import os -import sys - -from pathlib import Path -from subprocess import Popen -from zipfile import ZipFile - -EXTERNALS_DIR = None -for p in (Path.cwd() / __file__).parents: - if any(p.glob("PCBuild/*.vcxproj")): - EXTERNALS_DIR = p / "externals" - break - -if not EXTERNALS_DIR: - print("Cannot find project root") - sys.exit(1) - -WIX_BINARIES_ZIP = 'http://wixtoolset.org/downloads/v3.10.0.1823/wix310-binaries.zip' -TARGET_BIN_ZIP = EXTERNALS_DIR / "wix.zip" -TARGET_BIN_DIR = EXTERNALS_DIR / "wix" - -POWERSHELL_COMMAND = "[IO.File]::WriteAllBytes('{}', (Invoke-WebRequest {} -UseBasicParsing).Content)" - -if __name__ == '__main__': - if TARGET_BIN_DIR.exists() and any(TARGET_BIN_DIR.glob("*")): - print('WiX is already installed') - sys.exit(0) - - try: - TARGET_BIN_DIR.mkdir() - except FileExistsError: - pass - - print('Downloading WiX to', TARGET_BIN_ZIP) - p = Popen(["powershell.exe", "-Command", POWERSHELL_COMMAND.format(TARGET_BIN_ZIP, WIX_BINARIES_ZIP)]) - p.wait() - print('Extracting WiX to', TARGET_BIN_DIR) - with ZipFile(str(TARGET_BIN_ZIP)) as z: - z.extractall(str(TARGET_BIN_DIR)) - TARGET_BIN_ZIP.unlink() - - print('Extracted WiX') diff --git a/Tools/msi/launcher/launcher.wixproj b/Tools/msi/launcher/launcher.wixproj index 73f26a8e..a0f1d574 100644 --- 
a/Tools/msi/launcher/launcher.wixproj +++ b/Tools/msi/launcher/launcher.wixproj @@ -5,6 +5,7 @@ 2.0 launcher Package + SkipMissingCore=1;$(DefineConstants) diff --git a/Tools/msi/launcher/launcher.wxs b/Tools/msi/launcher/launcher.wxs index b20cff85..718b666a 100644 --- a/Tools/msi/launcher/launcher.wxs +++ b/Tools/msi/launcher/launcher.wxs @@ -26,6 +26,13 @@ NOT Installed AND NOT ALLUSERS=1 NOT Installed AND ALLUSERS=1 + + UPGRADE or REMOVE_OLD_LAUNCHER + + + + + diff --git a/Tools/msi/launcher/launcher_files.wxs b/Tools/msi/launcher/launcher_files.wxs index 7148258a..589dee56 100644 --- a/Tools/msi/launcher/launcher_files.wxs +++ b/Tools/msi/launcher/launcher_files.wxs @@ -3,22 +3,22 @@ - + - + NOT ALLUSERS=1 - + ALLUSERS=1 - + diff --git a/Tools/msi/launcher/launcher_reg.wxs b/Tools/msi/launcher/launcher_reg.wxs index d00f442f..bb42255b 100644 --- a/Tools/msi/launcher/launcher_reg.wxs +++ b/Tools/msi/launcher/launcher_reg.wxs @@ -3,43 +3,43 @@ - + - - + + - + - - + + - + - - + + - + - + - - + + - + - - + + - + diff --git a/Tools/msi/lib/lib_d.wxs b/Tools/msi/lib/lib_d.wxs index 5a5cf70c..8a8a530d 100644 --- a/Tools/msi/lib/lib_d.wxs +++ b/Tools/msi/lib/lib_d.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/lib/lib_pdb.wxs b/Tools/msi/lib/lib_pdb.wxs index a2be0c9c..8839e8a4 100644 --- a/Tools/msi/lib/lib_pdb.wxs +++ b/Tools/msi/lib/lib_pdb.wxs @@ -1,6 +1,6 @@  - + diff --git a/Tools/msi/make_zip.proj b/Tools/msi/make_zip.proj index 766fe859..d2e031f6 100644 --- a/Tools/msi/make_zip.proj +++ b/Tools/msi/make_zip.proj @@ -14,6 +14,7 @@ python-$(PythonVersion)-embed-$(ArchName) .zip $(OutputPath)\en-us\$(TargetName)$(TargetExt) + rmdir /q/s "$(IntermediateOutputPath)\zip_$(ArchName)" "$(PythonExe)" "$(MSBuildThisFileDirectory)\make_zip.py" $(Arguments) -e -o "$(TargetPath)" -t "$(IntermediateOutputPath)\zip_$(ArchName)" -a $(ArchName) set DOC_FILENAME=python$(PythonVersion).chm @@ -23,6 +24,7 @@ set VCREDIST_PATH=$(VS140COMNTOOLS)\..\..\VC\redist\$(Platform)\Microsoft.VC140. 
diff --git a/Tools/msi/make_zip.py b/Tools/msi/make_zip.py
index 09ee4916..96fdad21 100644
--- a/Tools/msi/make_zip.py
+++ b/Tools/msi/make_zip.py
@@ -3,6 +3,7 @@ import py_compile
 import re
 import sys
 import shutil
+import stat
 import os
 import tempfile
@@ -14,6 +15,20 @@ TKTCL_RE = re.compile(r'^(_?tk|tcl).+\.(pyd|dll)', re.IGNORECASE)
 DEBUG_RE = re.compile(r'_d\.(pyd|dll|exe)$', re.IGNORECASE)
 PYTHON_DLL_RE = re.compile(r'python\d\d?\.dll$', re.IGNORECASE)
 
+EXCLUDE_FROM_LIBRARY = {
+    '__pycache__',
+    'ensurepip',
+    'idlelib',
+    'pydoc_data',
+    'site-packages',
+    'tkinter',
+    'turtledemo',
+}
+
+EXCLUDE_FILE_FROM_LIBRARY = {
+    'bdist_wininst.py',
+}
+
 def is_not_debug(p):
     if DEBUG_RE.search(p.name):
         return False
@@ -36,16 +51,21 @@ def is_not_debug_or_python(p):
 def include_in_lib(p):
     name = p.name.lower()
     if p.is_dir():
-        if name in {'__pycache__', 'ensurepip', 'idlelib', 'pydoc_data', 'tkinter', 'turtledemo'}:
+        if name in EXCLUDE_FROM_LIBRARY:
             return False
         if name.startswith('plat-'):
             return False
         if name == 'test' and p.parts[-2].lower() == 'lib':
             return False
+        if name in {'test', 'tests'} and p.parts[-3].lower() == 'lib':
+            return False
         return True
+    if name in EXCLUDE_FILE_FROM_LIBRARY:
+        return False
+
     suffix = p.suffix.lower()
-    return suffix not in {'.pyc', '.pyo'}
+    return suffix not in {'.pyc', '.pyo', '.exe'}
 
 def include_in_tools(p):
     if p.is_dir() and p.name.lower() in {'scripts', 'i18n', 'pynche', 'demo', 'parser'}:
@@ -101,11 +121,16 @@ def copy_to_layout(target, rel_sources):
     else:
         for s, rel in rel_sources:
+            dest = target / rel
             try:
-                (target / rel).parent.mkdir(parents=True)
+                dest.parent.mkdir(parents=True)
             except FileExistsError:
                 pass
-            shutil.copy(str(s), str(target / rel))
+            if dest.is_file():
+                dest.chmod(stat.S_IWRITE)
+            shutil.copy(str(s), str(dest))
+            if dest.is_file():
+                dest.chmod(stat.S_IWRITE)
             count += 1
     return count
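The make_zip.py hunks above move the exclusion lists into EXCLUDE_FROM_LIBRARY and EXCLUDE_FILE_FROM_LIBRARY, drop .exe files and nested test/tests directories from the embedded Lib layout, and clear the read-only attribute around shutil.copy so files checked out read-only can be overwritten. A self-contained sketch of that copy step follows; the helper name and example paths are invented for illustration and are not part of the script:

# Illustrative sketch of the overwrite-safe copy used in make_zip.py:
# clear the read-only attribute on an existing destination before copying,
# then leave the fresh copy writable. Paths in the usage note are placeholders.
import shutil
import stat
from pathlib import Path

def copy_overwriting_readonly(src, dest):
    src, dest = Path(src), Path(dest)
    dest.parent.mkdir(parents=True, exist_ok=True)
    if dest.is_file():
        dest.chmod(stat.S_IWRITE)      # allow overwriting a read-only target
    shutil.copy(str(src), str(dest))
    if dest.is_file():
        dest.chmod(stat.S_IWRITE)      # keep the copied file writable as well
    return dest

# Example (hypothetical paths):
# copy_overwriting_readonly('PCbuild/amd64/python35.dll', 'layout/python35.dll')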
diff --git a/Tools/msi/msi.props b/Tools/msi/msi.props
index cc52c7ae..bbb8aeb4 100644
--- a/Tools/msi/msi.props
+++ b/Tools/msi/msi.props
@@ -11,6 +11,12 @@
     Release
     x86
     perUser
+ + + + + +
-    $(ComputerName)
+    $(ComputerName)/$(ArchName)/
     $(ReleaseUri)/
- -
@@ -49,7 +53,7 @@
     $(OutputPath)\
     $(OutputPath)
     true
-    $(ExternalsDir)\redist
+    $(ExternalsDir)\windows-installer\redist
     python$(MajorVersionNumber)$(MinorVersionNumber)$(MicroVersionNumber)$(ReleaseLevelName).chm
@@ -76,17 +80,13 @@ NextMajorVersionNumber=$(MajorVersionNumber).$([msbuild]::Add($(MinorVersionNumber), 1)).0.0;
     Bitness=$(Bitness);
     PyDebugExt=$(PyDebugExt);
+    PyArchExt=$(PyArchExt);
+    PyTestExt=$(PyTestExt);
     OptionalFeatureName=$(OutputName);
     $(DefineConstants);CRTRedist=$(CRTRedist);
-
-    $(DefineConstants);TestPrefix=;FileExtension=py;
-
-
-    $(DefineConstants);TestPrefix=x;FileExtension=px;
-
     $(DefineConstants);Suffix32=-32;
@@ -106,7 +106,10 @@  - + + + +
@@ -150,7 +153,7 @@
     <_Uuids>@(_Uuid->'("%(Identity)", "$(MajorVersionNumber).$(MinorVersionNumber)/%(Uri)")',',')
-    <_GenerateCommand>import uuid; print('\n'.join('{}={}'.format(i, uuid.uuid5(uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083'), '$(ReleaseUri)' + j)) for i,j in [$(_Uuids.Replace(`"`,`'`))]))
+    <_GenerateCommand>import uuid; print('\n'.join('{}={}'.format(i, uuid.uuid5(uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083'), '$(ReleaseUri.Replace(`{arch}`, `$(ArchName)`))' + j)) for i,j in [$(_Uuids.Replace(`"`,`'`))]))
-    <_Content>$([System.IO.File]::ReadAllText(%(WxlTemplate.FullPath)).Replace(`{{ShortVersion}}`, `$(MajorVersionNumber).$(MinorVersionNumber)`).Replace(`{{LongVersion}}`, `$(PythonVersion)`).Replace(`{{Bitness}}`, `$(Bitness)`))
+    <_Content>$([System.IO.File]::ReadAllText(%(WxlTemplate.FullPath)).Replace(`{{ShortVersion}}`, `$(MajorVersionNumber).$(MinorVersionNumber)$(PyTestExt)`).Replace(`{{LongVersion}}`, `$(PythonVersion)$(PyTestExt)`).Replace(`{{Bitness}}`, `$(Bitness)`))
     <_ExistingContent Condition="Exists('$(IntermediateOutputPath)%(WxlTemplate.Filename).wxl')">$([System.IO.File]::ReadAllText($(IntermediateOutputPath)%(WxlTemplate.Filename).wxl))
diff --git a/Tools/msi/tcltk/tcltk.wxs b/Tools/msi/tcltk/tcltk.wxs
index 0b83c5cb..eeae8e8b 100644
--- a/Tools/msi/tcltk/tcltk.wxs
+++ b/Tools/msi/tcltk/tcltk.wxs
@@ -49,7 +49,8 @@
               Description="!(loc.ShortcutDescription)"
               Target="[PYTHONW_EXE]"
               Arguments='"[#Lib_idlelib_idle.pyw]"'
-              Icon="idle.exe">
+              Icon="idle.exe"
+              WorkingDirectory="InstallDirectory">
+              Icon="idle.exe"
+              WorkingDirectory="InstallDirectory" />
diff --git a/Tools/msi/tcltk/tcltk_d.wxs b/Tools/msi/tcltk/tcltk_d.wxs
index 7f5048f0..01d0d243 100644
--- a/Tools/msi/tcltk/tcltk_d.wxs
+++ b/Tools/msi/tcltk/tcltk_d.wxs
@@ -1,6 +1,6 @@  - +
diff --git a/Tools/msi/tcltk/tcltk_pdb.wxs b/Tools/msi/tcltk/tcltk_pdb.wxs
index 75c62bb4..04454f38 100644
--- a/Tools/msi/tcltk/tcltk_pdb.wxs
+++ b/Tools/msi/tcltk/tcltk_pdb.wxs
@@ -1,6 +1,6 @@  - +
diff --git a/Tools/msi/tcltk/tcltk_reg.wxs b/Tools/msi/tcltk/tcltk_reg.wxs
index e0989274..2778bcc8 100644
--- a/Tools/msi/tcltk/tcltk_reg.wxs
+++ b/Tools/msi/tcltk/tcltk_reg.wxs
@@ -5,25 +5,25 @@
     VersionNT > 600
- - + +
     VersionNT > 600
- - + +
     VersionNT > 600
- +
     VersionNT > 600
- +
@@ -31,14 +31,14 @@
     VersionNT = 600
- +
     VersionNT = 600
- +
diff --git a/Tools/msi/test/test_d.wxs b/Tools/msi/test/test_d.wxs
index a25afdda..a9548767 100644
--- a/Tools/msi/test/test_d.wxs
+++ b/Tools/msi/test/test_d.wxs
@@ -1,6 +1,6 @@  - +
diff --git a/Tools/msi/test/test_pdb.wxs b/Tools/msi/test/test_pdb.wxs
index 1510a6f8..de634a39 100644
--- a/Tools/msi/test/test_pdb.wxs
+++ b/Tools/msi/test/test_pdb.wxs
@@ -1,6 +1,6 @@  - +
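The _GenerateCommand property in the msi.props hunk above derives the installer upgrade GUIDs deterministically: it hashes the release URI plus a per-component string with uuid.uuid5 against a fixed namespace UUID, so rebuilding the same release reproduces the same codes. A standalone sketch of that derivation; the namespace constant is copied from the property, while the example URI and component names are made up:

# Deterministic GUIDs via uuid5, mirroring the _GenerateCommand in msi.props.
# The namespace UUID comes from that property; the example inputs are invented.
import uuid

NAMESPACE = uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083')

def upgrade_code(release_uri, component):
    # Same URI + component always yields the same GUID.
    return uuid.uuid5(NAMESPACE, release_uri + component)

if __name__ == '__main__':
    for name, component in [('UpgradeCode', '3.5/core'), ('UpgradeCode_d', '3.5/core_d')]:
        print('{}={}'.format(name, upgrade_code('http://www.python.org/x86/', component)))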
diff --git a/Tools/msi/uploadrelease.bat b/Tools/msi/uploadrelease.bat
index 79676373..4e319ce4 100644
--- a/Tools/msi/uploadrelease.bat
+++ b/Tools/msi/uploadrelease.bat
@@ -8,6 +8,7 @@ set HOST=
 set USER=
 set TARGET=
 set DRYRUN=false
+set NOGPG=
 
 :CheckOpts
 if "%1" EQU "-h" goto Help
@@ -18,6 +19,7 @@ if "%1" EQU "--user" (set USER=%~2) && shift && shift && goto CheckOpts
 if "%1" EQU "-t" (set TARGET=%~2) && shift && shift && goto CheckOpts
 if "%1" EQU "--target" (set TARGET=%~2) && shift && shift && goto CheckOpts
 if "%1" EQU "--dry-run" (set DRYRUN=true) && shift && goto CheckOpts
+if "%1" EQU "--no-gpg" (set NOGPG=true) && shift && goto CheckOpts
 
 if not defined PLINK where plink > "%TEMP%\plink.loc" 2> nul && set /P PLINK= < "%TEMP%\plink.loc" & del "%TEMP%\plink.loc"
 if not defined PLINK where /R "%ProgramFiles(x86)%\PuTTY" plink > "%TEMP%\plink.loc" 2> nul && set /P PLINK= < "%TEMP%\plink.loc" & del "%TEMP%\plink.loc"
@@ -31,10 +33,15 @@ if not defined PSCP where /R "%ProgramFiles(x86)%" pscp > "%TEMP%\pscp.loc" 2> n
 if not defined PSCP echo Cannot locate pscp.exe & exit /B 1
 echo Found pscp.exe at %PSCP%
 
-if not defined GPG where gpg2 > "%TEMP%\gpg.loc" 2> nul && set /P GPG= < "%TEMP%\gpg.loc" & del "%TEMP%\gpg.loc"
-if not defined GPG where /R "%PCBUILD%..\externals" gpg2 > "%TEMP%\gpg.loc" 2> nul && set /P GPG= < "%TEMP%\gpg.loc" & del "%TEMP%\gpg.loc"
-if not defined GPG echo Cannot locate gpg2.exe. Signatures will not be uploaded & pause
-echo Found gpg2.exe at %GPG%
+if defined NOGPG (
+    set GPG=
+    echo Skipping GPG signature generation because of --no-gpg
+) else (
+    if not defined GPG where gpg2 > "%TEMP%\gpg.loc" 2> nul && set /P GPG= < "%TEMP%\gpg.loc" & del "%TEMP%\gpg.loc"
+    if not defined GPG where /R "%PCBUILD%..\externals\windows-installer" gpg2 > "%TEMP%\gpg.loc" 2> nul && set /P GPG= < "%TEMP%\gpg.loc" & del "%TEMP%\gpg.loc"
+    if not defined GPG echo Cannot locate gpg2.exe. Signatures will not be uploaded & pause
+    echo Found gpg2.exe at %GPG%
+)
 
 call "%PCBUILD%env.bat" > nul 2> nul
 pushd "%D%"
diff --git a/Tools/msi/uploadrelease.proj b/Tools/msi/uploadrelease.proj
index ec4ede59..0d472dea 100644
--- a/Tools/msi/uploadrelease.proj
+++ b/Tools/msi/uploadrelease.proj
@@ -16,7 +16,7 @@
     $(DownloadUrlBase.TrimEnd(`/`))/$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)
-    $(DownloadUrl.TrimEnd(`/`))
+    $(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseLevelName)`).Replace(`{msi}`, ``).TrimEnd(`/`))
diff --git a/Tools/msi/wix.props b/Tools/msi/wix.props
index 35492f9b..fbb2d102 100644
--- a/Tools/msi/wix.props
+++ b/Tools/msi/wix.props
@@ -4,9 +4,9 @@
     $(MSBuildThisFileDirectory)\Wix\
-    $(ExternalsDir)\Wix\
-    $(Registry:HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows Installer XML\3.9@InstallRoot)
-    $(Registry:HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\Windows Installer XML\3.9@InstallRoot)
+    $(ExternalsDir)\windows-installer\wix\
+    $(Registry:HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows Installer XML\3.10@InstallRoot)
+    $(Registry:HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\Windows Installer XML\3.10@InstallRoot)
     $(WixInstallPath)\Wix.targets
\ No newline at end of file
diff --git a/Tools/scripts/fixdiv.py b/Tools/scripts/fixdiv.py
index 4ecbea15..20f33865 100755
--- a/Tools/scripts/fixdiv.py
+++ b/Tools/scripts/fixdiv.py
@@ -335,8 +335,6 @@ class FileContext:
         self.buffer.append(line)
         self.lineno += 1
         return line
-    def truncate(self):
-        del self.buffer[-window:]
     def __getitem__(self, index):
         self.fill()
         bufstart = self.lineno - len(self.buffer)
diff --git a/configure b/configure
index e823a083..398f1216 100755
--- a/configure
+++ b/configure
@@ -667,6 +667,12 @@ UNIVERSAL_ARCH_FLAGS
 CFLAGS_NODIST
 BASECFLAGS
 OPT
+LLVM_PROF_FOUND
+LLVM_PROF_ERR
+LLVM_PROF_FILE
+LLVM_PROF_MERGER
+PGO_PROF_USE_FLAG
+PGO_PROF_GEN_FLAG
 ABIFLAGS
 LN
 MKDIR_P
@@ -6431,6 +6437,80 @@ $as_echo "no" >&6; }
 fi
 
+# Enable PGO flags.
+# Extract the first word of "llvm-profdata", so it can be a program name with args.
+set dummy llvm-profdata; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_LLVM_PROF_FOUND+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$LLVM_PROF_FOUND"; then
+  ac_cv_prog_LLVM_PROF_FOUND="$LLVM_PROF_FOUND" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_LLVM_PROF_FOUND="found"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_prog_LLVM_PROF_FOUND" && ac_cv_prog_LLVM_PROF_FOUND="not-found"
+fi
+fi
+LLVM_PROF_FOUND=$ac_cv_prog_LLVM_PROF_FOUND
+if test -n "$LLVM_PROF_FOUND"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LLVM_PROF_FOUND" >&5
+$as_echo "$LLVM_PROF_FOUND" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+LLVM_PROF_ERR=no
+case $CC in
+  *clang*)
+    # Any changes made here should be reflected in the GCC+Darwin case below
+    PGO_PROF_GEN_FLAG="-fprofile-instr-generate"
+    PGO_PROF_USE_FLAG="-fprofile-instr-use=code.profclangd"
+    LLVM_PROF_MERGER="llvm-profdata merge -output=code.profclangd *.profclangr"
+    LLVM_PROF_FILE="LLVM_PROFILE_FILE=\"code-%p.profclangr\""
+    if test $LLVM_PROF_FOUND = not-found
+    then
+      LLVM_PROF_ERR=yes
+    fi
+    ;;
+  *gcc*)
+    case $ac_sys_system in
+      Darwin*)
+        PGO_PROF_GEN_FLAG="-fprofile-instr-generate"
+        PGO_PROF_USE_FLAG="-fprofile-instr-use=code.profclangd"
+        LLVM_PROF_MERGER="llvm-profdata merge -output=code.profclangd *.profclangr"
+        LLVM_PROF_FILE="LLVM_PROFILE_FILE=\"code-%p.profclangr\""
+        if test $LLVM_PROF_FOUND = not-found
+        then
+          LLVM_PROF_ERR=yes
+        fi
+        ;;
+      *)
+        PGO_PROF_GEN_FLAG="-fprofile-generate"
+        PGO_PROF_USE_FLAG="-fprofile-use -fprofile-correction"
+        LLVM_PROF_MERGER="true"
+        LLVM_PROF_FILE=""
+        ;;
+    esac
+    ;;
+esac
+
 # XXX Shouldn't the code above that fiddles with BASECFLAGS and OPT be
 # merged with this chunk of code?
@@ -16005,11 +16085,11 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 #include <sys/syscall.h>
 
 int main() {
-    const int flags = 0;
     char buffer[1];
-    int n;
+    const size_t buflen = sizeof(buffer);
+    const int flags = 0;
     /* ignore the result, Python checks for ENOSYS at runtime */
-    (void)syscall(SYS_getrandom, buffer, sizeof(buffer), flags);
+    (void)syscall(SYS_getrandom, buffer, buflen, flags);
     return 0;
 }
 
@@ -16031,6 +16111,43 @@ $as_echo "#define HAVE_GETRANDOM_SYSCALL 1" >>confdefs.h
 fi
 
+# check if the getrandom() function is available
+# the test was written for the Solaris function of <sys/random.h>
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for the getrandom() function" >&5
+$as_echo_n "checking for the getrandom() function... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+    #include <sys/random.h>
+
+    int main() {
+        char buffer[1];
+        const size_t buflen = sizeof(buffer);
+        const int flags = 0;
+        /* ignore the result, Python checks for ENOSYS at runtime */
+        (void)getrandom(buffer, buflen, flags);
+        return 0;
+    }
+
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  have_getrandom=yes
+else
+  have_getrandom=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_getrandom" >&5
+$as_echo "$have_getrandom" >&6; }
+
+if test "$have_getrandom" = yes; then
+
+$as_echo "#define HAVE_GETRANDOM 1" >>confdefs.h
+
+fi
+
 # generate output files
 ac_config_files="$ac_config_files Makefile.pre Modules/Setup.config Misc/python.pc Misc/python-config.sh"
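The configure additions above select PGO flags per compiler: clang, and gcc on Darwin, use the LLVM instrumentation flags plus an llvm-profdata merge step, while other gcc builds use -fprofile-generate and -fprofile-use with -fprofile-correction and need no separate merge tool. A rough Python rendering of that selection logic, for illustration only; the function and key names are invented, and only the flag strings come from the diff:

# Rough model of the compiler-to-PGO-flag selection added to configure;
# names here are invented, the flag strings are taken from the diff above.
def pgo_flags(cc, sys_system='Linux'):
    clang_like = 'clang' in cc or ('gcc' in cc and sys_system.startswith('Darwin'))
    if clang_like:
        return {
            'gen': '-fprofile-instr-generate',
            'use': '-fprofile-instr-use=code.profclangd',
            'merge': 'llvm-profdata merge -output=code.profclangd *.profclangr',
        }
    return {
        'gen': '-fprofile-generate',
        'use': '-fprofile-use -fprofile-correction',
        'merge': 'true',   # gcc merges its own .gcda profiles, no extra tool
    }

# Example:
# pgo_flags('clang')['gen']  -> '-fprofile-instr-generate'
# pgo_flags('gcc')['use']    -> '-fprofile-use -fprofile-correction'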
diff --git a/configure.ac b/configure.ac
index 56a73df2..694293e8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1218,6 +1218,49 @@ else AC_MSG_RESULT(no); Py_DEBUG='false'
 fi],
 [AC_MSG_RESULT(no)])
 
+# Enable PGO flags.
+AC_SUBST(PGO_PROF_GEN_FLAG)
+AC_SUBST(PGO_PROF_USE_FLAG)
+AC_SUBST(LLVM_PROF_MERGER)
+AC_SUBST(LLVM_PROF_FILE)
+AC_SUBST(LLVM_PROF_ERR)
+AC_SUBST(LLVM_PROF_FOUND)
+AC_CHECK_PROG(LLVM_PROF_FOUND, llvm-profdata, found, not-found)
+LLVM_PROF_ERR=no
+case $CC in
+  *clang*)
+    # Any changes made here should be reflected in the GCC+Darwin case below
+    PGO_PROF_GEN_FLAG="-fprofile-instr-generate"
+    PGO_PROF_USE_FLAG="-fprofile-instr-use=code.profclangd"
+    LLVM_PROF_MERGER="llvm-profdata merge -output=code.profclangd *.profclangr"
+    LLVM_PROF_FILE="LLVM_PROFILE_FILE=\"code-%p.profclangr\""
+    if test $LLVM_PROF_FOUND = not-found
+    then
+      LLVM_PROF_ERR=yes
+    fi
+    ;;
+  *gcc*)
+    case $ac_sys_system in
+      Darwin*)
+        PGO_PROF_GEN_FLAG="-fprofile-instr-generate"
+        PGO_PROF_USE_FLAG="-fprofile-instr-use=code.profclangd"
+        LLVM_PROF_MERGER="llvm-profdata merge -output=code.profclangd *.profclangr"
+        LLVM_PROF_FILE="LLVM_PROFILE_FILE=\"code-%p.profclangr\""
+        if test $LLVM_PROF_FOUND = not-found
+        then
+          LLVM_PROF_ERR=yes
+        fi
+        ;;
+      *)
+        PGO_PROF_GEN_FLAG="-fprofile-generate"
+        PGO_PROF_USE_FLAG="-fprofile-use -fprofile-correction"
+        LLVM_PROF_MERGER="true"
+        LLVM_PROF_FILE=""
+        ;;
+    esac
+    ;;
+esac
+
 # XXX Shouldn't the code above that fiddles with BASECFLAGS and OPT be
 # merged with this chunk of code?
@@ -5111,11 +5154,11 @@ AC_LINK_IFELSE(
     #include <sys/syscall.h>
 
     int main() {
-        const int flags = 0;
         char buffer[1];
-        int n;
+        const size_t buflen = sizeof(buffer);
+        const int flags = 0;
         /* ignore the result, Python checks for ENOSYS at runtime */
-        (void)syscall(SYS_getrandom, buffer, sizeof(buffer), flags);
+        (void)syscall(SYS_getrandom, buffer, buflen, flags);
         return 0;
     }
  ]])
@@ -5127,6 +5170,31 @@ if test "$have_getrandom_syscall" = yes; then
   [Define to 1 if the Linux getrandom() syscall is available])
 fi
 
+# check if the getrandom() function is available
+# the test was written for the Solaris function of <sys/random.h>
+AC_MSG_CHECKING(for the getrandom() function)
+AC_LINK_IFELSE(
+[
+  AC_LANG_SOURCE([[
+    #include <sys/random.h>
+
+    int main() {
+        char buffer[1];
+        const size_t buflen = sizeof(buffer);
+        const int flags = 0;
+        /* ignore the result, Python checks for ENOSYS at runtime */
+        (void)getrandom(buffer, buflen, flags);
+        return 0;
+    }
+  ]])
+],[have_getrandom=yes],[have_getrandom=no])
+AC_MSG_RESULT($have_getrandom)
+
+if test "$have_getrandom" = yes; then
+  AC_DEFINE(HAVE_GETRANDOM, 1,
+    [Define to 1 if the getrandom() function is available])
+fi
+
 # generate output files
 AC_CONFIG_FILES(Makefile.pre Modules/Setup.config Misc/python.pc Misc/python-config.sh)
 AC_CONFIG_FILES([Modules/ld_so_aix], [chmod +x Modules/ld_so_aix])
diff --git a/pyconfig.h.in b/pyconfig.h.in
index 0d40c94c..b0cafb31 100644
--- a/pyconfig.h.in
+++ b/pyconfig.h.in
@@ -395,6 +395,9 @@
 /* Define to 1 if you have the `getpwent' function. */
 #undef HAVE_GETPWENT
 
+/* Define to 1 if the getrandom() function is available */
+#undef HAVE_GETRANDOM
+
 /* Define to 1 if the Linux getrandom() syscall is available */
 #undef HAVE_GETRANDOM_SYSCALL
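With pyconfig.h.in now carrying HAVE_GETRANDOM next to HAVE_GETRANDOM_SYSCALL, a built interpreter records which entropy interface configure detected. If you want to check that from Python, one possible approach (a sketch that relies only on the standard sysconfig helpers) is to parse the generated pyconfig.h of the running interpreter:

# Sketch: read the installed pyconfig.h of the running interpreter and report
# whether the getrandom() function and/or the Linux syscall were detected.
import sysconfig

def getrandom_support():
    with open(sysconfig.get_config_h_filename()) as f:
        defines = sysconfig.parse_config_h(f)
    return {
        'HAVE_GETRANDOM': bool(defines.get('HAVE_GETRANDOM', 0)),
        'HAVE_GETRANDOM_SYSCALL': bool(defines.get('HAVE_GETRANDOM_SYSCALL', 0)),
    }

if __name__ == '__main__':
    print(getrandom_support())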