diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7e9c3caf23f079f..221008717b29b1c 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -280,3 +280,5 @@ Lib/test/test_configparser.py @jaraco
# Doc sections
Doc/reference/ @willingc
+
+**/*weakref* @kumaraditya303
\ No newline at end of file
diff --git a/.github/workflows/reusable-change-detection.yml b/.github/workflows/reusable-change-detection.yml
index 6f599f75547ceb1..5cd6fb39f1e12f4 100644
--- a/.github/workflows/reusable-change-detection.yml
+++ b/.github/workflows/reusable-change-detection.yml
@@ -1,6 +1,4 @@
----
-
-name: Change detection
+name: Reusable change detection
on: # yamllint disable-line rule:truthy
workflow_call:
diff --git a/.github/workflows/reusable-docs.yml b/.github/workflows/reusable-docs.yml
index 4b021b3dc32f154..3809f24dcc977e6 100644
--- a/.github/workflows/reusable-docs.yml
+++ b/.github/workflows/reusable-docs.yml
@@ -1,4 +1,4 @@
-name: Docs
+name: Reusable Docs
on:
workflow_call:
@@ -95,7 +95,7 @@ jobs:
# Run "doctest" on HEAD as new syntax doesn't exist in the latest stable release
doctest:
name: 'Doctest'
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/reusable-macos.yml b/.github/workflows/reusable-macos.yml
index b4227545887ad16..b3a160fbbf8053a 100644
--- a/.github/workflows/reusable-macos.yml
+++ b/.github/workflows/reusable-macos.yml
@@ -1,3 +1,5 @@
+name: Reusable macOS
+
on:
workflow_call:
inputs:
diff --git a/.github/workflows/reusable-tsan.yml b/.github/workflows/reusable-tsan.yml
index 27f4eacd86fd958..f4c976ca996410e 100644
--- a/.github/workflows/reusable-tsan.yml
+++ b/.github/workflows/reusable-tsan.yml
@@ -1,3 +1,5 @@
+name: Reusable Thread Sanitizer
+
on:
workflow_call:
inputs:
diff --git a/.github/workflows/reusable-ubuntu.yml b/.github/workflows/reusable-ubuntu.yml
index 769f1210de4d3cd..0cf40ba8a9b03b1 100644
--- a/.github/workflows/reusable-ubuntu.yml
+++ b/.github/workflows/reusable-ubuntu.yml
@@ -1,3 +1,5 @@
+name: Reusable Ubuntu
+
on:
workflow_call:
inputs:
diff --git a/.github/workflows/reusable-wasi.yml b/.github/workflows/reusable-wasi.yml
index 1b1a68c0badc763..4c8137c958a3128 100644
--- a/.github/workflows/reusable-wasi.yml
+++ b/.github/workflows/reusable-wasi.yml
@@ -1,3 +1,5 @@
+name: Reusable WASI
+
on:
workflow_call:
inputs:
diff --git a/.github/workflows/reusable-windows-msi.yml b/.github/workflows/reusable-windows-msi.yml
index fc34ab7c3eb1f25..abdb1a1982fef8d 100644
--- a/.github/workflows/reusable-windows-msi.yml
+++ b/.github/workflows/reusable-windows-msi.yml
@@ -1,4 +1,4 @@
-name: TestsMSI
+name: Reusable Windows MSI
on:
workflow_call:
diff --git a/.github/workflows/reusable-windows.yml b/.github/workflows/reusable-windows.yml
index e9c3c8e05a801c4..dcfc62d7f5d1456 100644
--- a/.github/workflows/reusable-windows.yml
+++ b/.github/workflows/reusable-windows.yml
@@ -1,3 +1,5 @@
+name: Reusable Windows
+
on:
workflow_call:
inputs:
diff --git a/Doc/c-api/contextvars.rst b/Doc/c-api/contextvars.rst
index 0de135b232aaaf1..b7c6550ff34aac1 100644
--- a/Doc/c-api/contextvars.rst
+++ b/Doc/c-api/contextvars.rst
@@ -122,18 +122,18 @@ Context object management functions:
.. c:type:: PyContextEvent
Enumeration of possible context object watcher events:
- - ``Py_CONTEXT_EVENT_ENTER``
- - ``Py_CONTEXT_EVENT_EXIT``
+
+ - ``Py_CONTEXT_SWITCHED``: The :term:`current context` has switched to a
+ different context. The object passed to the watch callback is the
+ now-current :class:`contextvars.Context` object, or None if no context is
+ current.
.. versionadded:: 3.14
-.. c:type:: int (*PyContext_WatchCallback)(PyContextEvent event, PyContext* ctx)
+.. c:type:: int (*PyContext_WatchCallback)(PyContextEvent event, PyObject *obj)
- Type of a context object watcher callback function.
- If *event* is ``Py_CONTEXT_EVENT_ENTER``, then the callback is invoked
- after *ctx* has been set as the current context for the current thread.
- Otherwise, the callback is invoked before the deactivation of *ctx* as the current context
- and the restoration of the previous contex object for the current thread.
+ Context object watcher callback function. The object passed to the callback
+ is event-specific; see :c:type:`PyContextEvent` for details.
If the callback returns with an exception set, it must return ``-1``; this
exception will be printed as an unraisable exception using
diff --git a/Doc/c-api/init_config.rst b/Doc/c-api/init_config.rst
index 6f8962afc7af0d0..6194d7446c73e45 100644
--- a/Doc/c-api/init_config.rst
+++ b/Doc/c-api/init_config.rst
@@ -1621,6 +1621,8 @@ Create Config
Free memory of the initialization configuration *config*.
+ If *config* is ``NULL``, no operation is performed.
+
Error Handling
--------------
@@ -1823,14 +1825,18 @@ return ``-1`` on error:
PyInitConfig_Free(config);
return 0;
- // Display the error message
- const char *err_msg;
error:
- (void)PyInitConfig_GetError(config, &err_msg);
- printf("PYTHON INIT ERROR: %s\n", err_msg);
- PyInitConfig_Free(config);
+ {
+ // Display the error message
+ // This uncommon braces style is used, because you cannot make
+ // goto targets point to variable declarations.
+ const char *err_msg;
+ (void)PyInitConfig_GetError(config, &err_msg);
+ printf("PYTHON INIT ERROR: %s\n", err_msg);
+ PyInitConfig_Free(config);
- return -1;
+ return -1;
+ }
}
diff --git a/Doc/c-api/long.rst b/Doc/c-api/long.rst
index e0ae0f77a01db97..02ef8aa78464681 100644
--- a/Doc/c-api/long.rst
+++ b/Doc/c-api/long.rst
@@ -608,6 +608,9 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate.
Exactly what values are considered compact is an implementation detail
and is subject to change.
+ .. versionadded:: 3.12
+
+
.. c:function:: Py_ssize_t PyUnstable_Long_CompactValue(const PyLongObject* op)
If *op* is compact, as determined by :c:func:`PyUnstable_Long_IsCompact`,
@@ -615,3 +618,5 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate.
Otherwise, the return value is undefined.
+ .. versionadded:: 3.12
+
diff --git a/Doc/c-api/unicode.rst b/Doc/c-api/unicode.rst
index f5704cffa199a58..4daf9e9fdbf2f10 100644
--- a/Doc/c-api/unicode.rst
+++ b/Doc/c-api/unicode.rst
@@ -1600,6 +1600,8 @@ object.
Discard the internal Unicode buffer and destroy the writer instance.
+ If *writer* is ``NULL``, no operation is performed.
+
.. c:function:: int PyUnicodeWriter_WriteChar(PyUnicodeWriter *writer, Py_UCS4 ch)
Write the single Unicode character *ch* into *writer*.
diff --git a/Doc/conf.py b/Doc/conf.py
index 287e0da46eb11cb..839beaad08bebdc 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -11,6 +11,8 @@
import sys
import time
+import sphinx
+
sys.path.append(os.path.abspath('tools/extensions'))
sys.path.append(os.path.abspath('includes'))
@@ -62,7 +64,10 @@
# General substitutions.
project = 'Python'
-copyright = f"2001-{time.strftime('%Y')}, Python Software Foundation"
+if sphinx.version_info[:2] >= (8, 1):
+ copyright = "2001-%Y, Python Software Foundation"
+else:
+ copyright = f"2001-{time.strftime('%Y')}, Python Software Foundation"
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
@@ -361,10 +366,14 @@
}
# This 'Last updated on:' timestamp is inserted at the bottom of every page.
-html_time = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
-html_last_updated_fmt = time.strftime(
- '%b %d, %Y (%H:%M UTC)', time.gmtime(html_time)
-)
+html_last_updated_fmt = '%b %d, %Y (%H:%M UTC)'
+if sphinx.version_info[:2] >= (8, 1):
+ html_last_updated_use_utc = True
+else:
+ html_time = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
+ html_last_updated_fmt = time.strftime(
+ html_last_updated_fmt, time.gmtime(html_time)
+ )
# Path to find HTML templates.
templates_path = ['tools/templates']
@@ -596,13 +605,21 @@
# mapping unique short aliases to a base URL and a prefix.
# https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
extlinks = {
- "cve": ("https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s", "CVE-%s"),
- "cwe": ("https://cwe.mitre.org/data/definitions/%s.html", "CWE-%s"),
"pypi": ("https://pypi.org/project/%s/", "%s"),
"source": (SOURCE_URI, "%s"),
}
extlinks_detect_hardcoded_links = True
+if sphinx.version_info[:2] < (8, 1):
+ # Sphinx 8.1 has in-built CVE and CWE roles.
+ extlinks |= {
+ "cve": (
+ "https://www.cve.org/CVERecord?id=CVE-%s",
+ "CVE-%s",
+ ),
+ "cwe": ("https://cwe.mitre.org/data/definitions/%s.html", "CWE-%s"),
+ }
+
# Options for c_annotations
# -------------------------
diff --git a/Doc/deprecations/c-api-pending-removal-in-3.14.rst b/Doc/deprecations/c-api-pending-removal-in-3.14.rst
index d16da66c29abe7e..9e10bf2691e5c85 100644
--- a/Doc/deprecations/c-api-pending-removal-in-3.14.rst
+++ b/Doc/deprecations/c-api-pending-removal-in-3.14.rst
@@ -1,4 +1,4 @@
-Pending Removal in Python 3.14
+Pending removal in Python 3.14
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* The ``ma_version_tag`` field in :c:type:`PyDictObject` for extension modules
diff --git a/Doc/deprecations/c-api-pending-removal-in-3.15.rst b/Doc/deprecations/c-api-pending-removal-in-3.15.rst
index e3974415e0cc89a..1bb49e5b4874f2e 100644
--- a/Doc/deprecations/c-api-pending-removal-in-3.15.rst
+++ b/Doc/deprecations/c-api-pending-removal-in-3.15.rst
@@ -1,4 +1,4 @@
-Pending Removal in Python 3.15
+Pending removal in Python 3.15
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* The bundled copy of ``libmpdecimal``.
diff --git a/Doc/deprecations/c-api-pending-removal-in-future.rst b/Doc/deprecations/c-api-pending-removal-in-future.rst
index 0c3ae52b87ff74c..8fc1c80c35d0925 100644
--- a/Doc/deprecations/c-api-pending-removal-in-future.rst
+++ b/Doc/deprecations/c-api-pending-removal-in-future.rst
@@ -1,4 +1,4 @@
-Pending Removal in Future Versions
+Pending removal in future versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following APIs are deprecated and will be removed,
diff --git a/Doc/deprecations/index.rst b/Doc/deprecations/index.rst
index a9efb0bc744335a..bac6e3f18d4594c 100644
--- a/Doc/deprecations/index.rst
+++ b/Doc/deprecations/index.rst
@@ -7,7 +7,7 @@ Deprecations
.. include:: pending-removal-in-future.rst
-C API Deprecations
+C API deprecations
------------------
.. include:: c-api-pending-removal-in-3.15.rst
diff --git a/Doc/deprecations/pending-removal-in-3.13.rst b/Doc/deprecations/pending-removal-in-3.13.rst
index 89790497816e836..2fd2f12cc6a2c42 100644
--- a/Doc/deprecations/pending-removal-in-3.13.rst
+++ b/Doc/deprecations/pending-removal-in-3.13.rst
@@ -1,4 +1,4 @@
-Pending Removal in Python 3.13
+Pending removal in Python 3.13
------------------------------
Modules (see :pep:`594`):
diff --git a/Doc/deprecations/pending-removal-in-3.14.rst b/Doc/deprecations/pending-removal-in-3.14.rst
index de30f4695059ed2..b8791b8d6c387e5 100644
--- a/Doc/deprecations/pending-removal-in-3.14.rst
+++ b/Doc/deprecations/pending-removal-in-3.14.rst
@@ -1,4 +1,4 @@
-Pending Removal in Python 3.14
+Pending removal in Python 3.14
------------------------------
* The import system:
diff --git a/Doc/deprecations/pending-removal-in-3.15.rst b/Doc/deprecations/pending-removal-in-3.15.rst
index a55fb6bea3fdaac..17029b8d4773bdb 100644
--- a/Doc/deprecations/pending-removal-in-3.15.rst
+++ b/Doc/deprecations/pending-removal-in-3.15.rst
@@ -1,4 +1,4 @@
-Pending Removal in Python 3.15
+Pending removal in Python 3.15
------------------------------
* The import system:
@@ -63,7 +63,7 @@ Pending Removal in Python 3.15
* The undocumented keyword argument syntax for creating
:class:`~typing.NamedTuple` classes
- (e.g. ``Point = NamedTuple("Point", x=int, y=int)``)
+ (for example, ``Point = NamedTuple("Point", x=int, y=int)``)
has been deprecated since Python 3.13.
Use the class-based syntax or the functional syntax instead.
diff --git a/Doc/deprecations/pending-removal-in-3.16.rst b/Doc/deprecations/pending-removal-in-3.16.rst
index fc2ef33de5e5cc5..fac500d34742ca7 100644
--- a/Doc/deprecations/pending-removal-in-3.16.rst
+++ b/Doc/deprecations/pending-removal-in-3.16.rst
@@ -1,15 +1,6 @@
-Pending Removal in Python 3.16
+Pending removal in Python 3.16
------------------------------
-* :mod:`builtins`:
-
- * Bitwise inversion on boolean types, ``~True`` or ``~False``
- has been deprecated since Python 3.12,
- as it produces surprising and unintuitive results (``-2`` and ``-1``).
- Use ``not x`` instead for the logical negation of a Boolean.
- In the rare case that you need the bitwise inversion of
- the underlying integer, convert to ``int`` explicitly (``~int(x)``).
-
* :mod:`array`:
* The ``'u'`` format code (:c:type:`wchar_t`)
@@ -20,11 +11,19 @@ Pending Removal in Python 3.16
* :mod:`asyncio`:
- * :mod:`asyncio`:
- :func:`!asyncio.iscoroutinefunction` is deprecated
- and will be removed in Python 3.16,
- use :func:`inspect.iscoroutinefunction` instead.
- (Contributed by Jiahao Li and Kumar Aditya in :gh:`122875`.)
+ * :func:`!asyncio.iscoroutinefunction` is deprecated
+ and will be removed in Python 3.16,
+ use :func:`inspect.iscoroutinefunction` instead.
+ (Contributed by Jiahao Li and Kumar Aditya in :gh:`122875`.)
+
+* :mod:`builtins`:
+
+ * Bitwise inversion on boolean types, ``~True`` or ``~False``
+ has been deprecated since Python 3.12,
+ as it produces surprising and unintuitive results (``-2`` and ``-1``).
+ Use ``not x`` instead for the logical negation of a Boolean.
+ In the rare case that you need the bitwise inversion of
+ the underlying integer, convert to ``int`` explicitly (``~int(x)``).
* :mod:`shutil`:
diff --git a/Doc/deprecations/pending-removal-in-future.rst b/Doc/deprecations/pending-removal-in-future.rst
index 3f9cf6f208221ab..f916797c07a068c 100644
--- a/Doc/deprecations/pending-removal-in-future.rst
+++ b/Doc/deprecations/pending-removal-in-future.rst
@@ -1,4 +1,4 @@
-Pending Removal in Future Versions
+Pending removal in future versions
----------------------------------
The following APIs will be removed in the future,
diff --git a/Doc/glossary.rst b/Doc/glossary.rst
index cb7e0a2b89d0379..f67f3ecad0bc406 100644
--- a/Doc/glossary.rst
+++ b/Doc/glossary.rst
@@ -265,19 +265,33 @@ Glossary
advanced mathematical feature. If you're not aware of a need for them,
it's almost certain you can safely ignore them.
+ context
+ This term has different meanings depending on where and how it is used.
+ Some common meanings:
+
+ * The temporary state or environment established by a :term:`context
+ manager` via a :keyword:`with` statement.
+ * The collection of key-value bindings associated with a particular
+ :class:`contextvars.Context` object and accessed via
+ :class:`~contextvars.ContextVar` objects. Also see :term:`context
+ variable`.
+ * A :class:`contextvars.Context` object. Also see :term:`current
+ context`.
+
+ context management protocol
+ The :meth:`~object.__enter__` and :meth:`~object.__exit__` methods called
+ by the :keyword:`with` statement. See :pep:`343`.
+
context manager
- An object which controls the environment seen in a :keyword:`with`
- statement by defining :meth:`~object.__enter__` and :meth:`~object.__exit__` methods.
- See :pep:`343`.
+ An object which implements the :term:`context management protocol` and
+ controls the environment seen in a :keyword:`with` statement. See
+ :pep:`343`.
context variable
- A variable which can have different values depending on its context.
- This is similar to Thread-Local Storage in which each execution
- thread may have a different value for a variable. However, with context
- variables, there may be several contexts in one execution thread and the
- main usage for context variables is to keep track of variables in
+ A variable whose value depends on which context is the :term:`current
+ context`. Values are accessed via :class:`contextvars.ContextVar`
+ objects. Context variables are primarily used to isolate state between
concurrent asynchronous tasks.
- See :mod:`contextvars`.
contiguous
.. index:: C-contiguous, Fortran contiguous
@@ -311,6 +325,14 @@ Glossary
is used when necessary to distinguish this implementation from others
such as Jython or IronPython.
+ current context
+ The :term:`context` (:class:`contextvars.Context` object) that is
+ currently used by :class:`~contextvars.ContextVar` objects to access (get
+ or set) the values of :term:`context variables <context variable>`. Each
+ thread has its own current context. Frameworks for executing asynchronous
+ tasks (see :mod:`asyncio`) associate each task with a context which
+ becomes the current context whenever the task starts or resumes execution.
+
decorator
A function returning another function, usually applied as a function
transformation using the ``@wrapper`` syntax. Common examples for
@@ -595,7 +617,7 @@ Glossary
As of Python 3.13, the GIL can be disabled using the :option:`--disable-gil`
build configuration. After building Python with this option, code must be
- run with :option:`-X gil 0 <-X>` or after setting the :envvar:`PYTHON_GIL=0 <PYTHON_GIL>`
+ run with :option:`-X gil=0 <-X>` or after setting the :envvar:`PYTHON_GIL=0 <PYTHON_GIL>`
environment variable. This feature enables improved performance for
multi-threaded applications and makes it easier to use multi-core CPUs
efficiently. For more details, see :pep:`703`.
diff --git a/Doc/library/_thread.rst b/Doc/library/_thread.rst
index 5fd604c05380ac5..6a66fc4c64bc450 100644
--- a/Doc/library/_thread.rst
+++ b/Doc/library/_thread.rst
@@ -219,9 +219,11 @@ In addition to these methods, lock objects can also be used via the
* Calling :func:`sys.exit` or raising the :exc:`SystemExit` exception is
equivalent to calling :func:`_thread.exit`.
-* It is not possible to interrupt the :meth:`~threading.Lock.acquire` method on
- a lock --- the :exc:`KeyboardInterrupt` exception will happen after the lock
- has been acquired.
+* It is platform-dependent whether the :meth:`~threading.Lock.acquire` method
+ on a lock can be interrupted (so that the :exc:`KeyboardInterrupt` exception
+ will happen immediately, rather than only after the lock has been acquired or
+ the operation has timed out). It can be interrupted on POSIX, but not on
+ Windows.
* When the main thread exits, it is system defined whether the other threads
survive. On most systems, they are killed without executing
diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst
index e9a08984f77c3ae..d58c75eef3e739d 100644
--- a/Doc/library/argparse.rst
+++ b/Doc/library/argparse.rst
@@ -541,7 +541,8 @@ exit_on_error
^^^^^^^^^^^^^
Normally, when you pass an invalid argument list to the :meth:`~ArgumentParser.parse_args`
-method of an :class:`ArgumentParser`, it will exit with error info.
+method of an :class:`ArgumentParser`, it will print a *message* to :data:`sys.stderr` and exit with a status
+code of 2.
If the user would like to catch errors manually, the feature can be enabled by setting
``exit_on_error`` to ``False``::
@@ -601,7 +602,7 @@ The add_argument() method
The following sections describe how each of these are used.
-.. _name_or_flags:
+.. _`name or flags`:
name or flags
^^^^^^^^^^^^^
@@ -635,6 +636,25 @@ be positional::
usage: PROG [-h] [-f FOO] bar
PROG: error: the following arguments are required: bar
+By default, argparse automatically handles the internal naming and
+display names of arguments, simplifying the process without requiring
+additional configuration.
+As such, you do not need to specify the dest_ and metavar_ parameters.
+The dest_ parameter defaults to the argument name with underscores ``_``
+replacing hyphens ``-``. The metavar_ parameter defaults to the
+upper-cased name. For example::
+
+ >>> parser = argparse.ArgumentParser(prog='PROG')
+ >>> parser.add_argument('--foo-bar')
+ >>> parser.parse_args(['--foo-bar', 'FOO-BAR'])
+ Namespace(foo_bar='FOO-BAR')
+ >>> parser.print_help()
+ usage: [-h] [--foo-bar FOO-BAR]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --foo-bar FOO-BAR
+
.. _action:
@@ -731,6 +751,9 @@ how the command-line arguments should be handled. The supplied actions are:
.. versionadded:: 3.8
+Only actions that consume command-line arguments (e.g. ``'store'``,
+``'append'`` or ``'extend'``) can be used with positional arguments.
+
You may also specify an arbitrary action by passing an Action subclass or
other object that implements the same interface. The ``BooleanOptionalAction``
is available in ``argparse`` and adds support for boolean actions such as
@@ -858,6 +881,8 @@ See also :ref:`specifying-ambiguous-arguments`. The supported values are:
If the ``nargs`` keyword argument is not provided, the number of arguments consumed
is determined by the action_. Generally this means a single command-line argument
will be consumed and a single item (not a list) will be produced.
+Actions that do not consume command-line arguments (e.g.
+``'store_const'``) set ``nargs=0``.
.. _const:
diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst
index 4716a3f9c8ac79f..f27e858cf420f4b 100644
--- a/Doc/library/asyncio-task.rst
+++ b/Doc/library/asyncio-task.rst
@@ -158,7 +158,7 @@ other coroutines::
# Nothing happens if we just call "nested()".
# A coroutine object is created but not awaited,
# so it *won't run at all*.
- nested()
+ nested() # will raise a "RuntimeWarning".
# Let's do it differently now and await it:
print(await nested()) # will print "42".
diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst
index b5c18bbccffb785..3aad6f7b5d2d20d 100644
--- a/Doc/library/configparser.rst
+++ b/Doc/library/configparser.rst
@@ -54,6 +54,7 @@ can be customized by end users easily.
import os
os.remove("example.ini")
+ os.remove("override.ini")
Quick Start
diff --git a/Doc/library/contextvars.rst b/Doc/library/contextvars.rst
index 2a79dfe8f81e26e..2b1fb9fdd29cd88 100644
--- a/Doc/library/contextvars.rst
+++ b/Doc/library/contextvars.rst
@@ -144,51 +144,89 @@ Manual Context Management
To get a copy of the current context use the
:func:`~contextvars.copy_context` function.
- Every thread will have a different top-level :class:`~contextvars.Context`
- object. This means that a :class:`ContextVar` object behaves in a similar
- fashion to :func:`threading.local` when values are assigned in different
- threads.
+ Each thread has its own effective stack of :class:`!Context` objects. The
+ :term:`current context` is the :class:`!Context` object at the top of the
+ current thread's stack. All :class:`!Context` objects in the stacks are
+ considered to be *entered*.
+
+ *Entering* a context, which can be done by calling its :meth:`~Context.run`
+ method, makes the context the current context by pushing it onto the top of
+ the current thread's context stack.
+
+ *Exiting* from the current context, which can be done by returning from the
+ callback passed to the :meth:`~Context.run` method, restores the current
+ context to what it was before the context was entered by popping the context
+ off the top of the context stack.
+
+ Since each thread has its own context stack, :class:`ContextVar` objects
+ behave in a similar fashion to :func:`threading.local` when values are
+ assigned in different threads.
+
+ Attempting to enter an already entered context, including contexts entered in
+ other threads, raises a :exc:`RuntimeError`.
+
+ After exiting a context, it can later be re-entered (from any thread).
+
+ Any changes to :class:`ContextVar` values via the :meth:`ContextVar.set`
+ method are recorded in the current context. The :meth:`ContextVar.get`
+ method returns the value associated with the current context. Exiting a
+ context effectively reverts any changes made to context variables while the
+ context was entered (if needed, the values can be restored by re-entering the
+ context).
Context implements the :class:`collections.abc.Mapping` interface.
.. method:: run(callable, *args, **kwargs)
- Execute ``callable(*args, **kwargs)`` code in the context object
- the *run* method is called on. Return the result of the execution
- or propagate an exception if one occurred.
+ Enters the Context, executes ``callable(*args, **kwargs)``, then exits the
+ Context. Returns *callable*'s return value, or propagates an exception if
+ one occurred.
+
+ Example:
+
+ .. testcode::
+
+ import contextvars
- Any changes to any context variables that *callable* makes will
- be contained in the context object::
+ var = contextvars.ContextVar('var')
+ var.set('spam')
+ print(var.get()) # 'spam'
- var = ContextVar('var')
- var.set('spam')
+ ctx = contextvars.copy_context()
- def main():
- # 'var' was set to 'spam' before
- # calling 'copy_context()' and 'ctx.run(main)', so:
- # var.get() == ctx[var] == 'spam'
+ def main():
+ # 'var' was set to 'spam' before
+ # calling 'copy_context()' and 'ctx.run(main)', so:
+ print(var.get()) # 'spam'
+ print(ctx[var]) # 'spam'
- var.set('ham')
+ var.set('ham')
- # Now, after setting 'var' to 'ham':
- # var.get() == ctx[var] == 'ham'
+ # Now, after setting 'var' to 'ham':
+ print(var.get()) # 'ham'
+ print(ctx[var]) # 'ham'
- ctx = copy_context()
+ # Any changes that the 'main' function makes to 'var'
+ # will be contained in 'ctx'.
+ ctx.run(main)
- # Any changes that the 'main' function makes to 'var'
- # will be contained in 'ctx'.
- ctx.run(main)
+ # The 'main()' function was run in the 'ctx' context,
+ # so changes to 'var' are contained in it:
+ print(ctx[var]) # 'ham'
- # The 'main()' function was run in the 'ctx' context,
- # so changes to 'var' are contained in it:
- # ctx[var] == 'ham'
+ # However, outside of 'ctx', 'var' is still set to 'spam':
+ print(var.get()) # 'spam'
- # However, outside of 'ctx', 'var' is still set to 'spam':
- # var.get() == 'spam'
+ .. testoutput::
+ :hide:
- The method raises a :exc:`RuntimeError` when called on the same
- context object from more than one OS thread, or when called
- recursively.
+ spam
+ spam
+ spam
+ ham
+ ham
+ ham
+ spam
.. method:: copy()
diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst
index f0b465bc9ce39cf..2f81080d525f866 100644
--- a/Doc/library/datetime.rst
+++ b/Doc/library/datetime.rst
@@ -180,19 +180,19 @@ Objects of the :class:`date` type are always naive.
An object of type :class:`.time` or :class:`.datetime` may be aware or naive.
-A :class:`.datetime` object *d* is aware if both of the following hold:
+A :class:`.datetime` object ``d`` is aware if both of the following hold:
1. ``d.tzinfo`` is not ``None``
2. ``d.tzinfo.utcoffset(d)`` does not return ``None``
-Otherwise, *d* is naive.
+Otherwise, ``d`` is naive.
-A :class:`.time` object *t* is aware if both of the following hold:
+A :class:`.time` object ``t`` is aware if both of the following hold:
1. ``t.tzinfo`` is not ``None``
2. ``t.tzinfo.utcoffset(None)`` does not return ``None``.
-Otherwise, *t* is naive.
+Otherwise, ``t`` is naive.
The distinction between aware and naive doesn't apply to :class:`timedelta`
objects.
@@ -358,8 +358,8 @@ Supported operations:
+--------------------------------+-----------------------------------------------+
| ``q, r = divmod(t1, t2)`` | Computes the quotient and the remainder: |
| | ``q = t1 // t2`` (3) and ``r = t1 % t2``. |
-| | q is an integer and r is a :class:`timedelta` |
-| | object. |
+| | ``q`` is an integer and ``r`` is a |
+| | :class:`timedelta` object. |
+--------------------------------+-----------------------------------------------+
| ``+t1`` | Returns a :class:`timedelta` object with the |
| | same value. (2) |
@@ -526,7 +526,7 @@ Other constructors, all class methods:
January 1 of year 1 has ordinal 1.
:exc:`ValueError` is raised unless ``1 <= ordinal <=
- date.max.toordinal()``. For any date *d*,
+ date.max.toordinal()``. For any date ``d``,
``date.fromordinal(d.toordinal()) == d``.
@@ -730,7 +730,7 @@ Instance methods:
.. method:: date.toordinal()
Return the proleptic Gregorian ordinal of the date, where January 1 of year 1
- has ordinal 1. For any :class:`date` object *d*,
+ has ordinal 1. For any :class:`date` object ``d``,
``date.fromordinal(d.toordinal()) == d``.
@@ -782,7 +782,7 @@ Instance methods:
.. method:: date.__str__()
- For a date *d*, ``str(d)`` is equivalent to ``d.isoformat()``.
+ For a date ``d``, ``str(d)`` is equivalent to ``d.isoformat()``.
.. method:: date.ctime()
@@ -1063,7 +1063,7 @@ Other constructors, all class methods:
is used. If the *date* argument is a :class:`.datetime` object, its time components
and :attr:`.tzinfo` attributes are ignored.
- For any :class:`.datetime` object *d*,
+ For any :class:`.datetime` object ``d``,
``d == datetime.combine(d.date(), d.time(), d.tzinfo)``.
.. versionchanged:: 3.6
@@ -1270,11 +1270,11 @@ Supported operations:
If both are naive, or both are aware and have the same :attr:`~.datetime.tzinfo` attribute,
the :attr:`~.datetime.tzinfo` attributes are ignored, and the result is a :class:`timedelta`
- object *t* such that ``datetime2 + t == datetime1``. No time zone adjustments
+ object ``t`` such that ``datetime2 + t == datetime1``. No time zone adjustments
are done in this case.
If both are aware and have different :attr:`~.datetime.tzinfo` attributes, ``a-b`` acts
- as if *a* and *b* were first converted to naive UTC datetimes. The
+ as if ``a`` and ``b`` were first converted to naive UTC datetimes. The
result is ``(a.replace(tzinfo=None) - a.utcoffset()) - (b.replace(tzinfo=None)
- b.utcoffset())`` except that the implementation never overflows.
@@ -1454,11 +1454,11 @@ Instance methods:
.. method:: datetime.utctimetuple()
- If :class:`.datetime` instance *d* is naive, this is the same as
+ If :class:`.datetime` instance ``d`` is naive, this is the same as
``d.timetuple()`` except that :attr:`~.time.struct_time.tm_isdst` is forced to 0 regardless of what
``d.dst()`` returns. DST is never in effect for a UTC time.
- If *d* is aware, *d* is normalized to UTC time, by subtracting
+ If ``d`` is aware, ``d`` is normalized to UTC time, by subtracting
``d.utcoffset()``, and a :class:`time.struct_time` for the
normalized time is returned. :attr:`!tm_isdst` is forced to 0. Note
that an :exc:`OverflowError` may be raised if ``d.year`` was
@@ -1606,7 +1606,7 @@ Instance methods:
.. method:: datetime.__str__()
- For a :class:`.datetime` instance *d*, ``str(d)`` is equivalent to
+ For a :class:`.datetime` instance ``d``, ``str(d)`` is equivalent to
``d.isoformat(' ')``.
@@ -1853,7 +1853,7 @@ Instance attributes (read-only):
.. versionadded:: 3.6
:class:`.time` objects support equality and order comparisons,
-where *a* is considered less than *b* when *a* precedes *b* in time.
+where ``a`` is considered less than ``b`` when ``a`` precedes ``b`` in time.
Naive and aware :class:`!time` objects are never equal.
Order comparison between naive and aware :class:`!time` objects raises
@@ -2000,7 +2000,7 @@ Instance methods:
.. method:: time.__str__()
- For a time *t*, ``str(t)`` is equivalent to ``t.isoformat()``.
+ For a time ``t``, ``str(t)`` is equivalent to ``t.isoformat()``.
.. method:: time.strftime(format)
diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst
index 916f17cadfaa7ed..c9a3e448cad0630 100644
--- a/Doc/library/decimal.rst
+++ b/Doc/library/decimal.rst
@@ -598,6 +598,23 @@ Decimal objects
.. versionadded:: 3.1
+ .. classmethod:: from_number(number)
+
+ Alternative constructor that only accepts instances of
+ :class:`float`, :class:`int` or :class:`Decimal`, but not strings
+ or tuples.
+
+ .. doctest::
+
+ >>> Decimal.from_number(314)
+ Decimal('314')
+ >>> Decimal.from_number(0.1)
+ Decimal('0.1000000000000000055511151231257827021181583404541015625')
+ >>> Decimal.from_number(Decimal('3.14'))
+ Decimal('3.14')
+
+ .. versionadded:: 3.14
+
.. method:: fma(other, third, context=None)
Fused multiply-add. Return self*other+third with no rounding of the
diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst
index 2ee154952549aca..fc7f9a6301a9153 100644
--- a/Doc/library/fractions.rst
+++ b/Doc/library/fractions.rst
@@ -166,6 +166,16 @@ another rational number, or from a string.
instance.
+ .. classmethod:: from_number(number)
+
+ Alternative constructor which only accepts instances of
+ :class:`numbers.Integral`, :class:`numbers.Rational`,
+ :class:`float` or :class:`decimal.Decimal`, and objects with
+ the :meth:`!as_integer_ratio` method, but not strings.
+
+ .. versionadded:: 3.14
+
+
.. method:: limit_denominator(max_denominator=1000000)
Finds and returns the closest :class:`Fraction` to ``self`` that has
diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst
index 7f8df704a333276..0638df04c6ff407 100644
--- a/Doc/library/functions.rst
+++ b/Doc/library/functions.rst
@@ -686,7 +686,7 @@ are always available. They are listed here in alphabetical order.
The *closure* argument specifies a closure--a tuple of cellvars.
It's only valid when the *object* is a code object containing
:term:`free (closure) variables `.
- The length of the tuple must exactly match the length of the code object'S
+ The length of the tuple must exactly match the length of the code object's
:attr:`~codeobject.co_freevars` attribute.
.. audit-event:: exec code_object exec
diff --git a/Doc/library/gzip.rst b/Doc/library/gzip.rst
index 6b6e158f6eba2cf..f24e73517e57674 100644
--- a/Doc/library/gzip.rst
+++ b/Doc/library/gzip.rst
@@ -184,11 +184,12 @@ The module defines the following items:
attribute instead.
-.. function:: compress(data, compresslevel=9, *, mtime=None)
+.. function:: compress(data, compresslevel=9, *, mtime=0)
Compress the *data*, returning a :class:`bytes` object containing
the compressed data. *compresslevel* and *mtime* have the same meaning as in
- the :class:`GzipFile` constructor above.
+ the :class:`GzipFile` constructor above,
+ but *mtime* defaults to 0 for reproducible output.
.. versionadded:: 3.2
.. versionchanged:: 3.8
@@ -203,6 +204,10 @@ The module defines the following items:
.. versionchanged:: 3.13
The gzip header OS byte is guaranteed to be set to 255 when this function
is used as was the case in 3.10 and earlier.
+ .. versionchanged:: 3.14
+ The *mtime* parameter now defaults to 0 for reproducible output.
+ For the previous behaviour of using the current time,
+ pass ``None`` to *mtime*.
.. function:: decompress(data)
diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst
index 833c71c4ce4b9a6..a6e2e3b8928ebe6 100644
--- a/Doc/library/stdtypes.rst
+++ b/Doc/library/stdtypes.rst
@@ -4505,14 +4505,14 @@ can be used interchangeably to index the same dictionary entry.
``dict([('foo', 100), ('bar', 200)])``, ``dict(foo=100, bar=200)``
If no positional argument is given, an empty dictionary is created.
- If a positional argument is given and it is a mapping object, a dictionary
- is created with the same key-value pairs as the mapping object. Otherwise,
- the positional argument must be an :term:`iterable` object. Each item in
- the iterable must itself be an iterable with exactly two objects. The
- first object of each item becomes a key in the new dictionary, and the
- second object the corresponding value. If a key occurs more than once, the
- last value for that key becomes the corresponding value in the new
- dictionary.
+ If a positional argument is given and it defines a ``keys()`` method, a
+ dictionary is created by calling :meth:`~object.__getitem__` on the argument with
+ each returned key from the method. Otherwise, the positional argument must be an
+ :term:`iterable` object. Each item in the iterable must itself be an iterable
+ with exactly two elements. The first element of each item becomes a key in the
+ new dictionary, and the second element the corresponding value. If a key occurs
+ more than once, the last value for that key becomes the corresponding value in
+ the new dictionary.
If keyword arguments are given, the keyword arguments and their values are
added to the dictionary created from the positional argument. If a key
@@ -4669,10 +4669,11 @@ can be used interchangeably to index the same dictionary entry.
Update the dictionary with the key/value pairs from *other*, overwriting
existing keys. Return ``None``.
- :meth:`update` accepts either another dictionary object or an iterable of
- key/value pairs (as tuples or other iterables of length two). If keyword
- arguments are specified, the dictionary is then updated with those
- key/value pairs: ``d.update(red=1, blue=2)``.
+ :meth:`update` accepts either another object with a ``keys()`` method (in
+ which case :meth:`~object.__getitem__` is called with every key returned from
+ the method), or an iterable of key/value pairs (as tuples or other iterables
+ of length two). If keyword arguments are specified, the dictionary is then
+ updated with those key/value pairs: ``d.update(red=1, blue=2)``.
.. method:: values()
diff --git a/Doc/library/string.rst b/Doc/library/string.rst
index 57a1f9205230353..49aeb28d57c8d17 100644
--- a/Doc/library/string.rst
+++ b/Doc/library/string.rst
@@ -509,9 +509,8 @@ The available presentation types for :class:`float` and
| | significant digits. With no precision given, uses a |
| | precision of ``6`` digits after the decimal point for |
| | :class:`float`, and shows all coefficient digits |
- | | for :class:`~decimal.Decimal`. If no digits follow the |
- | | decimal point, the decimal point is also removed unless |
- | | the ``#`` option is used. |
+ | | for :class:`~decimal.Decimal`. If ``p=0``, the decimal |
+ | | point is omitted unless the ``#`` option is used. |
+---------+----------------------------------------------------------+
| ``'E'`` | Scientific notation. Same as ``'e'`` except it uses |
| | an upper case 'E' as the separator character. |
@@ -522,9 +521,8 @@ The available presentation types for :class:`float` and
| | precision given, uses a precision of ``6`` digits after |
| | the decimal point for :class:`float`, and uses a |
| | precision large enough to show all coefficient digits |
- | | for :class:`~decimal.Decimal`. If no digits follow the |
- | | decimal point, the decimal point is also removed unless |
- | | the ``#`` option is used. |
+ | | for :class:`~decimal.Decimal`. If ``p=0``, the decimal |
+ | | point is omitted unless the ``#`` option is used. |
+---------+----------------------------------------------------------+
| ``'F'`` | Fixed-point notation. Same as ``'f'``, but converts |
| | ``nan`` to ``NAN`` and ``inf`` to ``INF``. |
diff --git a/Doc/library/traceback.rst b/Doc/library/traceback.rst
index 401e12be45f418d..100a92b73d5497a 100644
--- a/Doc/library/traceback.rst
+++ b/Doc/library/traceback.rst
@@ -8,11 +8,15 @@
--------------
-This module provides a standard interface to extract, format and print stack
-traces of Python programs. It exactly mimics the behavior of the Python
-interpreter when it prints a stack trace. This is useful when you want to print
-stack traces under program control, such as in a "wrapper" around the
-interpreter.
+This module provides a standard interface to extract, format and print
+stack traces of Python programs. It is more flexible than the
+interpreter's default traceback display, and therefore makes it
+possible to configure certain aspects of the output. Finally,
+it contains a utility for capturing enough information about an
+exception to print it later, without the need to save a reference
+to the actual exception. Since exceptions can be the roots of large
+object graphs, this utility can significantly improve
+memory management.
.. index:: pair: object; traceback
@@ -29,7 +33,20 @@ which are assigned to the :attr:`~BaseException.__traceback__` field of
Module :mod:`pdb`
Interactive source code debugger for Python programs.
-The module defines the following functions:
+The module's API can be divided into two parts:
+
+* Module-level functions offering basic functionality, which are useful for interactive
+ inspection of exceptions and tracebacks.
+
+* :class:`TracebackException` class and its helper classes
+ :class:`StackSummary` and :class:`FrameSummary`. These offer both more
+ flexibility in the output generated and the ability to store the information
+ necessary for later formatting without holding references to actual exception
+ and traceback objects.
+
+
+Module-Level Functions
+----------------------
.. function:: print_tb(tb, limit=None, file=None)
@@ -237,7 +254,6 @@ The module defines the following functions:
.. versionadded:: 3.5
-The module also defines the following classes:
:class:`!TracebackException` Objects
------------------------------------
@@ -245,12 +261,17 @@ The module also defines the following classes:
.. versionadded:: 3.5
:class:`!TracebackException` objects are created from actual exceptions to
-capture data for later printing in a lightweight fashion.
+capture data for later printing. They offer a more lightweight method of
+storing this information by avoiding holding references to
+:ref:`traceback` and :ref:`frame` objects.
+In addition, they expose more options to configure the output compared to
+the module-level functions described above.
.. class:: TracebackException(exc_type, exc_value, exc_traceback, *, limit=None, lookup_lines=True, capture_locals=False, compact=False, max_group_width=15, max_group_depth=10)
- Capture an exception for later rendering. *limit*, *lookup_lines* and
- *capture_locals* are as for the :class:`StackSummary` class.
+ Capture an exception for later rendering. The meanings of *limit*,
+ *lookup_lines* and *capture_locals* are as for the :class:`StackSummary`
+ class.
If *compact* is true, only data that is required by
:class:`!TracebackException`'s :meth:`format` method
@@ -509,8 +530,8 @@ in a :ref:`traceback `.
.. _traceback-example:
-Traceback Examples
-------------------
+Examples of Using the Module-Level Functions
+--------------------------------------------
This simple example implements a basic read-eval-print loop, similar to (but
less useful than) the standard Python interactive interpreter loop. For a more
@@ -549,8 +570,7 @@ exception and traceback:
try:
lumberjack()
- except IndexError:
- exc = sys.exception()
+ except IndexError as exc:
print("*** print_tb:")
traceback.print_tb(exc.__traceback__, limit=1, file=sys.stdout)
print("*** print_exception:")
@@ -653,5 +673,88 @@ This last example demonstrates the final few formatting functions:
[' File "spam.py", line 3, in \n spam.eggs()\n',
' File "eggs.py", line 42, in eggs\n return "bacon"\n']
>>> an_error = IndexError('tuple index out of range')
- >>> traceback.format_exception_only(type(an_error), an_error)
+ >>> traceback.format_exception_only(an_error)
['IndexError: tuple index out of range\n']
+
+
+Examples of Using :class:`TracebackException`
+---------------------------------------------
+
+With the helper class, we have more options::
+
+ >>> import sys
+ >>> from traceback import TracebackException
+ >>>
+ >>> def lumberjack():
+ ... bright_side_of_life()
+ ...
+ >>> def bright_side_of_life():
+ ... t = "bright", "side", "of", "life"
+ ... return t[5]
+ ...
+ >>> try:
+ ... lumberjack()
+ ... except IndexError as e:
+ ... exc = e
+ ...
+ >>> try:
+ ... try:
+ ... lumberjack()
+ ... except:
+ ... 1/0
+ ... except Exception as e:
+ ... chained_exc = e
+ ...
+ >>> # limit works as with the module-level functions
+ >>> TracebackException.from_exception(exc, limit=-2).print()
+ Traceback (most recent call last):
+ File "", line 6, in lumberjack
+ bright_side_of_life()
+ ~~~~~~~~~~~~~~~~~~~^^
+ File "", line 10, in bright_side_of_life
+ return t[5]
+ ~^^^
+ IndexError: tuple index out of range
+
+ >>> # capture_locals adds local variables in frames
+ >>> TracebackException.from_exception(exc, limit=-2, capture_locals=True).print()
+ Traceback (most recent call last):
+ File "", line 6, in lumberjack
+ bright_side_of_life()
+ ~~~~~~~~~~~~~~~~~~~^^
+ File "", line 10, in bright_side_of_life
+ return t[5]
+ ~^^^
+ t = ("bright", "side", "of", "life")
+ IndexError: tuple index out of range
+
+ >>> # The *chain* kwarg to print() controls whether chained
+ >>> # exceptions are displayed
+ >>> TracebackException.from_exception(chained_exc).print()
+ Traceback (most recent call last):
+ File "", line 4, in
+ lumberjack()
+ ~~~~~~~~~~^^
+ File "", line 7, in lumberjack
+ bright_side_of_life()
+ ~~~~~~~~~~~~~~~~~~~^^
+ File "", line 11, in bright_side_of_life
+ return t[5]
+ ~^^^
+ IndexError: tuple index out of range
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "", line 6, in
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+
+ >>> TracebackException.from_exception(chained_exc).print(chain=False)
+ Traceback (most recent call last):
+ File "", line 6, in
+ 1/0
+ ~^~
+ ZeroDivisionError: division by zero
+
diff --git a/Doc/library/turtle.rst b/Doc/library/turtle.rst
index da801d4dc1f5b36..efa4b6f8f1d3f9b 100644
--- a/Doc/library/turtle.rst
+++ b/Doc/library/turtle.rst
@@ -14,6 +14,11 @@
from turtle import *
turtle = Turtle()
+.. testcleanup::
+
+ import os
+ os.remove("my_drawing.ps")
+
--------------
Introduction
diff --git a/Doc/reference/lexical_analysis.rst b/Doc/reference/lexical_analysis.rst
index ae5408ee386bbd3..f7167032ad7df98 100644
--- a/Doc/reference/lexical_analysis.rst
+++ b/Doc/reference/lexical_analysis.rst
@@ -284,11 +284,10 @@ UAX-31, with elaboration and changes as defined below; see also :pep:`3131` for
further details.
Within the ASCII range (U+0001..U+007F), the valid characters for identifiers
-are the same as in Python 2.x: the uppercase and lowercase letters ``A`` through
+include the uppercase and lowercase letters ``A`` through
``Z``, the underscore ``_`` and, except for the first character, the digits
``0`` through ``9``.
-
-Python 3.0 introduces additional characters from outside the ASCII range (see
+Python 3.0 introduced additional characters from outside the ASCII range (see
:pep:`3131`). For these characters, the classification uses the version of the
Unicode Character Database as included in the :mod:`unicodedata` module.
diff --git a/Doc/requirements.txt b/Doc/requirements.txt
index bf1028020b7af76..5105786ccf283c4 100644
--- a/Doc/requirements.txt
+++ b/Doc/requirements.txt
@@ -6,7 +6,7 @@
# Sphinx version is pinned so that new versions that introduce new warnings
# won't suddenly cause build failures. Updating the version is fine as long
# as no warnings are raised by doing so.
-sphinx~=8.0.0
+sphinx~=8.1.0
blurb
diff --git a/Doc/tutorial/controlflow.rst b/Doc/tutorial/controlflow.rst
index fd765e58ff2485e..b830ce94ba4f475 100644
--- a/Doc/tutorial/controlflow.rst
+++ b/Doc/tutorial/controlflow.rst
@@ -461,8 +461,8 @@ Defining Functions
We can create a function that writes the Fibonacci series to an arbitrary
boundary::
- >>> def fib(n): # write Fibonacci series up to n
- ... """Print a Fibonacci series up to n."""
+ >>> def fib(n): # write Fibonacci series less than n
+ ... """Print a Fibonacci series less than n."""
... a, b = 0, 1
... while a < n:
... print(a, end=' ')
@@ -832,7 +832,7 @@ parameters as there is a ``/`` in the function definition::
File "", line 1, in
TypeError: pos_only_arg() got some positional-only arguments passed as keyword arguments: 'arg'
-The third function ``kwd_only_args`` only allows keyword arguments as indicated
+The third function ``kwd_only_arg`` only allows keyword arguments as indicated
by a ``*`` in the function definition::
>>> kwd_only_arg(3)
diff --git a/Doc/tutorial/datastructures.rst b/Doc/tutorial/datastructures.rst
index 73f17adeea72dec..31941bc112a1358 100644
--- a/Doc/tutorial/datastructures.rst
+++ b/Doc/tutorial/datastructures.rst
@@ -19,13 +19,13 @@ objects:
.. method:: list.append(x)
:noindex:
- Add an item to the end of the list. Equivalent to ``a[len(a):] = [x]``.
+ Add an item to the end of the list. Similar to ``a[len(a):] = [x]``.
.. method:: list.extend(iterable)
:noindex:
- Extend the list by appending all the items from the iterable. Equivalent to
+ Extend the list by appending all the items from the iterable. Similar to
``a[len(a):] = iterable``.
@@ -56,7 +56,7 @@ objects:
.. method:: list.clear()
:noindex:
- Remove all items from the list. Equivalent to ``del a[:]``.
+ Remove all items from the list. Similar to ``del a[:]``.
.. method:: list.index(x[, start[, end]])
@@ -93,7 +93,7 @@ objects:
.. method:: list.copy()
:noindex:
- Return a shallow copy of the list. Equivalent to ``a[:]``.
+ Return a shallow copy of the list. Similar to ``a[:]``.
An example that uses most of the list methods::
diff --git a/Doc/tutorial/venv.rst b/Doc/tutorial/venv.rst
index 91e4ce18acef1dd..f362e1943b666f7 100644
--- a/Doc/tutorial/venv.rst
+++ b/Doc/tutorial/venv.rst
@@ -76,7 +76,7 @@ virtual environment you're using, and modify the environment so that running
``python`` will get you that particular version and installation of Python.
For example:
-.. code-block:: bash
+.. code-block:: console
$ source ~/envs/tutorial-env/bin/activate
(tutorial-env) $ python
@@ -108,7 +108,7 @@ complete documentation for ``pip``.)
You can install the latest version of a package by specifying a package's name:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip install novas
Collecting novas
@@ -120,7 +120,7 @@ You can install the latest version of a package by specifying a package's name:
You can also install a specific version of a package by giving the
package name followed by ``==`` and the version number:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip install requests==2.6.0
Collecting requests==2.6.0
@@ -133,7 +133,7 @@ version is already installed and do nothing. You can supply a
different version number to get that version, or you can run ``python
-m pip install --upgrade`` to upgrade the package to the latest version:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip install --upgrade requests
Collecting requests
@@ -148,7 +148,7 @@ remove the packages from the virtual environment.
``python -m pip show`` will display information about a particular package:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip show requests
---
@@ -166,7 +166,7 @@ remove the packages from the virtual environment.
``python -m pip list`` will display all of the packages installed in
the virtual environment:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip list
novas (3.1.1.3)
@@ -179,7 +179,7 @@ the virtual environment:
but the output uses the format that ``python -m pip install`` expects.
A common convention is to put this list in a ``requirements.txt`` file:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip freeze > requirements.txt
(tutorial-env) $ cat requirements.txt
@@ -191,7 +191,7 @@ The ``requirements.txt`` can then be committed to version control and
shipped as part of an application. Users can then install all the
necessary packages with ``install -r``:
-.. code-block:: bash
+.. code-block:: console
(tutorial-env) $ python -m pip install -r requirements.txt
Collecting novas==3.1.1.3 (from -r requirements.txt (line 1))
diff --git a/Doc/using/configure.rst b/Doc/using/configure.rst
index 4976418ba33cf86..10cdf2376229ff4 100644
--- a/Doc/using/configure.rst
+++ b/Doc/using/configure.rst
@@ -29,7 +29,7 @@ Features and minimum versions required to build CPython:
* Tcl/Tk 8.5.12 for the :mod:`tkinter` module.
-* Autoconf 2.71 and aclocal 1.16.4 are required to regenerate the
+* Autoconf 2.71 and aclocal 1.16.5 are required to regenerate the
:file:`configure` script.
.. versionchanged:: 3.1
@@ -56,7 +56,7 @@ Features and minimum versions required to build CPython:
Tcl/Tk version 8.5.12 is now required for the :mod:`tkinter` module.
.. versionchanged:: 3.13
- Autoconf 2.71, aclocal 1.16.4 and SQLite 3.15.2 are now required.
+ Autoconf 2.71, aclocal 1.16.5 and SQLite 3.15.2 are now required.
See also :pep:`7` "Style Guide for C Code" and :pep:`11` "CPython platform
support".
diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst
index a2897097aaba57d..f9e74a9b8ff9c66 100644
--- a/Doc/whatsnew/3.13.rst
+++ b/Doc/whatsnew/3.13.rst
@@ -2495,9 +2495,9 @@ Build Changes
* Building CPython now requires a compiler with support for the C11 atomic
library, GCC built-in atomic functions, or MSVC interlocked intrinsics.
-* Autoconf 2.71 and aclocal 1.16.4 are now required to regenerate
+* Autoconf 2.71 and aclocal 1.16.5 are now required to regenerate
the :file:`configure` script.
- (Contributed by Christian Heimes in :gh:`89886`.)
+ (Contributed by Christian Heimes in :gh:`89886` and by Victor Stinner in :gh:`112090`.)
* SQLite 3.15.2 or newer is required to build
the :mod:`sqlite3` extension module.
diff --git a/Doc/whatsnew/3.14.rst b/Doc/whatsnew/3.14.rst
index c62a3ca5872eefd..b106578fe9e8b01 100644
--- a/Doc/whatsnew/3.14.rst
+++ b/Doc/whatsnew/3.14.rst
@@ -1,6 +1,6 @@
****************************
- What's New In Python 3.14
+ What's new in Python 3.14
****************************
:Editor: TBD
@@ -56,7 +56,7 @@ For full details, see the :ref:`changelog `.
so it's worth checking back even after reading earlier versions.
-Summary -- Release highlights
+Summary -- release highlights
=============================
.. This section singles out the most important changes in Python 3.14.
@@ -67,12 +67,12 @@ Summary -- Release highlights
-New Features
+New features
============
-.. _whatsnew-314-pep649:
+.. _whatsnew314-pep649:
-PEP 649: Deferred Evaluation of Annotations
+PEP 649: deferred evaluation of annotations
-------------------------------------------
The :term:`annotations ` on functions, classes, and modules are no
@@ -150,12 +150,12 @@ In Python 3.7, :pep:`563` introduced the ``from __future__ import annotations``
directive, which turns all annotations into strings. This directive is now
considered deprecated and it is expected to be removed in a future version of Python.
However, this removal will not happen until after Python 3.13, the last version of
-Python without deferred evaluation of annotations, reaches its end of life.
+Python without deferred evaluation of annotations, reaches its end of life in 2029.
In Python 3.14, the behavior of code using ``from __future__ import annotations``
is unchanged.
-Improved Error Messages
+Improved error messages
-----------------------
* When unpacking assignment fails due to incorrect number of variables, the
@@ -172,16 +172,16 @@ Improved Error Messages
ValueError: too many values to unpack (expected 3, got 4)
-Other Language Changes
+Other language changes
======================
* Incorrect usage of :keyword:`await` and asynchronous comprehensions
is now detected even if the code is optimized away by the :option:`-O`
- command line option. For example, ``python -O -c 'assert await 1'``
+ command-line option. For example, ``python -O -c 'assert await 1'``
now produces a :exc:`SyntaxError`. (Contributed by Jelle Zijlstra in :gh:`121637`.)
* Writes to ``__debug__`` are now detected even if the code is optimized
- away by the :option:`-O` command line option. For example,
+ away by the :option:`-O` command-line option. For example,
``python -O -c 'assert (__debug__ := 1)'`` now produces a
:exc:`SyntaxError`. (Contributed by Irit Katriel in :gh:`122245`.)
@@ -191,7 +191,7 @@ Other Language Changes
(Contributed by Serhiy Storchaka in :gh:`84978`.)
-New Modules
+New modules
===========
* :mod:`annotationlib`: For introspecting :term:`annotations `.
@@ -199,7 +199,7 @@ New Modules
(Contributed by Jelle Zijlstra in :gh:`119180`.)
-Improved Modules
+Improved modules
================
argparse
@@ -214,7 +214,7 @@ ast
---
* Add :func:`ast.compare` for comparing two ASTs.
- (Contributed by Batuhan Taskaya and Jeremy Hylton in :issue:`15987`.)
+ (Contributed by Batuhan Taskaya and Jeremy Hylton in :gh:`60191`.)
* Add support for :func:`copy.replace` for AST nodes.
(Contributed by Bénédikt Tran in :gh:`121141`.)
@@ -239,6 +239,18 @@ ctypes
to help match a non-default ABI.
(Contributed by Petr Viktorin in :gh:`97702`.)
+decimal
+-------
+
+* Add alternative :class:`~decimal.Decimal` constructor
+ :meth:`Decimal.from_number() `.
+ (Contributed by Serhiy Storchaka in :gh:`121798`.)
+
+datetime
+--------
+
+* Add :meth:`datetime.time.strptime` and :meth:`datetime.date.strptime`.
+ (Contributed by Wannes Boeykens in :gh:`41431`.)
dis
---
@@ -248,9 +260,10 @@ dis
This feature is added to the following interfaces via the *show_positions*
keyword argument:
- - :class:`dis.Bytecode`,
- - :func:`dis.dis`, :func:`dis.distb`, and
- - :func:`dis.disassemble`.
+ - :class:`dis.Bytecode`
+ - :func:`dis.dis`
+ - :func:`dis.distb`
+ - :func:`dis.disassemble`
This feature is also exposed via :option:`dis --show-positions`.
(Contributed by Bénédikt Tran in :gh:`123165`.)
@@ -263,6 +276,10 @@ fractions
:meth:`!as_integer_ratio` method to a :class:`~fractions.Fraction`.
(Contributed by Serhiy Storchaka in :gh:`82017`.)
+* Add alternative :class:`~fractions.Fraction` constructor
+ :meth:`Fraction.from_number() `.
+ (Contributed by Serhiy Storchaka in :gh:`121797`.)
+
functools
---------
@@ -300,7 +317,8 @@ json
of the error.
(Contributed by Serhiy Storchaka in :gh:`122163`.)
-* Enable the :mod:`json` module to work as a script using the :option:`-m` switch: ``python -m json``.
+* Enable the :mod:`json` module to work as a script using the :option:`-m`
+ switch: :program:`python -m json`.
See the :ref:`JSON command-line interface ` documentation.
(Contributed by Trey Hunner in :gh:`122873`.)
@@ -315,12 +333,6 @@ operator
(Contributed by Raymond Hettinger and Nico Mexis in :gh:`115808`.)
-datetime
---------
-
-* Add :meth:`datetime.time.strptime` and :meth:`datetime.date.strptime`.
- (Contributed by Wannes Boeykens in :gh:`41431`.)
-
os
--
@@ -347,11 +359,11 @@ pathlib
pdb
---
-* Hard-coded breakpoints (:func:`breakpoint` and :func:`pdb.set_trace`) now
+* Hardcoded breakpoints (:func:`breakpoint` and :func:`pdb.set_trace`) now
reuse the most recent :class:`~pdb.Pdb` instance that calls
:meth:`~pdb.Pdb.set_trace`, instead of creating a new one each time.
As a result, all the instance specific data like :pdbcmd:`display` and
- :pdbcmd:`commands` are preserved across hard-coded breakpoints.
+ :pdbcmd:`commands` are preserved across hardcoded breakpoints.
(Contributed by Tian Gao in :gh:`121450`.)
* Add a new argument *mode* to :class:`pdb.Pdb`. Disable the ``restart``
@@ -381,9 +393,9 @@ symtable
* Expose the following :class:`symtable.Symbol` methods:
- * :meth:`~symtable.Symbol.is_free_class`
- * :meth:`~symtable.Symbol.is_comp_iter`
* :meth:`~symtable.Symbol.is_comp_cell`
+ * :meth:`~symtable.Symbol.is_comp_iter`
+ * :meth:`~symtable.Symbol.is_free_class`
(Contributed by Bénédikt Tran in :gh:`120029`.)
@@ -462,11 +474,11 @@ ast
* Remove the following classes. They were all deprecated since Python 3.8,
and have emitted deprecation warnings since Python 3.12:
- * :class:`!ast.Num`
- * :class:`!ast.Str`
* :class:`!ast.Bytes`
- * :class:`!ast.NameConstant`
* :class:`!ast.Ellipsis`
+ * :class:`!ast.NameConstant`
+ * :class:`!ast.Num`
+ * :class:`!ast.Str`
Use :class:`ast.Constant` instead. As a consequence of these removals,
user-defined ``visit_Num``, ``visit_Str``, ``visit_Bytes``,
@@ -491,16 +503,16 @@ asyncio
* Remove the following classes and functions. They were all deprecated and
emitted deprecation warnings since Python 3.12:
+ * :func:`!asyncio.get_child_watcher`
+ * :func:`!asyncio.set_child_watcher`
+ * :meth:`!asyncio.AbstractEventLoopPolicy.get_child_watcher`
+ * :meth:`!asyncio.AbstractEventLoopPolicy.set_child_watcher`
* :class:`!asyncio.AbstractChildWatcher`
- * :class:`!asyncio.SafeChildWatcher`
- * :class:`!asyncio.MultiLoopChildWatcher`
* :class:`!asyncio.FastChildWatcher`
- * :class:`!asyncio.ThreadedChildWatcher`
+ * :class:`!asyncio.MultiLoopChildWatcher`
* :class:`!asyncio.PidfdChildWatcher`
- * :meth:`!asyncio.AbstractEventLoopPolicy.get_child_watcher`
- * :meth:`!asyncio.AbstractEventLoopPolicy.set_child_watcher`
- * :func:`!asyncio.get_child_watcher`
- * :func:`!asyncio.set_child_watcher`
+ * :class:`!asyncio.SafeChildWatcher`
+ * :class:`!asyncio.ThreadedChildWatcher`
(Contributed by Kumar Aditya in :gh:`120804`.)
@@ -613,14 +625,14 @@ Changes in the Python API
(Contributed by Serhiy Storchaka in :gh:`69998`.)
-Build Changes
+Build changes
=============
-C API Changes
+C API changes
=============
-New Features
+New features
------------
* Add :c:func:`PyLong_GetSign` function to get the sign of :class:`int` objects.
@@ -630,17 +642,17 @@ New Features
object:
* :c:func:`PyUnicodeWriter_Create`
+ * :c:func:`PyUnicodeWriter_DecodeUTF8Stateful`
* :c:func:`PyUnicodeWriter_Discard`
* :c:func:`PyUnicodeWriter_Finish`
+ * :c:func:`PyUnicodeWriter_Format`
* :c:func:`PyUnicodeWriter_WriteChar`
- * :c:func:`PyUnicodeWriter_WriteUTF8`
- * :c:func:`PyUnicodeWriter_WriteUCS4`
- * :c:func:`PyUnicodeWriter_WriteWideChar`
- * :c:func:`PyUnicodeWriter_WriteStr`
* :c:func:`PyUnicodeWriter_WriteRepr`
+ * :c:func:`PyUnicodeWriter_WriteStr`
* :c:func:`PyUnicodeWriter_WriteSubstring`
- * :c:func:`PyUnicodeWriter_Format`
- * :c:func:`PyUnicodeWriter_DecodeUTF8Stateful`
+ * :c:func:`PyUnicodeWriter_WriteUCS4`
+ * :c:func:`PyUnicodeWriter_WriteUTF8`
+ * :c:func:`PyUnicodeWriter_WriteWideChar`
(Contributed by Victor Stinner in :gh:`119182`.)
@@ -661,14 +673,14 @@ New Features
* Add new functions to convert C ```` numbers from/to Python
:class:`int`:
- * :c:func:`PyLong_FromInt32`
- * :c:func:`PyLong_FromInt64`
- * :c:func:`PyLong_FromUInt32`
- * :c:func:`PyLong_FromUInt64`
* :c:func:`PyLong_AsInt32`
* :c:func:`PyLong_AsInt64`
* :c:func:`PyLong_AsUInt32`
* :c:func:`PyLong_AsUInt64`
+ * :c:func:`PyLong_FromInt32`
+ * :c:func:`PyLong_FromInt64`
+ * :c:func:`PyLong_FromUInt32`
+ * :c:func:`PyLong_FromUInt64`
(Contributed by Victor Stinner in :gh:`120389`.)
@@ -691,20 +703,20 @@ New Features
* Add functions to configure the Python initialization (:pep:`741`):
+ * :c:func:`Py_InitializeFromInitConfig`
+ * :c:func:`PyInitConfig_AddModule`
* :c:func:`PyInitConfig_Create`
* :c:func:`PyInitConfig_Free`
+ * :c:func:`PyInitConfig_FreeStrList`
* :c:func:`PyInitConfig_GetError`
* :c:func:`PyInitConfig_GetExitCode`
- * :c:func:`PyInitConfig_HasOption`
* :c:func:`PyInitConfig_GetInt`
* :c:func:`PyInitConfig_GetStr`
* :c:func:`PyInitConfig_GetStrList`
- * :c:func:`PyInitConfig_FreeStrList`
+ * :c:func:`PyInitConfig_HasOption`
* :c:func:`PyInitConfig_SetInt`
* :c:func:`PyInitConfig_SetStr`
* :c:func:`PyInitConfig_SetStrList`
- * :c:func:`PyInitConfig_AddModule`
- * :c:func:`Py_InitializeFromInitConfig`
(Contributed by Victor Stinner in :gh:`107954`.)
diff --git a/Include/cpython/context.h b/Include/cpython/context.h
index ec72966e82c6f9b..3a7a4b459c09ad0 100644
--- a/Include/cpython/context.h
+++ b/Include/cpython/context.h
@@ -28,20 +28,22 @@ PyAPI_FUNC(int) PyContext_Enter(PyObject *);
PyAPI_FUNC(int) PyContext_Exit(PyObject *);
typedef enum {
- Py_CONTEXT_EVENT_ENTER,
- Py_CONTEXT_EVENT_EXIT,
+ /*
+ * The current context has switched to a different context. The object
+ * passed to the watch callback is the now-current contextvars.Context
+ * object, or None if no context is current.
+ */
+ Py_CONTEXT_SWITCHED = 1,
} PyContextEvent;
/*
- * Callback to be invoked when a context object is entered or exited.
- *
- * The callback is invoked with the event and a reference to
- * the context after its entered and before its exited.
+ * Context object watcher callback function. The object passed to the callback
+ * is event-specific; see PyContextEvent for details.
*
* if the callback returns with an exception set, it must return -1. Otherwise
* it should return 0
*/
-typedef int (*PyContext_WatchCallback)(PyContextEvent, PyContext *);
+typedef int (*PyContext_WatchCallback)(PyContextEvent, PyObject *);
/*
* Register a per-interpreter callback that will be invoked for context object
diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h
index 594fbb1c8e443bc..cff2b1f71147933 100644
--- a/Include/internal/pycore_ceval.h
+++ b/Include/internal/pycore_ceval.h
@@ -316,6 +316,8 @@ _Py_eval_breaker_bit_is_set(PyThreadState *tstate, uintptr_t bit)
void _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
void _Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
+PyAPI_FUNC(PyObject *) _PyFloat_FromDouble_ConsumeInputs(_PyStackRef left, _PyStackRef right, double value);
+
#ifdef __cplusplus
}
diff --git a/Include/internal/pycore_freelist_state.h b/Include/internal/pycore_freelist_state.h
index 762c583ce94e9a0..4e04cf431e0b31e 100644
--- a/Include/internal/pycore_freelist_state.h
+++ b/Include/internal/pycore_freelist_state.h
@@ -20,6 +20,7 @@ extern "C" {
# define Py_async_gen_asends_MAXFREELIST 80
# define Py_futureiters_MAXFREELIST 255
# define Py_object_stack_chunks_MAXFREELIST 4
+# define Py_unicode_writers_MAXFREELIST 1
// A generic freelist of either PyObjects or other data structures.
struct _Py_freelist {
@@ -44,6 +45,7 @@ struct _Py_freelists {
struct _Py_freelist async_gen_asends;
struct _Py_freelist futureiters;
struct _Py_freelist object_stack_chunks;
+ struct _Py_freelist unicode_writers;
};
#ifdef __cplusplus
diff --git a/Include/internal/pycore_global_objects.h b/Include/internal/pycore_global_objects.h
index 913dce6f1ec0fe3..e3f7ac707f0c37e 100644
--- a/Include/internal/pycore_global_objects.h
+++ b/Include/internal/pycore_global_objects.h
@@ -66,9 +66,6 @@ struct _Py_static_objects {
struct _Py_interp_cached_objects {
PyObject *interned_strings;
- /* AST */
- PyObject *str_replace_inf;
-
/* object.__reduce__ */
PyObject *objreduce;
PyObject *type_slots_pname;
diff --git a/Include/internal/pycore_global_objects_fini_generated.h b/Include/internal/pycore_global_objects_fini_generated.h
index 3140a75a47c5ee7..2fd7d5d13a98b29 100644
--- a/Include/internal/pycore_global_objects_fini_generated.h
+++ b/Include/internal/pycore_global_objects_fini_generated.h
@@ -11,7 +11,7 @@ extern "C" {
#ifdef Py_DEBUG
static inline void
_PyStaticObject_CheckRefcnt(PyObject *obj) {
- if (Py_REFCNT(obj) < _Py_IMMORTAL_REFCNT) {
+ if (!_Py_IsImmortal(obj)) {
fprintf(stderr, "Immortal Object has less refcnt than expected.\n");
_PyObject_Dump(obj);
}
@@ -562,6 +562,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) {
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(json_decoder));
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(kwdefaults));
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(list_err));
+ _PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(str_replace_inf));
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(type_params));
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_STR(utf_8));
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(CANCELLED));
diff --git a/Include/internal/pycore_global_strings.h b/Include/internal/pycore_global_strings.h
index 1591cb0a3f114f1..fc3871570cc49d9 100644
--- a/Include/internal/pycore_global_strings.h
+++ b/Include/internal/pycore_global_strings.h
@@ -48,6 +48,7 @@ struct _Py_global_strings {
STRUCT_FOR_STR(json_decoder, "json.decoder")
STRUCT_FOR_STR(kwdefaults, ".kwdefaults")
STRUCT_FOR_STR(list_err, "list index out of range")
+ STRUCT_FOR_STR(str_replace_inf, "1e309")
STRUCT_FOR_STR(type_params, ".type_params")
STRUCT_FOR_STR(utf_8, "utf-8")
} literals;
diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h
index d7e584094f78395..36cd71e5a007d54 100644
--- a/Include/internal/pycore_interp.h
+++ b/Include/internal/pycore_interp.h
@@ -102,9 +102,8 @@ struct _is {
PyInterpreterState *next;
int64_t id;
- int64_t id_refcount;
+ Py_ssize_t id_refcount;
int requires_idref;
- PyThread_type_lock id_mutex;
#define _PyInterpreterState_WHENCE_NOTSET -1
#define _PyInterpreterState_WHENCE_UNKNOWN 0
@@ -318,8 +317,7 @@ _PyInterpreterState_SetFinalizing(PyInterpreterState *interp, PyThreadState *tst
PyAPI_FUNC(int64_t) _PyInterpreterState_ObjectToID(PyObject *);
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(int64_t);
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpIDObject(PyObject *);
-PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *);
-PyAPI_FUNC(int) _PyInterpreterState_IDIncref(PyInterpreterState *);
+PyAPI_FUNC(void) _PyInterpreterState_IDIncref(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *);
PyAPI_FUNC(int) _PyInterpreterState_IsReady(PyInterpreterState *interp);
diff --git a/Include/internal/pycore_lock.h b/Include/internal/pycore_lock.h
index e6da083b807ce5b..57cbce8f126acab 100644
--- a/Include/internal/pycore_lock.h
+++ b/Include/internal/pycore_lock.h
@@ -64,8 +64,8 @@ PyMutex_LockFlags(PyMutex *m, _PyLockFlags flags)
}
}
-// Unlock a mutex, returns 0 if the mutex is not locked (used for improved
-// error messages).
+// Unlock a mutex, returns -1 if the mutex is not locked (used for improved
+// error messages) otherwise returns 0.
extern int _PyMutex_TryUnlock(PyMutex *m);
@@ -160,8 +160,9 @@ typedef struct {
PyAPI_FUNC(int) _PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m);
PyAPI_FUNC(void) _PyRecursiveMutex_Lock(_PyRecursiveMutex *m);
+extern PyLockStatus _PyRecursiveMutex_LockTimed(_PyRecursiveMutex *m, PyTime_t timeout, _PyLockFlags flags);
PyAPI_FUNC(void) _PyRecursiveMutex_Unlock(_PyRecursiveMutex *m);
-
+extern int _PyRecursiveMutex_TryUnlock(_PyRecursiveMutex *m);
// A readers-writer (RW) lock. The lock supports multiple concurrent readers or
// a single writer. The lock is write-preferring: if a writer is waiting while
diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h
index 0af13b1bcda20b7..8832692d03c29e3 100644
--- a/Include/internal/pycore_object.h
+++ b/Include/internal/pycore_object.h
@@ -16,9 +16,6 @@ extern "C" {
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include "pycore_uniqueid.h" // _PyType_IncrefSlow
-
-#define _Py_IMMORTAL_REFCNT_LOOSE ((_Py_IMMORTAL_REFCNT >> 1) + 1)
-
// This value is added to `ob_ref_shared` for objects that use deferred
// reference counting so that they are not immediately deallocated when the
// non-deferred reference count drops to zero.
@@ -27,25 +24,8 @@ extern "C" {
// `ob_ref_shared` are used for flags.
#define _Py_REF_DEFERRED (PY_SSIZE_T_MAX / 8)
-// gh-121528, gh-118997: Similar to _Py_IsImmortal() but be more loose when
-// comparing the reference count to stay compatible with C extensions built
-// with the stable ABI 3.11 or older. Such extensions implement INCREF/DECREF
-// as refcnt++ and refcnt-- without taking in account immortal objects. For
-// example, the reference count of an immortal object can change from
-// _Py_IMMORTAL_REFCNT to _Py_IMMORTAL_REFCNT+1 (INCREF) or
-// _Py_IMMORTAL_REFCNT-1 (DECREF).
-//
-// This function should only be used in assertions. Otherwise, _Py_IsImmortal()
-// must be used instead.
-static inline int _Py_IsImmortalLoose(PyObject *op)
-{
-#if defined(Py_GIL_DISABLED)
- return _Py_IsImmortal(op);
-#else
- return (op->ob_refcnt >= _Py_IMMORTAL_REFCNT_LOOSE);
-#endif
-}
-#define _Py_IsImmortalLoose(op) _Py_IsImmortalLoose(_PyObject_CAST(op))
+/* For backwards compatibility -- Do not use this */
+#define _Py_IsImmortalLoose(op) _Py_IsImmortal
/* Check if an object is consistent. For example, ensure that the reference
@@ -97,7 +77,7 @@ PyAPI_FUNC(int) _PyObject_IsFreed(PyObject *);
#else
#define _PyObject_HEAD_INIT(type) \
{ \
- .ob_refcnt = _Py_IMMORTAL_REFCNT, \
+ .ob_refcnt = _Py_IMMORTAL_INITIAL_REFCNT, \
.ob_type = (type) \
}
#endif
@@ -184,7 +164,7 @@ PyAPI_FUNC(void) _Py_SetImmortalUntracked(PyObject *op);
static inline void _Py_SetMortal(PyObject *op, Py_ssize_t refcnt)
{
if (op) {
- assert(_Py_IsImmortalLoose(op));
+ assert(_Py_IsImmortal(op));
#ifdef Py_GIL_DISABLED
op->ob_tid = _Py_UNOWNED_TID;
op->ob_ref_local = 0;
@@ -316,7 +296,7 @@ static inline void
_Py_INCREF_TYPE(PyTypeObject *type)
{
if (!_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
- assert(_Py_IsImmortalLoose(type));
+ assert(_Py_IsImmortal(type));
_Py_INCREF_IMMORTAL_STAT_INC();
return;
}
@@ -357,7 +337,7 @@ static inline void
_Py_DECREF_TYPE(PyTypeObject *type)
{
if (!_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
- assert(_Py_IsImmortalLoose(type));
+ assert(_Py_IsImmortal(type));
_Py_DECREF_IMMORTAL_STAT_INC();
return;
}
@@ -393,7 +373,7 @@ _PyObject_Init(PyObject *op, PyTypeObject *typeobj)
{
assert(op != NULL);
Py_SET_TYPE(op, typeobj);
- assert(_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE) || _Py_IsImmortalLoose(typeobj));
+ assert(_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE) || _Py_IsImmortal(typeobj));
_Py_INCREF_TYPE(typeobj);
_Py_NewReference(op);
}
diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h
index 8fec45b1e8d5c31..c18423476d39621 100644
--- a/Include/internal/pycore_opcode_metadata.h
+++ b/Include/internal/pycore_opcode_metadata.h
@@ -1015,13 +1015,13 @@ extern const struct opcode_metadata _PyOpcode_opcode_metadata[266];
#ifdef NEED_OPCODE_METADATA
const struct opcode_metadata _PyOpcode_opcode_metadata[266] = {
[BINARY_OP] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
- [BINARY_OP_ADD_FLOAT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG },
+ [BINARY_OP_ADD_FLOAT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_ADD_INT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_ADD_UNICODE] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_INPLACE_ADD_UNICODE] = { true, INSTR_FMT_IXC, HAS_LOCAL_FLAG | HAS_DEOPT_FLAG | HAS_EXIT_FLAG | HAS_ERROR_FLAG },
- [BINARY_OP_MULTIPLY_FLOAT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG },
+ [BINARY_OP_MULTIPLY_FLOAT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_MULTIPLY_INT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
- [BINARY_OP_SUBTRACT_FLOAT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG },
+ [BINARY_OP_SUBTRACT_FLOAT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_SUBTRACT_INT] = { true, INSTR_FMT_IXC, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_SLICE] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
[BINARY_SUBSCR] = { true, INSTR_FMT_IXC, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
diff --git a/Include/internal/pycore_runtime_init_generated.h b/Include/internal/pycore_runtime_init_generated.h
index c9d20d0b5aacdbf..3b80e265b0ca50a 100644
--- a/Include/internal/pycore_runtime_init_generated.h
+++ b/Include/internal/pycore_runtime_init_generated.h
@@ -557,6 +557,7 @@ extern "C" {
INIT_STR(json_decoder, "json.decoder"), \
INIT_STR(kwdefaults, ".kwdefaults"), \
INIT_STR(list_err, "list index out of range"), \
+ INIT_STR(str_replace_inf, "1e309"), \
INIT_STR(type_params, ".type_params"), \
INIT_STR(utf_8, "utf-8"), \
}
diff --git a/Include/internal/pycore_stackref.h b/Include/internal/pycore_stackref.h
index 7d1eb11aa5ecb83..588e57f6cd97e04 100644
--- a/Include/internal/pycore_stackref.h
+++ b/Include/internal/pycore_stackref.h
@@ -76,6 +76,13 @@ PyStackRef_AsPyObjectBorrow(_PyStackRef stackref)
#define PyStackRef_IsDeferred(ref) (((ref).bits & Py_TAG_BITS) == Py_TAG_DEFERRED)
+static inline PyObject *
+PyStackRef_NotDeferred_AsPyObject(_PyStackRef stackref)
+{
+ assert(!PyStackRef_IsDeferred(stackref));
+ return (PyObject *)stackref.bits;
+}
+
static inline PyObject *
PyStackRef_AsPyObjectSteal(_PyStackRef stackref)
{
@@ -153,6 +160,8 @@ PyStackRef_AsStrongReference(_PyStackRef stackref)
return PyStackRef_FromPyObjectSteal(PyStackRef_AsPyObjectSteal(stackref));
}
+#define PyStackRef_CLOSE_SPECIALIZED(stackref, dealloc) PyStackRef_CLOSE(stackref)
+
#else // Py_GIL_DISABLED
@@ -177,6 +186,7 @@ static const _PyStackRef PyStackRef_NULL = { .bits = 0 };
#define PyStackRef_DUP(stackref) PyStackRef_FromPyObjectSteal(Py_NewRef(PyStackRef_AsPyObjectBorrow(stackref)))
+#define PyStackRef_CLOSE_SPECIALIZED(stackref, dealloc) _Py_DECREF_SPECIALIZED(PyStackRef_AsPyObjectBorrow(stackref), dealloc)
#endif // Py_GIL_DISABLED
diff --git a/Include/internal/pycore_unicodeobject_generated.h b/Include/internal/pycore_unicodeobject_generated.h
index d335373e88ee74d..eb2eca06ec4d4f6 100644
--- a/Include/internal/pycore_unicodeobject_generated.h
+++ b/Include/internal/pycore_unicodeobject_generated.h
@@ -2936,6 +2936,10 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) {
_PyUnicode_InternStatic(interp, &string);
assert(_PyUnicode_CheckConsistency(string, 1));
assert(PyUnicode_GET_LENGTH(string) != 1);
+ string = &_Py_STR(str_replace_inf);
+ _PyUnicode_InternStatic(interp, &string);
+ assert(_PyUnicode_CheckConsistency(string, 1));
+ assert(PyUnicode_GET_LENGTH(string) != 1);
string = &_Py_STR(anon_null);
_PyUnicode_InternStatic(interp, &string);
assert(_PyUnicode_CheckConsistency(string, 1));
diff --git a/Include/internal/pycore_uop_metadata.h b/Include/internal/pycore_uop_metadata.h
index fd41e9a5fe862bd..2f0a7fb2f6e549d 100644
--- a/Include/internal/pycore_uop_metadata.h
+++ b/Include/internal/pycore_uop_metadata.h
@@ -69,9 +69,9 @@ const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = {
[_GUARD_BOTH_FLOAT] = HAS_EXIT_FLAG,
[_GUARD_NOS_FLOAT] = HAS_EXIT_FLAG,
[_GUARD_TOS_FLOAT] = HAS_EXIT_FLAG,
- [_BINARY_OP_MULTIPLY_FLOAT] = HAS_PURE_FLAG,
- [_BINARY_OP_ADD_FLOAT] = HAS_PURE_FLAG,
- [_BINARY_OP_SUBTRACT_FLOAT] = HAS_PURE_FLAG,
+ [_BINARY_OP_MULTIPLY_FLOAT] = HAS_ERROR_FLAG | HAS_PURE_FLAG,
+ [_BINARY_OP_ADD_FLOAT] = HAS_ERROR_FLAG | HAS_PURE_FLAG,
+ [_BINARY_OP_SUBTRACT_FLOAT] = HAS_ERROR_FLAG | HAS_PURE_FLAG,
[_GUARD_BOTH_UNICODE] = HAS_EXIT_FLAG,
[_BINARY_OP_ADD_UNICODE] = HAS_ERROR_FLAG | HAS_PURE_FLAG,
[_BINARY_OP_INPLACE_ADD_UNICODE] = HAS_LOCAL_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG,
diff --git a/Include/object.h b/Include/object.h
index 418f2196062df7c..5be4dedadc20ebd 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -81,7 +81,7 @@ whose size is determined when the object is allocated.
#else
#define PyObject_HEAD_INIT(type) \
{ \
- { _Py_IMMORTAL_REFCNT }, \
+ { _Py_IMMORTAL_INITIAL_REFCNT }, \
(type) \
},
#endif
diff --git a/Include/refcount.h b/Include/refcount.h
index 9a4e15065ecab8c..141cbd34dd72e64 100644
--- a/Include/refcount.h
+++ b/Include/refcount.h
@@ -21,25 +21,30 @@ cleanup during runtime finalization.
#if SIZEOF_VOID_P > 4
/*
-In 64+ bit systems, an object will be marked as immortal by setting all of the
-lower 32 bits of the reference count field, which is equal to: 0xFFFFFFFF
+In 64+ bit systems, any object whose 32 bit reference count is >= 2**31
+will be treated as immortal.
Using the lower 32 bits makes the value backwards compatible by allowing
C-Extensions without the updated checks in Py_INCREF and Py_DECREF to safely
-increase and decrease the objects reference count. The object would lose its
-immortality, but the execution would still be correct.
+increase and decrease the objects reference count.
+
+In order to offer sufficient resilience to C extensions using the stable ABI
+compiled against 3.11 or earlier, we set the initial value near the
+middle of the range (2**31, 2**32). That way the refcount can be
+off by ~1 billion without affecting immortality.
Reference count increases will use saturated arithmetic, taking advantage of
having all the lower 32 bits set, which will avoid the reference count to go
beyond the refcount limit. Immortality checks for reference count decreases will
be done by checking the bit sign flag in the lower 32 bits.
+
*/
-#define _Py_IMMORTAL_REFCNT _Py_CAST(Py_ssize_t, UINT_MAX)
+#define _Py_IMMORTAL_INITIAL_REFCNT ((Py_ssize_t)(3UL << 30))
#else
/*
-In 32 bit systems, an object will be marked as immortal by setting all of the
-lower 30 bits of the reference count field, which is equal to: 0x3FFFFFFF
+In 32 bit systems, an object will be treated as immortal if its reference
+count equals or exceeds _Py_IMMORTAL_MINIMUM_REFCNT (2**30).
Using the lower 30 bits makes the value backwards compatible by allowing
C-Extensions without the updated checks in Py_INCREF and Py_DECREF to safely
@@ -47,9 +52,10 @@ increase and decrease the objects reference count. The object would lose its
immortality, but the execution would still be correct.
Reference count increases and decreases will first go through an immortality
-check by comparing the reference count field to the immortality reference count.
+check by comparing the reference count field to the minimum immortality refcount.
*/
-#define _Py_IMMORTAL_REFCNT _Py_CAST(Py_ssize_t, UINT_MAX >> 2)
+#define _Py_IMMORTAL_INITIAL_REFCNT ((Py_ssize_t)(3L << 29))
+#define _Py_IMMORTAL_MINIMUM_REFCNT ((Py_ssize_t)(1L << 30))
#endif
// Py_GIL_DISABLED builds indicate immortal objects using `ob_ref_local`, which is
@@ -90,7 +96,7 @@ PyAPI_FUNC(Py_ssize_t) Py_REFCNT(PyObject *ob);
#else
uint32_t local = _Py_atomic_load_uint32_relaxed(&ob->ob_ref_local);
if (local == _Py_IMMORTAL_REFCNT_LOCAL) {
- return _Py_IMMORTAL_REFCNT;
+ return _Py_IMMORTAL_INITIAL_REFCNT;
}
Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&ob->ob_ref_shared);
return _Py_STATIC_CAST(Py_ssize_t, local) +
@@ -109,9 +115,9 @@ static inline Py_ALWAYS_INLINE int _Py_IsImmortal(PyObject *op)
return (_Py_atomic_load_uint32_relaxed(&op->ob_ref_local) ==
_Py_IMMORTAL_REFCNT_LOCAL);
#elif SIZEOF_VOID_P > 4
- return (_Py_CAST(PY_INT32_T, op->ob_refcnt) < 0);
+ return _Py_CAST(PY_INT32_T, op->ob_refcnt) < 0;
#else
- return (op->ob_refcnt == _Py_IMMORTAL_REFCNT);
+ return op->ob_refcnt >= _Py_IMMORTAL_MINIMUM_REFCNT;
#endif
}
#define _Py_IsImmortal(op) _Py_IsImmortal(_PyObject_CAST(op))
@@ -236,7 +242,7 @@ static inline Py_ALWAYS_INLINE void Py_INCREF(PyObject *op)
uint32_t new_local = local + 1;
if (new_local == 0) {
_Py_INCREF_IMMORTAL_STAT_INC();
- // local is equal to _Py_IMMORTAL_REFCNT: do nothing
+ // local is equal to _Py_IMMORTAL_REFCNT_LOCAL: do nothing
return;
}
if (_Py_IsOwnedByCurrentThread(op)) {
@@ -246,18 +252,14 @@ static inline Py_ALWAYS_INLINE void Py_INCREF(PyObject *op)
_Py_atomic_add_ssize(&op->ob_ref_shared, (1 << _Py_REF_SHARED_SHIFT));
}
#elif SIZEOF_VOID_P > 4
- // Portable saturated add, branching on the carry flag and set low bits
PY_UINT32_T cur_refcnt = op->ob_refcnt_split[PY_BIG_ENDIAN];
- PY_UINT32_T new_refcnt = cur_refcnt + 1;
- if (new_refcnt == 0) {
+ if (((int32_t)cur_refcnt) < 0) {
+ // the object is immortal
_Py_INCREF_IMMORTAL_STAT_INC();
- // cur_refcnt is equal to _Py_IMMORTAL_REFCNT: the object is immortal,
- // do nothing
return;
}
- op->ob_refcnt_split[PY_BIG_ENDIAN] = new_refcnt;
+ op->ob_refcnt_split[PY_BIG_ENDIAN] = cur_refcnt + 1;
#else
- // Explicitly check immortality against the immortal value
if (_Py_IsImmortal(op)) {
_Py_INCREF_IMMORTAL_STAT_INC();
return;
diff --git a/InternalDocs/README.md b/InternalDocs/README.md
index 8956ecafed2039b..0a6ecf899458ed5 100644
--- a/InternalDocs/README.md
+++ b/InternalDocs/README.md
@@ -11,6 +11,8 @@ The core dev team attempts to keep this documentation up to date. If
it is not, please report that through the
[issue tracker](https://github.com/python/cpython/issues).
+Index:
+-----
[Guide to the parser](parser.md)
@@ -22,4 +24,6 @@ it is not, please report that through the
[The Source Code Locations Table](locations.md)
+[Garbage collector design](garbage_collector.md)
+
[Exception Handling](exception_handling.md)
diff --git a/InternalDocs/garbage_collector.md b/InternalDocs/garbage_collector.md
new file mode 100644
index 000000000000000..fd0246fa1a60e29
--- /dev/null
+++ b/InternalDocs/garbage_collector.md
@@ -0,0 +1,596 @@
+
+Garbage collector design
+========================
+
+Abstract
+========
+
+The main garbage collection algorithm used by CPython is reference counting. The basic idea is
+that CPython counts how many different places there are that have a reference to an
+object. Such a place could be another object, or a global (or static) C variable, or
+a local variable in some C function. When an object’s reference count becomes zero,
+the object is deallocated. If it contains references to other objects, their
+reference counts are decremented. Those other objects may be deallocated in turn, if
+this decrement makes their reference count become zero, and so on. The reference
+count field can be examined using the ``sys.getrefcount()`` function (notice that the
+value returned by this function is always 1 more as the function also has a reference
+to the object when called):
+
+```pycon
+ >>> x = object()
+ >>> sys.getrefcount(x)
+ 2
+ >>> y = x
+ >>> sys.getrefcount(x)
+ 3
+ >>> del y
+ >>> sys.getrefcount(x)
+ 2
+```
+
+The main problem with the reference counting scheme is that it does not handle reference
+cycles. For instance, consider this code:
+
+```pycon
+ >>> container = []
+ >>> container.append(container)
+ >>> sys.getrefcount(container)
+ 3
+ >>> del container
+```
+
+In this example, ``container`` holds a reference to itself, so even when we remove
+our reference to it (the variable "container") the reference count never falls to 0
+because it still has its own internal reference. Therefore it would never be
+cleaned just by simple reference counting. For this reason some additional machinery
+is needed to clean these reference cycles between objects once they become
+unreachable. This is the cyclic garbage collector, usually called just Garbage
+Collector (GC), even though reference counting is also a form of garbage collection.
+
+Starting in version 3.13, CPython contains two GC implementations:
+
+- The default build implementation relies on the
+ [global interpreter lock](https://docs.python.org/3/glossary.html#term-global-interpreter-lock)
+ for thread safety.
+- The free-threaded build implementation pauses other executing threads when
+ performing a collection for thread safety.
+
+Both implementations use the same basic algorithms, but operate on different
+data structures. The the section on
+[Differences between GC implementations](#Differences-between-GC-implementations)
+for the details.
+
+
+Memory layout and object structure
+==================================
+
+The garbage collector requires additional fields in Python objects to support
+garbage collection. These extra fields are different in the default and the
+free-threaded builds.
+
+
+GC for the default build
+------------------------
+
+Normally the C structure supporting a regular Python object looks as follows:
+
+```
+ object -----> +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ \
+ | ob_refcnt | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ | PyObject_HEAD
+ | *ob_type | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ /
+ | ... |
+```
+
+In order to support the garbage collector, the memory layout of objects is altered
+to accommodate extra information **before** the normal layout:
+
+```
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ \
+ | *_gc_next | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ | PyGC_Head
+ | *_gc_prev | |
+ object -----> +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ /
+ | ob_refcnt | \
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ | PyObject_HEAD
+ | *ob_type | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ /
+ | ... |
+```
+
+
+In this way the object can be treated as a normal python object and when the extra
+information associated to the GC is needed the previous fields can be accessed by a
+simple type cast from the original object: `((PyGC_Head *)(the_object)-1)`.
+
+As is explained later in the
+[Optimization: reusing fields to save memory](#optimization-reusing-fields-to-save-memory)
+section, these two extra fields are normally used to keep doubly linked lists of all the
+objects tracked by the garbage collector (these lists are the GC generations, more on
+that in the [Optimization: generations](#Optimization-generations) section), but
+they are also reused to fulfill other purposes when the full doubly linked list
+structure is not needed as a memory optimization.
+
+Doubly linked lists are used because they efficiently support the most frequently required operations. In
+general, the collection of all objects tracked by GC is partitioned into disjoint sets, each in its own
+doubly linked list. Between collections, objects are partitioned into "generations", reflecting how
+often they've survived collection attempts. During collections, the generation(s) being collected
+are further partitioned into, for example, sets of reachable and unreachable objects. Doubly linked lists
+support moving an object from one partition to another, adding a new object, removing an object
+entirely (objects tracked by GC are most often reclaimed by the refcounting system when GC
+isn't running at all!), and merging partitions, all with a small constant number of pointer updates.
+With care, they also support iterating over a partition while objects are being added to - and
+removed from - it, which is frequently required while GC is running.
+
+GC for the free-threaded build
+------------------------------
+
+In the free-threaded build, Python objects contain a 1-byte field
+``ob_gc_bits`` that is used to track garbage collection related state. The
+field exists in all objects, including ones that do not support cyclic
+garbage collection. The field is used to identify objects that are tracked
+by the collector, ensure that finalizers are called only once per object,
+and, during garbage collection, differentiate reachable vs. unreachable objects.
+
+```
+ object -----> +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ \
+ | ob_tid | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ |
+ | pad | ob_mutex | ob_gc_bits | ob_ref_local | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ | PyObject_HEAD
+ | ob_ref_shared | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ |
+ | *ob_type | |
+ +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ /
+ | ... |
+```
+
+Note that not all fields are to scale. ``pad`` is two bytes, ``ob_mutex`` and
+``ob_gc_bits`` are each one byte, and ``ob_ref_local`` is four bytes. The
+other fields, ``ob_tid``, ``ob_ref_shared``, and ``ob_type``, are all
+pointer-sized (that is, eight bytes on a 64-bit platform).
+
+
+The garbage collector also temporarily repurposes the ``ob_tid`` (thread ID)
+and ``ob_ref_local`` (local reference count) fields for other purposes during
+collections.
+
+
+C APIs
+------
+
+Specific APIs are offered to allocate, deallocate, initialize, track, and untrack
+objects with GC support. These APIs can be found in the
+[Garbage Collector C API documentation](https://docs.python.org/3/c-api/gcsupport.html).
+
+Apart from this object structure, the type object for objects supporting garbage
+collection must include the ``Py_TPFLAGS_HAVE_GC`` in its ``tp_flags`` slot and
+provide an implementation of the ``tp_traverse`` handler. Unless it can be proven
+that the objects cannot form reference cycles with only objects of its type or unless
+the type is immutable, a ``tp_clear`` implementation must also be provided.
+
+
+Identifying reference cycles
+============================
+
+The algorithm that CPython uses to detect those reference cycles is
+implemented in the ``gc`` module. The garbage collector **only focuses**
+on cleaning container objects (that is, objects that can contain a reference
+to one or more objects). These can be arrays, dictionaries, lists, custom
+class instances, classes in extension modules, etc. One could think that
+cycles are uncommon but the truth is that many internal references needed by
+the interpreter create cycles everywhere. Some notable examples:
+
+- Exceptions contain traceback objects that contain a list of frames that
+ contain the exception itself.
+- Module-level functions reference the module's dict (which is needed to resolve globals),
+ which in turn contains entries for the module-level functions.
+- Instances have references to their class which itself references its module, and the module
+ contains references to everything that is inside (and maybe other modules)
+ and this can lead back to the original instance.
+- When representing data structures like graphs, it is very typical for them to
+ have internal links to themselves.
+
+To correctly dispose of these objects once they become unreachable, they need
+to be identified first. To understand how the algorithm works, let’s take
+the case of a circular linked list which has one link referenced by a
+variable ``A``, and one self-referencing object which is completely
+unreachable:
+
+```pycon
+ >>> import gc
+
+ >>> class Link:
+ ... def __init__(self, next_link=None):
+ ... self.next_link = next_link
+
+ >>> link_3 = Link()
+ >>> link_2 = Link(link_3)
+ >>> link_1 = Link(link_2)
+ >>> link_3.next_link = link_1
+ >>> A = link_1
+ >>> del link_1, link_2, link_3
+
+ >>> link_4 = Link()
+ >>> link_4.next_link = link_4
+ >>> del link_4
+
+ # Collect the unreachable Link object (and its .__dict__ dict).
+ >>> gc.collect()
+ 2
+```
+
+The GC starts with a set of candidate objects it wants to scan. In the
+default build, these "objects to scan" might be all container objects or a
+smaller subset (or "generation"). In the free-threaded build, the collector
+always scans all container objects.
+
+The objective is to identify all the unreachable objects. The collector does
+this by identifying reachable objects; the remaining objects must be
+unreachable. The first step is to identify all of the "to scan" objects that
+are **directly** reachable from outside the set of candidate objects. These
+objects have a refcount larger than the number of incoming references from
+within the candidate set.
+
+Every object that supports garbage collection will have an extra reference
+count field initialized to the reference count (``gc_ref`` in the figures)
+of that object when the algorithm starts. This is because the algorithm needs
+to modify the reference count to do the computations and in this way the
+interpreter will not modify the real reference count field.
+
+![gc-image1](images/python-cyclic-gc-1-new-page.png)
+
+The GC then iterates over all containers in the first list and decrements by one the
+`gc_ref` field of any other object that container is referencing. Doing
+this makes use of the ``tp_traverse`` slot in the container class (implemented
+using the C API or inherited by a superclass) to know what objects are referenced by
+each container. After all the objects have been scanned, only the objects that have
+references from outside the “objects to scan” list will have ``gc_ref > 0``.
+
+![gc-image2](images/python-cyclic-gc-2-new-page.png)
+
+Notice that having ``gc_ref == 0`` does not imply that the object is unreachable.
+This is because another object that is reachable from the outside (``gc_ref > 0``)
+can still have references to it. For instance, the ``link_2`` object in our example
+ended up having ``gc_ref == 0`` but is still referenced by the ``link_1`` object that
+is reachable from the outside. To obtain the set of objects that are really
+unreachable, the garbage collector re-scans the container objects using the
+``tp_traverse`` slot; this time with a different traverse function that marks objects with
+``gc_ref == 0`` as "tentatively unreachable" and then moves them to the
+tentatively unreachable list. The following image depicts the state of the lists in a
+moment when the GC processed the ``link_3`` and ``link_4`` objects but has not
+processed ``link_1`` and ``link_2`` yet.
+
+![gc-image3](images/python-cyclic-gc-3-new-page.png)
+
+Then the GC scans the next ``link_1`` object. Because it has ``gc_ref == 1``,
+the gc does not do anything special because it knows it has to be reachable (and is
+already in what will become the reachable list):
+
+![gc-image4](images/python-cyclic-gc-4-new-page.png)
+
+When the GC encounters an object which is reachable (``gc_ref > 0``), it traverses
+its references using the ``tp_traverse`` slot to find all the objects that are
+reachable from it, moving them to the end of the list of reachable objects (where
+they started originally) and setting their ``gc_ref`` fields to 1. This is what happens
+to ``link_2`` and ``link_3`` below as they are reachable from ``link_1``. From the
+state in the previous image and after examining the objects referred to by ``link_1``
+the GC knows that ``link_3`` is reachable after all, so it is moved back to the
+original list and its ``gc_ref`` field is set to 1 so that if the GC visits it again,
+it will know that it's reachable. To avoid visiting an object twice, the GC marks all
+objects that have already been visited once (by unsetting the ``PREV_MASK_COLLECTING``
+flag) so that if an object that has already been processed is referenced by some other
+object, the GC does not process it twice.
+
+![gc-image5](images/python-cyclic-gc-5-new-page.png)
+
+Notice that an object that was marked as "tentatively unreachable" and was later
+moved back to the reachable list will be visited again by the garbage collector
+as now all the references that that object has need to be processed as well. This
+process is really a breadth first search over the object graph. Once all the objects
+are scanned, the GC knows that all container objects in the tentatively unreachable
+list are really unreachable and can thus be garbage collected.
+
+Pragmatically, it's important to note that no recursion is required by any of this,
+and neither does it in any other way require additional memory proportional to the
+number of objects, number of pointers, or the lengths of pointer chains. Apart from
+``O(1)`` storage for internal C needs, the objects themselves contain all the storage
+the GC algorithms require.
+
+Why moving unreachable objects is better
+----------------------------------------
+
+It sounds logical to move the unreachable objects under the premise that most objects
+are usually reachable, until you think about it: the reason it pays isn't actually
+obvious.
+
+Suppose we create objects A, B, C in that order. They appear in the young generation
+in the same order. If B points to A, and C to B, and C is reachable from outside,
+then the adjusted refcounts after the first step of the algorithm runs will be 0, 0,
+and 1 respectively because the only reachable object from the outside is C.
+
+When the next step of the algorithm finds A, A is moved to the unreachable list. The
+same for B when it's first encountered. Then C is traversed, B is moved *back* to
+the reachable list. B is eventually traversed, and then A is moved back to the reachable
+list.
+
+So instead of not moving at all, the reachable objects B and A are each moved twice.
+Why is this a win? A straightforward algorithm to move the reachable objects instead
+would move A, B, and C once each. The key is that this dance leaves the objects in
+order C, B, A - it's reversed from the original order. On all *subsequent* scans,
+none of them will move. Since most objects aren't in cycles, this can save an
+unbounded number of moves across an unbounded number of later collections. The only
+time the cost can be higher is the first time the chain is scanned.
+
+Destroying unreachable objects
+==============================
+
+Once the GC knows the list of unreachable objects, a very delicate process starts
+with the objective of completely destroying these objects. Roughly, the process
+follows these steps in order:
+
+1. Handle and clear weak references (if any). Weak references to unreachable objects
+ are set to ``None``. If the weak reference has an associated callback, the callback
+ is enqueued to be called once the clearing of weak references is finished. We only
+ invoke callbacks for weak references that are themselves reachable. If both the weak
+ reference and the pointed-to object are unreachable we do not execute the callback.
+ This is partly for historical reasons: the callback could resurrect an unreachable
+ object and support for weak references predates support for object resurrection.
+ Ignoring the weak reference's callback is fine because both the object and the weakref
+ are going away, so it's legitimate to say the weak reference is going away first.
+2. If an object has legacy finalizers (``tp_del`` slot) move it to the
+ ``gc.garbage`` list.
+3. Call the finalizers (``tp_finalize`` slot) and mark the objects as already
+ finalized to avoid calling finalizers twice if the objects are resurrected or
+ if other finalizers have removed the object first.
+4. Deal with resurrected objects. If some objects have been resurrected, the GC
+ finds the new subset of objects that are still unreachable by running the cycle
+ detection algorithm again and continues with them.
+5. Call the ``tp_clear`` slot of every object so all internal links are broken and
+ the reference counts fall to 0, triggering the destruction of all unreachable
+ objects.
+
+Optimization: generations
+=========================
+
+In order to limit the time each garbage collection takes, the GC
+implementation for the default build uses a popular optimization:
+generations. The main idea behind this concept is the assumption that most
+objects have a very short lifespan and can thus be collected soon after their
+creation. This has proven to be very close to the reality of many Python
+programs as many temporary objects are created and destroyed very quickly.
+
+To take advantage of this fact, all container objects are segregated into
+three spaces/generations. Every new
+object starts in the first generation (generation 0). The previous algorithm is
+executed only over the objects of a particular generation and if an object
+survives a collection of its generation it will be moved to the next one
+(generation 1), where it will be surveyed for collection less often. If
+the same object survives another GC round in this new generation (generation 1)
+it will be moved to the last generation (generation 2) where it will be
+surveyed the least often.
+
+The GC implementation for the free-threaded build does not use multiple
+generations. Every collection operates on the entire heap.
+
+In order to decide when to run, the collector keeps track of the number of object
+allocations and deallocations since the last collection. When the number of
+allocations minus the number of deallocations exceeds ``threshold_0``,
+collection starts. Initially only generation 0 is examined. If generation 0 has
+been examined more than ``threshold_1`` times since generation 1 has been
+examined, then generation 1 is examined as well. With generation 2,
+things are a bit more complicated; see
+[Collecting the oldest generation](#collecting-the-oldest-generation) for
+more information. These thresholds can be examined using the
+[`gc.get_threshold()`](https://docs.python.org/3/library/gc.html#gc.get_threshold)
+function:
+
+```pycon
+ >>> import gc
+ >>> gc.get_threshold()
+ (700, 10, 10)
+```
+
+The content of these generations can be examined using the
+``gc.get_objects(generation=NUM)`` function and collections can be triggered
+specifically in a generation by calling ``gc.collect(generation=NUM)``.
+
+```pycon
+ >>> import gc
+ >>> class MyObj:
+ ... pass
+ ...
+
+ # Move everything to the last generation so it's easier to inspect
+ # the younger generations.
+
+ >>> gc.collect()
+ 0
+
+ # Create a reference cycle.
+
+ >>> x = MyObj()
+ >>> x.self = x
+
+ # Initially the object is in the youngest generation.
+
+ >>> gc.get_objects(generation=0)
+ [..., <__main__.MyObj object at 0x7fbcc12a3400>, ...]
+
+ # After a collection of the youngest generation the object
+ # moves to the next generation.
+
+ >>> gc.collect(generation=0)
+ 0
+ >>> gc.get_objects(generation=0)
+ []
+ >>> gc.get_objects(generation=1)
+ [..., <__main__.MyObj object at 0x7fbcc12a3400>, ...]
+```
+
+Collecting the oldest generation
+--------------------------------
+
+In addition to the various configurable thresholds, the GC only triggers a full
+collection of the oldest generation if the ratio ``long_lived_pending / long_lived_total``
+is above a given value (hardwired to 25%). The reason is that, while "non-full"
+collections (that is, collections of the young and middle generations) will always
+examine roughly the same number of objects (determined by the aforementioned
+thresholds) the cost of a full collection is proportional to the total
+number of long-lived objects, which is virtually unbounded. Indeed, it has
+been remarked that doing a full collection every *n* object
+creations entails a dramatic performance degradation in workloads which consist
+of creating and storing lots of long-lived objects (for example, building a large list
+of GC-tracked objects would show quadratic performance, instead of linear as
+expected). Using the above ratio, instead, yields amortized linear performance
+in the total number of objects (the effect of which can be summarized thusly:
+"each full garbage collection is more and more costly as the number of objects
+grows, but we do fewer and fewer of them").
+
+Optimization: reusing fields to save memory
+===========================================
+
+In order to save memory, the two linked list pointers in every object with GC
+support are reused for several purposes. This is a common optimization known
+as "fat pointers" or "tagged pointers": pointers that carry additional data,
+"folded" into the pointer, meaning stored inline in the data representing the
+address, taking advantage of certain properties of memory addressing. This is
+possible as most architectures align certain types of data
+to the size of the data, often a word or multiple thereof. This discrepancy
+leaves a few of the least significant bits of the pointer unused, which can be
+used for tags or to keep other information – most often as a bit field (each
+bit a separate tag) – as long as code that uses the pointer masks out these
+bits before accessing memory. For example, on a 32-bit architecture (for both
+addresses and word size), a word is 32 bits = 4 bytes, so word-aligned
+addresses are always a multiple of 4, hence end in ``00``, leaving the last 2 bits
+available; while on a 64-bit architecture, a word is 64 bits = 8 bytes, so
+word-aligned addresses end in ``000``, leaving the last 3 bits available.
+
+The CPython GC makes use of two fat pointers that correspond to the extra fields
+of ``PyGC_Head`` discussed in the
+[Memory layout and object structure](#memory-layout-and-object-structure) section:
+
+> [!WARNING]
+> Because of the presence of extra information, "tagged" or "fat" pointers cannot be
+> dereferenced directly and the extra information must be stripped off before
+> obtaining the real memory address. Special care needs to be taken with
+> functions that directly manipulate the linked lists, as these functions
+> normally assume the pointers inside the lists are in a consistent state.
+
+
+- The ``_gc_prev`` field is normally used as the "previous" pointer to maintain the
+ doubly linked list but its lowest two bits are used to keep the flags
+ ``PREV_MASK_COLLECTING`` and ``_PyGC_PREV_MASK_FINALIZED``. Between collections,
+ the only flag that can be present is ``_PyGC_PREV_MASK_FINALIZED`` that indicates
+ if an object has been already finalized. During collections ``_gc_prev`` is
+ temporarily used for storing a copy of the reference count (``gc_ref``), in
+ addition to two flags, and the GC linked list becomes a singly linked list until
+ ``_gc_prev`` is restored.
+
+- The ``_gc_next`` field is used as the "next" pointer to maintain the doubly linked
+ list but during collection its lowest bit is used to keep the
+ ``NEXT_MASK_UNREACHABLE`` flag that indicates if an object is tentatively
+ unreachable during the cycle detection algorithm. This is a drawback to using only
+ doubly linked lists to implement partitions: while most needed operations are
+ constant-time, there is no efficient way to determine which partition an object is
+ currently in. Instead, when that's needed, ad hoc tricks (like the
+ ``NEXT_MASK_UNREACHABLE`` flag) are employed.
+
+Optimization: delay tracking containers
+=======================================
+
+Certain types of containers cannot participate in a reference cycle, and so do
+not need to be tracked by the garbage collector. Untracking these objects
+reduces the cost of garbage collection. However, determining which objects may
+be untracked is not free, and the costs must be weighed against the benefits
+for garbage collection. There are two possible strategies for when to untrack
+a container:
+
+1. When the container is created.
+2. When the container is examined by the garbage collector.
+
+As a general rule, instances of atomic types aren't tracked and instances of
+non-atomic types (containers, user-defined objects...) are. However, some
+type-specific optimizations can be present in order to suppress the garbage
+collector footprint of simple instances. Some examples of native types that
+benefit from delayed tracking:
+
+- Tuples containing only immutable objects (integers, strings etc,
+ and recursively, tuples of immutable objects) do not need to be tracked. The
+ interpreter creates a large number of tuples, many of which will not survive
+ until garbage collection. It is therefore not worthwhile to untrack eligible
+ tuples at creation time. Instead, all tuples except the empty tuple are tracked
+ when created. During garbage collection it is determined whether any surviving
+ tuples can be untracked. A tuple can be untracked if all of its contents are
+ already not tracked. Tuples are examined for untracking in all garbage collection
+ cycles. It may take more than one cycle to untrack a tuple.
+
+- Dictionaries containing only immutable objects also do not need to be tracked.
+ Dictionaries are untracked when created. If a tracked item is inserted into a
+ dictionary (either as a key or value), the dictionary becomes tracked. During a
+ full garbage collection (all generations), the collector will untrack any dictionaries
+ whose contents are not tracked.
+
+The garbage collector module provides the Python function ``is_tracked(obj)``, which returns
+the current tracking status of the object. Subsequent garbage collections may change the
+tracking status of the object.
+
+```pycon
+ >>> gc.is_tracked(0)
+ False
+ >>> gc.is_tracked("a")
+ False
+ >>> gc.is_tracked([])
+ True
+ >>> gc.is_tracked({})
+ False
+ >>> gc.is_tracked({"a": 1})
+ False
+ >>> gc.is_tracked({"a": []})
+ True
+```
+
+Differences between GC implementations
+======================================
+
+This section summarizes the differences between the GC implementation in the
+default build and the implementation in the free-threaded build.
+
+The default build implementation makes extensive use of the ``PyGC_Head`` data
+structure, while the free-threaded build implementation does not use that
+data structure.
+
+- The default build implementation stores all tracked objects in a doubly
+ linked list using ``PyGC_Head``. The free-threaded build implementation
+ instead relies on the embedded mimalloc memory allocator to scan the heap
+ for tracked objects.
+- The default build implementation uses ``PyGC_Head`` for the unreachable
+  object list. The free-threaded build implementation repurposes the
+  ``ob_tid`` field to store a linked list of unreachable objects.
+- The default build implementation stores flags in the ``_gc_prev`` field of
+ ``PyGC_Head``. The free-threaded build implementation stores these flags
+ in ``ob_gc_bits``.
+
+
+The default build implementation relies on the
+[global interpreter lock](https://docs.python.org/3/glossary.html#term-global-interpreter-lock)
+for thread safety. The free-threaded build implementation has two "stop the
+world" pauses, in which all other executing threads are temporarily paused so
+that the GC can safely access reference counts and object attributes.
+
+The default build implementation is a generational collector. The
+free-threaded build is non-generational; each collection scans the entire
+heap.
+
+- Keeping track of object generations is simple and inexpensive in the default
+ build. The free-threaded build relies on mimalloc for finding tracked
+ objects; identifying "young" objects without scanning the entire heap would
+ be more difficult.
+
+
+> [!NOTE]
+> **Document history**
+>
+> Pablo Galindo Salgado - Original author
+>
+> Irit Katriel - Convert to Markdown
diff --git a/InternalDocs/images/python-cyclic-gc-1-new-page.png b/InternalDocs/images/python-cyclic-gc-1-new-page.png
new file mode 100644
index 000000000000000..2ddac50f4b55758
Binary files /dev/null and b/InternalDocs/images/python-cyclic-gc-1-new-page.png differ
diff --git a/InternalDocs/images/python-cyclic-gc-2-new-page.png b/InternalDocs/images/python-cyclic-gc-2-new-page.png
new file mode 100644
index 000000000000000..159aeeb05024a32
Binary files /dev/null and b/InternalDocs/images/python-cyclic-gc-2-new-page.png differ
diff --git a/InternalDocs/images/python-cyclic-gc-3-new-page.png b/InternalDocs/images/python-cyclic-gc-3-new-page.png
new file mode 100644
index 000000000000000..29fab0498e5b106
Binary files /dev/null and b/InternalDocs/images/python-cyclic-gc-3-new-page.png differ
diff --git a/InternalDocs/images/python-cyclic-gc-4-new-page.png b/InternalDocs/images/python-cyclic-gc-4-new-page.png
new file mode 100644
index 000000000000000..51a2b1065ea64ea
Binary files /dev/null and b/InternalDocs/images/python-cyclic-gc-4-new-page.png differ
diff --git a/InternalDocs/images/python-cyclic-gc-5-new-page.png b/InternalDocs/images/python-cyclic-gc-5-new-page.png
new file mode 100644
index 000000000000000..fe67a6896fe4b07
Binary files /dev/null and b/InternalDocs/images/python-cyclic-gc-5-new-page.png differ
diff --git a/Lib/_collections_abc.py b/Lib/_collections_abc.py
index c2edf6c8856c212..06667b7434ccefa 100644
--- a/Lib/_collections_abc.py
+++ b/Lib/_collections_abc.py
@@ -962,7 +962,7 @@ def clear(self):
def update(self, other=(), /, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
- If E present and has a .keys() method, does: for k in E: D[k] = E[k]
+ If E present and has a .keys() method, does: for k in E.keys(): D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py
index 75df3db262470b7..5b60570c6c592aa 100644
--- a/Lib/_pydecimal.py
+++ b/Lib/_pydecimal.py
@@ -582,6 +582,21 @@ def __new__(cls, value="0", context=None):
raise TypeError("Cannot convert %r to Decimal" % value)
+ @classmethod
+ def from_number(cls, number):
+ """Converts a real number to a decimal number, exactly.
+
+ >>> Decimal.from_number(314) # int
+ Decimal('314')
+ >>> Decimal.from_number(0.1) # float
+ Decimal('0.1000000000000000055511151231257827021181583404541015625')
+ >>> Decimal.from_number(Decimal('3.14')) # another decimal instance
+ Decimal('3.14')
+ """
+ if isinstance(number, (int, Decimal, float)):
+ return cls(number)
+ raise TypeError("Cannot convert %r to Decimal" % number)
+
@classmethod
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
diff --git a/Lib/_pyrepl/console.py b/Lib/_pyrepl/console.py
index 3e72a56807f6fb1..03266c4dfc2dd85 100644
--- a/Lib/_pyrepl/console.py
+++ b/Lib/_pyrepl/console.py
@@ -174,7 +174,13 @@ def _excepthook(self, typ, value, tb):
def runsource(self, source, filename="", symbol="single"):
try:
- tree = ast.parse(source)
+ tree = self.compile.compiler(
+ source,
+ filename,
+ "exec",
+ ast.PyCF_ONLY_AST,
+ incomplete_input=False,
+ )
except (SyntaxError, OverflowError, ValueError):
self.showsyntaxerror(filename, source=source)
return False
@@ -185,7 +191,7 @@ def runsource(self, source, filename="", symbol="single"):
the_symbol = symbol if stmt is last_stmt else "exec"
item = wrapper([stmt])
try:
- code = self.compile.compiler(item, filename, the_symbol, dont_inherit=True)
+ code = self.compile.compiler(item, filename, the_symbol)
except SyntaxError as e:
if e.args[0] == "'await' outside function":
python = os.path.basename(sys.executable)
diff --git a/Lib/_strptime.py b/Lib/_strptime.py
index a3f8bb544d518dc..5f4d2475c0169bc 100644
--- a/Lib/_strptime.py
+++ b/Lib/_strptime.py
@@ -15,6 +15,7 @@
import locale
import calendar
from re import compile as re_compile
+from re import sub as re_sub
from re import IGNORECASE
from re import escape as re_escape
from datetime import (date as datetime_date,
@@ -28,6 +29,18 @@ def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
+def _findall(haystack, needle):
+ # Find all positions of needle in haystack.
+ if not needle:
+ return
+ i = 0
+ while True:
+ i = haystack.find(needle, i)
+ if i < 0:
+ break
+ yield i
+ i += len(needle)
+
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
@@ -102,7 +115,8 @@ def __calc_am_pm(self):
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
- am_pm.append(time.strftime("%p", time_tuple).lower())
+ # br_FR has AM/PM info (' ',' ').
+ am_pm.append(time.strftime("%p", time_tuple).lower().strip())
self.am_pm = am_pm
def __calc_date_time(self):
@@ -114,42 +128,130 @@ def __calc_date_time(self):
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
- date_time = [None, None, None]
- date_time[0] = time.strftime("%c", time_tuple).lower()
- date_time[1] = time.strftime("%x", time_tuple).lower()
- date_time[2] = time.strftime("%X", time_tuple).lower()
- replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
- (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
- (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
- ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
- ('44', '%M'), ('55', '%S'), ('76', '%j'),
- ('17', '%d'), ('03', '%m'), ('3', '%m'),
- # '3' needed for when no leading zero.
- ('2', '%w'), ('10', '%I')]
- replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
- for tz in tz_values])
- for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
- current_format = date_time[offset]
- for old, new in replacement_pairs:
+ time_tuple2 = time.struct_time((1999,1,3,1,1,1,6,3,0))
+ replacement_pairs = [
+ ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
+ ('44', '%M'), ('55', '%S'), ('76', '%j'),
+ ('17', '%d'), ('03', '%m'), ('3', '%m'),
+ # '3' needed for when no leading zero.
+ ('2', '%w'), ('10', '%I'),
+ # Non-ASCII digits
+ ('\u0661\u0669\u0669\u0669', '%Y'),
+ ('\u0669\u0669', '%Oy'),
+ ('\u0662\u0662', '%OH'),
+ ('\u0664\u0664', '%OM'),
+ ('\u0665\u0665', '%OS'),
+ ('\u0661\u0667', '%Od'),
+ ('\u0660\u0663', '%Om'),
+ ('\u0663', '%Om'),
+ ('\u0662', '%Ow'),
+ ('\u0661\u0660', '%OI'),
+ ]
+ date_time = []
+ for directive in ('%c', '%x', '%X'):
+ current_format = time.strftime(directive, time_tuple).lower()
+ current_format = current_format.replace('%', '%%')
+ # The month and the day of the week formats are treated specially
+ # because of a possible ambiguity in some locales where the full
+ # and abbreviated names are equal or names of different types
+ # are equal. See doc of __find_month_format for more details.
+ lst, fmt = self.__find_weekday_format(directive)
+ if lst:
+ current_format = current_format.replace(lst[2], fmt, 1)
+ lst, fmt = self.__find_month_format(directive)
+ if lst:
+ current_format = current_format.replace(lst[3], fmt, 1)
+ if self.am_pm[1]:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
- if old:
- current_format = current_format.replace(old, new)
+ current_format = current_format.replace(self.am_pm[1], '%p')
+ for tz_values in self.timezone:
+ for tz in tz_values:
+ if tz:
+ current_format = current_format.replace(tz, "%Z")
+ # Transform all non-ASCII digits to digits in range U+0660 to U+0669.
+ current_format = re_sub(r'\d(?3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P[0-9]{1,6})",
'H': r"(?P2[0-3]|[0-1]\d|\d)",
- 'I': r"(?P1[0-2]|0[1-9]|[1-9])",
+ 'I': r"(?P1[0-2]|0[1-9]|[1-9]| [1-9])",
'G': r"(?P\d\d\d\d)",
'j': r"(?P36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P1[0-2]|0[1-9]|[1-9])",
@@ -211,11 +313,15 @@ def __init__(self, locale_time=None):
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
- '%': '%'})
- base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
- base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
- base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+ '%': '%'}
+ for d in 'dmyHIMS':
+ mapping['O' + d] = r'(?P<%s>\d\d|\d| \d)' % d
+ mapping['Ow'] = r'(?P\d)'
+ mapping['W'] = mapping['U'].replace('U', 'W')
+ base.__init__(mapping)
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+ base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+ base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
@@ -243,28 +349,25 @@ def pattern(self, format):
regex syntax are escaped.
"""
- processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
- regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
- format = regex_chars.sub(r"\\\1", format)
- whitespace_replacement = re_compile(r'\s+')
- format = whitespace_replacement.sub(r'\\s+', format)
+ format = re_sub(r"([\\.^$*+?\(\){}\[\]|])", r"\\\1", format)
+ format = re_sub(r'\s+', r'\\s+', format)
+ format = re_sub(r"'", "['\u02bc]", format) # needed for br_FR
year_in_format = False
day_of_month_in_format = False
- while '%' in format:
- directive_index = format.index('%')+1
- format_char = format[directive_index]
- processed_format = "%s%s%s" % (processed_format,
- format[:directive_index-1],
- self[format_char])
- format = format[directive_index+1:]
+ def repl(m):
+ format_char = m[1]
match format_char:
case 'Y' | 'y' | 'G':
+ nonlocal year_in_format
year_in_format = True
case 'd':
+ nonlocal day_of_month_in_format
day_of_month_in_format = True
+ return self[format_char]
+ format = re_sub(r'%(O?.)', repl, format)
if day_of_month_in_format and not year_in_format:
import warnings
warnings.warn("""\
@@ -275,7 +378,7 @@ def pattern(self, format):
See https://github.com/python/cpython/issues/70647.""",
DeprecationWarning,
skip_file_prefixes=(os.path.dirname(__file__),))
- return "%s%s" % (processed_format, format)
+ return format
def compile(self, format):
"""Return a compiled re object for the format string."""
diff --git a/Lib/_weakrefset.py b/Lib/_weakrefset.py
index 2071755d71dfc84..d1c7fcaeec9821c 100644
--- a/Lib/_weakrefset.py
+++ b/Lib/_weakrefset.py
@@ -8,31 +8,6 @@
__all__ = ['WeakSet']
-class _IterationGuard:
- # This context manager registers itself in the current iterators of the
- # weak container, such as to delay all removals until the context manager
- # exits.
- # This technique should be relatively thread-safe (since sets are).
-
- def __init__(self, weakcontainer):
- # Don't create cycles
- self.weakcontainer = ref(weakcontainer)
-
- def __enter__(self):
- w = self.weakcontainer()
- if w is not None:
- w._iterating.add(self)
- return self
-
- def __exit__(self, e, t, b):
- w = self.weakcontainer()
- if w is not None:
- s = w._iterating
- s.remove(self)
- if not s:
- w._commit_removals()
-
-
class WeakSet:
def __init__(self, data=None):
self.data = set()
diff --git a/Lib/argparse.py b/Lib/argparse.py
index 21299b69ecd74cb..fa9f5211257e964 100644
--- a/Lib/argparse.py
+++ b/Lib/argparse.py
@@ -547,8 +547,7 @@ def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
- choice_strs = [str(choice) for choice in action.choices]
- result = '{%s}' % ','.join(choice_strs)
+ result = '{%s}' % ','.join(map(str, action.choices))
else:
result = default_metavar
@@ -588,17 +587,19 @@ def _format_args(self, action, default_metavar):
return result
def _expand_help(self, action):
+ help_string = self._get_help_string(action)
+ if '%' not in help_string:
+ return help_string
params = dict(vars(action), prog=self._prog)
for name in list(params):
- if params[name] is SUPPRESS:
+ value = params[name]
+ if value is SUPPRESS:
del params[name]
- for name in list(params):
- if hasattr(params[name], '__name__'):
- params[name] = params[name].__name__
+ elif hasattr(value, '__name__'):
+ params[name] = value.__name__
if params.get('choices') is not None:
- choices_str = ', '.join([str(c) for c in params['choices']])
- params['choices'] = choices_str
- return self._get_help_string(action) % params
+ params['choices'] = ', '.join(map(str, params['choices']))
+ return help_string % params
def _iter_indented_subactions(self, action):
try:
@@ -714,7 +715,7 @@ def _get_action_name(argument):
elif argument.dest not in (None, SUPPRESS):
return argument.dest
elif argument.choices:
- return '{' + ','.join(argument.choices) + '}'
+ return '{%s}' % ','.join(map(str, argument.choices))
else:
return None
@@ -1180,9 +1181,13 @@ def add_parser(self, name, *, deprecated=False, **kwargs):
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
+ else:
+ choice_action = None
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
+ if choice_action is not None:
+ parser._check_help(choice_action)
self._name_parser_map[name] = parser
# make parser available under aliases also
@@ -1367,7 +1372,7 @@ def __init__(self,
self._defaults = {}
# determines whether an "option" looks like a negative number
- self._negative_number_matcher = _re.compile(r'^-(?:\d+(?:_\d+)*(?:\.\d+(?:_\d+)*)?|\.\d+(?:_\d+)*)$')
+ self._negative_number_matcher = _re.compile(r'-\.?\d')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
@@ -1417,7 +1422,8 @@ def add_argument(self, *args, **kwargs):
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
- raise ValueError('dest supplied twice for positional argument')
+ raise ValueError('dest supplied twice for positional argument,'
+ ' did you mean metavar?')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
@@ -1433,11 +1439,17 @@ def add_argument(self, *args, **kwargs):
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
+ action_name = kwargs.get('action')
action_class = self._pop_action_class(kwargs)
if not callable(action_class):
raise ValueError('unknown action "%s"' % (action_class,))
action = action_class(**kwargs)
+ # raise an error if action for positional argument does not
+ # consume arguments
+ if not action.option_strings and action.nargs == 0:
+ raise ValueError(f'action {action_name!r} is not valid for positional arguments')
+
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
@@ -1449,11 +1461,12 @@ def add_argument(self, *args, **kwargs):
# raise an error if the metavar does not match the type
if hasattr(self, "_get_formatter"):
+ formatter = self._get_formatter()
try:
- self._get_formatter()._format_args(action, None)
+ formatter._format_args(action, None)
except TypeError:
raise ValueError("length of metavar tuple does not match nargs")
-
+ self._check_help(action)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
@@ -1521,7 +1534,11 @@ def _add_container_actions(self, container):
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
- mutex_group = self.add_mutually_exclusive_group(
+ if group._container is container:
+ cont = self
+ else:
+ cont = title_group_map[group._container.title]
+ mutex_group = cont.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
@@ -1541,7 +1558,9 @@ def _get_positional_kwargs(self, dest, **kwargs):
# mark positional arguments as required if at least one is
# always required
nargs = kwargs.get('nargs')
- if nargs not in [OPTIONAL, ZERO_OR_MORE, REMAINDER, SUPPRESS, 0]:
+ if nargs == 0:
+ raise ValueError('nargs for positionals must be != 0')
+ if nargs not in [OPTIONAL, ZERO_OR_MORE, REMAINDER, SUPPRESS]:
kwargs['required'] = True
# return the keyword arguments with no option strings
@@ -1631,6 +1650,14 @@ def _handle_conflict_resolve(self, action, conflicting_actions):
if not action.option_strings:
action.container._remove_action(action)
+ def _check_help(self, action):
+ if action.help and hasattr(self, "_get_formatter"):
+ formatter = self._get_formatter()
+ try:
+ formatter._expand_help(action)
+ except (ValueError, TypeError, KeyError) as exc:
+ raise ValueError('badly formed help string') from exc
+
class _ArgumentGroup(_ActionsContainer):
@@ -1848,6 +1875,7 @@ def add_subparsers(self, **kwargs):
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
+ self._check_help(action)
self._subparsers._add_action(action)
# return the created parsers action
@@ -1997,7 +2025,7 @@ def consume_optional(start_index):
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, sep, explicit_arg in option_tuples])
- args = {'option': arg_string, 'matches': options}
+ args = {'option': arg_strings[start_index], 'matches': options}
msg = _('ambiguous option: %(option)s could match %(matches)s')
raise ArgumentError(None, msg % args)
@@ -2577,8 +2605,8 @@ def _check_value(self, action, value):
if isinstance(choices, str):
choices = iter(choices)
if value not in choices:
- args = {'value': value,
- 'choices': ', '.join(map(repr, action.choices))}
+ args = {'value': str(value),
+ 'choices': ', '.join(map(str, action.choices))}
msg = _('invalid choice: %(value)r (choose from %(choices)s)')
raise ArgumentError(action, msg % args)
diff --git a/Lib/ast.py b/Lib/ast.py
index a954d4a97d3c22c..154d2c8c1f9ebb9 100644
--- a/Lib/ast.py
+++ b/Lib/ast.py
@@ -1743,7 +1743,7 @@ def unparse(ast_obj):
def main():
import argparse
- parser = argparse.ArgumentParser(prog='python -m ast')
+ parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', default='-',
help='the file to parse; defaults to stdin')
parser.add_argument('-m', '--mode', default='exec',
diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py
index 5f6fa2348726cfb..c95fce035cd548d 100644
--- a/Lib/asyncio/futures.py
+++ b/Lib/asyncio/futures.py
@@ -190,8 +190,7 @@ def result(self):
the future is done and has an exception set, this exception is raised.
"""
if self._state == _CANCELLED:
- exc = self._make_cancelled_error()
- raise exc
+ raise self._make_cancelled_error()
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Result is not ready.')
self.__log_traceback = False
@@ -208,8 +207,7 @@ def exception(self):
InvalidStateError.
"""
if self._state == _CANCELLED:
- exc = self._make_cancelled_error()
- raise exc
+ raise self._make_cancelled_error()
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Exception is not set.')
self.__log_traceback = False
diff --git a/Lib/asyncio/staggered.py b/Lib/asyncio/staggered.py
index 326c6f708944af2..0f4df8855a80b91 100644
--- a/Lib/asyncio/staggered.py
+++ b/Lib/asyncio/staggered.py
@@ -69,7 +69,11 @@ async def staggered_race(coro_fns, delay, *, loop=None):
exceptions = []
running_tasks = []
- async def run_one_coro(previous_failed) -> None:
+ async def run_one_coro(ok_to_start, previous_failed) -> None:
+ # in eager tasks this waits for the calling task to append this task
+ # to running_tasks, in regular tasks this wait is a no-op that does
+ # not yield a future. See gh-124309.
+ await ok_to_start.wait()
# Wait for the previous task to finish, or for delay seconds
if previous_failed is not None:
with contextlib.suppress(exceptions_mod.TimeoutError):
@@ -85,8 +89,12 @@ async def run_one_coro(previous_failed) -> None:
return
# Start task that will run the next coroutine
this_failed = locks.Event()
- next_task = loop.create_task(run_one_coro(this_failed))
+ next_ok_to_start = locks.Event()
+ next_task = loop.create_task(run_one_coro(next_ok_to_start, this_failed))
running_tasks.append(next_task)
+ # next_task has been appended to running_tasks so next_task is ok to
+ # start.
+ next_ok_to_start.set()
assert len(running_tasks) == this_index + 2
# Prepare place to put this coroutine's exceptions if not won
exceptions.append(None)
@@ -116,8 +124,11 @@ async def run_one_coro(previous_failed) -> None:
if i != this_index:
t.cancel()
- first_task = loop.create_task(run_one_coro(None))
+ ok_to_start = locks.Event()
+ first_task = loop.create_task(run_one_coro(ok_to_start, None))
running_tasks.append(first_task)
+ # first_task has been appended to running_tasks so first_task is ok to start.
+ ok_to_start.set()
try:
# Wait for a growing list of tasks to all finish: poor man's version of
# curio's TaskGroup or trio's nursery
diff --git a/Lib/asyncio/taskgroups.py b/Lib/asyncio/taskgroups.py
index f2ee9648c43876d..9fa772ca9d02cc3 100644
--- a/Lib/asyncio/taskgroups.py
+++ b/Lib/asyncio/taskgroups.py
@@ -66,6 +66,20 @@ async def __aenter__(self):
return self
async def __aexit__(self, et, exc, tb):
+ tb = None
+ try:
+ return await self._aexit(et, exc)
+ finally:
+ # Exceptions are heavy objects that can have object
+ # cycles (bad for GC); let's not keep a reference to
+ # a bunch of them. It would be nicer to use a try/finally
+ # in __aexit__ directly but that introduced some diff noise
+ self._parent_task = None
+ self._errors = None
+ self._base_error = None
+ exc = None
+
+ async def _aexit(self, et, exc):
self._exiting = True
if (exc is not None and
@@ -122,7 +136,10 @@ async def __aexit__(self, et, exc, tb):
assert not self._tasks
if self._base_error is not None:
- raise self._base_error
+ try:
+ raise self._base_error
+ finally:
+ exc = None
if self._parent_cancel_requested:
# If this flag is set we *must* call uncancel().
@@ -133,8 +150,14 @@ async def __aexit__(self, et, exc, tb):
# Propagate CancelledError if there is one, except if there
# are other errors -- those have priority.
- if propagate_cancellation_error is not None and not self._errors:
- raise propagate_cancellation_error
+ try:
+ if propagate_cancellation_error is not None and not self._errors:
+ try:
+ raise propagate_cancellation_error
+ finally:
+ exc = None
+ finally:
+ propagate_cancellation_error = None
if et is not None and not issubclass(et, exceptions.CancelledError):
self._errors.append(exc)
@@ -146,14 +169,14 @@ async def __aexit__(self, et, exc, tb):
if self._parent_task.cancelling():
self._parent_task.uncancel()
self._parent_task.cancel()
- # Exceptions are heavy objects that can have object
- # cycles (bad for GC); let's not keep a reference to
- # a bunch of them.
try:
- me = BaseExceptionGroup('unhandled errors in a TaskGroup', self._errors)
- raise me from None
+ raise BaseExceptionGroup(
+ 'unhandled errors in a TaskGroup',
+ self._errors,
+ ) from None
finally:
- self._errors = None
+ exc = None
+
def create_task(self, coro, *, name=None, context=None):
"""Create a new task in this group and return it.
diff --git a/Lib/codeop.py b/Lib/codeop.py
index a0276b52d484e35..adf000ba29f88c9 100644
--- a/Lib/codeop.py
+++ b/Lib/codeop.py
@@ -44,6 +44,7 @@
# Caveat emptor: These flags are undocumented on purpose and depending
# on their effect outside the standard library is **unsupported**.
PyCF_DONT_IMPLY_DEDENT = 0x200
+PyCF_ONLY_AST = 0x400
PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000
def _maybe_compile(compiler, source, filename, symbol):
@@ -109,12 +110,14 @@ class Compile:
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT
- def __call__(self, source, filename, symbol, **kwargs):
- flags = self.flags
+ def __call__(self, source, filename, symbol, flags=0, **kwargs):
+ flags |= self.flags
if kwargs.get('incomplete_input', True) is False:
flags &= ~PyCF_DONT_IMPLY_DEDENT
flags &= ~PyCF_ALLOW_INCOMPLETE_INPUT
codeob = compile(source, filename, symbol, flags, True)
+ if flags & PyCF_ONLY_AST:
+ return codeob # this is an ast.Module in this case
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
diff --git a/Lib/ensurepip/__init__.py b/Lib/ensurepip/__init__.py
index c5350df270487ab..585afc85836c065 100644
--- a/Lib/ensurepip/__init__.py
+++ b/Lib/ensurepip/__init__.py
@@ -205,7 +205,7 @@ def _uninstall_helper(*, verbosity=0):
def _main(argv=None):
import argparse
- parser = argparse.ArgumentParser(prog="python -m ensurepip")
+ parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
action="version",
diff --git a/Lib/ensurepip/_uninstall.py b/Lib/ensurepip/_uninstall.py
index b257904328d2f5d..4183c28a809008f 100644
--- a/Lib/ensurepip/_uninstall.py
+++ b/Lib/ensurepip/_uninstall.py
@@ -6,7 +6,7 @@
def _main(argv=None):
- parser = argparse.ArgumentParser(prog="python -m ensurepip._uninstall")
+ parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
action="version",
diff --git a/Lib/fractions.py b/Lib/fractions.py
index 34fd0803d1b1ab6..f0cbc8c2e6c012a 100644
--- a/Lib/fractions.py
+++ b/Lib/fractions.py
@@ -279,7 +279,8 @@ def __new__(cls, numerator=0, denominator=None):
numerator = -numerator
else:
- raise TypeError("argument should be a string or a number")
+ raise TypeError("argument should be a string or a Rational "
+ "instance or have the as_integer_ratio() method")
elif type(numerator) is int is type(denominator):
pass # *very* normal case
@@ -305,6 +306,28 @@ def __new__(cls, numerator=0, denominator=None):
self._denominator = denominator
return self
+ @classmethod
+ def from_number(cls, number):
+ """Converts a finite real number to a rational number, exactly.
+
+ Beware that Fraction.from_number(0.3) != Fraction(3, 10).
+
+ """
+ if type(number) is int:
+ return cls._from_coprime_ints(number, 1)
+
+ elif isinstance(number, numbers.Rational):
+ return cls._from_coprime_ints(number.numerator, number.denominator)
+
+ elif (isinstance(number, float) or
+ (not isinstance(number, type) and
+ hasattr(number, 'as_integer_ratio'))):
+ return cls._from_coprime_ints(*number.as_integer_ratio())
+
+ else:
+ raise TypeError("argument should be a Rational instance or "
+ "have the as_integer_ratio() method")
+
@classmethod
def from_float(cls, f):
"""Converts a finite float to a rational number, exactly.
diff --git a/Lib/gzip.py b/Lib/gzip.py
index ba753ce3050dd84..1a3c82ce7e0711a 100644
--- a/Lib/gzip.py
+++ b/Lib/gzip.py
@@ -580,12 +580,12 @@ def _rewind(self):
self._new_member = True
-def compress(data, compresslevel=_COMPRESS_LEVEL_BEST, *, mtime=None):
+def compress(data, compresslevel=_COMPRESS_LEVEL_BEST, *, mtime=0):
"""Compress data in one shot and return the compressed string.
compresslevel sets the compression level in range of 0-9.
- mtime can be used to set the modification time. The modification time is
- set to the current time by default.
+ mtime can be used to set the modification time.
+ The modification time is set to 0 by default, for reproducibility.
"""
# Wbits=31 automatically includes a gzip header and trailer.
gzip_data = zlib.compress(data, level=compresslevel, wbits=31)
diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html
index 827d230b54e1591..2a4adc6a4d395f0 100644
--- a/Lib/idlelib/help.html
+++ b/Lib/idlelib/help.html
@@ -5,7 +5,7 @@
- IDLE — Python 3.13.0a2 documentation
+ IDLE — Python 3.14.0a0 documentation
@@ -18,7 +18,7 @@
@@ -26,6 +26,7 @@
+
@@ -45,6 +46,8 @@
+
+
@@ -184,7 +187,7 @@