Commit f4ead48

General improvements to the itertools docs (GH-98408)
1 parent c051d55 commit f4ead48

1 file changed, 92 insertions(+), 51 deletions(-)

Doc/library/itertools.rst
@@ -10,6 +10,10 @@
 .. testsetup::
 
    from itertools import *
+   import collections
+   import math
+   import operator
+   import random
 
 --------------
 
@@ -133,10 +137,9 @@ loops that truncate the stream.
 There are a number of uses for the *func* argument. It can be set to
 :func:`min` for a running minimum, :func:`max` for a running maximum, or
 :func:`operator.mul` for a running product. Amortization tables can be
-built by accumulating interest and applying payments. First-order
-`recurrence relations <https://en.wikipedia.org/wiki/Recurrence_relation>`_
-can be modeled by supplying the initial value in the iterable and using only
-the accumulated total in *func* argument::
+built by accumulating interest and applying payments:
+
+.. doctest::
 
 >>> data = [3, 4, 6, 2, 1, 9, 0, 7, 5, 8]
 >>> list(accumulate(data, operator.mul))     # running product
@@ -149,17 +152,6 @@ loops that truncate the stream.
 >>> list(accumulate(cashflows, lambda bal, pmt: bal*1.05 + pmt))
 [1000, 960.0, 918.0, 873.9000000000001, 827.5950000000001]
 
-# Chaotic recurrence relation https://en.wikipedia.org/wiki/Logistic_map
->>> logistic_map = lambda x, _: r * x * (1 - x)
->>> r = 3.8
->>> x0 = 0.4
->>> inputs = repeat(x0, 36)     # only the initial value is used
->>> [format(x, '.2f') for x in accumulate(inputs, logistic_map)]
-['0.40', '0.91', '0.30', '0.81', '0.60', '0.92', '0.29', '0.79', '0.63',
- '0.88', '0.39', '0.90', '0.33', '0.84', '0.52', '0.95', '0.18', '0.57',
- '0.93', '0.25', '0.71', '0.79', '0.63', '0.88', '0.39', '0.91', '0.32',
- '0.83', '0.54', '0.95', '0.20', '0.60', '0.91', '0.30', '0.80', '0.60']
-
 See :func:`functools.reduce` for a similar function that returns only the
 final accumulated value.
 
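To make the accumulate/reduce relationship in the hunk above concrete, a short doctest sketch (illustrative only, not part of this commit): accumulate() yields every intermediate total, while functools.reduce() returns only the last one.

>>> from functools import reduce
>>> list(accumulate([1, 2, 3, 4, 5], operator.mul))
[1, 2, 6, 24, 120]
>>> reduce(operator.mul, [1, 2, 3, 4, 5])
120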
@@ -241,10 +233,10 @@ loops that truncate the stream.
 
 The combination tuples are emitted in lexicographic ordering according to
 the order of the input *iterable*. So, if the input *iterable* is sorted,
-the combination tuples will be produced in sorted order.
+the output tuples will be produced in sorted order.
 
 Elements are treated as unique based on their position, not on their
-value. So if the input elements are unique, there will be no repeat
+value. So if the input elements are unique, there will be no repeated
 values in each combination.
 
 Roughly equivalent to::
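A quick doctest sketch of the sorted-order and no-repeats claims for combinations() (illustrative only, not part of this commit), using a sorted input:

>>> list(combinations('ABCD', 2))
[('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'), ('C', 'D')]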
@@ -290,7 +282,7 @@ loops that truncate the stream.
 
 The combination tuples are emitted in lexicographic ordering according to
 the order of the input *iterable*. So, if the input *iterable* is sorted,
-the combination tuples will be produced in sorted order.
+the output tuples will be produced in sorted order.
 
 Elements are treated as unique based on their position, not on their
 value. So if the input elements are unique, the generated combinations
@@ -449,21 +441,25 @@ loops that truncate the stream.
 class groupby:
     # [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
     # [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
+
     def __init__(self, iterable, key=None):
         if key is None:
             key = lambda x: x
         self.keyfunc = key
         self.it = iter(iterable)
         self.tgtkey = self.currkey = self.currvalue = object()
+
     def __iter__(self):
         return self
+
     def __next__(self):
         self.id = object()
         while self.currkey == self.tgtkey:
             self.currvalue = next(self.it)    # Exit on StopIteration
             self.currkey = self.keyfunc(self.currvalue)
         self.tgtkey = self.currkey
         return (self.currkey, self._grouper(self.tgtkey, self.id))
+
     def _grouper(self, tgtkey, id):
         while self.id is id and self.currkey == tgtkey:
             yield self.currvalue
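The comments at the top of the class correspond to doctests of roughly this shape (illustrative only, not part of this commit):

>>> [k for k, g in groupby('AAAABBBCCDAABBB')]
['A', 'B', 'C', 'D', 'A', 'B']
>>> [list(g) for k, g in groupby('AAAABBBCCD')]
[['A', 'A', 'A', 'A'], ['B', 'B', 'B'], ['C', 'C'], ['D']]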
@@ -482,10 +478,17 @@ loops that truncate the stream.
 Afterward, elements are returned consecutively unless *step* is set higher than
 one which results in items being skipped. If *stop* is ``None``, then iteration
 continues until the iterator is exhausted, if at all; otherwise, it stops at the
-specified position. Unlike regular slicing, :func:`islice` does not support
-negative values for *start*, *stop*, or *step*. Can be used to extract related
-fields from data where the internal structure has been flattened (for example, a
-multi-line report may list a name field on every third line). Roughly equivalent to::
+specified position.
+
+If *start* is ``None``, then iteration starts at zero. If *step* is ``None``,
+then the step defaults to one.
+
+Unlike regular slicing, :func:`islice` does not support negative values for
+*start*, *stop*, or *step*. Can be used to extract related fields from
+data where the internal structure has been flattened (for example, a
+multi-line report may list a name field on every third line).
+
+Roughly equivalent to::
 
 def islice(iterable, *args):
     # islice('ABCDEFG', 2) --> A B
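The "name field on every third line" use case mentioned in the hunk above can be sketched as a doctest (illustrative only, not part of this commit; the ``report`` sample data is made up):

>>> report = ['Alice', '30', 'NYC', 'Bob', '25', 'LA', 'Carol', '35', 'SF']
>>> list(islice(report, 0, None, 3))     # take every third field
['Alice', 'Bob', 'Carol']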
@@ -512,8 +515,6 @@ loops that truncate the stream.
         for i, element in zip(range(i + 1, stop), iterable):
             pass
 
-If *start* is ``None``, then iteration starts at zero. If *step* is ``None``,
-then the step defaults to one.
 
 .. function:: pairwise(iterable)
 
@@ -542,13 +543,13 @@ loops that truncate the stream.
 of the *iterable* and all possible full-length permutations
 are generated.
 
-The permutation tuples are emitted in lexicographic ordering according to
+The permutation tuples are emitted in lexicographic order according to
 the order of the input *iterable*. So, if the input *iterable* is sorted,
-the combination tuples will be produced in sorted order.
+the output tuples will be produced in sorted order.
 
 Elements are treated as unique based on their position, not on their
-value. So if the input elements are unique, there will be no repeat
-values in each permutation.
+value. So if the input elements are unique, there will be no repeated
+values within a permutation.
 
 Roughly equivalent to::
 
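A companion doctest sketch (illustrative only, not part of this commit): with a sorted input, permutations also come out in lexicographic order, and unlike combinations they include every ordering of each pair.

>>> list(permutations('ABC', 2))
[('A', 'B'), ('A', 'C'), ('B', 'A'), ('B', 'C'), ('C', 'A'), ('C', 'B')]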
@@ -628,9 +629,7 @@ loops that truncate the stream.
 .. function:: repeat(object[, times])
 
 Make an iterator that returns *object* over and over again. Runs indefinitely
-unless the *times* argument is specified. Used as argument to :func:`map` for
-invariant parameters to the called function. Also used with :func:`zip` to
-create an invariant part of a tuple record.
+unless the *times* argument is specified.
 
 Roughly equivalent to::
 
@@ -644,7 +643,9 @@ loops that truncate the stream.
             yield object
 
 A common use for *repeat* is to supply a stream of constant values to *map*
-or *zip*::
+or *zip*:
+
+.. doctest::
 
 >>> list(map(pow, range(10), repeat(2)))
 [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
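The *zip* half of that sentence can be sketched the same way (illustrative only, not part of this commit): repeat() supplies the invariant part of each tuple.

>>> list(zip(repeat('constant'), range(3)))
[('constant', 0), ('constant', 1), ('constant', 2)]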
@@ -653,9 +654,12 @@ loops that truncate the stream.
 
 Make an iterator that computes the function using arguments obtained from
 the iterable. Used instead of :func:`map` when argument parameters are already
-grouped in tuples from a single iterable (the data has been "pre-zipped"). The
-difference between :func:`map` and :func:`starmap` parallels the distinction
-between ``function(a,b)`` and ``function(*c)``. Roughly equivalent to::
+grouped in tuples from a single iterable (when the data has been
+"pre-zipped").
+
+The difference between :func:`map` and :func:`starmap` parallels the
+distinction between ``function(a,b)`` and ``function(*c)``. Roughly
+equivalent to::
 
 def starmap(function, iterable):
     # starmap(pow, [(2,5), (3,2), (10,3)]) --> 32 9 1000
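A side-by-side doctest sketch of the ``function(a,b)`` versus ``function(*c)`` distinction (illustrative only, not part of this commit):

>>> list(map(pow, (2, 3, 10), (5, 2, 3)))          # separate argument iterables
[32, 9, 1000]
>>> list(starmap(pow, [(2, 5), (3, 2), (10, 3)]))  # one "pre-zipped" iterable
[32, 9, 1000]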
@@ -683,9 +687,7 @@ loops that truncate the stream.
 
 The following Python code helps explain what *tee* does (although the actual
 implementation is more complex and uses only a single underlying
-:abbr:`FIFO (first-in, first-out)` queue).
-
-Roughly equivalent to::
+:abbr:`FIFO (first-in, first-out)` queue)::
 
 def tee(iterable, n=2):
     it = iter(iterable)
@@ -702,7 +704,7 @@ loops that truncate the stream.
             yield mydeque.popleft()
     return tuple(gen(d) for d in deques)
 
-Once :func:`tee` has made a split, the original *iterable* should not be
+Once a :func:`tee` has been created, the original *iterable* should not be
 used anywhere else; otherwise, the *iterable* could get advanced without
 the tee objects being informed.
 
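A small doctest sketch that makes the caveat concrete (illustrative only, not part of this commit; ``it``, ``a``, and ``b`` are throwaway names): advancing the original iterator after the split causes the tee iterators to miss that value.

>>> it = iter('ABCD')
>>> a, b = tee(it)
>>> next(it)          # advances the shared source directly
'A'
>>> list(a)           # the tee iterators never see 'A'
['B', 'C', 'D']
>>> list(b)
['B', 'C', 'D']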
@@ -756,14 +758,28 @@ Itertools Recipes
 This section shows recipes for creating an extended toolset using the existing
 itertools as building blocks.
 
+The primary purpose of the itertools recipes is educational. The recipes show
+various ways of thinking about individual tools — for example, that
+``chain.from_iterable`` is related to the concept of flattening. The recipes
+also give ideas about ways that the tools can be combined — for example, how
+`compress()` and `range()` can work together. The recipes also show patterns
+for using itertools with the :mod:`operator` and :mod:`collections` modules as
+well as with the built-in itertools such as ``map()``, ``filter()``,
+``reversed()``, and ``enumerate()``.
+
+A secondary purpose of the recipes is to serve as an incubator. The
+``accumulate()``, ``compress()``, and ``pairwise()`` itertools started out as
+recipes. Currently, the ``iter_index()`` recipe is being tested to see
+whether it proves its worth.
+
 Substantially all of these recipes and many, many others can be installed from
 the `more-itertools project <https://pypi.org/project/more-itertools/>`_ found
 on the Python Package Index::
 
    python -m pip install more-itertools
 
-The extended tools offer the same high performance as the underlying toolset.
-The superior memory performance is kept by processing elements one at a time
+Many of the recipes offer the same high performance as the underlying toolset.
+Superior memory performance is kept by processing elements one at a time
 rather than bringing the whole iterable into memory all at once. Code volume is
 kept small by linking the tools together in a functional style which helps
 eliminate temporary variables. High speed is retained by preferring
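The "compress() and range() can work together" remark in the hunk above can be sketched in one doctest (illustrative only, not part of this commit):

>>> list(compress(range(10), cycle([1, 0])))     # keep every other number
[0, 2, 4, 6, 8]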
@@ -848,15 +864,25 @@ which incur interpreter overhead.
         for k in range(len(roots) + 1)
     ]
 
-def iter_index(seq, value, start=0):
-    "Return indices where a value occurs in a sequence."
+def iter_index(iterable, value, start=0):
+    "Return indices where a value occurs in a sequence or iterable."
     # iter_index('AABCADEAF', 'A') --> 0 1 4 7
-    i = start - 1
     try:
-        while True:
-            yield (i := seq.index(value, i+1))
-    except ValueError:
-        pass
+        seq_index = iterable.index
+    except AttributeError:
+        # Slow path for general iterables
+        it = islice(iterable, start, None)
+        for i, element in enumerate(it, start):
+            if element is value or element == value:
+                yield i
+    else:
+        # Fast path for sequences
+        i = start - 1
+        try:
+            while True:
+                yield (i := seq_index(value, i+1))
+        except ValueError:
+            pass
 
 def sieve(n):
     "Primes less than n"
@@ -978,16 +1004,19 @@ which incur interpreter overhead.
     # unique_everseen('AAAABBBCCDAABBB') --> A B C D
     # unique_everseen('ABBCcAD', str.lower) --> A B C D
     seen = set()
-    seen_add = seen.add
     if key is None:
         for element in filterfalse(seen.__contains__, iterable):
-            seen_add(element)
+            seen.add(element)
             yield element
+        # Note: The steps shown above are intended to demonstrate
+        # filterfalse(). For order preserving deduplication,
+        # a better solution is:
+        #     yield from dict.fromkeys(iterable)
     else:
         for element in iterable:
             k = key(element)
             if k not in seen:
-                seen_add(k)
+                seen.add(k)
                 yield element
 
 def unique_justseen(iterable, key=None):
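The ``dict.fromkeys`` alternative mentioned in the new comment relies on dicts preserving insertion order; a quick doctest check (illustrative only, not part of this commit):

>>> list(dict.fromkeys('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']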
@@ -1196,6 +1225,18 @@ which incur interpreter overhead.
 []
 >>> list(iter_index('', 'X'))
 []
+>>> list(iter_index('AABCADEAF', 'A', 1))
+[1, 4, 7]
+>>> list(iter_index(iter('AABCADEAF'), 'A', 1))
+[1, 4, 7]
+>>> list(iter_index('AABCADEAF', 'A', 2))
+[4, 7]
+>>> list(iter_index(iter('AABCADEAF'), 'A', 2))
+[4, 7]
+>>> list(iter_index('AABCADEAF', 'A', 10))
+[]
+>>> list(iter_index(iter('AABCADEAF'), 'A', 10))
+[]
 
 >>> list(sieve(30))
 [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
