/*
 * Provenance: CPython, Objects/listobject.c
 * (mirrored via the allendowney/cpython repository, blob/main/Objects/listobject.c;
 * web-viewer chrome and interleaved line numbers removed.)
 */
1
/* List object implementation */
2
3
#include "Python.h"
4
#include "pycore_abstract.h" // _PyIndex_Check()
5
#include "pycore_interp.h" // PyInterpreterState.list
6
#include "pycore_list.h" // struct _Py_list_state, _PyListIterObject
7
#include "pycore_long.h" // _PyLong_DigitCount
8
#include "pycore_object.h" // _PyObject_GC_TRACK()
9
#include "pycore_tuple.h" // _PyTuple_FromArray()
10
#include <stddef.h>
11
12
/*[clinic input]
13
class list "PyListObject *" "&PyList_Type"
14
[clinic start generated code]*/
15
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=f9b222678f9f71e0]*/
16
17
#include "clinic/listobject.c.h"
18
19
_Py_DECLARE_STR(list_err, "list index out of range");
20
21
#if PyList_MAXFREELIST > 0
/* Return the per-interpreter free-list bookkeeping for list objects. */
static struct _Py_list_state *
get_list_state(void)
{
    return &_PyInterpreterState_GET()->list;
}
#endif
29
30
31
/* Ensure ob_item has room for at least newsize elements, and set
32
* ob_size to newsize. If newsize > ob_size on entry, the content
33
* of the new slots at exit is undefined heap trash; it's the caller's
34
* responsibility to overwrite them with sane values.
35
* The number of allocated elements may grow, shrink, or stay the same.
36
* Failure is impossible if newsize <= self.allocated on entry, although
37
* that partly relies on an assumption that the system realloc() never
38
* fails when passed a number of bytes <= the number of bytes last
39
* allocated (the C standard doesn't guarantee this, but it's hard to
40
* imagine a realloc implementation where it wouldn't be true).
41
* Note that self->ob_item may change, and even if newsize is less
42
* than ob_size on entry.
43
*/
44
static int
45
list_resize(PyListObject *self, Py_ssize_t newsize)
46
{
47
PyObject **items;
48
size_t new_allocated, num_allocated_bytes;
49
Py_ssize_t allocated = self->allocated;
50
51
/* Bypass realloc() when a previous overallocation is large enough
52
to accommodate the newsize. If the newsize falls lower than half
53
the allocated size, then proceed with the realloc() to shrink the list.
54
*/
55
if (allocated >= newsize && newsize >= (allocated >> 1)) {
56
assert(self->ob_item != NULL || newsize == 0);
57
Py_SET_SIZE(self, newsize);
58
return 0;
59
}
60
61
/* This over-allocates proportional to the list size, making room
62
* for additional growth. The over-allocation is mild, but is
63
* enough to give linear-time amortized behavior over a long
64
* sequence of appends() in the presence of a poorly-performing
65
* system realloc().
66
* Add padding to make the allocated size multiple of 4.
67
* The growth pattern is: 0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ...
68
* Note: new_allocated won't overflow because the largest possible value
69
* is PY_SSIZE_T_MAX * (9 / 8) + 6 which always fits in a size_t.
70
*/
71
new_allocated = ((size_t)newsize + (newsize >> 3) + 6) & ~(size_t)3;
72
/* Do not overallocate if the new size is closer to overallocated size
73
* than to the old size.
74
*/
75
if (newsize - Py_SIZE(self) > (Py_ssize_t)(new_allocated - newsize))
76
new_allocated = ((size_t)newsize + 3) & ~(size_t)3;
77
78
if (newsize == 0)
79
new_allocated = 0;
80
if (new_allocated <= (size_t)PY_SSIZE_T_MAX / sizeof(PyObject *)) {
81
num_allocated_bytes = new_allocated * sizeof(PyObject *);
82
items = (PyObject **)PyMem_Realloc(self->ob_item, num_allocated_bytes);
83
}
84
else {
85
// integer overflow
86
items = NULL;
87
}
88
if (items == NULL) {
89
PyErr_NoMemory();
90
return -1;
91
}
92
self->ob_item = items;
93
Py_SET_SIZE(self, newsize);
94
self->allocated = new_allocated;
95
return 0;
96
}
97
98
static int
99
list_preallocate_exact(PyListObject *self, Py_ssize_t size)
100
{
101
assert(self->ob_item == NULL);
102
assert(size > 0);
103
104
/* Since the Python memory allocator has granularity of 16 bytes on 64-bit
105
* platforms (8 on 32-bit), there is no benefit of allocating space for
106
* the odd number of items, and there is no drawback of rounding the
107
* allocated size up to the nearest even number.
108
*/
109
size = (size + 1) & ~(size_t)1;
110
PyObject **items = PyMem_New(PyObject*, size);
111
if (items == NULL) {
112
PyErr_NoMemory();
113
return -1;
114
}
115
self->ob_item = items;
116
self->allocated = size;
117
return 0;
118
}
119
120
void
121
_PyList_ClearFreeList(PyInterpreterState *interp)
122
{
123
#if PyList_MAXFREELIST > 0
124
struct _Py_list_state *state = &interp->list;
125
while (state->numfree) {
126
PyListObject *op = state->free_list[--state->numfree];
127
assert(PyList_CheckExact(op));
128
PyObject_GC_Del(op);
129
}
130
#endif
131
}
132
133
void
134
_PyList_Fini(PyInterpreterState *interp)
135
{
136
_PyList_ClearFreeList(interp);
137
#if defined(Py_DEBUG) && PyList_MAXFREELIST > 0
138
struct _Py_list_state *state = &interp->list;
139
state->numfree = -1;
140
#endif
141
}
142
143
/* Print summary info about the state of the optimized allocator */
144
void
145
_PyList_DebugMallocStats(FILE *out)
146
{
147
#if PyList_MAXFREELIST > 0
148
struct _Py_list_state *state = get_list_state();
149
_PyDebugAllocatorStats(out,
150
"free PyListObject",
151
state->numfree, sizeof(PyListObject));
152
#endif
153
}
154
155
PyObject *
156
PyList_New(Py_ssize_t size)
157
{
158
PyListObject *op;
159
160
if (size < 0) {
161
PyErr_BadInternalCall();
162
return NULL;
163
}
164
165
#if PyList_MAXFREELIST > 0
166
struct _Py_list_state *state = get_list_state();
167
#ifdef Py_DEBUG
168
// PyList_New() must not be called after _PyList_Fini()
169
assert(state->numfree != -1);
170
#endif
171
if (PyList_MAXFREELIST && state->numfree) {
172
state->numfree--;
173
op = state->free_list[state->numfree];
174
OBJECT_STAT_INC(from_freelist);
175
_Py_NewReference((PyObject *)op);
176
}
177
else
178
#endif
179
{
180
op = PyObject_GC_New(PyListObject, &PyList_Type);
181
if (op == NULL) {
182
return NULL;
183
}
184
}
185
if (size <= 0) {
186
op->ob_item = NULL;
187
}
188
else {
189
op->ob_item = (PyObject **) PyMem_Calloc(size, sizeof(PyObject *));
190
if (op->ob_item == NULL) {
191
Py_DECREF(op);
192
return PyErr_NoMemory();
193
}
194
}
195
Py_SET_SIZE(op, size);
196
op->allocated = size;
197
_PyObject_GC_TRACK(op);
198
return (PyObject *) op;
199
}
200
201
static PyObject *
202
list_new_prealloc(Py_ssize_t size)
203
{
204
assert(size > 0);
205
PyListObject *op = (PyListObject *) PyList_New(0);
206
if (op == NULL) {
207
return NULL;
208
}
209
assert(op->ob_item == NULL);
210
op->ob_item = PyMem_New(PyObject *, size);
211
if (op->ob_item == NULL) {
212
Py_DECREF(op);
213
return PyErr_NoMemory();
214
}
215
op->allocated = size;
216
return (PyObject *) op;
217
}
218
219
Py_ssize_t
220
PyList_Size(PyObject *op)
221
{
222
if (!PyList_Check(op)) {
223
PyErr_BadInternalCall();
224
return -1;
225
}
226
else
227
return Py_SIZE(op);
228
}
229
230
static inline int
231
valid_index(Py_ssize_t i, Py_ssize_t limit)
232
{
233
/* The cast to size_t lets us use just a single comparison
234
to check whether i is in the range: 0 <= i < limit.
235
236
See: Section 14.2 "Bounds Checking" in the Agner Fog
237
optimization manual found at:
238
https://www.agner.org/optimize/optimizing_cpp.pdf
239
*/
240
return (size_t) i < (size_t) limit;
241
}
242
243
PyObject *
244
PyList_GetItem(PyObject *op, Py_ssize_t i)
245
{
246
if (!PyList_Check(op)) {
247
PyErr_BadInternalCall();
248
return NULL;
249
}
250
if (!valid_index(i, Py_SIZE(op))) {
251
_Py_DECLARE_STR(list_err, "list index out of range");
252
PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
253
return NULL;
254
}
255
return ((PyListObject *)op) -> ob_item[i];
256
}
257
258
int
259
PyList_SetItem(PyObject *op, Py_ssize_t i,
260
PyObject *newitem)
261
{
262
PyObject **p;
263
if (!PyList_Check(op)) {
264
Py_XDECREF(newitem);
265
PyErr_BadInternalCall();
266
return -1;
267
}
268
if (!valid_index(i, Py_SIZE(op))) {
269
Py_XDECREF(newitem);
270
PyErr_SetString(PyExc_IndexError,
271
"list assignment index out of range");
272
return -1;
273
}
274
p = ((PyListObject *)op) -> ob_item + i;
275
Py_XSETREF(*p, newitem);
276
return 0;
277
}
278
279
static int
280
ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
281
{
282
Py_ssize_t i, n = Py_SIZE(self);
283
PyObject **items;
284
if (v == NULL) {
285
PyErr_BadInternalCall();
286
return -1;
287
}
288
289
assert((size_t)n + 1 < PY_SSIZE_T_MAX);
290
if (list_resize(self, n+1) < 0)
291
return -1;
292
293
if (where < 0) {
294
where += n;
295
if (where < 0)
296
where = 0;
297
}
298
if (where > n)
299
where = n;
300
items = self->ob_item;
301
for (i = n; --i >= where; )
302
items[i+1] = items[i];
303
items[where] = Py_NewRef(v);
304
return 0;
305
}
306
307
int
308
PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
309
{
310
if (!PyList_Check(op)) {
311
PyErr_BadInternalCall();
312
return -1;
313
}
314
return ins1((PyListObject *)op, where, newitem);
315
}
316
317
/* internal, used by _PyList_AppendTakeRef */
318
int
319
_PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem)
320
{
321
Py_ssize_t len = PyList_GET_SIZE(self);
322
assert(self->allocated == -1 || self->allocated == len);
323
if (list_resize(self, len + 1) < 0) {
324
Py_DECREF(newitem);
325
return -1;
326
}
327
PyList_SET_ITEM(self, len, newitem);
328
return 0;
329
}
330
331
int
332
PyList_Append(PyObject *op, PyObject *newitem)
333
{
334
if (PyList_Check(op) && (newitem != NULL)) {
335
return _PyList_AppendTakeRef((PyListObject *)op, Py_NewRef(newitem));
336
}
337
PyErr_BadInternalCall();
338
return -1;
339
}
340
341
/* Methods */
342
343
static void
344
list_dealloc(PyListObject *op)
345
{
346
Py_ssize_t i;
347
PyObject_GC_UnTrack(op);
348
Py_TRASHCAN_BEGIN(op, list_dealloc)
349
if (op->ob_item != NULL) {
350
/* Do it backwards, for Christian Tismer.
351
There's a simple test case where somehow this reduces
352
thrashing when a *very* large list is created and
353
immediately deleted. */
354
i = Py_SIZE(op);
355
while (--i >= 0) {
356
Py_XDECREF(op->ob_item[i]);
357
}
358
PyMem_Free(op->ob_item);
359
}
360
#if PyList_MAXFREELIST > 0
361
struct _Py_list_state *state = get_list_state();
362
#ifdef Py_DEBUG
363
// list_dealloc() must not be called after _PyList_Fini()
364
assert(state->numfree != -1);
365
#endif
366
if (state->numfree < PyList_MAXFREELIST && PyList_CheckExact(op)) {
367
state->free_list[state->numfree++] = op;
368
OBJECT_STAT_INC(to_freelist);
369
}
370
else
371
#endif
372
{
373
Py_TYPE(op)->tp_free((PyObject *)op);
374
}
375
Py_TRASHCAN_END
376
}
377
378
static PyObject *
379
list_repr(PyListObject *v)
380
{
381
Py_ssize_t i;
382
PyObject *s;
383
_PyUnicodeWriter writer;
384
385
if (Py_SIZE(v) == 0) {
386
return PyUnicode_FromString("[]");
387
}
388
389
i = Py_ReprEnter((PyObject*)v);
390
if (i != 0) {
391
return i > 0 ? PyUnicode_FromString("[...]") : NULL;
392
}
393
394
_PyUnicodeWriter_Init(&writer);
395
writer.overallocate = 1;
396
/* "[" + "1" + ", 2" * (len - 1) + "]" */
397
writer.min_length = 1 + 1 + (2 + 1) * (Py_SIZE(v) - 1) + 1;
398
399
if (_PyUnicodeWriter_WriteChar(&writer, '[') < 0)
400
goto error;
401
402
/* Do repr() on each element. Note that this may mutate the list,
403
so must refetch the list size on each iteration. */
404
for (i = 0; i < Py_SIZE(v); ++i) {
405
if (i > 0) {
406
if (_PyUnicodeWriter_WriteASCIIString(&writer, ", ", 2) < 0)
407
goto error;
408
}
409
410
s = PyObject_Repr(v->ob_item[i]);
411
if (s == NULL)
412
goto error;
413
414
if (_PyUnicodeWriter_WriteStr(&writer, s) < 0) {
415
Py_DECREF(s);
416
goto error;
417
}
418
Py_DECREF(s);
419
}
420
421
writer.overallocate = 0;
422
if (_PyUnicodeWriter_WriteChar(&writer, ']') < 0)
423
goto error;
424
425
Py_ReprLeave((PyObject *)v);
426
return _PyUnicodeWriter_Finish(&writer);
427
428
error:
429
_PyUnicodeWriter_Dealloc(&writer);
430
Py_ReprLeave((PyObject *)v);
431
return NULL;
432
}
433
434
static Py_ssize_t
435
list_length(PyListObject *a)
436
{
437
return Py_SIZE(a);
438
}
439
440
static int
441
list_contains(PyListObject *a, PyObject *el)
442
{
443
PyObject *item;
444
Py_ssize_t i;
445
int cmp;
446
447
for (i = 0, cmp = 0 ; cmp == 0 && i < Py_SIZE(a); ++i) {
448
item = PyList_GET_ITEM(a, i);
449
Py_INCREF(item);
450
cmp = PyObject_RichCompareBool(item, el, Py_EQ);
451
Py_DECREF(item);
452
}
453
return cmp;
454
}
455
456
static PyObject *
457
list_item(PyListObject *a, Py_ssize_t i)
458
{
459
if (!valid_index(i, Py_SIZE(a))) {
460
PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
461
return NULL;
462
}
463
return Py_NewRef(a->ob_item[i]);
464
}
465
466
static PyObject *
467
list_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
468
{
469
PyListObject *np;
470
PyObject **src, **dest;
471
Py_ssize_t i, len;
472
len = ihigh - ilow;
473
if (len <= 0) {
474
return PyList_New(0);
475
}
476
np = (PyListObject *) list_new_prealloc(len);
477
if (np == NULL)
478
return NULL;
479
480
src = a->ob_item + ilow;
481
dest = np->ob_item;
482
for (i = 0; i < len; i++) {
483
PyObject *v = src[i];
484
dest[i] = Py_NewRef(v);
485
}
486
Py_SET_SIZE(np, len);
487
return (PyObject *)np;
488
}
489
490
PyObject *
491
PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
492
{
493
if (!PyList_Check(a)) {
494
PyErr_BadInternalCall();
495
return NULL;
496
}
497
if (ilow < 0) {
498
ilow = 0;
499
}
500
else if (ilow > Py_SIZE(a)) {
501
ilow = Py_SIZE(a);
502
}
503
if (ihigh < ilow) {
504
ihigh = ilow;
505
}
506
else if (ihigh > Py_SIZE(a)) {
507
ihigh = Py_SIZE(a);
508
}
509
return list_slice((PyListObject *)a, ilow, ihigh);
510
}
511
512
static PyObject *
513
list_concat(PyListObject *a, PyObject *bb)
514
{
515
Py_ssize_t size;
516
Py_ssize_t i;
517
PyObject **src, **dest;
518
PyListObject *np;
519
if (!PyList_Check(bb)) {
520
PyErr_Format(PyExc_TypeError,
521
"can only concatenate list (not \"%.200s\") to list",
522
Py_TYPE(bb)->tp_name);
523
return NULL;
524
}
525
#define b ((PyListObject *)bb)
526
assert((size_t)Py_SIZE(a) + (size_t)Py_SIZE(b) < PY_SSIZE_T_MAX);
527
size = Py_SIZE(a) + Py_SIZE(b);
528
if (size == 0) {
529
return PyList_New(0);
530
}
531
np = (PyListObject *) list_new_prealloc(size);
532
if (np == NULL) {
533
return NULL;
534
}
535
src = a->ob_item;
536
dest = np->ob_item;
537
for (i = 0; i < Py_SIZE(a); i++) {
538
PyObject *v = src[i];
539
dest[i] = Py_NewRef(v);
540
}
541
src = b->ob_item;
542
dest = np->ob_item + Py_SIZE(a);
543
for (i = 0; i < Py_SIZE(b); i++) {
544
PyObject *v = src[i];
545
dest[i] = Py_NewRef(v);
546
}
547
Py_SET_SIZE(np, size);
548
return (PyObject *)np;
549
#undef b
550
}
551
552
static PyObject *
553
list_repeat(PyListObject *a, Py_ssize_t n)
554
{
555
const Py_ssize_t input_size = Py_SIZE(a);
556
if (input_size == 0 || n <= 0)
557
return PyList_New(0);
558
assert(n > 0);
559
560
if (input_size > PY_SSIZE_T_MAX / n)
561
return PyErr_NoMemory();
562
Py_ssize_t output_size = input_size * n;
563
564
PyListObject *np = (PyListObject *) list_new_prealloc(output_size);
565
if (np == NULL)
566
return NULL;
567
568
PyObject **dest = np->ob_item;
569
if (input_size == 1) {
570
PyObject *elem = a->ob_item[0];
571
_Py_RefcntAdd(elem, n);
572
PyObject **dest_end = dest + output_size;
573
while (dest < dest_end) {
574
*dest++ = elem;
575
}
576
}
577
else {
578
PyObject **src = a->ob_item;
579
PyObject **src_end = src + input_size;
580
while (src < src_end) {
581
_Py_RefcntAdd(*src, n);
582
*dest++ = *src++;
583
}
584
585
_Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
586
sizeof(PyObject *)*input_size);
587
}
588
589
Py_SET_SIZE(np, output_size);
590
return (PyObject *) np;
591
}
592
593
static int
594
_list_clear(PyListObject *a)
595
{
596
Py_ssize_t i;
597
PyObject **item = a->ob_item;
598
if (item != NULL) {
599
/* Because XDECREF can recursively invoke operations on
600
this list, we make it empty first. */
601
i = Py_SIZE(a);
602
Py_SET_SIZE(a, 0);
603
a->ob_item = NULL;
604
a->allocated = 0;
605
while (--i >= 0) {
606
Py_XDECREF(item[i]);
607
}
608
PyMem_Free(item);
609
}
610
/* Never fails; the return value can be ignored.
611
Note that there is no guarantee that the list is actually empty
612
at this point, because XDECREF may have populated it again! */
613
return 0;
614
}
615
616
/* a[ilow:ihigh] = v if v != NULL.
617
* del a[ilow:ihigh] if v == NULL.
618
*
619
* Special speed gimmick: when v is NULL and ihigh - ilow <= 8, it's
620
* guaranteed the call cannot fail.
621
*/
622
static int
623
list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
624
{
625
/* Because [X]DECREF can recursively invoke list operations on
626
this list, we must postpone all [X]DECREF activity until
627
after the list is back in its canonical shape. Therefore
628
we must allocate an additional array, 'recycle', into which
629
we temporarily copy the items that are deleted from the
630
list. :-( */
631
PyObject *recycle_on_stack[8];
632
PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
633
PyObject **item;
634
PyObject **vitem = NULL;
635
PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
636
Py_ssize_t n; /* # of elements in replacement list */
637
Py_ssize_t norig; /* # of elements in list getting replaced */
638
Py_ssize_t d; /* Change in size */
639
Py_ssize_t k;
640
size_t s;
641
int result = -1; /* guilty until proved innocent */
642
#define b ((PyListObject *)v)
643
if (v == NULL)
644
n = 0;
645
else {
646
if (a == b) {
647
/* Special case "a[i:j] = a" -- copy b first */
648
v = list_slice(b, 0, Py_SIZE(b));
649
if (v == NULL)
650
return result;
651
result = list_ass_slice(a, ilow, ihigh, v);
652
Py_DECREF(v);
653
return result;
654
}
655
v_as_SF = PySequence_Fast(v, "can only assign an iterable");
656
if(v_as_SF == NULL)
657
goto Error;
658
n = PySequence_Fast_GET_SIZE(v_as_SF);
659
vitem = PySequence_Fast_ITEMS(v_as_SF);
660
}
661
if (ilow < 0)
662
ilow = 0;
663
else if (ilow > Py_SIZE(a))
664
ilow = Py_SIZE(a);
665
666
if (ihigh < ilow)
667
ihigh = ilow;
668
else if (ihigh > Py_SIZE(a))
669
ihigh = Py_SIZE(a);
670
671
norig = ihigh - ilow;
672
assert(norig >= 0);
673
d = n - norig;
674
if (Py_SIZE(a) + d == 0) {
675
Py_XDECREF(v_as_SF);
676
return _list_clear(a);
677
}
678
item = a->ob_item;
679
/* recycle the items that we are about to remove */
680
s = norig * sizeof(PyObject *);
681
/* If norig == 0, item might be NULL, in which case we may not memcpy from it. */
682
if (s) {
683
if (s > sizeof(recycle_on_stack)) {
684
recycle = (PyObject **)PyMem_Malloc(s);
685
if (recycle == NULL) {
686
PyErr_NoMemory();
687
goto Error;
688
}
689
}
690
memcpy(recycle, &item[ilow], s);
691
}
692
693
if (d < 0) { /* Delete -d items */
694
Py_ssize_t tail;
695
tail = (Py_SIZE(a) - ihigh) * sizeof(PyObject *);
696
memmove(&item[ihigh+d], &item[ihigh], tail);
697
if (list_resize(a, Py_SIZE(a) + d) < 0) {
698
memmove(&item[ihigh], &item[ihigh+d], tail);
699
memcpy(&item[ilow], recycle, s);
700
goto Error;
701
}
702
item = a->ob_item;
703
}
704
else if (d > 0) { /* Insert d items */
705
k = Py_SIZE(a);
706
if (list_resize(a, k+d) < 0)
707
goto Error;
708
item = a->ob_item;
709
memmove(&item[ihigh+d], &item[ihigh],
710
(k - ihigh)*sizeof(PyObject *));
711
}
712
for (k = 0; k < n; k++, ilow++) {
713
PyObject *w = vitem[k];
714
item[ilow] = Py_XNewRef(w);
715
}
716
for (k = norig - 1; k >= 0; --k)
717
Py_XDECREF(recycle[k]);
718
result = 0;
719
Error:
720
if (recycle != recycle_on_stack)
721
PyMem_Free(recycle);
722
Py_XDECREF(v_as_SF);
723
return result;
724
#undef b
725
}
726
727
int
728
PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
729
{
730
if (!PyList_Check(a)) {
731
PyErr_BadInternalCall();
732
return -1;
733
}
734
return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
735
}
736
737
static PyObject *
738
list_inplace_repeat(PyListObject *self, Py_ssize_t n)
739
{
740
Py_ssize_t input_size = PyList_GET_SIZE(self);
741
if (input_size == 0 || n == 1) {
742
return Py_NewRef(self);
743
}
744
745
if (n < 1) {
746
(void)_list_clear(self);
747
return Py_NewRef(self);
748
}
749
750
if (input_size > PY_SSIZE_T_MAX / n) {
751
return PyErr_NoMemory();
752
}
753
Py_ssize_t output_size = input_size * n;
754
755
if (list_resize(self, output_size) < 0)
756
return NULL;
757
758
PyObject **items = self->ob_item;
759
for (Py_ssize_t j = 0; j < input_size; j++) {
760
_Py_RefcntAdd(items[j], n-1);
761
}
762
_Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size,
763
sizeof(PyObject *)*input_size);
764
765
return Py_NewRef(self);
766
}
767
768
static int
769
list_ass_item(PyListObject *a, Py_ssize_t i, PyObject *v)
770
{
771
if (!valid_index(i, Py_SIZE(a))) {
772
PyErr_SetString(PyExc_IndexError,
773
"list assignment index out of range");
774
return -1;
775
}
776
if (v == NULL)
777
return list_ass_slice(a, i, i+1, v);
778
Py_SETREF(a->ob_item[i], Py_NewRef(v));
779
return 0;
780
}
781
782
/*[clinic input]
783
list.insert
784
785
index: Py_ssize_t
786
object: object
787
/
788
789
Insert object before index.
790
[clinic start generated code]*/
791
792
static PyObject *
793
list_insert_impl(PyListObject *self, Py_ssize_t index, PyObject *object)
794
/*[clinic end generated code: output=7f35e32f60c8cb78 input=858514cf894c7eab]*/
795
{
796
if (ins1(self, index, object) == 0)
797
Py_RETURN_NONE;
798
return NULL;
799
}
800
801
/*[clinic input]
802
list.clear
803
804
Remove all items from list.
805
[clinic start generated code]*/
806
807
static PyObject *
808
list_clear_impl(PyListObject *self)
809
/*[clinic end generated code: output=67a1896c01f74362 input=ca3c1646856742f6]*/
810
{
811
_list_clear(self);
812
Py_RETURN_NONE;
813
}
814
815
/*[clinic input]
816
list.copy
817
818
Return a shallow copy of the list.
819
[clinic start generated code]*/
820
821
static PyObject *
822
list_copy_impl(PyListObject *self)
823
/*[clinic end generated code: output=ec6b72d6209d418e input=6453ab159e84771f]*/
824
{
825
return list_slice(self, 0, Py_SIZE(self));
826
}
827
828
/*[clinic input]
829
list.append
830
831
object: object
832
/
833
834
Append object to the end of the list.
835
[clinic start generated code]*/
836
837
static PyObject *
838
list_append(PyListObject *self, PyObject *object)
839
/*[clinic end generated code: output=7c096003a29c0eae input=43a3fe48a7066e91]*/
840
{
841
if (_PyList_AppendTakeRef(self, Py_NewRef(object)) < 0) {
842
return NULL;
843
}
844
Py_RETURN_NONE;
845
}
846
847
/*[clinic input]
848
list.extend
849
850
iterable: object
851
/
852
853
Extend list by appending elements from the iterable.
854
[clinic start generated code]*/
855
856
static PyObject *
857
list_extend(PyListObject *self, PyObject *iterable)
858
/*[clinic end generated code: output=630fb3bca0c8e789 input=9ec5ba3a81be3a4d]*/
859
{
860
PyObject *it; /* iter(v) */
861
Py_ssize_t m; /* size of self */
862
Py_ssize_t n; /* guess for size of iterable */
863
Py_ssize_t i;
864
PyObject *(*iternext)(PyObject *);
865
866
/* Special cases:
867
1) lists and tuples which can use PySequence_Fast ops
868
2) extending self to self requires making a copy first
869
*/
870
if (PyList_CheckExact(iterable) || PyTuple_CheckExact(iterable) ||
871
(PyObject *)self == iterable) {
872
PyObject **src, **dest;
873
iterable = PySequence_Fast(iterable, "argument must be iterable");
874
if (!iterable)
875
return NULL;
876
n = PySequence_Fast_GET_SIZE(iterable);
877
if (n == 0) {
878
/* short circuit when iterable is empty */
879
Py_DECREF(iterable);
880
Py_RETURN_NONE;
881
}
882
m = Py_SIZE(self);
883
/* It should not be possible to allocate a list large enough to cause
884
an overflow on any relevant platform */
885
assert(m < PY_SSIZE_T_MAX - n);
886
if (self->ob_item == NULL) {
887
if (list_preallocate_exact(self, n) < 0) {
888
return NULL;
889
}
890
Py_SET_SIZE(self, n);
891
}
892
else if (list_resize(self, m + n) < 0) {
893
Py_DECREF(iterable);
894
return NULL;
895
}
896
/* note that we may still have self == iterable here for the
897
* situation a.extend(a), but the following code works
898
* in that case too. Just make sure to resize self
899
* before calling PySequence_Fast_ITEMS.
900
*/
901
/* populate the end of self with iterable's items */
902
src = PySequence_Fast_ITEMS(iterable);
903
dest = self->ob_item + m;
904
for (i = 0; i < n; i++) {
905
PyObject *o = src[i];
906
dest[i] = Py_NewRef(o);
907
}
908
Py_DECREF(iterable);
909
Py_RETURN_NONE;
910
}
911
912
it = PyObject_GetIter(iterable);
913
if (it == NULL)
914
return NULL;
915
iternext = *Py_TYPE(it)->tp_iternext;
916
917
/* Guess a result list size. */
918
n = PyObject_LengthHint(iterable, 8);
919
if (n < 0) {
920
Py_DECREF(it);
921
return NULL;
922
}
923
m = Py_SIZE(self);
924
if (m > PY_SSIZE_T_MAX - n) {
925
/* m + n overflowed; on the chance that n lied, and there really
926
* is enough room, ignore it. If n was telling the truth, we'll
927
* eventually run out of memory during the loop.
928
*/
929
}
930
else if (self->ob_item == NULL) {
931
if (n && list_preallocate_exact(self, n) < 0)
932
goto error;
933
}
934
else {
935
/* Make room. */
936
if (list_resize(self, m + n) < 0)
937
goto error;
938
/* Make the list sane again. */
939
Py_SET_SIZE(self, m);
940
}
941
942
/* Run iterator to exhaustion. */
943
for (;;) {
944
PyObject *item = iternext(it);
945
if (item == NULL) {
946
if (PyErr_Occurred()) {
947
if (PyErr_ExceptionMatches(PyExc_StopIteration))
948
PyErr_Clear();
949
else
950
goto error;
951
}
952
break;
953
}
954
if (Py_SIZE(self) < self->allocated) {
955
/* steals ref */
956
Py_ssize_t len = Py_SIZE(self);
957
Py_SET_SIZE(self, len + 1);
958
PyList_SET_ITEM(self, len, item);
959
}
960
else {
961
if (_PyList_AppendTakeRef(self, item) < 0)
962
goto error;
963
}
964
}
965
966
/* Cut back result list if initial guess was too large. */
967
if (Py_SIZE(self) < self->allocated) {
968
if (list_resize(self, Py_SIZE(self)) < 0)
969
goto error;
970
}
971
972
Py_DECREF(it);
973
Py_RETURN_NONE;
974
975
error:
976
Py_DECREF(it);
977
return NULL;
978
}
979
980
PyObject *
981
_PyList_Extend(PyListObject *self, PyObject *iterable)
982
{
983
return list_extend(self, iterable);
984
}
985
986
static PyObject *
987
list_inplace_concat(PyListObject *self, PyObject *other)
988
{
989
PyObject *result;
990
991
result = list_extend(self, other);
992
if (result == NULL)
993
return result;
994
Py_DECREF(result);
995
return Py_NewRef(self);
996
}
997
998
/*[clinic input]
999
list.pop
1000
1001
index: Py_ssize_t = -1
1002
/
1003
1004
Remove and return item at index (default last).
1005
1006
Raises IndexError if list is empty or index is out of range.
1007
[clinic start generated code]*/
1008
1009
static PyObject *
1010
list_pop_impl(PyListObject *self, Py_ssize_t index)
1011
/*[clinic end generated code: output=6bd69dcb3f17eca8 input=b83675976f329e6f]*/
1012
{
1013
PyObject *v;
1014
int status;
1015
1016
if (Py_SIZE(self) == 0) {
1017
/* Special-case most common failure cause */
1018
PyErr_SetString(PyExc_IndexError, "pop from empty list");
1019
return NULL;
1020
}
1021
if (index < 0)
1022
index += Py_SIZE(self);
1023
if (!valid_index(index, Py_SIZE(self))) {
1024
PyErr_SetString(PyExc_IndexError, "pop index out of range");
1025
return NULL;
1026
}
1027
1028
PyObject **items = self->ob_item;
1029
v = items[index];
1030
const Py_ssize_t size_after_pop = Py_SIZE(self) - 1;
1031
if (size_after_pop == 0) {
1032
Py_INCREF(v);
1033
status = _list_clear(self);
1034
}
1035
else {
1036
if ((size_after_pop - index) > 0) {
1037
memmove(&items[index], &items[index+1], (size_after_pop - index) * sizeof(PyObject *));
1038
}
1039
status = list_resize(self, size_after_pop);
1040
}
1041
if (status >= 0) {
1042
return v; // and v now owns the reference the list had
1043
}
1044
else {
1045
// list resize failed, need to restore
1046
memmove(&items[index+1], &items[index], (size_after_pop - index)* sizeof(PyObject *));
1047
items[index] = v;
1048
return NULL;
1049
}
1050
}
1051
1052
/* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
1053
static void
1054
reverse_slice(PyObject **lo, PyObject **hi)
1055
{
1056
assert(lo && hi);
1057
1058
--hi;
1059
while (lo < hi) {
1060
PyObject *t = *lo;
1061
*lo = *hi;
1062
*hi = t;
1063
++lo;
1064
--hi;
1065
}
1066
}
1067
1068
/* Lots of code for an adaptive, stable, natural mergesort. There are many
1069
* pieces to this algorithm; read listsort.txt for overviews and details.
1070
*/
1071
1072
/* A sortslice contains a pointer to an array of keys and a pointer to
1073
* an array of corresponding values. In other words, keys[i]
1074
* corresponds with values[i]. If values == NULL, then the keys are
1075
* also the values.
1076
*
1077
* Several convenience routines are provided here, so that keys and
1078
* values are always moved in sync.
1079
*/
1080
1081
typedef struct {
1082
PyObject **keys;
1083
PyObject **values;
1084
} sortslice;
1085
1086
Py_LOCAL_INLINE(void)
1087
sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j)
1088
{
1089
s1->keys[i] = s2->keys[j];
1090
if (s1->values != NULL)
1091
s1->values[i] = s2->values[j];
1092
}
1093
1094
Py_LOCAL_INLINE(void)
1095
sortslice_copy_incr(sortslice *dst, sortslice *src)
1096
{
1097
*dst->keys++ = *src->keys++;
1098
if (dst->values != NULL)
1099
*dst->values++ = *src->values++;
1100
}
1101
1102
Py_LOCAL_INLINE(void)
1103
sortslice_copy_decr(sortslice *dst, sortslice *src)
1104
{
1105
*dst->keys-- = *src->keys--;
1106
if (dst->values != NULL)
1107
*dst->values-- = *src->values--;
1108
}
1109
1110
1111
Py_LOCAL_INLINE(void)
1112
sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1113
Py_ssize_t n)
1114
{
1115
memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1116
if (s1->values != NULL)
1117
memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1118
}
1119
1120
Py_LOCAL_INLINE(void)
1121
sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
1122
Py_ssize_t n)
1123
{
1124
memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
1125
if (s1->values != NULL)
1126
memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
1127
}
1128
1129
Py_LOCAL_INLINE(void)
1130
sortslice_advance(sortslice *slice, Py_ssize_t n)
1131
{
1132
slice->keys += n;
1133
if (slice->values != NULL)
1134
slice->values += n;
1135
}
1136
1137
/* Comparison function: ms->key_compare, which is set at run-time in
 * listsort_impl to optimize for various special cases.
 * Returns -1 on error, 1 if x < y, 0 if x >= y.
 */

#define ISLT(X, Y) (*(ms->key_compare))(X, Y, ms)

/* Compare X to Y via "<".  Goto "fail" if the comparison raises an
   error.  Else "k" is set to true iff X<Y, and an "if (k)" block is
   started.  It makes more sense in context <wink>.  X and Y are PyObject*s.
*/
#define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail;  \
           if (k)

/* The maximum number of entries in a MergeState's pending-runs stack.
 * For a list with n elements, this needs at most floor(log2(n)) + 1 entries
 * even if we didn't force runs to a minimal length.  So the number of bits
 * in a Py_ssize_t is plenty large enough for all cases.
 */
#define MAX_MERGE_PENDING (SIZEOF_SIZE_T * 8)

/* When we get into galloping mode, we stay there until both runs win less
 * often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
 */
#define MIN_GALLOP 7

/* Avoid malloc for small temp arrays. */
#define MERGESTATE_TEMP_SIZE 256
1165
1166
/* One MergeState exists on the stack per invocation of mergesort.  It's just
 * a convenient way to pass state around among the helper functions.
 */
struct s_slice {
    sortslice base;     /* first element of the run (keys + optional values) */
    Py_ssize_t len;     /* length of run */
    int power;          /* node "level" for powersort merge strategy */
};

typedef struct s_MergeState MergeState;
struct s_MergeState {
    /* This controls when we get *into* galloping mode.  It's initialized
     * to MIN_GALLOP.  merge_lo and merge_hi tend to nudge it higher for
     * random data, and lower for highly structured data.
     */
    Py_ssize_t min_gallop;

    Py_ssize_t listlen;     /* len(input_list) - read only */
    PyObject **basekeys;    /* base address of keys array - read only */

    /* 'a' is temp storage to help with merges.  It contains room for
     * alloced entries.
     */
    sortslice a;        /* may point to temparray below */
    Py_ssize_t alloced;

    /* A stack of n pending runs yet to be merged.  Run #i starts at
     * address base[i] and extends for len[i] elements.  It's always
     * true (so long as the indices are in bounds) that
     *
     *     pending[i].base + pending[i].len == pending[i+1].base
     *
     * so we could cut the storage for this, but it's a minor amount,
     * and keeping all the info explicit simplifies the code.
     */
    int n;
    struct s_slice pending[MAX_MERGE_PENDING];

    /* 'a' points to this when possible, rather than muck with malloc. */
    PyObject *temparray[MERGESTATE_TEMP_SIZE];

    /* This is the function we will use to compare two keys,
     * even when none of our special cases apply and we have to use
     * safe_object_compare. */
    int (*key_compare)(PyObject *, PyObject *, MergeState *);

    /* This function is used by unsafe_object_compare to optimize comparisons
     * when we know our list is type-homogeneous but we can't assume anything else.
     * In the pre-sort check it is set equal to Py_TYPE(key)->tp_richcompare */
    PyObject *(*key_richcompare)(PyObject *, PyObject *, int);

    /* This function is used by unsafe_tuple_compare to compare the first elements
     * of tuples. It may be set to safe_object_compare, but the idea is that hopefully
     * we can assume more, and use one of the special-case compares. */
    int (*tuple_elem_compare)(PyObject *, PyObject *, MergeState *);
};
1222
1223
/* binarysort is the best method for sorting small arrays: it does
   few compares, but can do data movement quadratic in the number of
   elements.
   [lo, hi) is a contiguous slice of a list, and is sorted via
   binary insertion.  This sort is stable.
   On entry, must have lo <= start <= hi, and that [lo, start) is already
   sorted (pass start == lo if you don't know!).
   If islt() complains return -1, else 0.
   Even in case of error, the output slice will be some permutation of
   the input (nothing is lost or duplicated).
*/
static int
binarysort(MergeState *ms, sortslice lo, PyObject **hi, PyObject **start)
{
    Py_ssize_t k;               /* required by the IFLT macro */
    PyObject **l, **p, **r;
    PyObject *pivot;

    assert(lo.keys <= start && start <= hi);
    /* assert [lo, start) is sorted */
    if (lo.keys == start)
        ++start;
    for (; start < hi; ++start) {
        /* set l to where *start belongs */
        l = lo.keys;
        r = start;
        pivot = *r;
        /* Invariants:
         * pivot >= all in [lo, l).
         * pivot  < all in [r, start).
         * The second is vacuously true at the start.
         */
        assert(l < r);
        do {
            p = l + ((r - l) >> 1);
            IFLT(pivot, *p)
                r = p;
            else
                l = p+1;
        } while (l < r);
        assert(l == r);
        /* The invariants still hold, so pivot >= all in [lo, l) and
           pivot < all in [l, start), so pivot belongs at l.  Note
           that if there are elements equal to pivot, l points to the
           first slot after them -- that's why this sort is stable.
           Slide over to make room.
           Caution: using memmove is much slower under MSVC 5;
           we're not usually moving many slots. */
        for (p = start; p > l; --p)
            *p = *(p-1);
        *l = pivot;
        if (lo.values != NULL) {
            /* A separate values array exists (key-function sort):
             * mirror the exact same rotation there, using the fixed
             * keys->values pointer offset to find the parallel slots. */
            Py_ssize_t offset = lo.values - lo.keys;
            p = start + offset;
            pivot = *p;
            l += offset;
            for (p = start + offset; p > l; --p)
                *p = *(p-1);
            *l = pivot;
        }
    }
    return 0;

 fail:
    /* A comparison raised; [lo, hi) is still a permutation of the input. */
    return -1;
}
1289
1290
/*
Return the length of the run beginning at lo, in the slice [lo, hi).  lo < hi
is required on entry.  "A run" is the longest ascending sequence, with

    lo[0] <= lo[1] <= lo[2] <= ...

or the longest descending sequence, with

    lo[0] > lo[1] > lo[2] > ...

Boolean *descending is set to 0 in the former case, or to 1 in the latter.
For its intended use in a stable mergesort, the strictness of the defn of
"descending" is needed so that the caller can safely reverse a descending
sequence without violating stability (strict > ensures there are no equal
elements to get out of order).

Returns -1 in case of error.
*/
static Py_ssize_t
count_run(MergeState *ms, PyObject **lo, PyObject **hi, int *descending)
{
    Py_ssize_t k;               /* required by the IFLT macro */
    Py_ssize_t n;

    assert(lo < hi);
    *descending = 0;
    ++lo;
    if (lo == hi)
        /* Single element: trivially an ascending run of length 1. */
        return 1;

    /* The first pair decides the run's direction; extend while the
     * direction is maintained. */
    n = 2;
    IFLT(*lo, *(lo-1)) {
        *descending = 1;
        for (lo = lo+1; lo < hi; ++lo, ++n) {
            IFLT(*lo, *(lo-1))
                ;
            else
                break;
        }
    }
    else {
        for (lo = lo+1; lo < hi; ++lo, ++n) {
            IFLT(*lo, *(lo-1))
                break;
        }
    }

    return n;
fail:
    return -1;
}
1341
1342
/*
Locate the proper position of key in a sorted vector; if the vector contains
an element equal to key, return the position immediately to the left of
the leftmost equal element.  [gallop_right() does the same except returns
the position to the right of the rightmost equal element (if any).]

"a" is a sorted vector with n elements, starting at a[0].  n must be > 0.

"hint" is an index at which to begin the search, 0 <= hint < n.  The closer
hint is to the final result, the faster this runs.

The return value is the int k in 0..n such that

    a[k-1] < key <= a[k]

pretending that *(a-1) is minus infinity and a[n] is plus infinity.  IOW,
key belongs at index k; or, IOW, the first k elements of a should precede
key, and the last n-k should follow key.

Returns -1 on error.  See listsort.txt for info on the method.
*/
static Py_ssize_t
gallop_left(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
{
    Py_ssize_t ofs;
    Py_ssize_t lastofs;
    Py_ssize_t k;               /* required by the IFLT macro */

    assert(key && a && n > 0 && hint >= 0 && hint < n);

    /* Phase 1: gallop outward from a[hint] with exponentially growing
     * offsets (1, 3, 7, 15, ...) to bracket key between two offsets. */
    a += hint;
    lastofs = 0;
    ofs = 1;
    IFLT(*a, key) {
        /* a[hint] < key -- gallop right, until
         * a[hint + lastofs] < key <= a[hint + ofs]
         */
        const Py_ssize_t maxofs = n - hint;             /* &a[n-1] is highest */
        while (ofs < maxofs) {
            IFLT(a[ofs], key) {
                lastofs = ofs;
                assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
                ofs = (ofs << 1) + 1;
            }
            else                /* key <= a[hint + ofs] */
                break;
        }
        if (ofs > maxofs)
            ofs = maxofs;
        /* Translate back to offsets relative to &a[0]. */
        lastofs += hint;
        ofs += hint;
    }
    else {
        /* key <= a[hint] -- gallop left, until
         * a[hint - ofs] < key <= a[hint - lastofs]
         */
        const Py_ssize_t maxofs = hint + 1;             /* &a[0] is lowest */
        while (ofs < maxofs) {
            IFLT(*(a-ofs), key)
                break;
            /* key <= a[hint - ofs] */
            lastofs = ofs;
            assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
            ofs = (ofs << 1) + 1;
        }
        if (ofs > maxofs)
            ofs = maxofs;
        /* Translate back to positive offsets relative to &a[0]. */
        k = lastofs;
        lastofs = hint - ofs;
        ofs = hint - k;
    }
    a -= hint;

    assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
    /* Phase 2: now a[lastofs] < key <= a[ofs], so key belongs somewhere to
     * the right of lastofs but no farther right than ofs.  Do a binary
     * search, with invariant a[lastofs-1] < key <= a[ofs].
     */
    ++lastofs;
    while (lastofs < ofs) {
        Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);

        IFLT(a[m], key)
            lastofs = m+1;              /* a[m] < key */
        else
            ofs = m;                    /* key <= a[m] */
    }
    assert(lastofs == ofs);             /* so a[ofs-1] < key <= a[ofs] */
    return ofs;

fail:
    return -1;
}
1437
1438
/*
Exactly like gallop_left(), except that if key already exists in a[0:n],
finds the position immediately to the right of the rightmost equal value.

The return value is the int k in 0..n such that

    a[k-1] <= key < a[k]

or -1 if error.

The code duplication is massive, but this is enough different given that
we're sticking to "<" comparisons that it's much harder to follow if
written as one routine with yet another "left or right?" flag.
*/
static Py_ssize_t
gallop_right(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
{
    Py_ssize_t ofs;
    Py_ssize_t lastofs;
    Py_ssize_t k;               /* required by the IFLT macro */

    assert(key && a && n > 0 && hint >= 0 && hint < n);

    /* Phase 1: gallop outward from a[hint] to bracket key (mirror image
     * of gallop_left: here ties send us right, not left). */
    a += hint;
    lastofs = 0;
    ofs = 1;
    IFLT(key, *a) {
        /* key < a[hint] -- gallop left, until
         * a[hint - ofs] <= key < a[hint - lastofs]
         */
        const Py_ssize_t maxofs = hint + 1;             /* &a[0] is lowest */
        while (ofs < maxofs) {
            IFLT(key, *(a-ofs)) {
                lastofs = ofs;
                assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
                ofs = (ofs << 1) + 1;
            }
            else                /* a[hint - ofs] <= key */
                break;
        }
        if (ofs > maxofs)
            ofs = maxofs;
        /* Translate back to positive offsets relative to &a[0]. */
        k = lastofs;
        lastofs = hint - ofs;
        ofs = hint - k;
    }
    else {
        /* a[hint] <= key -- gallop right, until
         * a[hint + lastofs] <= key < a[hint + ofs]
         */
        const Py_ssize_t maxofs = n - hint;             /* &a[n-1] is highest */
        while (ofs < maxofs) {
            IFLT(key, a[ofs])
                break;
            /* a[hint + ofs] <= key */
            lastofs = ofs;
            assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
            ofs = (ofs << 1) + 1;
        }
        if (ofs > maxofs)
            ofs = maxofs;
        /* Translate back to offsets relative to &a[0]. */
        lastofs += hint;
        ofs += hint;
    }
    a -= hint;

    assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
    /* Phase 2: now a[lastofs] <= key < a[ofs], so key belongs somewhere to
     * the right of lastofs but no farther right than ofs.  Do a binary
     * search, with invariant a[lastofs-1] <= key < a[ofs].
     */
    ++lastofs;
    while (lastofs < ofs) {
        Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);

        IFLT(key, a[m])
            ofs = m;                    /* key < a[m] */
        else
            lastofs = m+1;              /* a[m] <= key */
    }
    assert(lastofs == ofs);             /* so a[ofs-1] <= key < a[ofs] */
    return ofs;

fail:
    return -1;
}
1526
1527
/* Conceptually a MergeState's constructor.  list_size is the number of
 * elements being sorted; has_keyfunc is true when a key function supplies
 * separate key objects (so temparray must hold keys AND values); lo is the
 * start of the slice being sorted (only its keys base address is recorded).
 */
static void
merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc,
           sortslice *lo)
{
    assert(ms != NULL);
    if (has_keyfunc) {
        /* The temporary space for merging will need at most half the list
         * size rounded up.  Use the minimum possible space so we can use the
         * rest of temparray for other things.  In particular, if there is
         * enough extra space, listsort() will use it to store the keys.
         */
        ms->alloced = (list_size + 1) / 2;

        /* ms->alloced describes how many keys will be stored at
           ms->temparray, but we also need to store the values.  Hence,
           ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */
        if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced)
            ms->alloced = MERGESTATE_TEMP_SIZE / 2;
        ms->a.values = &ms->temparray[ms->alloced];
    }
    else {
        ms->alloced = MERGESTATE_TEMP_SIZE;
        ms->a.values = NULL;
    }
    ms->a.keys = ms->temparray;
    ms->n = 0;                  /* empty pending-runs stack */
    ms->min_gallop = MIN_GALLOP;
    ms->listlen = list_size;
    ms->basekeys = lo->keys;
}
1558
1559
/* Free all the temp memory owned by the MergeState. This must be called
1560
* when you're done with a MergeState, and may be called before then if
1561
* you want to free the temp memory early.
1562
*/
1563
static void
1564
merge_freemem(MergeState *ms)
1565
{
1566
assert(ms != NULL);
1567
if (ms->a.keys != ms->temparray) {
1568
PyMem_Free(ms->a.keys);
1569
ms->a.keys = NULL;
1570
}
1571
}
1572
1573
/* Ensure enough temp memory for 'need' array slots is available.
 * Returns 0 on success and -1 if the memory can't be gotten.
 * When a values array is in use, twice the space is allocated so keys
 * and values can share one block (values start at &keys[need]).
 */
static int
merge_getmem(MergeState *ms, Py_ssize_t need)
{
    int multiplier;

    assert(ms != NULL);
    if (need <= ms->alloced)
        return 0;

    multiplier = ms->a.values != NULL ? 2 : 1;

    /* Don't realloc!  That can cost cycles to copy the old data, but
     * we don't care what's in the block.
     */
    merge_freemem(ms);
    /* Guard the multiplication below against Py_ssize_t overflow. */
    if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject *) / multiplier) {
        PyErr_NoMemory();
        return -1;
    }
    ms->a.keys = (PyObject **)PyMem_Malloc(multiplier * need
                                          * sizeof(PyObject *));
    if (ms->a.keys != NULL) {
        ms->alloced = need;
        if (ms->a.values != NULL)
            ms->a.values = &ms->a.keys[need];
        return 0;
    }
    PyErr_NoMemory();
    return -1;
}
#define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 :   \
                                merge_getmem(MS, NEED))
1608
1609
/* Merge the na elements starting at ssa with the nb elements starting at
 * ssb.keys = ssa.keys + na in a stable way, in-place.  na and nb must be > 0.
 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
 * should have na <= nb.  See listsort.txt for more info.  Return 0 if
 * successful, -1 if error.
 *
 * "lo" variant: the smaller (left) run is copied into temp storage and the
 * merge proceeds left-to-right, writing into the vacated slots.
 */
static Py_ssize_t
merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na,
         sortslice ssb, Py_ssize_t nb)
{
    Py_ssize_t k;
    sortslice dest;
    int result = -1;            /* guilty until proved innocent */
    Py_ssize_t min_gallop;

    assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
    assert(ssa.keys + na == ssb.keys);
    if (MERGE_GETMEM(ms, na) < 0)
        return -1;
    /* Copy run A aside; merge output overwrites A's (and then B's) slots. */
    sortslice_memcpy(&ms->a, 0, &ssa, 0, na);
    dest = ssa;
    ssa = ms->a;

    sortslice_copy_incr(&dest, &ssb);
    --nb;
    if (nb == 0)
        goto Succeed;
    if (na == 1)
        goto CopyB;

    min_gallop = ms->min_gallop;
    for (;;) {
        Py_ssize_t acount = 0;          /* # of times A won in a row */
        Py_ssize_t bcount = 0;          /* # of times B won in a row */

        /* Do the straightforward thing until (if ever) one run
         * appears to win consistently.
         */
        for (;;) {
            assert(na > 1 && nb > 0);
            k = ISLT(ssb.keys[0], ssa.keys[0]);
            if (k) {
                if (k < 0)
                    goto Fail;
                sortslice_copy_incr(&dest, &ssb);
                ++bcount;
                acount = 0;
                --nb;
                if (nb == 0)
                    goto Succeed;
                if (bcount >= min_gallop)
                    break;
            }
            else {
                sortslice_copy_incr(&dest, &ssa);
                ++acount;
                bcount = 0;
                --na;
                if (na == 1)
                    goto CopyB;
                if (acount >= min_gallop)
                    break;
            }
        }

        /* One run is winning so consistently that galloping may
         * be a huge win.  So try that, and continue galloping until
         * (if ever) neither run appears to be winning consistently
         * anymore.
         */
        ++min_gallop;
        do {
            assert(na > 1 && nb > 0);
            min_gallop -= min_gallop > 1;   /* reward staying in gallop mode */
            ms->min_gallop = min_gallop;
            k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0);
            acount = k;
            if (k) {
                if (k < 0)
                    goto Fail;
                sortslice_memcpy(&dest, 0, &ssa, 0, k);
                sortslice_advance(&dest, k);
                sortslice_advance(&ssa, k);
                na -= k;
                if (na == 1)
                    goto CopyB;
                /* na==0 is impossible now if the comparison
                 * function is consistent, but we can't assume
                 * that it is.
                 */
                if (na == 0)
                    goto Succeed;
            }
            sortslice_copy_incr(&dest, &ssb);
            --nb;
            if (nb == 0)
                goto Succeed;

            k = gallop_left(ms, ssa.keys[0], ssb.keys, nb, 0);
            bcount = k;
            if (k) {
                if (k < 0)
                    goto Fail;
                /* B's slice overlaps dest in the original array: memmove. */
                sortslice_memmove(&dest, 0, &ssb, 0, k);
                sortslice_advance(&dest, k);
                sortslice_advance(&ssb, k);
                nb -= k;
                if (nb == 0)
                    goto Succeed;
            }
            sortslice_copy_incr(&dest, &ssa);
            --na;
            if (na == 1)
                goto CopyB;
        } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
        ++min_gallop;           /* penalize it for leaving galloping mode */
        ms->min_gallop = min_gallop;
    }
Succeed:
    result = 0;
Fail:
    /* Whether merging succeeded or a comparison raised, flush what's
     * left of A back so the list is a permutation of its input. */
    if (na)
        sortslice_memcpy(&dest, 0, &ssa, 0, na);
    return result;
CopyB:
    assert(na == 1 && nb > 0);
    /* The last element of ssa belongs at the end of the merge. */
    sortslice_memmove(&dest, 0, &ssb, 0, nb);
    sortslice_copy(&dest, nb, &ssa, 0);
    return 0;
}
1740
1741
/* Merge the na elements starting at pa with the nb elements starting at
 * ssb.keys = ssa.keys + na in a stable way, in-place.  na and nb must be > 0.
 * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
 * should have na >= nb.  See listsort.txt for more info.  Return 0 if
 * successful, -1 if error.
 *
 * "hi" variant: the smaller (right) run is copied into temp storage and the
 * merge proceeds right-to-left, writing into the vacated slots.
 */
static Py_ssize_t
merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na,
         sortslice ssb, Py_ssize_t nb)
{
    Py_ssize_t k;
    sortslice dest, basea, baseb;
    int result = -1;            /* guilty until proved innocent */
    Py_ssize_t min_gallop;

    assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
    assert(ssa.keys + na == ssb.keys);
    if (MERGE_GETMEM(ms, nb) < 0)
        return -1;
    /* Copy run B aside; ssa/ssb then track the LAST pending element of
     * each run, since the merge fills dest from the right end down. */
    dest = ssb;
    sortslice_advance(&dest, nb-1);
    sortslice_memcpy(&ms->a, 0, &ssb, 0, nb);
    basea = ssa;
    baseb = ms->a;
    ssb.keys = ms->a.keys + nb - 1;
    if (ssb.values != NULL)
        ssb.values = ms->a.values + nb - 1;
    sortslice_advance(&ssa, na - 1);

    sortslice_copy_decr(&dest, &ssa);
    --na;
    if (na == 0)
        goto Succeed;
    if (nb == 1)
        goto CopyA;

    min_gallop = ms->min_gallop;
    for (;;) {
        Py_ssize_t acount = 0;          /* # of times A won in a row */
        Py_ssize_t bcount = 0;          /* # of times B won in a row */

        /* Do the straightforward thing until (if ever) one run
         * appears to win consistently.
         */
        for (;;) {
            assert(na > 0 && nb > 1);
            k = ISLT(ssb.keys[0], ssa.keys[0]);
            if (k) {
                if (k < 0)
                    goto Fail;
                sortslice_copy_decr(&dest, &ssa);
                ++acount;
                bcount = 0;
                --na;
                if (na == 0)
                    goto Succeed;
                if (acount >= min_gallop)
                    break;
            }
            else {
                sortslice_copy_decr(&dest, &ssb);
                ++bcount;
                acount = 0;
                --nb;
                if (nb == 1)
                    goto CopyA;
                if (bcount >= min_gallop)
                    break;
            }
        }

        /* One run is winning so consistently that galloping may
         * be a huge win.  So try that, and continue galloping until
         * (if ever) neither run appears to be winning consistently
         * anymore.
         */
        ++min_gallop;
        do {
            assert(na > 0 && nb > 1);
            min_gallop -= min_gallop > 1;   /* reward staying in gallop mode */
            ms->min_gallop = min_gallop;
            k = gallop_right(ms, ssb.keys[0], basea.keys, na, na-1);
            if (k < 0)
                goto Fail;
            k = na - k;                 /* # of trailing A elements > ssb.keys[0] */
            acount = k;
            if (k) {
                sortslice_advance(&dest, -k);
                sortslice_advance(&ssa, -k);
                sortslice_memmove(&dest, 1, &ssa, 1, k);
                na -= k;
                if (na == 0)
                    goto Succeed;
            }
            sortslice_copy_decr(&dest, &ssb);
            --nb;
            if (nb == 1)
                goto CopyA;

            k = gallop_left(ms, ssa.keys[0], baseb.keys, nb, nb-1);
            if (k < 0)
                goto Fail;
            k = nb - k;                 /* # of trailing B elements >= ssa.keys[0] */
            bcount = k;
            if (k) {
                sortslice_advance(&dest, -k);
                sortslice_advance(&ssb, -k);
                sortslice_memcpy(&dest, 1, &ssb, 1, k);
                nb -= k;
                if (nb == 1)
                    goto CopyA;
                /* nb==0 is impossible now if the comparison
                 * function is consistent, but we can't assume
                 * that it is.
                 */
                if (nb == 0)
                    goto Succeed;
            }
            sortslice_copy_decr(&dest, &ssa);
            --na;
            if (na == 0)
                goto Succeed;
        } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
        ++min_gallop;           /* penalize it for leaving galloping mode */
        ms->min_gallop = min_gallop;
    }
Succeed:
    result = 0;
Fail:
    /* Whether merging succeeded or a comparison raised, flush what's
     * left of B back so the list is a permutation of its input. */
    if (nb)
        sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb);
    return result;
CopyA:
    assert(nb == 1 && na > 0);
    /* The first element of ssb belongs at the front of the merge. */
    sortslice_memmove(&dest, 1-na, &ssa, 1-na, na);
    sortslice_advance(&dest, -na);
    sortslice_advance(&ssa, -na);
    sortslice_copy(&dest, 0, &ssb, 0);
    return 0;
}
1882
1883
/* Merge the two runs at stack indices i and i+1.
 * Returns 0 on success, -1 on error.
 * i must be the second- or third-from-top run; the stack entry for run
 * i+1 is consumed (ms->n is decremented) before the merge proper runs.
 */
static Py_ssize_t
merge_at(MergeState *ms, Py_ssize_t i)
{
    sortslice ssa, ssb;
    Py_ssize_t na, nb;
    Py_ssize_t k;

    assert(ms != NULL);
    assert(ms->n >= 2);
    assert(i >= 0);
    assert(i == ms->n - 2 || i == ms->n - 3);

    ssa = ms->pending[i].base;
    na = ms->pending[i].len;
    ssb = ms->pending[i+1].base;
    nb = ms->pending[i+1].len;
    assert(na > 0 && nb > 0);
    assert(ssa.keys + na == ssb.keys);

    /* Record the length of the combined runs; if i is the 3rd-last
     * run now, also slide over the last run (which isn't involved
     * in this merge).  The current run i+1 goes away in any case.
     */
    ms->pending[i].len = na + nb;
    if (i == ms->n - 3)
        ms->pending[i+1] = ms->pending[i+2];
    --ms->n;

    /* Where does b start in a?  Elements in a before that can be
     * ignored (already in place).
     */
    k = gallop_right(ms, *ssb.keys, ssa.keys, na, 0);
    if (k < 0)
        return -1;
    sortslice_advance(&ssa, k);
    na -= k;
    if (na == 0)
        return 0;

    /* Where does a end in b?  Elements in b after that can be
     * ignored (already in place).
     */
    nb = gallop_left(ms, ssa.keys[na-1], ssb.keys, nb, nb-1);
    if (nb <= 0)
        /* nb==0: runs already in order; nb<0: comparison error. */
        return nb;

    /* Merge what remains of the runs, using a temp array with
     * min(na, nb) elements.
     */
    if (na <= nb)
        return merge_lo(ms, ssa, na, ssb, nb);
    else
        return merge_hi(ms, ssa, na, ssb, nb);
}
1940
1941
/* Two adjacent runs begin at index s1. The first run has length n1, and
 * the second run (starting at index s1+n1) has length n2. The list has total
 * length n.
 * Compute the "power" of the first run. See listsort.txt for details.
 * The power is the depth of the boundary between the runs in powersort's
 * conceptual binary merge tree: the position of the first differing bit
 * in the binary expansions of the runs' midpoints divided by n.
 */
static int
powerloop(Py_ssize_t s1, Py_ssize_t n1, Py_ssize_t n2, Py_ssize_t n)
{
    int result = 0;
    assert(s1 >= 0);
    assert(n1 > 0 && n2 > 0);
    assert(s1 + n1 + n2 <= n);
    /* midpoints a and b:
     * a = s1 + n1/2
     * b = s1 + n1 + n2/2 = a + (n1 + n2)/2
     *
     * Those may not be integers, though, because of the "/2". So we work with
     * 2*a and 2*b instead, which are necessarily integers. It makes no
     * difference to the outcome, since the bits in the expansion of (2*i)/n
     * are merely shifted one position from those of i/n.
     */
    Py_ssize_t a = 2 * s1 + n1;  /* 2*a */
    Py_ssize_t b = a + n1 + n2;  /* 2*b */
    /* Emulate a/n and b/n one bit a time, until bits differ. */
    for (;;) {
        ++result;
        if (a >= n) {  /* both quotient bits are 1 */
            assert(b >= a);
            a -= n;
            b -= n;
        }
        else if (b >= n) {  /* a/n bit is 0, b/n bit is 1 */
            break;
        }   /* else both quotient bits are 0 */
        assert(a < b && b < n);
        a <<= 1;
        b <<= 1;
    }
    return result;
}
1981
1982
/* The next run has been identified, of length n2.
 * If there's already a run on the stack, apply the "powersort" merge strategy:
 * compute the topmost run's "power" (depth in a conceptual binary merge tree)
 * and merge adjacent runs on the stack with greater power. See listsort.txt
 * for more info.
 *
 * It's the caller's responsibility to push the new run on the stack when this
 * returns.
 *
 * Returns 0 on success, -1 on error.
 */
static int
found_new_run(MergeState *ms, Py_ssize_t n2)
{
    assert(ms);
    if (ms->n) {
        assert(ms->n > 0);
        struct s_slice *p = ms->pending;
        Py_ssize_t s1 = p[ms->n - 1].base.keys - ms->basekeys; /* start index */
        Py_ssize_t n1 = p[ms->n - 1].len;
        int power = powerloop(s1, n1, n2, ms->listlen);
        /* Runs deeper in the merge tree than the new boundary must be
         * merged now; each merge_at pops one run off the stack. */
        while (ms->n > 1 && p[ms->n - 2].power > power) {
            if (merge_at(ms, ms->n - 2) < 0)
                return -1;
        }
        assert(ms->n < 2 || p[ms->n - 2].power < power);
        p[ms->n - 1].power = power;
    }
    return 0;
}
2012
2013
/* Regardless of invariants, merge all runs on the stack until only one
 * remains.  This is used at the end of the mergesort.
 *
 * Returns 0 on success, -1 on error.
 */
static int
merge_force_collapse(MergeState *ms)
{
    struct s_slice *p = ms->pending;

    assert(ms);
    while (ms->n > 1) {
        Py_ssize_t n = ms->n - 2;
        /* Prefer merging the top run into its smaller neighbor: if the
         * run below the pair is shorter than the top run, merge one
         * level deeper instead, to keep merges balanced. */
        if (n > 0 && p[n-1].len < p[n+1].len)
            --n;
        if (merge_at(ms, n) < 0)
            return -1;
    }
    return 0;
}
2033
2034
/* Compute a good value for the minimum run length; natural runs shorter
2035
* than this are boosted artificially via binary insertion.
2036
*
2037
* If n < 64, return n (it's too small to bother with fancy stuff).
2038
* Else if n is an exact power of 2, return 32.
2039
* Else return an int k, 32 <= k <= 64, such that n/k is close to, but
2040
* strictly less than, an exact power of 2.
2041
*
2042
* See listsort.txt for more info.
2043
*/
2044
static Py_ssize_t
2045
merge_compute_minrun(Py_ssize_t n)
2046
{
2047
Py_ssize_t r = 0; /* becomes 1 if any 1 bits are shifted off */
2048
2049
assert(n >= 0);
2050
while (n >= 64) {
2051
r |= n & 1;
2052
n >>= 1;
2053
}
2054
return n + r;
2055
}
2056
2057
static void
2058
reverse_sortslice(sortslice *s, Py_ssize_t n)
2059
{
2060
reverse_slice(s->keys, &s->keys[n]);
2061
if (s->values != NULL)
2062
reverse_slice(s->values, &s->values[n]);
2063
}
2064
2065
/* Here we define custom comparison functions to optimize for the cases one commonly
 * encounters in practice: homogeneous lists, often of one of the basic types. */

/* This struct holds the comparison function and helper functions
 * selected in the pre-sort check. */

/* These are the special case compare functions.
 * ms->key_compare will always point to one of these: */

/* Heterogeneous compare: default, always safe to fall back on.
 * Returns 1 if v < w, 0 if not, -1 on error (matching ISLT's contract). */
static int
safe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
{
    /* No assumptions necessary! */
    return PyObject_RichCompareBool(v, w, Py_LT);
}
2081
2082
/* Homogeneous compare: safe for any two comparable objects of the same type.
 * (ms->key_richcompare is set to ob_type->tp_richcompare in the
 * pre-sort check.)
 */
static int
unsafe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
{
    PyObject *res_obj; int res;

    /* No assumptions, because we check first: */
    if (Py_TYPE(v)->tp_richcompare != ms->key_richcompare)
        return PyObject_RichCompareBool(v, w, Py_LT);

    /* Call tp_richcompare directly, skipping the generic dispatch
     * (and the reflected-comparison dance) the slow path performs. */
    assert(ms->key_richcompare != NULL);
    res_obj = (*(ms->key_richcompare))(v, w, Py_LT);

    if (res_obj == Py_NotImplemented) {
        Py_DECREF(res_obj);
        return PyObject_RichCompareBool(v, w, Py_LT);
    }
    if (res_obj == NULL)
        return -1;

    if (PyBool_Check(res_obj)) {
        res = (res_obj == Py_True);
    }
    else {
        res = PyObject_IsTrue(res_obj);
    }
    Py_DECREF(res_obj);

    /* Note that we can't assert
     *     res == PyObject_RichCompareBool(v, w, Py_LT);
     * because of evil compare functions like this:
     *     lambda a, b:  int(random.random() * 3) - 1)
     * (which is actually in test_sort.py) */
    return res;
}
2120
2121
/* Latin string compare: safe for any two latin (one byte per char) strings. */
2122
static int
2123
unsafe_latin_compare(PyObject *v, PyObject *w, MergeState *ms)
2124
{
2125
Py_ssize_t len;
2126
int res;
2127
2128
/* Modified from Objects/unicodeobject.c:unicode_compare, assuming: */
2129
assert(Py_IS_TYPE(v, &PyUnicode_Type));
2130
assert(Py_IS_TYPE(w, &PyUnicode_Type));
2131
assert(PyUnicode_KIND(v) == PyUnicode_KIND(w));
2132
assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND);
2133
2134
len = Py_MIN(PyUnicode_GET_LENGTH(v), PyUnicode_GET_LENGTH(w));
2135
res = memcmp(PyUnicode_DATA(v), PyUnicode_DATA(w), len);
2136
2137
res = (res != 0 ?
2138
res < 0 :
2139
PyUnicode_GET_LENGTH(v) < PyUnicode_GET_LENGTH(w));
2140
2141
assert(res == PyObject_RichCompareBool(v, w, Py_LT));;
2142
return res;
2143
}
2144
2145
/* Bounded int compare: compare any two longs that fit in a single machine word.
 * The pre-sort check guarantees both operands are "compact" ints, so a
 * plain signed comparison of their cached values is exact. */
static int
unsafe_long_compare(PyObject *v, PyObject *w, MergeState *ms)
{
    PyLongObject *vl, *wl;
    intptr_t v0, w0;
    int res;

    /* Modified from Objects/longobject.c:long_compare, assuming: */
    assert(Py_IS_TYPE(v, &PyLong_Type));
    assert(Py_IS_TYPE(w, &PyLong_Type));
    assert(_PyLong_IsCompact((PyLongObject *)v));
    assert(_PyLong_IsCompact((PyLongObject *)w));

    vl = (PyLongObject*)v;
    wl = (PyLongObject*)w;

    v0 = _PyLong_CompactValue(vl);
    w0 = _PyLong_CompactValue(wl);

    res = v0 < w0;
    assert(res == PyObject_RichCompareBool(v, w, Py_LT));
    return res;
}
2169
2170
/* Float compare: compare any two floats. */
2171
static int
2172
unsafe_float_compare(PyObject *v, PyObject *w, MergeState *ms)
2173
{
2174
int res;
2175
2176
/* Modified from Objects/floatobject.c:float_richcompare, assuming: */
2177
assert(Py_IS_TYPE(v, &PyFloat_Type));
2178
assert(Py_IS_TYPE(w, &PyFloat_Type));
2179
2180
res = PyFloat_AS_DOUBLE(v) < PyFloat_AS_DOUBLE(w);
2181
assert(res == PyObject_RichCompareBool(v, w, Py_LT));
2182
return res;
2183
}
2184
2185
/* Tuple compare: compare *any* two tuples, using
 * ms->tuple_elem_compare to compare the first elements, which is set
 * using the same pre-sort check as we use for ms->key_compare,
 * but run on the list [x[0] for x in L]. This allows us to optimize compares
 * on two levels (as long as [x[0] for x in L] is type-homogeneous.) The idea is
 * that most tuple compares don't involve x[1:]. */
static int
unsafe_tuple_compare(PyObject *v, PyObject *w, MergeState *ms)
{
    PyTupleObject *vt, *wt;
    Py_ssize_t i, vlen, wlen;
    int k;

    /* Modified from Objects/tupleobject.c:tuplerichcompare, assuming: */
    assert(Py_IS_TYPE(v, &PyTuple_Type));
    assert(Py_IS_TYPE(w, &PyTuple_Type));
    assert(Py_SIZE(v) > 0);
    assert(Py_SIZE(w) > 0);

    vt = (PyTupleObject *)v;
    wt = (PyTupleObject *)w;

    vlen = Py_SIZE(vt);
    wlen = Py_SIZE(wt);

    /* Find the first index where the tuples differ. */
    for (i = 0; i < vlen && i < wlen; i++) {
        k = PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_EQ);
        if (k < 0)
            return -1;
        if (!k)
            break;
    }

    /* One tuple is a prefix of the other: the shorter one sorts first. */
    if (i >= vlen || i >= wlen)
        return vlen < wlen;

    /* Only the first elements were pre-checked for type homogeneity, so
     * only they may go through the optimized comparator. */
    if (i == 0)
        return ms->tuple_elem_compare(vt->ob_item[i], wt->ob_item[i], ms);
    else
        return PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_LT);
}
2226
2227
/* An adaptive, stable, natural mergesort. See listsort.txt.
2228
* Returns Py_None on success, NULL on error. Even in case of error, the
2229
* list will be some permutation of its input state (nothing is lost or
2230
* duplicated).
2231
*/
2232
/*[clinic input]
2233
list.sort
2234
2235
*
2236
key as keyfunc: object = None
2237
reverse: bool = False
2238
2239
Sort the list in ascending order and return None.
2240
2241
The sort is in-place (i.e. the list itself is modified) and stable (i.e. the
2242
order of two equal elements is maintained).
2243
2244
If a key function is given, apply it once to each list item and sort them,
2245
ascending or descending, according to their function values.
2246
2247
The reverse flag can be set to sort in descending order.
2248
[clinic start generated code]*/
2249
2250
/* Implementation of list.sort(): an adaptive, stable, natural mergesort
 * (timsort; see listsort.txt).  Returns Py_None on success, NULL on error.
 * Even on error the list is some permutation of its input state. */
static PyObject *
list_sort_impl(PyListObject *self, PyObject *keyfunc, int reverse)
/*[clinic end generated code: output=57b9f9c5e23fbe42 input=a74c4cd3ec6b5c08]*/
{
    MergeState ms;
    Py_ssize_t nremaining;
    Py_ssize_t minrun;
    sortslice lo;
    Py_ssize_t saved_ob_size, saved_allocated;
    PyObject **saved_ob_item;
    PyObject **final_ob_item;
    PyObject *result = NULL;            /* guilty until proved innocent */
    Py_ssize_t i;
    PyObject **keys;

    assert(self != NULL);
    assert(PyList_Check(self));
    if (keyfunc == Py_None)
        keyfunc = NULL;

    /* The list is temporarily made empty, so that mutations performed
     * by comparison functions can't affect the slice of memory we're
     * sorting (allowing mutations during sorting is a core-dump
     * factory, since ob_item may change).
     */
    saved_ob_size = Py_SIZE(self);
    saved_ob_item = self->ob_item;
    saved_allocated = self->allocated;
    Py_SET_SIZE(self, 0);
    self->ob_item = NULL;
    self->allocated = -1; /* any operation will reset it to >= 0 */

    if (keyfunc == NULL) {
        /* No key function: sort the items directly. */
        keys = NULL;
        lo.keys = saved_ob_item;
        lo.values = NULL;
    }
    else {
        if (saved_ob_size < MERGESTATE_TEMP_SIZE/2)
            /* Leverage stack space we allocated but won't otherwise use */
            keys = &ms.temparray[saved_ob_size+1];
        else {
            keys = PyMem_Malloc(sizeof(PyObject *) * saved_ob_size);
            if (keys == NULL) {
                PyErr_NoMemory();
                goto keyfunc_fail;
            }
        }

        /* Apply the key function once per item, up front. */
        for (i = 0; i < saved_ob_size ; i++) {
            keys[i] = PyObject_CallOneArg(keyfunc, saved_ob_item[i]);
            if (keys[i] == NULL) {
                /* Unwind the keys computed so far. */
                for (i=i-1 ; i>=0 ; i--)
                    Py_DECREF(keys[i]);
                if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
                    PyMem_Free(keys);
                goto keyfunc_fail;
            }
        }

        lo.keys = keys;
        lo.values = saved_ob_item;
    }


    /* The pre-sort check: here's where we decide which compare function to use.
     * How much optimization is safe? We test for homogeneity with respect to
     * several properties that are expensive to check at compare-time, and
     * set ms appropriately. */
    if (saved_ob_size > 1) {
        /* Assume the first element is representative of the whole list. */
        int keys_are_in_tuples = (Py_IS_TYPE(lo.keys[0], &PyTuple_Type) &&
                                  Py_SIZE(lo.keys[0]) > 0);

        PyTypeObject* key_type = (keys_are_in_tuples ?
                                  Py_TYPE(PyTuple_GET_ITEM(lo.keys[0], 0)) :
                                  Py_TYPE(lo.keys[0]));

        int keys_are_all_same_type = 1;
        int strings_are_latin = 1;
        int ints_are_bounded = 1;

        /* Prove that assumption by checking every key. */
        for (i=0; i < saved_ob_size; i++) {

            if (keys_are_in_tuples &&
                !(Py_IS_TYPE(lo.keys[i], &PyTuple_Type) && Py_SIZE(lo.keys[i]) != 0)) {
                keys_are_in_tuples = 0;
                keys_are_all_same_type = 0;
                break;
            }

            /* Note: for lists of tuples, key is the first element of the tuple
             * lo.keys[i], not lo.keys[i] itself! We verify type-homogeneity
             * for lists of tuples in the if-statement directly above. */
            PyObject *key = (keys_are_in_tuples ?
                             PyTuple_GET_ITEM(lo.keys[i], 0) :
                             lo.keys[i]);

            if (!Py_IS_TYPE(key, key_type)) {
                keys_are_all_same_type = 0;
                /* If keys are in tuple we must loop over the whole list to make
                   sure all items are tuples */
                if (!keys_are_in_tuples) {
                    break;
                }
            }

            if (keys_are_all_same_type) {
                if (key_type == &PyLong_Type &&
                    ints_are_bounded &&
                    !_PyLong_IsCompact((PyLongObject *)key)) {

                    ints_are_bounded = 0;
                }
                else if (key_type == &PyUnicode_Type &&
                         strings_are_latin &&
                         PyUnicode_KIND(key) != PyUnicode_1BYTE_KIND) {

                    strings_are_latin = 0;
                }
            }
        }

        /* Choose the best compare, given what we now know about the keys. */
        if (keys_are_all_same_type) {

            if (key_type == &PyUnicode_Type && strings_are_latin) {
                ms.key_compare = unsafe_latin_compare;
            }
            else if (key_type == &PyLong_Type && ints_are_bounded) {
                ms.key_compare = unsafe_long_compare;
            }
            else if (key_type == &PyFloat_Type) {
                ms.key_compare = unsafe_float_compare;
            }
            else if ((ms.key_richcompare = key_type->tp_richcompare) != NULL) {
                ms.key_compare = unsafe_object_compare;
            }
            else {
                ms.key_compare = safe_object_compare;
            }
        }
        else {
            ms.key_compare = safe_object_compare;
        }

        if (keys_are_in_tuples) {
            /* Make sure we're not dealing with tuples of tuples
             * (remember: here, key_type refers list [key[0] for key in keys]) */
            if (key_type == &PyTuple_Type) {
                ms.tuple_elem_compare = safe_object_compare;
            }
            else {
                ms.tuple_elem_compare = ms.key_compare;
            }

            ms.key_compare = unsafe_tuple_compare;
        }
    }
    /* End of pre-sort check: ms is now set properly! */

    merge_init(&ms, saved_ob_size, keys != NULL, &lo);

    nremaining = saved_ob_size;
    if (nremaining < 2)
        goto succeed;

    /* Reverse sort stability achieved by initially reversing the list,
    applying a stable forward sort, then reversing the final result. */
    if (reverse) {
        if (keys != NULL)
            reverse_slice(&keys[0], &keys[saved_ob_size]);
        reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]);
    }

    /* March over the array once, left to right, finding natural runs,
     * and extending short natural runs to minrun elements.
     */
    minrun = merge_compute_minrun(nremaining);
    do {
        int descending;
        Py_ssize_t n;

        /* Identify next run. */
        n = count_run(&ms, lo.keys, lo.keys + nremaining, &descending);
        if (n < 0)
            goto fail;
        if (descending)
            reverse_sortslice(&lo, n);
        /* If short, extend to min(minrun, nremaining). */
        if (n < minrun) {
            const Py_ssize_t force = nremaining <= minrun ?
                              nremaining : minrun;
            if (binarysort(&ms, lo, lo.keys + force, lo.keys + n) < 0)
                goto fail;
            n = force;
        }
        /* Maybe merge pending runs. */
        assert(ms.n == 0 || ms.pending[ms.n -1].base.keys +
                            ms.pending[ms.n-1].len == lo.keys);
        if (found_new_run(&ms, n) < 0)
            goto fail;
        /* Push new run on stack. */
        assert(ms.n < MAX_MERGE_PENDING);
        ms.pending[ms.n].base = lo;
        ms.pending[ms.n].len = n;
        ++ms.n;
        /* Advance to find next run. */
        sortslice_advance(&lo, n);
        nremaining -= n;
    } while (nremaining);

    if (merge_force_collapse(&ms) < 0)
        goto fail;
    assert(ms.n == 1);
    assert(keys == NULL
           ? ms.pending[0].base.keys == saved_ob_item
           : ms.pending[0].base.keys == &keys[0]);
    assert(ms.pending[0].len == saved_ob_size);
    lo = ms.pending[0].base;

succeed:
    result = Py_None;
fail:
    if (keys != NULL) {
        for (i = 0; i < saved_ob_size; i++)
            Py_DECREF(keys[i]);
        if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
            PyMem_Free(keys);
    }

    if (self->allocated != -1 && result != NULL) {
        /* The user mucked with the list during the sort,
         * and we don't already have another error to report.
         */
        PyErr_SetString(PyExc_ValueError, "list modified during sort");
        result = NULL;
    }

    if (reverse && saved_ob_size > 1)
        reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size);

    merge_freemem(&ms);

keyfunc_fail:
    /* Restore the saved buffer; free whatever the user may have stuffed
     * into the (temporarily emptied) list during the sort. */
    final_ob_item = self->ob_item;
    i = Py_SIZE(self);
    Py_SET_SIZE(self, saved_ob_size);
    self->ob_item = saved_ob_item;
    self->allocated = saved_allocated;
    if (final_ob_item != NULL) {
        /* we cannot use _list_clear() for this because it does not
           guarantee that the list is really empty when it returns */
        while (--i >= 0) {
            Py_XDECREF(final_ob_item[i]);
        }
        PyMem_Free(final_ob_item);
    }
    return Py_XNewRef(result);
}
2511
#undef IFLT
2512
#undef ISLT
2513
2514
int
2515
PyList_Sort(PyObject *v)
2516
{
2517
if (v == NULL || !PyList_Check(v)) {
2518
PyErr_BadInternalCall();
2519
return -1;
2520
}
2521
v = list_sort_impl((PyListObject *)v, NULL, 0);
2522
if (v == NULL)
2523
return -1;
2524
Py_DECREF(v);
2525
return 0;
2526
}
2527
2528
/*[clinic input]
2529
list.reverse
2530
2531
Reverse *IN PLACE*.
2532
[clinic start generated code]*/
2533
2534
static PyObject *
2535
list_reverse_impl(PyListObject *self)
2536
/*[clinic end generated code: output=482544fc451abea9 input=eefd4c3ae1bc9887]*/
2537
{
2538
if (Py_SIZE(self) > 1)
2539
reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
2540
Py_RETURN_NONE;
2541
}
2542
2543
int
2544
PyList_Reverse(PyObject *v)
2545
{
2546
PyListObject *self = (PyListObject *)v;
2547
2548
if (v == NULL || !PyList_Check(v)) {
2549
PyErr_BadInternalCall();
2550
return -1;
2551
}
2552
if (Py_SIZE(self) > 1)
2553
reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
2554
return 0;
2555
}
2556
2557
/* Return a new tuple holding the list's current items. */
PyObject *
PyList_AsTuple(PyObject *v)
{
    if (v == NULL || !PyList_Check(v)) {
        PyErr_BadInternalCall();
        return NULL;
    }
    PyListObject *list = (PyListObject *)v;
    return _PyTuple_FromArray(list->ob_item, Py_SIZE(list));
}
2566
2567
/* Build a new list of length n from 'src', stealing a reference to every
 * element.  On allocation failure the stolen references are released, so
 * the caller's array is fully consumed either way. */
PyObject *
_PyList_FromArraySteal(PyObject *const *src, Py_ssize_t n)
{
    if (n == 0) {
        return PyList_New(0);
    }

    PyListObject *list = (PyListObject *)PyList_New(n);
    if (list == NULL) {
        /* Still honor the stealing contract on failure. */
        for (Py_ssize_t i = 0; i < n; i++) {
            Py_DECREF(src[i]);
        }
        return NULL;
    }

    /* Raw copy is fine: the references are transferred, not duplicated. */
    PyObject **dst = list->ob_item;
    memcpy(dst, src, n * sizeof(PyObject *));

    return (PyObject *)list;
}
2587
2588
/*[clinic input]
2589
list.index
2590
2591
value: object
2592
start: slice_index(accept={int}) = 0
2593
stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize
2594
/
2595
2596
Return first index of value.
2597
2598
Raises ValueError if the value is not present.
2599
[clinic start generated code]*/
2600
2601
/* list.index(value, start, stop): return the first index of value in
 * self[start:stop], or raise ValueError if absent. */
static PyObject *
list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start,
                Py_ssize_t stop)
/*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/
{
    Py_ssize_t i;

    /* Negative indices count from the end, then clamp to 0 (slice rules). */
    if (start < 0) {
        start += Py_SIZE(self);
        if (start < 0)
            start = 0;
    }
    if (stop < 0) {
        stop += Py_SIZE(self);
        if (stop < 0)
            stop = 0;
    }
    /* Re-test Py_SIZE each pass: the comparison can mutate the list. */
    for (i = start; i < stop && i < Py_SIZE(self); i++) {
        PyObject *obj = self->ob_item[i];
        /* Hold a reference across the comparison, which may run
         * arbitrary code. */
        Py_INCREF(obj);
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
        Py_DECREF(obj);
        if (cmp > 0)
            return PyLong_FromSsize_t(i);
        else if (cmp < 0)
            return NULL;
    }
    PyErr_Format(PyExc_ValueError, "%R is not in list", value);
    return NULL;
}
2631
2632
/*[clinic input]
2633
list.count
2634
2635
value: object
2636
/
2637
2638
Return number of occurrences of value.
2639
[clinic start generated code]*/
2640
2641
static PyObject *
2642
list_count(PyListObject *self, PyObject *value)
2643
/*[clinic end generated code: output=b1f5d284205ae714 input=3bdc3a5e6f749565]*/
2644
{
2645
Py_ssize_t count = 0;
2646
Py_ssize_t i;
2647
2648
for (i = 0; i < Py_SIZE(self); i++) {
2649
PyObject *obj = self->ob_item[i];
2650
if (obj == value) {
2651
count++;
2652
continue;
2653
}
2654
Py_INCREF(obj);
2655
int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
2656
Py_DECREF(obj);
2657
if (cmp > 0)
2658
count++;
2659
else if (cmp < 0)
2660
return NULL;
2661
}
2662
return PyLong_FromSsize_t(count);
2663
}
2664
2665
/*[clinic input]
2666
list.remove
2667
2668
value: object
2669
/
2670
2671
Remove first occurrence of value.
2672
2673
Raises ValueError if the value is not present.
2674
[clinic start generated code]*/
2675
2676
/* list.remove(value): delete the first item equal to value, or raise
 * ValueError if no such item exists. */
static PyObject *
list_remove(PyListObject *self, PyObject *value)
/*[clinic end generated code: output=f087e1951a5e30d1 input=2dc2ba5bb2fb1f82]*/
{
    Py_ssize_t i;

    for (i = 0; i < Py_SIZE(self); i++) {
        PyObject *obj = self->ob_item[i];
        /* Hold a reference across the comparison, which may run
         * arbitrary code and mutate the list. */
        Py_INCREF(obj);
        int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
        Py_DECREF(obj);
        if (cmp > 0) {
            /* Found it: delete the one-item slice [i:i+1]. */
            if (list_ass_slice(self, i, i+1,
                               (PyObject *)NULL) == 0)
                Py_RETURN_NONE;
            return NULL;
        }
        else if (cmp < 0)
            return NULL;
    }
    PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
    return NULL;
}
2699
2700
static int
2701
list_traverse(PyListObject *o, visitproc visit, void *arg)
2702
{
2703
Py_ssize_t i;
2704
2705
for (i = Py_SIZE(o); --i >= 0; )
2706
Py_VISIT(o->ob_item[i]);
2707
return 0;
2708
}
2709
2710
/* Rich comparison for lists: lexicographic, with EQ/NE fast paths. */
static PyObject *
list_richcompare(PyObject *v, PyObject *w, int op)
{
    PyListObject *vl, *wl;
    Py_ssize_t i;

    if (!PyList_Check(v) || !PyList_Check(w))
        Py_RETURN_NOTIMPLEMENTED;

    vl = (PyListObject *)v;
    wl = (PyListObject *)w;

    if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) {
        /* Shortcut: if the lengths differ, the lists differ */
        if (op == Py_EQ)
            Py_RETURN_FALSE;
        else
            Py_RETURN_TRUE;
    }

    /* Search for the first index where items are different */
    for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) {
        PyObject *vitem = vl->ob_item[i];
        PyObject *witem = wl->ob_item[i];
        if (vitem == witem) {
            /* Identity implies equality; skip the rich comparison. */
            continue;
        }

        /* Hold references: the comparison may mutate either list. */
        Py_INCREF(vitem);
        Py_INCREF(witem);
        int k = PyObject_RichCompareBool(vitem, witem, Py_EQ);
        Py_DECREF(vitem);
        Py_DECREF(witem);
        if (k < 0)
            return NULL;
        if (!k)
            break;
    }

    if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) {
        /* No more items to compare -- compare sizes */
        Py_RETURN_RICHCOMPARE(Py_SIZE(vl), Py_SIZE(wl), op);
    }

    /* We have an item that differs -- shortcuts for EQ/NE */
    if (op == Py_EQ) {
        Py_RETURN_FALSE;
    }
    if (op == Py_NE) {
        Py_RETURN_TRUE;
    }

    /* Compare the final item again using the proper operator */
    return PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op);
}
2765
2766
/*[clinic input]
2767
list.__init__
2768
2769
iterable: object(c_default="NULL") = ()
2770
/
2771
2772
Built-in mutable sequence.
2773
2774
If no argument is given, the constructor creates a new empty list.
2775
The argument must be an iterable if specified.
2776
[clinic start generated code]*/
2777
2778
/* list.__init__(iterable=()): clear any previous contents, then extend
 * with the iterable if one was given.  Returns 0 on success, -1 on error. */
static int
list___init___impl(PyListObject *self, PyObject *iterable)
/*[clinic end generated code: output=0f3c21379d01de48 input=b3f3fe7206af8f6b]*/
{
    /* Verify list invariants established by PyType_GenericAlloc() */
    assert(0 <= Py_SIZE(self));
    assert(Py_SIZE(self) <= self->allocated || self->allocated == -1);
    assert(self->ob_item != NULL ||
           self->allocated == 0 || self->allocated == -1);

    /* Empty previous contents */
    if (self->ob_item != NULL) {
        (void)_list_clear(self);
    }
    if (iterable != NULL) {
        PyObject *rv = list_extend(self, iterable);
        if (rv == NULL)
            return -1;
        Py_DECREF(rv);
    }
    return 0;
}
2800
2801
/* Vectorcall constructor for list(): fast path that accepts at most one
 * positional argument and no keyword arguments. */
static PyObject *
list_vectorcall(PyObject *type, PyObject * const*args,
                size_t nargsf, PyObject *kwnames)
{
    if (!_PyArg_NoKwnames("list", kwnames)) {
        return NULL;
    }
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (!_PyArg_CheckPositional("list", nargs, 0, 1)) {
        return NULL;
    }

    PyObject *list = PyType_GenericAlloc(_PyType_CAST(type), 0);
    if (list == NULL) {
        return NULL;
    }
    if (nargs) {
        /* Same effect as list.__init__(args[0]). */
        if (list___init___impl((PyListObject *)list, args[0])) {
            Py_DECREF(list);
            return NULL;
        }
    }
    return list;
}
2825
2826
2827
/*[clinic input]
2828
list.__sizeof__
2829
2830
Return the size of the list in memory, in bytes.
2831
[clinic start generated code]*/
2832
2833
static PyObject *
2834
list___sizeof___impl(PyListObject *self)
2835
/*[clinic end generated code: output=3417541f95f9a53e input=b8030a5d5ce8a187]*/
2836
{
2837
size_t res = _PyObject_SIZE(Py_TYPE(self));
2838
res += (size_t)self->allocated * sizeof(void*);
2839
return PyLong_FromSize_t(res);
2840
}
2841
2842
static PyObject *list_iter(PyObject *seq);
2843
static PyObject *list_subscript(PyListObject*, PyObject*);
2844
2845
/* Method table for list objects.  __getitem__ carries METH_COEXIST so this
 * explicit entry is used instead of the mp_subscript slot wrapper. */
static PyMethodDef list_methods[] = {
    {"__getitem__", (PyCFunction)list_subscript, METH_O|METH_COEXIST,
    PyDoc_STR("__getitem__($self, index, /)\n--\n\nReturn self[index].")},
    LIST___REVERSED___METHODDEF
    LIST___SIZEOF___METHODDEF
    LIST_CLEAR_METHODDEF
    LIST_COPY_METHODDEF
    LIST_APPEND_METHODDEF
    LIST_INSERT_METHODDEF
    LIST_EXTEND_METHODDEF
    LIST_POP_METHODDEF
    LIST_REMOVE_METHODDEF
    LIST_INDEX_METHODDEF
    LIST_COUNT_METHODDEF
    LIST_REVERSE_METHODDEF
    LIST_SORT_METHODDEF
    {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
    {NULL, NULL} /* sentinel */
};
2864
2865
/* Sequence protocol slots for list. */
static PySequenceMethods list_as_sequence = {
    (lenfunc)list_length,                       /* sq_length */
    (binaryfunc)list_concat,                    /* sq_concat */
    (ssizeargfunc)list_repeat,                  /* sq_repeat */
    (ssizeargfunc)list_item,                    /* sq_item */
    0,                                          /* sq_slice */
    (ssizeobjargproc)list_ass_item,             /* sq_ass_item */
    0,                                          /* sq_ass_slice */
    (objobjproc)list_contains,                  /* sq_contains */
    (binaryfunc)list_inplace_concat,            /* sq_inplace_concat */
    (ssizeargfunc)list_inplace_repeat,          /* sq_inplace_repeat */
};
2877
2878
/* mp_subscript: self[item] for an index or a slice.  Returns a new
 * reference, or NULL with an exception set. */
static PyObject *
list_subscript(PyListObject* self, PyObject* item)
{
    if (_PyIndex_Check(item)) {
        Py_ssize_t i;
        i = PyNumber_AsSsize_t(item, PyExc_IndexError);
        if (i == -1 && PyErr_Occurred())
            return NULL;
        /* Negative indices count from the end. */
        if (i < 0)
            i += PyList_GET_SIZE(self);
        return list_item(self, i);
    }
    else if (PySlice_Check(item)) {
        Py_ssize_t start, stop, step, slicelength, i;
        size_t cur;
        PyObject* result;
        PyObject* it;
        PyObject **src, **dest;

        if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
            return NULL;
        }
        slicelength = PySlice_AdjustIndices(Py_SIZE(self), &start, &stop,
                                            step);

        if (slicelength <= 0) {
            return PyList_New(0);
        }
        else if (step == 1) {
            /* Contiguous slice: fast path. */
            return list_slice(self, start, stop);
        }
        else {
            /* Extended slice: copy every step-th item. */
            result = list_new_prealloc(slicelength);
            if (!result) return NULL;

            src = self->ob_item;
            dest = ((PyListObject *)result)->ob_item;
            for (cur = start, i = 0; i < slicelength;
                 cur += (size_t)step, i++) {
                it = Py_NewRef(src[cur]);
                dest[i] = it;
            }
            Py_SET_SIZE(result, slicelength);
            return result;
        }
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "list indices must be integers or slices, not %.200s",
                     Py_TYPE(item)->tp_name);
        return NULL;
    }
}
2931
2932
/* mp_ass_subscript: self[item] = value (or deletion when value == NULL),
 * for an index or a slice.  Returns 0 on success, -1 on error. */
static int
list_ass_subscript(PyListObject* self, PyObject* item, PyObject* value)
{
    if (_PyIndex_Check(item)) {
        Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
        if (i == -1 && PyErr_Occurred())
            return -1;
        if (i < 0)
            i += PyList_GET_SIZE(self);
        return list_ass_item(self, i, value);
    }
    else if (PySlice_Check(item)) {
        Py_ssize_t start, stop, step, slicelength;

        if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
            return -1;
        }
        slicelength = PySlice_AdjustIndices(Py_SIZE(self), &start, &stop,
                                            step);

        if (step == 1)
            return list_ass_slice(self, start, stop, value);

        /* Make sure s[5:2] = [..] inserts at the right place:
           before 5, not before 2. */
        if ((step < 0 && start < stop) ||
            (step > 0 && start > stop))
            stop = start;

        if (value == NULL) {
            /* delete slice */
            PyObject **garbage;
            size_t cur;
            Py_ssize_t i;
            int res;

            if (slicelength <= 0)
                return 0;

            /* Normalize to a positive step working left-to-right. */
            if (step < 0) {
                stop = start + 1;
                start = stop + step*(slicelength - 1) - 1;
                step = -step;
            }

            /* Deleted items are stashed here and only DECREF'ed after the
             * list is back in a consistent state. */
            garbage = (PyObject**)
                PyMem_Malloc(slicelength*sizeof(PyObject*));
            if (!garbage) {
                PyErr_NoMemory();
                return -1;
            }

            /* drawing pictures might help understand these for
               loops. Basically, we memmove the parts of the
               list that are *not* part of the slice: step-1
               items for each item that is part of the slice,
               and then tail end of the list that was not
               covered by the slice */
            for (cur = start, i = 0;
                 cur < (size_t)stop;
                 cur += step, i++) {
                Py_ssize_t lim = step - 1;

                garbage[i] = PyList_GET_ITEM(self, cur);

                if (cur + step >= (size_t)Py_SIZE(self)) {
                    lim = Py_SIZE(self) - cur - 1;
                }

                memmove(self->ob_item + cur - i,
                        self->ob_item + cur + 1,
                        lim * sizeof(PyObject *));
            }
            cur = start + (size_t)slicelength * step;
            if (cur < (size_t)Py_SIZE(self)) {
                memmove(self->ob_item + cur - slicelength,
                        self->ob_item + cur,
                        (Py_SIZE(self) - cur) *
                         sizeof(PyObject *));
            }

            Py_SET_SIZE(self, Py_SIZE(self) - slicelength);
            res = list_resize(self, Py_SIZE(self));

            /* Now it is safe to run arbitrary destructors. */
            for (i = 0; i < slicelength; i++) {
                Py_DECREF(garbage[i]);
            }
            PyMem_Free(garbage);

            return res;
        }
        else {
            /* assign slice */
            PyObject *ins, *seq;
            PyObject **garbage, **seqitems, **selfitems;
            Py_ssize_t i;
            size_t cur;

            /* protect against a[::-1] = a */
            if (self == (PyListObject*)value) {
                seq = list_slice((PyListObject*)value, 0,
                                   PyList_GET_SIZE(value));
            }
            else {
                seq = PySequence_Fast(value,
                                      "must assign iterable "
                                      "to extended slice");
            }
            if (!seq)
                return -1;

            /* An extended slice requires an exact length match. */
            if (PySequence_Fast_GET_SIZE(seq) != slicelength) {
                PyErr_Format(PyExc_ValueError,
                    "attempt to assign sequence of "
                    "size %zd to extended slice of "
                    "size %zd",
                         PySequence_Fast_GET_SIZE(seq),
                         slicelength);
                Py_DECREF(seq);
                return -1;
            }

            if (!slicelength) {
                Py_DECREF(seq);
                return 0;
            }

            /* Replaced items are stashed and DECREF'ed only after the
             * assignment is complete. */
            garbage = (PyObject**)
                PyMem_Malloc(slicelength*sizeof(PyObject*));
            if (!garbage) {
                Py_DECREF(seq);
                PyErr_NoMemory();
                return -1;
            }

            selfitems = self->ob_item;
            seqitems = PySequence_Fast_ITEMS(seq);
            for (cur = start, i = 0; i < slicelength;
                 cur += (size_t)step, i++) {
                garbage[i] = selfitems[cur];
                ins = Py_NewRef(seqitems[i]);
                selfitems[cur] = ins;
            }

            for (i = 0; i < slicelength; i++) {
                Py_DECREF(garbage[i]);
            }

            PyMem_Free(garbage);
            Py_DECREF(seq);

            return 0;
        }
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "list indices must be integers or slices, not %.200s",
                     Py_TYPE(item)->tp_name);
        return -1;
    }
}
3093
3094
/* Mapping protocol slots: used for slice-aware subscripting. */
static PyMappingMethods list_as_mapping = {
    (lenfunc)list_length,
    (binaryfunc)list_subscript,
    (objobjargproc)list_ass_subscript
};
3099
3100
/* The list type object. */
PyTypeObject PyList_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "list",
    sizeof(PyListObject),
    0,
    (destructor)list_dealloc,                   /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    (reprfunc)list_repr,                        /* tp_repr */
    0,                                          /* tp_as_number */
    &list_as_sequence,                          /* tp_as_sequence */
    &list_as_mapping,                           /* tp_as_mapping */
    PyObject_HashNotImplemented,                /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
        Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS |
        _Py_TPFLAGS_MATCH_SELF | Py_TPFLAGS_SEQUENCE,  /* tp_flags */
    list___init____doc__,                       /* tp_doc */
    (traverseproc)list_traverse,                /* tp_traverse */
    (inquiry)_list_clear,                       /* tp_clear */
    list_richcompare,                           /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    list_iter,                                  /* tp_iter */
    0,                                          /* tp_iternext */
    list_methods,                               /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    (initproc)list___init__,                    /* tp_init */
    PyType_GenericAlloc,                        /* tp_alloc */
    PyType_GenericNew,                          /* tp_new */
    PyObject_GC_Del,                            /* tp_free */
    .tp_vectorcall = list_vectorcall,
};
3144
3145
/*********************** List Iterator **************************/
3146
3147
static void listiter_dealloc(_PyListIterObject *);
3148
static int listiter_traverse(_PyListIterObject *, visitproc, void *);
3149
static PyObject *listiter_next(_PyListIterObject *);
3150
static PyObject *listiter_len(_PyListIterObject *, PyObject *);
3151
static PyObject *listiter_reduce_general(void *_it, int forward);
3152
static PyObject *listiter_reduce(_PyListIterObject *, PyObject *);
3153
static PyObject *listiter_setstate(_PyListIterObject *, PyObject *state);
3154
3155
PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
3156
PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
3157
PyDoc_STRVAR(setstate_doc, "Set state information for unpickling.");
3158
3159
/* Methods shared by the forward list iterator. */
static PyMethodDef listiter_methods[] = {
    {"__length_hint__", (PyCFunction)listiter_len, METH_NOARGS, length_hint_doc},
    {"__reduce__", (PyCFunction)listiter_reduce, METH_NOARGS, reduce_doc},
    {"__setstate__", (PyCFunction)listiter_setstate, METH_O, setstate_doc},
    {NULL,              NULL}           /* sentinel */
};
3165
3166
/* Type object for the forward list iterator. */
PyTypeObject PyListIter_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "list_iterator",                            /* tp_name */
    sizeof(_PyListIterObject),                  /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    (destructor)listiter_dealloc,               /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)listiter_traverse,            /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    PyObject_SelfIter,                          /* tp_iter */
    (iternextfunc)listiter_next,                /* tp_iternext */
    listiter_methods,                           /* tp_methods */
    0,                                          /* tp_members */
};
3198
3199
3200
/* tp_iter for lists: create a fresh forward iterator over seq. */
static PyObject *
list_iter(PyObject *seq)
{
    if (!PyList_Check(seq)) {
        PyErr_BadInternalCall();
        return NULL;
    }
    _PyListIterObject *it =
        PyObject_GC_New(_PyListIterObject, &PyListIter_Type);
    if (it == NULL) {
        return NULL;
    }
    it->it_index = 0;
    it->it_seq = (PyListObject *)Py_NewRef(seq);
    _PyObject_GC_TRACK(it);
    return (PyObject *)it;
}
3217
3218
/* Deallocate a forward list iterator; it_seq may already be NULL. */
static void
listiter_dealloc(_PyListIterObject *it)
{
    _PyObject_GC_UNTRACK(it);
    Py_XDECREF(it->it_seq);
    PyObject_GC_Del(it);
}
3225
3226
/* GC traversal for the forward iterator: only the list reference. */
static int
listiter_traverse(_PyListIterObject *it, visitproc visit, void *arg)
{
    Py_VISIT(it->it_seq);
    return 0;
}
3232
3233
/* tp_iternext for the forward iterator: yield the next item, or NULL
 * (without setting an exception) when exhausted. */
static PyObject *
listiter_next(_PyListIterObject *it)
{
    PyListObject *seq;
    PyObject *item;

    assert(it != NULL);
    seq = it->it_seq;
    /* it_seq == NULL marks an already-exhausted iterator. */
    if (seq == NULL)
        return NULL;
    assert(PyList_Check(seq));

    /* Bounds are re-checked each call: the list may have shrunk. */
    if (it->it_index < PyList_GET_SIZE(seq)) {
        item = PyList_GET_ITEM(seq, it->it_index);
        ++it->it_index;
        return Py_NewRef(item);
    }

    /* Exhausted: release the list so it can be collected. */
    it->it_seq = NULL;
    Py_DECREF(seq);
    return NULL;
}
3255
3256
static PyObject *
3257
listiter_len(_PyListIterObject *it, PyObject *Py_UNUSED(ignored))
3258
{
3259
Py_ssize_t len;
3260
if (it->it_seq) {
3261
len = PyList_GET_SIZE(it->it_seq) - it->it_index;
3262
if (len >= 0)
3263
return PyLong_FromSsize_t(len);
3264
}
3265
return PyLong_FromLong(0);
3266
}
3267
3268
/* __reduce__ for the forward iterator; forward=1 selects iter(). */
static PyObject *
listiter_reduce(_PyListIterObject *it, PyObject *Py_UNUSED(ignored))
{
    return listiter_reduce_general(it, 1);
}
3273
3274
/* __setstate__ for the forward iterator: restore the position from a
 * pickled index, clamped into [0, len(list)]. */
static PyObject *
listiter_setstate(_PyListIterObject *it, PyObject *state)
{
    Py_ssize_t index = PyLong_AsSsize_t(state);
    if (index == -1 && PyErr_Occurred())
        return NULL;
    if (it->it_seq != NULL) {
        if (index < 0)
            index = 0;
        else if (index > PyList_GET_SIZE(it->it_seq))
            index = PyList_GET_SIZE(it->it_seq); /* iterator exhausted */
        it->it_index = index;
    }
    Py_RETURN_NONE;
}
3289
3290
/*********************** List Reverse Iterator **************************/
3291
3292
/* Reverse iterator over a list: it_index counts DOWN toward -1. */
typedef struct {
    PyObject_HEAD
    Py_ssize_t it_index;          /* next index to yield; -1 when exhausted */
    PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
} listreviterobject;
3297
3298
static void listreviter_dealloc(listreviterobject *);
3299
static int listreviter_traverse(listreviterobject *, visitproc, void *);
3300
static PyObject *listreviter_next(listreviterobject *);
3301
static PyObject *listreviter_len(listreviterobject *, PyObject *);
3302
static PyObject *listreviter_reduce(listreviterobject *, PyObject *);
3303
static PyObject *listreviter_setstate(listreviterobject *, PyObject *);
3304
3305
/* Methods for the reverse list iterator. */
static PyMethodDef listreviter_methods[] = {
    {"__length_hint__", (PyCFunction)listreviter_len, METH_NOARGS, length_hint_doc},
    {"__reduce__", (PyCFunction)listreviter_reduce, METH_NOARGS, reduce_doc},
    {"__setstate__", (PyCFunction)listreviter_setstate, METH_O, setstate_doc},
    {NULL,              NULL}           /* sentinel */
};
3311
3312
/* Type object for the reverse list iterator. */
PyTypeObject PyListRevIter_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "list_reverseiterator",                     /* tp_name */
    sizeof(listreviterobject),                  /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    (destructor)listreviter_dealloc,            /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)listreviter_traverse,         /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    PyObject_SelfIter,                          /* tp_iter */
    (iternextfunc)listreviter_next,             /* tp_iternext */
    listreviter_methods,                        /* tp_methods */
    0,
};
3344
3345
/*[clinic input]
3346
list.__reversed__
3347
3348
Return a reverse iterator over the list.
3349
[clinic start generated code]*/
3350
3351
/* list.__reversed__(): create a reverse iterator over the list. */
static PyObject *
list___reversed___impl(PyListObject *self)
/*[clinic end generated code: output=b166f073208c888c input=eadb6e17f8a6a280]*/
{
    listreviterobject *it;

    it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type);
    if (it == NULL)
        return NULL;
    assert(PyList_Check(self));
    /* Start at the last element; an empty list yields -1 (exhausted). */
    it->it_index = PyList_GET_SIZE(self) - 1;
    it->it_seq = (PyListObject*)Py_NewRef(self);
    PyObject_GC_Track(it);
    return (PyObject *)it;
}
3366
3367
/* Deallocate a reverse list iterator; it_seq may already be NULL. */
static void
listreviter_dealloc(listreviterobject *it)
{
    PyObject_GC_UnTrack(it);
    Py_XDECREF(it->it_seq);
    PyObject_GC_Del(it);
}
3374
3375
/* GC traversal: the underlying list is the only object reference the
 * iterator owns (NULL after exhaustion, which Py_VISIT handles). */
static int
listreviter_traverse(listreviterobject *it, visitproc visit, void *arg)
{
    Py_VISIT(it->it_seq);
    return 0;
}
static PyObject *
3383
listreviter_next(listreviterobject *it)
3384
{
3385
PyObject *item;
3386
Py_ssize_t index;
3387
PyListObject *seq;
3388
3389
assert(it != NULL);
3390
seq = it->it_seq;
3391
if (seq == NULL) {
3392
return NULL;
3393
}
3394
assert(PyList_Check(seq));
3395
3396
index = it->it_index;
3397
if (index>=0 && index < PyList_GET_SIZE(seq)) {
3398
item = PyList_GET_ITEM(seq, index);
3399
it->it_index--;
3400
return Py_NewRef(item);
3401
}
3402
it->it_index = -1;
3403
it->it_seq = NULL;
3404
Py_DECREF(seq);
3405
return NULL;
3406
}
3407
3408
static PyObject *
3409
listreviter_len(listreviterobject *it, PyObject *Py_UNUSED(ignored))
3410
{
3411
Py_ssize_t len = it->it_index + 1;
3412
if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len)
3413
len = 0;
3414
return PyLong_FromSsize_t(len);
3415
}
3416
3417
/* __reduce__ for pickling: delegate to the shared helper with
 * forward=0 (reverse-iterator flavour). */
static PyObject *
listreviter_reduce(listreviterobject *it, PyObject *Py_UNUSED(ignored))
{
    return listiter_reduce_general(it, 0);
}
static PyObject *
3424
listreviter_setstate(listreviterobject *it, PyObject *state)
3425
{
3426
Py_ssize_t index = PyLong_AsSsize_t(state);
3427
if (index == -1 && PyErr_Occurred())
3428
return NULL;
3429
if (it->it_seq != NULL) {
3430
if (index < -1)
3431
index = -1;
3432
else if (index > PyList_GET_SIZE(it->it_seq) - 1)
3433
index = PyList_GET_SIZE(it->it_seq) - 1;
3434
it->it_index = index;
3435
}
3436
Py_RETURN_NONE;
3437
}
3438
3439
/* common pickling support */

/* Build the __reduce__ tuple for a forward (_PyListIterObject) or reverse
 * (listreviterobject) list iterator.  `_it` is untyped because the two
 * structs differ; `forward` selects which one it really is.
 * A live iterator reduces to (iter|reversed, (list,), index); an exhausted
 * one reduces to (iter, ([],)) so it unpickles as an empty iterator. */
static PyObject *
listiter_reduce_general(void *_it, int forward)
{
    PyObject *list;

    /* _PyEval_GetBuiltin can invoke arbitrary Python code, which could
     * invalidate the iterator's fields; it must therefore be called
     * before any access of the iterator pointers.
     * see issue #101765 */

    /* the objects are not the same, index is of different types! */
    if (forward) {
        PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));
        if (!iter) {
            return NULL;
        }
        _PyListIterObject *it = (_PyListIterObject *)_it;
        if (it->it_seq) {
            /* "N" steals our reference to `iter`. */
            return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
        }
        Py_DECREF(iter);
    } else {
        PyObject *reversed = _PyEval_GetBuiltin(&_Py_ID(reversed));
        if (!reversed) {
            return NULL;
        }
        listreviterobject *it = (listreviterobject *)_it;
        if (it->it_seq) {
            return Py_BuildValue("N(O)n", reversed, it->it_seq, it->it_index);
        }
        Py_DECREF(reversed);
    }
    /* empty iterator, create an empty list */
    list = PyList_New(0);
    if (list == NULL)
        return NULL;
    return Py_BuildValue("N(N)", _PyEval_GetBuiltin(&_Py_ID(iter)), list);
}