/* Drop in replacement for heapq.py

   C implementation derived directly from heapq.py in Py2.3
   which was written by Kevin O'Connor, augmented by Tim Peters,
   annotated by François Pinard, and converted to C by Raymond Hettinger.

*/

#ifndef Py_BUILD_CORE_BUILTIN
#  define Py_BUILD_CORE_MODULE 1
#endif

#include "Python.h"
#include "pycore_list.h"          // _PyList_ITEMS()

#include "clinic/_heapqmodule.c.h"


/*[clinic input]
module _heapq
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=d7cca0a2e4c0ceb3]*/

static int
siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
{
    PyObject *newitem, *parent, **arr;
    Py_ssize_t parentpos, size;
    int cmp;

    assert(PyList_Check(heap));
    size = PyList_GET_SIZE(heap);
    if (pos >= size) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Follow the path to the root, moving parents down until finding
       a place newitem fits. */
    arr = _PyList_ITEMS(heap);
    newitem = arr[pos];
    while (pos > startpos) {
        parentpos = (pos - 1) >> 1;
        parent = arr[parentpos];
        Py_INCREF(newitem);
        Py_INCREF(parent);
        cmp = PyObject_RichCompareBool(newitem, parent, Py_LT);
        Py_DECREF(parent);
        Py_DECREF(newitem);
        if (cmp < 0)
            return -1;
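        /* The __lt__ call above can run arbitrary Python code that may
           resize the heap, leaving arr and the saved indices stale, so
           re-check the size before touching the array again. */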
        if (size != PyList_GET_SIZE(heap)) {
            PyErr_SetString(PyExc_RuntimeError,
                            "list changed size during iteration");
            return -1;
        }
        if (cmp == 0)
            break;
        arr = _PyList_ITEMS(heap);
        parent = arr[parentpos];
        newitem = arr[pos];
        arr[parentpos] = newitem;
        arr[pos] = parent;
        pos = parentpos;
    }
    return 0;
}

static int
siftup(PyListObject *heap, Py_ssize_t pos)
{
    Py_ssize_t startpos, endpos, childpos, limit;
    PyObject *tmp1, *tmp2, **arr;
    int cmp;

    assert(PyList_Check(heap));
    endpos = PyList_GET_SIZE(heap);
    startpos = pos;
    if (pos >= endpos) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Bubble up the smaller child until hitting a leaf. */
    arr = _PyList_ITEMS(heap);
    limit = endpos >> 1;          /* smallest pos that has no child */
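    /* The item at pos is walked all the way down to a leaf along the path
       of smaller children; the siftdown() call at the end then moves it
       back up to its final position.  As explained in Lib/heapq.py, this
       tends to cost fewer comparisons than stopping as soon as the item
       fits. */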
    while (pos < limit) {
        /* Set childpos to index of smaller child. */
        childpos = 2*pos + 1;     /* leftmost child position */
        if (childpos + 1 < endpos) {
            PyObject* a = arr[childpos];
            PyObject* b = arr[childpos + 1];
            Py_INCREF(a);
            Py_INCREF(b);
            cmp = PyObject_RichCompareBool(a, b, Py_LT);
            Py_DECREF(a);
            Py_DECREF(b);
            if (cmp < 0)
                return -1;
            childpos += ((unsigned)cmp ^ 1);   /* increment when cmp==0 */
            arr = _PyList_ITEMS(heap);         /* arr may have changed */
            if (endpos != PyList_GET_SIZE(heap)) {
                PyErr_SetString(PyExc_RuntimeError,
                                "list changed size during iteration");
                return -1;
            }
        }
        /* Move the smaller child up. */
        tmp1 = arr[childpos];
        tmp2 = arr[pos];
        arr[childpos] = tmp2;
        arr[pos] = tmp1;
        pos = childpos;
    }
    /* Bubble it up to its final resting place (by sifting its parents down). */
    return siftdown(heap, startpos, pos);
}

/*[clinic input]
_heapq.heappush

    heap: object(subclass_of='&PyList_Type')
    item: object
    /

Push item onto heap, maintaining the heap invariant.
[clinic start generated code]*/

static PyObject *
_heapq_heappush_impl(PyObject *module, PyObject *heap, PyObject *item)
/*[clinic end generated code: output=912c094f47663935 input=7c69611f3698aceb]*/
{
    if (PyList_Append(heap, item))
        return NULL;

    if (siftdown((PyListObject *)heap, 0, PyList_GET_SIZE(heap)-1))
        return NULL;
    Py_RETURN_NONE;
}

static PyObject *
heappop_internal(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t))
{
    PyObject *lastelt, *returnitem;
    Py_ssize_t n;

    /* raises IndexError if the heap is empty */
    n = PyList_GET_SIZE(heap);
    if (n == 0) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }

    lastelt = PyList_GET_ITEM(heap, n-1);
    Py_INCREF(lastelt);
    if (PyList_SetSlice(heap, n-1, n, NULL)) {
        Py_DECREF(lastelt);
        return NULL;
    }
    n--;

    if (!n)
        return lastelt;
    returnitem = PyList_GET_ITEM(heap, 0);
    PyList_SET_ITEM(heap, 0, lastelt);
    if (siftup_func((PyListObject *)heap, 0)) {
        Py_DECREF(returnitem);
        return NULL;
    }
    return returnitem;
}

/*[clinic input]
_heapq.heappop

    heap: object(subclass_of='&PyList_Type')
    /

Pop the smallest item off the heap, maintaining the heap invariant.
[clinic start generated code]*/

static PyObject *
_heapq_heappop_impl(PyObject *module, PyObject *heap)
/*[clinic end generated code: output=96dfe82d37d9af76 input=91487987a583c856]*/
{
    return heappop_internal(heap, siftup);
}

static PyObject *
heapreplace_internal(PyObject *heap, PyObject *item, int siftup_func(PyListObject *, Py_ssize_t))
{
    PyObject *returnitem;

    if (PyList_GET_SIZE(heap) == 0) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }

    returnitem = PyList_GET_ITEM(heap, 0);
    Py_INCREF(item);
    PyList_SET_ITEM(heap, 0, item);
    if (siftup_func((PyListObject *)heap, 0)) {
        Py_DECREF(returnitem);
        return NULL;
    }
    return returnitem;
}


/*[clinic input]
_heapq.heapreplace

    heap: object(subclass_of='&PyList_Type')
    item: object
    /

Pop and return the current smallest value, and add the new item.

This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap.  Note that the value
returned may be larger than item!  That constrains reasonable uses of
this routine unless written as part of a conditional replacement:

    if item > heap[0]:
        item = heapreplace(heap, item)
[clinic start generated code]*/

static PyObject *
_heapq_heapreplace_impl(PyObject *module, PyObject *heap, PyObject *item)
/*[clinic end generated code: output=82ea55be8fbe24b4 input=719202ac02ba10c8]*/
{
    return heapreplace_internal(heap, item, siftup);
}

/*[clinic input]
_heapq.heappushpop

    heap: object(subclass_of='&PyList_Type')
    item: object
    /

Push item on the heap, then pop and return the smallest item from the heap.

The combined action runs more efficiently than heappush() followed by
a separate call to heappop().
[clinic start generated code]*/

static PyObject *
_heapq_heappushpop_impl(PyObject *module, PyObject *heap, PyObject *item)
/*[clinic end generated code: output=67231dc98ed5774f input=5dc701f1eb4a4aa7]*/
{
    PyObject *returnitem;
    int cmp;

    if (PyList_GET_SIZE(heap) == 0) {
        Py_INCREF(item);
        return item;
    }

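    /* If the pushed item would not displace the current top (i.e. the
       "heap[0] < item" test below is false), it would be popped straight
       back out, so it is returned unchanged without touching the heap. */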
    PyObject* top = PyList_GET_ITEM(heap, 0);
    Py_INCREF(top);
    cmp = PyObject_RichCompareBool(top, item, Py_LT);
    Py_DECREF(top);
    if (cmp < 0)
        return NULL;
    if (cmp == 0) {
        Py_INCREF(item);
        return item;
    }

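    /* The comparison above may have executed arbitrary __lt__ code that
       mutated the heap, so re-check that it is still non-empty. */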
    if (PyList_GET_SIZE(heap) == 0) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }

    returnitem = PyList_GET_ITEM(heap, 0);
    Py_INCREF(item);
    PyList_SET_ITEM(heap, 0, item);
    if (siftup((PyListObject *)heap, 0)) {
        Py_DECREF(returnitem);
        return NULL;
    }
    return returnitem;
}

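/* Return the largest power of two that is <= n, i.e. keep only the top
   bit of n.  cache_friendly_heapify() uses this to find the leftmost node
   of the heap row containing node m:  rows begin at indexes of the form
   2**k - 1, so keep_top_bit(m + 1) - 1 is the start of m's row. */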
static Py_ssize_t
keep_top_bit(Py_ssize_t n)
{
    int i = 0;

    while (n > 1) {
        n >>= 1;
        i++;
    }
    return n << i;
}

/* Cache friendly version of heapify()
   -----------------------------------

   Build-up a heap in O(n) time by performing siftup() operations
   on nodes whose children are already heaps.

   The simplest way is to sift the nodes in reverse order from
   n//2-1 to 0 inclusive.  The downside is that children may be
   out of cache by the time their parent is reached.

   A better way is to not wait for the children to go out of cache.
   Once a sibling pair of child nodes have been sifted, immediately
   sift their parent node (while the children are still in cache).

   Both ways build child heaps before their parents, so both ways
   do the exact same number of comparisons and produce exactly
   the same heap.  The only difference is that the traversal
   order is optimized for cache efficiency.
*/
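/* Example (hand-traced):  for a 12-element list the internal nodes are
   0..5.  The simple bottom-up loop sifts them in the order

       5, 4, 3, 2, 1, 0

   while cache_friendly_heapify() visits them as

       5, 2, 4, 3, 1, 0

   so node 2 is sifted immediately after its child 5, and nodes 1 and 0
   immediately after node 3, while their other children (4 and 2) are
   still likely to be in cache.  Both orders sift each internal node
   exactly once and only after both of its children. */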

static PyObject *
cache_friendly_heapify(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t))
{
    Py_ssize_t i, j, m, mhalf, leftmost;

    m = PyList_GET_SIZE(heap) >> 1;         /* index of first childless node */
    leftmost = keep_top_bit(m + 1) - 1;     /* leftmost node in row of m */
    mhalf = m >> 1;                         /* parent of first childless node */

    for (i = leftmost - 1 ; i >= mhalf ; i--) {
        j = i;
        while (1) {
            if (siftup_func((PyListObject *)heap, j))
                return NULL;
            if (!(j & 1))
                break;
            j >>= 1;
        }
    }

    for (i = m - 1 ; i >= leftmost ; i--) {
        j = i;
        while (1) {
            if (siftup_func((PyListObject *)heap, j))
                return NULL;
            if (!(j & 1))
                break;
            j >>= 1;
        }
    }
    Py_RETURN_NONE;
}

static PyObject *
heapify_internal(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t))
{
    Py_ssize_t i, n;

    /* For heaps likely to be bigger than L1 cache, we use the cache
       friendly heapify function.  For smaller heaps that fit entirely
       in cache, we prefer the simpler algorithm with less branching.
    */
    n = PyList_GET_SIZE(heap);
    if (n > 2500)
        return cache_friendly_heapify(heap, siftup_func);

    /* Transform bottom-up.  The largest index there's any point to
       looking at is the largest with a child index in-range, so must
       have 2*i + 1 < n, or i < (n-1)/2.  If n is even = 2*j, this is
       (2*j-1)/2 = j-1/2 so j-1 is the largest, which is n//2 - 1.  If
       n is odd = 2*j+1, this is (2*j+1-1)/2 = j so j-1 is the largest,
       and that's again n//2-1.
    */
    for (i = (n >> 1) - 1 ; i >= 0 ; i--)
        if (siftup_func((PyListObject *)heap, i))
            return NULL;
    Py_RETURN_NONE;
}

/*[clinic input]
_heapq.heapify

    heap: object(subclass_of='&PyList_Type')
    /

Transform list into a heap, in-place, in O(len(heap)) time.
[clinic start generated code]*/

static PyObject *
_heapq_heapify_impl(PyObject *module, PyObject *heap)
/*[clinic end generated code: output=e63a636fcf83d6d0 input=53bb7a2166febb73]*/
{
    return heapify_internal(heap, siftup);
}

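/* Max-heap variants.  These mirror siftdown()/siftup() above with the
   comparison direction reversed.  The _heappop_max(), _heapreplace_max()
   and _heapify_max() wrappers below are private helpers for Lib/heapq.py
   (used, for example, by nsmallest()), not part of the public API. */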
static int
siftdown_max(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
{
    PyObject *newitem, *parent, **arr;
    Py_ssize_t parentpos, size;
    int cmp;

    assert(PyList_Check(heap));
    size = PyList_GET_SIZE(heap);
    if (pos >= size) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Follow the path to the root, moving parents down until finding
       a place newitem fits. */
    arr = _PyList_ITEMS(heap);
    newitem = arr[pos];
    while (pos > startpos) {
        parentpos = (pos - 1) >> 1;
        parent = arr[parentpos];
        Py_INCREF(parent);
        Py_INCREF(newitem);
        cmp = PyObject_RichCompareBool(parent, newitem, Py_LT);
        Py_DECREF(parent);
        Py_DECREF(newitem);
        if (cmp < 0)
            return -1;
        if (size != PyList_GET_SIZE(heap)) {
            PyErr_SetString(PyExc_RuntimeError,
                            "list changed size during iteration");
            return -1;
        }
        if (cmp == 0)
            break;
        arr = _PyList_ITEMS(heap);
        parent = arr[parentpos];
        newitem = arr[pos];
        arr[parentpos] = newitem;
        arr[pos] = parent;
        pos = parentpos;
    }
    return 0;
}

static int
siftup_max(PyListObject *heap, Py_ssize_t pos)
{
    Py_ssize_t startpos, endpos, childpos, limit;
    PyObject *tmp1, *tmp2, **arr;
    int cmp;

    assert(PyList_Check(heap));
    endpos = PyList_GET_SIZE(heap);
    startpos = pos;
    if (pos >= endpos) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Bubble up the larger child until hitting a leaf. */
    arr = _PyList_ITEMS(heap);
    limit = endpos >> 1;          /* smallest pos that has no child */
    while (pos < limit) {
        /* Set childpos to index of larger child. */
        childpos = 2*pos + 1;     /* leftmost child position */
        if (childpos + 1 < endpos) {
            PyObject* a = arr[childpos + 1];
            PyObject* b = arr[childpos];
            Py_INCREF(a);
            Py_INCREF(b);
            cmp = PyObject_RichCompareBool(a, b, Py_LT);
            Py_DECREF(a);
            Py_DECREF(b);
            if (cmp < 0)
                return -1;
            childpos += ((unsigned)cmp ^ 1);   /* increment when cmp==0 */
            arr = _PyList_ITEMS(heap);         /* arr may have changed */
            if (endpos != PyList_GET_SIZE(heap)) {
                PyErr_SetString(PyExc_RuntimeError,
                                "list changed size during iteration");
                return -1;
            }
        }
        /* Move the larger child up. */
        tmp1 = arr[childpos];
        tmp2 = arr[pos];
        arr[childpos] = tmp2;
        arr[pos] = tmp1;
        pos = childpos;
    }
    /* Bubble it up to its final resting place (by sifting its parents down). */
    return siftdown_max(heap, startpos, pos);
}


/*[clinic input]
_heapq._heappop_max

    heap: object(subclass_of='&PyList_Type')
    /

Maxheap variant of heappop.
[clinic start generated code]*/

static PyObject *
_heapq__heappop_max_impl(PyObject *module, PyObject *heap)
/*[clinic end generated code: output=9e77aadd4e6a8760 input=362c06e1c7484793]*/
{
    return heappop_internal(heap, siftup_max);
}

/*[clinic input]
_heapq._heapreplace_max

    heap: object(subclass_of='&PyList_Type')
    item: object
    /

Maxheap variant of heapreplace.
[clinic start generated code]*/

static PyObject *
_heapq__heapreplace_max_impl(PyObject *module, PyObject *heap,
                             PyObject *item)
/*[clinic end generated code: output=8ad7545e4a5e8adb input=f2dd27cbadb948d7]*/
{
    return heapreplace_internal(heap, item, siftup_max);
}

/*[clinic input]
_heapq._heapify_max

    heap: object(subclass_of='&PyList_Type')
    /

Maxheap variant of heapify.
[clinic start generated code]*/

static PyObject *
_heapq__heapify_max_impl(PyObject *module, PyObject *heap)
/*[clinic end generated code: output=2cb028beb4a8b65e input=c1f765ee69f124b8]*/
{
    return heapify_internal(heap, siftup_max);
}

static PyMethodDef heapq_methods[] = {
    _HEAPQ_HEAPPUSH_METHODDEF
    _HEAPQ_HEAPPUSHPOP_METHODDEF
    _HEAPQ_HEAPPOP_METHODDEF
    _HEAPQ_HEAPREPLACE_METHODDEF
    _HEAPQ_HEAPIFY_METHODDEF
    _HEAPQ__HEAPPOP_MAX_METHODDEF
    _HEAPQ__HEAPIFY_MAX_METHODDEF
    _HEAPQ__HEAPREPLACE_MAX_METHODDEF
    {NULL, NULL}           /* sentinel */
};

PyDoc_STRVAR(module_doc,
"Heap queue algorithm (a.k.a. priority queue).\n\
\n\
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for\n\
all k, counting elements from 0.  For the sake of comparison,\n\
non-existing elements are considered to be infinite.  The interesting\n\
property of a heap is that a[0] is always its smallest element.\n\
\n\
Usage:\n\
\n\
heap = []            # creates an empty heap\n\
heappush(heap, item) # pushes a new item on the heap\n\
item = heappop(heap) # pops the smallest item from the heap\n\
item = heap[0]       # smallest item on the heap without popping it\n\
heapify(x)           # transforms list into a heap, in-place, in linear time\n\
item = heapreplace(heap, item) # pops and returns smallest item, and adds\n\
                               # new item; the heap size is unchanged\n\
\n\
Our API differs from textbook heap algorithms as follows:\n\
\n\
- We use 0-based indexing.  This makes the relationship between the\n\
  index for a node and the indexes for its children slightly less\n\
  obvious, but is more suitable since Python uses 0-based indexing.\n\
\n\
- Our heappop() method returns the smallest item, not the largest.\n\
\n\
These two make it possible to view the heap as a regular Python list\n\
without surprises: heap[0] is the smallest item, and heap.sort()\n\
maintains the heap invariant!\n");


PyDoc_STRVAR(__about__,
"Heap queues\n\
\n\
[explanation by Fran\xc3\xa7ois Pinard]\n\
\n\
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for\n\
all k, counting elements from 0.  For the sake of comparison,\n\
non-existing elements are considered to be infinite.  The interesting\n\
property of a heap is that a[0] is always its smallest element.\n"
"\n\
The strange invariant above is meant to be an efficient memory\n\
representation for a tournament.  The numbers below are `k', not a[k]:\n\
\n\
                                   0\n\
\n\
                  1                                 2\n\
\n\
          3               4                5               6\n\
\n\
      7       8       9       10      11      12      13      14\n\
\n\
    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30\n\
\n\
\n\
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'.  In\n\
a usual binary tournament we see in sports, each cell is the winner\n\
over the two cells it tops, and we can trace the winner down the tree\n\
to see all opponents s/he had.  However, in many computer applications\n\
of such tournaments, we do not need to trace the history of a winner.\n\
To be more memory efficient, when a winner is promoted, we try to\n\
replace it by something else at a lower level, and the rule becomes\n\
that a cell and the two cells it tops contain three different items,\n\
but the top cell \"wins\" over the two topped cells.\n"
615 "\n\
616 If this heap invariant is protected at all time, index 0 is clearly\n\
617 the overall winner. The simplest algorithmic way to remove it and\n\
618 find the \"next\" winner is to move some loser (let's say cell 30 in the\n\
619 diagram above) into the 0 position, and then percolate this new 0 down\n\
620 the tree, exchanging values, until the invariant is re-established.\n\
621 This is clearly logarithmic on the total number of items in the tree.\n\
622 By iterating over all items, you get an O(n ln n) sort.\n"
623 "\n\
624 A nice feature of this sort is that you can efficiently insert new\n\
625 items while the sort is going on, provided that the inserted items are\n\
626 not \"better\" than the last 0'th element you extracted. This is\n\
627 especially useful in simulation contexts, where the tree holds all\n\
628 incoming events, and the \"win\" condition means the smallest scheduled\n\
629 time. When an event schedule other events for execution, they are\n\
630 scheduled into the future, so they can easily go into the heap. So, a\n\
631 heap is a good structure for implementing schedulers (this is what I\n\
632 used for my MIDI sequencer :-).\n"
633 "\n\
634 Various structures for implementing schedulers have been extensively\n\
635 studied, and heaps are good for this, as they are reasonably speedy,\n\
636 the speed is almost constant, and the worst case is not much different\n\
637 than the average case. However, there are other representations which\n\
638 are more efficient overall, yet the worst cases might be terrible.\n"
639 "\n\
640 Heaps are also very useful in big disk sorts. You most probably all\n\
641 know that a big sort implies producing \"runs\" (which are pre-sorted\n\
642 sequences, which size is usually related to the amount of CPU memory),\n\
643 followed by a merging passes for these runs, which merging is often\n\
644 very cleverly organised[1]. It is very important that the initial\n\
645 sort produces the longest runs possible. Tournaments are a good way\n\
646 to that. If, using all the memory available to hold a tournament, you\n\
647 replace and percolate items that happen to fit the current run, you'll\n\
648 produce runs which are twice the size of the memory for random input,\n\
649 and much better for input fuzzily ordered.\n"
650 "\n\
651 Moreover, if you output the 0'th item on disk and get an input which\n\
652 may not fit in the current tournament (because the value \"wins\" over\n\
653 the last output value), it cannot fit in the heap, so the size of the\n\
654 heap decreases. The freed memory could be cleverly reused immediately\n\
655 for progressively building a second heap, which grows at exactly the\n\
656 same rate the first heap is melting. When the first heap completely\n\
657 vanishes, you switch heaps and start a new run. Clever and quite\n\
658 effective!\n\
659 \n\
660 In a word, heaps are useful memory structures to know. I use them in\n\
661 a few applications, and I think it is good to keep a `heap' module\n\
662 around. :-)\n"
663 "\n\
664 --------------------\n\
665 [1] The disk balancing algorithms which are current, nowadays, are\n\
666 more annoying than clever, and this is a consequence of the seeking\n\
667 capabilities of the disks. On devices which cannot seek, like big\n\
668 tape drives, the story was quite different, and one had to be very\n\
669 clever to ensure (far in advance) that each tape movement will be the\n\
670 most effective possible (that is, will best participate at\n\
671 \"progressing\" the merge). Some tapes were even able to read\n\
672 backwards, and this was also used to avoid the rewinding time.\n\
673 Believe me, real good tape sorts were quite spectacular to watch!\n\
674 From all times, sorting has always been a Great Art! :-)\n");
675
676
static int
heapq_exec(PyObject *m)
{
    PyObject *about = PyUnicode_FromString(__about__);
    if (about == NULL) {
        return -1;
    }
    if (PyModule_AddObject(m, "__about__", about) < 0) {
        Py_DECREF(about);
        return -1;
    }
    return 0;
}

static struct PyModuleDef_Slot heapq_slots[] = {
    {Py_mod_exec, heapq_exec},
    {0, NULL}
};

static struct PyModuleDef _heapqmodule = {
    PyModuleDef_HEAD_INIT,
    "_heapq",
    module_doc,
    0,
    heapq_methods,
    heapq_slots,
    NULL,
    NULL,
    NULL
};

PyMODINIT_FUNC
PyInit__heapq(void)
{
    return PyModuleDef_Init(&_heapqmodule);
}