/*
 * Memoryview object implementation
 * --------------------------------
 *
 * This implementation is a complete rewrite contributed by Stefan Krah in
 * Python 3.3. Substantial credit goes to Antoine Pitrou (who had already
 * fortified and rewritten the previous implementation) and Nick Coghlan
 * (who came up with the idea of the ManagedBuffer) for analyzing the complex
 * ownership rules.
 *
 */

#include "Python.h"
#include "pycore_abstract.h"   // _PyIndex_Check()
#include "pycore_object.h"     // _PyObject_GC_UNTRACK()
#include "pycore_strhex.h"     // _Py_strhex_with_sep()
#include <stddef.h>            // offsetof()

/*[clinic input]
class memoryview "PyMemoryViewObject *" "&PyMemoryView_Type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=e2e49d2192835219]*/

#include "clinic/memoryobject.c.h"

/****************************************************************************/
/*                           ManagedBuffer Object                           */
/****************************************************************************/

/*
   ManagedBuffer Object:
   ---------------------

   The purpose of this object is to facilitate the handling of chained
   memoryviews that have the same underlying exporting object. PEP-3118
   allows the underlying object to change while a view is exported. This
   could lead to unexpected results when constructing a new memoryview
   from an existing memoryview.

   Rather than repeatedly redirecting buffer requests to the original base
   object, all chained memoryviews use a single buffer snapshot. This
   snapshot is generated by the constructor _PyManagedBuffer_FromObject().

   Ownership rules:
   ----------------

   The master buffer inside a managed buffer is filled in by the original
   base object. shape, strides, suboffsets and format are read-only for
   all consumers.

   A memoryview's buffer is a private copy of the exporter's buffer. shape,
   strides and suboffsets belong to the memoryview and are thus writable.

   If a memoryview itself exports several buffers via memory_getbuf(), all
   buffer copies share shape, strides and suboffsets. In this case, the
   arrays are NOT writable.

   Reference count assumptions:
   ----------------------------

   The 'obj' member of a Py_buffer must either be NULL or refer to the
   exporting base object. In the Python codebase, all getbufferprocs
   return a new reference to view.obj (example: bytes_buffer_getbuffer()).

   PyBuffer_Release() decrements view.obj (if non-NULL), so the
   releasebufferprocs must NOT decrement view.obj.
*/
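
/*
   Illustrative sketch (not part of this module): chained memoryviews share
   one ManagedBuffer, so the exporter's getbuffer slot is called only once,
   by _PyManagedBuffer_FromObject(). Error handling is omitted and the
   variable names are invented for the example.

       PyObject *exporter = PyBytes_FromStringAndSize("data", 4);
       PyObject *mv1 = PyMemoryView_FromObject(exporter);  // new ManagedBuffer
       PyObject *mv2 = PyMemoryView_FromObject(mv1);       // reuses mv1's mbuf
       ...
       Py_DECREF(mv2);
       Py_DECREF(mv1);
       Py_DECREF(exporter);
*/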


static inline _PyManagedBufferObject *
mbuf_alloc(void)
{
    _PyManagedBufferObject *mbuf;

    mbuf = (_PyManagedBufferObject *)
        PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
    if (mbuf == NULL)
        return NULL;
    mbuf->flags = 0;
    mbuf->exports = 0;
    mbuf->master.obj = NULL;
    _PyObject_GC_TRACK(mbuf);

    return mbuf;
}

static PyObject *
_PyManagedBuffer_FromObject(PyObject *base)
{
    _PyManagedBufferObject *mbuf;

    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;

    if (PyObject_GetBuffer(base, &mbuf->master, PyBUF_FULL_RO) < 0) {
        mbuf->master.obj = NULL;
        Py_DECREF(mbuf);
        return NULL;
    }

    return (PyObject *)mbuf;
}

static void
mbuf_release(_PyManagedBufferObject *self)
{
    if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
        return;

    /* NOTE: at this point self->exports can still be > 0 if this function
       is called from mbuf_clear() to break up a reference cycle. */
    self->flags |= _Py_MANAGED_BUFFER_RELEASED;

    /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
    _PyObject_GC_UNTRACK(self);
    PyBuffer_Release(&self->master);
}

static void
mbuf_dealloc(_PyManagedBufferObject *self)
{
    assert(self->exports == 0);
    mbuf_release(self);
    if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
        PyMem_Free(self->master.format);
    PyObject_GC_Del(self);
}

static int
mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->master.obj);
    return 0;
}

static int
mbuf_clear(_PyManagedBufferObject *self)
{
    assert(self->exports >= 0);
    mbuf_release(self);
    return 0;
}

PyTypeObject _PyManagedBuffer_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "managedbuffer",
    sizeof(_PyManagedBufferObject),
    0,
    (destructor)mbuf_dealloc,                /* tp_dealloc */
    0,                                       /* tp_vectorcall_offset */
    0,                                       /* tp_getattr */
    0,                                       /* tp_setattr */
    0,                                       /* tp_as_async */
    0,                                       /* tp_repr */
    0,                                       /* tp_as_number */
    0,                                       /* tp_as_sequence */
    0,                                       /* tp_as_mapping */
    0,                                       /* tp_hash */
    0,                                       /* tp_call */
    0,                                       /* tp_str */
    PyObject_GenericGetAttr,                 /* tp_getattro */
    0,                                       /* tp_setattro */
    0,                                       /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0,                                       /* tp_doc */
    (traverseproc)mbuf_traverse,             /* tp_traverse */
    (inquiry)mbuf_clear                      /* tp_clear */
};


/****************************************************************************/
/*                            MemoryView Object                             */
/****************************************************************************/

/* In the process of breaking reference cycles mbuf_release() can be
   called before memory_release(). */
#define BASE_INACCESSIBLE(mv) \
    (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
     ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)

#define CHECK_RELEASED(mv) \
    if (BASE_INACCESSIBLE(mv)) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return NULL; \
    }

#define CHECK_RELEASED_INT(mv) \
    if (BASE_INACCESSIBLE(mv)) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return -1; \
    }

/* See gh-92888. These macros signal that we need to check the memoryview
   again due to possible read after frees. */
#define CHECK_RELEASED_AGAIN(mv) CHECK_RELEASED(mv)
#define CHECK_RELEASED_INT_AGAIN(mv) CHECK_RELEASED_INT(mv)

#define CHECK_LIST_OR_TUPLE(v) \
    if (!PyList_Check(v) && !PyTuple_Check(v)) { \
        PyErr_SetString(PyExc_TypeError, \
            #v " must be a list or a tuple"); \
        return NULL; \
    }

#define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)

/* Check for the presence of suboffsets in the first dimension. */
#define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
/* Adjust ptr if suboffsets are present. */
#define ADJUST_PTR(ptr, suboffsets, dim) \
    (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)

/* Memoryview buffer properties */
#define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
#define MV_F_CONTIGUOUS(flags) \
    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
#define MV_ANY_CONTIGUOUS(flags) \
    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))

/* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
#define MV_CONTIGUOUS_NDIM1(view) \
    ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)

/* getbuffer() requests */
#define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
#define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
#define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
#define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
#define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
#define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
#define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
#define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)


/**************************************************************************/
/*                        Copy memoryview buffers                         */
/**************************************************************************/

/* The functions in this section take a source and a destination buffer
   with the same logical structure: format, itemsize, ndim and shape
   are identical, with ndim > 0.

   NOTE: All buffers are assumed to have PyBUF_FULL information, which
   is the case for memoryviews! */


/* Assumptions: ndim >= 1. The macro tests for a corner case that should
   perhaps be explicitly forbidden in the PEP. */
#define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
    (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)

static inline int
last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
{
    assert(dest->ndim > 0 && src->ndim > 0);
    return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
            !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
            dest->strides[dest->ndim-1] == dest->itemsize &&
            src->strides[src->ndim-1] == src->itemsize);
}

/* This is not a general function for determining format equivalence.
   It is used in copy_single() and copy_buffer() to weed out non-matching
   formats. Skipping the '@' character is specifically used in slice
   assignments, where the lvalue is already known to have a single character
   format. This is a performance hack that could be rewritten (if properly
   benchmarked). */
static inline int
equiv_format(const Py_buffer *dest, const Py_buffer *src)
{
    const char *dfmt, *sfmt;

    assert(dest->format && src->format);
    dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
    sfmt = src->format[0] == '@' ? src->format+1 : src->format;

    if (strcmp(dfmt, sfmt) != 0 ||
        dest->itemsize != src->itemsize) {
        return 0;
    }

    return 1;
}

/* Two shapes are equivalent if they are either equal or identical up
   to a zero element at the same position. For example, in NumPy arrays
   the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
static inline int
equiv_shape(const Py_buffer *dest, const Py_buffer *src)
{
    int i;

    if (dest->ndim != src->ndim)
        return 0;

    for (i = 0; i < dest->ndim; i++) {
        if (dest->shape[i] != src->shape[i])
            return 0;
        if (dest->shape[i] == 0)
            break;
    }

    return 1;
}

/* Check that the logical structure of the destination and source buffers
   is identical. */
static int
equiv_structure(const Py_buffer *dest, const Py_buffer *src)
{
    if (!equiv_format(dest, src) ||
        !equiv_shape(dest, src)) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview assignment: lvalue and rvalue have different "
            "structures");
        return 0;
    }

    return 1;
}

/* Base case for recursive multi-dimensional copying. Contiguous arrays are
   copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
   sizeof(mem) == shape[0] * itemsize. */
static void
copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
          char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
          char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
          char *mem)
{
    if (mem == NULL) { /* contiguous */
        Py_ssize_t size = shape[0] * itemsize;
        if (dptr + size < sptr || sptr + size < dptr)
            memcpy(dptr, sptr, size); /* no overlapping */
        else
            memmove(dptr, sptr, size);
    }
    else {
        char *p;
        Py_ssize_t i;
        for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
            char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
            memcpy(p, xsptr, itemsize);
        }
        for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
            char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
            memcpy(xdptr, p, itemsize);
        }
    }

}

/* Recursively copy a source buffer to a destination buffer. The two buffers
   have the same ndim, shape and itemsize. */
static void
copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
         char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
         char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
         char *mem)
{
    Py_ssize_t i;

    assert(ndim >= 1);

    if (ndim == 1) {
        copy_base(shape, itemsize,
                  dptr, dstrides, dsuboffsets,
                  sptr, sstrides, ssuboffsets,
                  mem);
        return;
    }

    for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
        char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
        char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);

        copy_rec(shape+1, ndim-1, itemsize,
                 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
                 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
                 mem);
    }
}

/* Faster copying of one-dimensional arrays. */
static int
copy_single(PyMemoryViewObject *self, const Py_buffer *dest, const Py_buffer *src)
{
    CHECK_RELEASED_INT_AGAIN(self);
    char *mem = NULL;

    assert(dest->ndim == 1);

    if (!equiv_structure(dest, src))
        return -1;

    if (!last_dim_is_contiguous(dest, src)) {
        mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
        if (mem == NULL) {
            PyErr_NoMemory();
            return -1;
        }
    }

    copy_base(dest->shape, dest->itemsize,
              dest->buf, dest->strides, dest->suboffsets,
              src->buf, src->strides, src->suboffsets,
              mem);

    if (mem)
        PyMem_Free(mem);

    return 0;
}

/* Recursively copy src to dest. Both buffers must have the same basic
   structure. Copying is atomic, the function never fails with a partial
   copy. */
static int
copy_buffer(const Py_buffer *dest, const Py_buffer *src)
{
    char *mem = NULL;

    assert(dest->ndim > 0);

    if (!equiv_structure(dest, src))
        return -1;

    if (!last_dim_is_contiguous(dest, src)) {
        mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
        if (mem == NULL) {
            PyErr_NoMemory();
            return -1;
        }
    }

    copy_rec(dest->shape, dest->ndim, dest->itemsize,
             dest->buf, dest->strides, dest->suboffsets,
             src->buf, src->strides, src->suboffsets,
             mem);

    if (mem)
        PyMem_Free(mem);

    return 0;
}

/* Initialize strides for a C-contiguous array. */
static inline void
init_strides_from_shape(Py_buffer *view)
{
    Py_ssize_t i;

    assert(view->ndim > 0);

    view->strides[view->ndim-1] = view->itemsize;
    for (i = view->ndim-2; i >= 0; i--)
        view->strides[i] = view->strides[i+1] * view->shape[i+1];
}

/* Initialize strides for a Fortran-contiguous array. */
static inline void
init_fortran_strides_from_shape(Py_buffer *view)
{
    Py_ssize_t i;

    assert(view->ndim > 0);

    view->strides[0] = view->itemsize;
    for (i = 1; i < view->ndim; i++)
        view->strides[i] = view->strides[i-1] * view->shape[i-1];
}
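
/* Worked example (illustrative only): for shape [2, 3, 4] with itemsize 8,
   init_strides_from_shape() yields C-order strides [96, 32, 8], while
   init_fortran_strides_from_shape() yields Fortran-order strides [8, 16, 48].
   In both cases product(shape) * itemsize == 192 bytes. */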

/* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
   or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
   len(mem) == src->len. */
static int
buffer_to_contiguous(char *mem, const Py_buffer *src, char order)
{
    Py_buffer dest;
    Py_ssize_t *strides;
    int ret;

    assert(src->ndim >= 1);
    assert(src->shape != NULL);
    assert(src->strides != NULL);

    strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
    if (strides == NULL) {
        PyErr_NoMemory();
        return -1;
    }

    /* initialize dest */
    dest = *src;
    dest.buf = mem;
    /* shape is constant and shared: the logical representation of the
       array is unaltered. */

    /* The physical representation determined by strides (and possibly
       suboffsets) may change. */
    dest.strides = strides;
    if (order == 'C' || order == 'A') {
        init_strides_from_shape(&dest);
    }
    else {
        init_fortran_strides_from_shape(&dest);
    }

    dest.suboffsets = NULL;

    ret = copy_buffer(&dest, src);

    PyMem_Free(strides);
    return ret;
}


/****************************************************************************/
/*                               Constructors                               */
/****************************************************************************/

/* Initialize values that are shared with the managed buffer. */
static inline void
init_shared_values(Py_buffer *dest, const Py_buffer *src)
{
    dest->obj = src->obj;
    dest->buf = src->buf;
    dest->len = src->len;
    dest->itemsize = src->itemsize;
    dest->readonly = src->readonly;
    dest->format = src->format ? src->format : "B";
    dest->internal = src->internal;
}

/* Copy shape and strides. Reconstruct missing values. */
static void
init_shape_strides(Py_buffer *dest, const Py_buffer *src)
{
    Py_ssize_t i;

    if (src->ndim == 0) {
        dest->shape = NULL;
        dest->strides = NULL;
        return;
    }
    if (src->ndim == 1) {
        dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
        dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
        return;
    }

    for (i = 0; i < src->ndim; i++)
        dest->shape[i] = src->shape[i];
    if (src->strides) {
        for (i = 0; i < src->ndim; i++)
            dest->strides[i] = src->strides[i];
    }
    else {
        init_strides_from_shape(dest);
    }
}

static inline void
init_suboffsets(Py_buffer *dest, const Py_buffer *src)
{
    Py_ssize_t i;

    if (src->suboffsets == NULL) {
        dest->suboffsets = NULL;
        return;
    }
    for (i = 0; i < src->ndim; i++)
        dest->suboffsets[i] = src->suboffsets[i];
}

/* len = product(shape) * itemsize */
static inline void
init_len(Py_buffer *view)
{
    Py_ssize_t i, len;

    len = 1;
    for (i = 0; i < view->ndim; i++)
        len *= view->shape[i];
    len *= view->itemsize;

    view->len = len;
}

/* Initialize memoryview buffer properties. */
static void
init_flags(PyMemoryViewObject *mv)
{
    const Py_buffer *view = &mv->view;
    int flags = 0;

    switch (view->ndim) {
    case 0:
        flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
                  _Py_MEMORYVIEW_FORTRAN);
        break;
    case 1:
        if (MV_CONTIGUOUS_NDIM1(view))
            flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
        break;
    default:
        if (PyBuffer_IsContiguous(view, 'C'))
            flags |= _Py_MEMORYVIEW_C;
        if (PyBuffer_IsContiguous(view, 'F'))
            flags |= _Py_MEMORYVIEW_FORTRAN;
        break;
    }

    if (view->suboffsets) {
        flags |= _Py_MEMORYVIEW_PIL;
        flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
    }

    mv->flags = flags;
}

/* Allocate a new memoryview and perform basic initialization. New memoryviews
   are exclusively created through the mbuf_add functions. */
static inline PyMemoryViewObject *
memory_alloc(int ndim)
{
    PyMemoryViewObject *mv;

    mv = (PyMemoryViewObject *)
        PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
    if (mv == NULL)
        return NULL;

    mv->mbuf = NULL;
    mv->hash = -1;
    mv->flags = 0;
    mv->exports = 0;
    mv->view.ndim = ndim;
    mv->view.shape = mv->ob_array;
    mv->view.strides = mv->ob_array + ndim;
    mv->view.suboffsets = mv->ob_array + 2 * ndim;
    mv->weakreflist = NULL;

    _PyObject_GC_TRACK(mv);
    return mv;
}

/*
   Return a new memoryview that is registered with mbuf. If src is NULL,
   use mbuf->master as the underlying buffer. Otherwise, use src.

   The new memoryview has full buffer information: shape and strides
   are always present, suboffsets as needed. Arrays are copied to
   the memoryview's ob_array field.
*/
static PyObject *
mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
{
    PyMemoryViewObject *mv;
    Py_buffer *dest;

    if (src == NULL)
        src = &mbuf->master;

    if (src->ndim > PyBUF_MAX_NDIM) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: number of dimensions must not exceed "
            Py_STRINGIFY(PyBUF_MAX_NDIM));
        return NULL;
    }

    mv = memory_alloc(src->ndim);
    if (mv == NULL)
        return NULL;

    dest = &mv->view;
    init_shared_values(dest, src);
    init_shape_strides(dest, src);
    init_suboffsets(dest, src);
    init_flags(mv);

    mv->mbuf = mbuf;
    Py_INCREF(mbuf);
    mbuf->exports++;

    return (PyObject *)mv;
}

/* Register an incomplete view: shape, strides, suboffsets and flags still
   need to be initialized. Use 'ndim' instead of src->ndim to determine the
   size of the memoryview's ob_array.

   Assumption: ndim <= PyBUF_MAX_NDIM. */
static PyObject *
mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
                         int ndim)
{
    PyMemoryViewObject *mv;
    Py_buffer *dest;

    if (src == NULL)
        src = &mbuf->master;

    assert(ndim <= PyBUF_MAX_NDIM);

    mv = memory_alloc(ndim);
    if (mv == NULL)
        return NULL;

    dest = &mv->view;
    init_shared_values(dest, src);

    mv->mbuf = mbuf;
    Py_INCREF(mbuf);
    mbuf->exports++;

    return (PyObject *)mv;
}

/* Expose a raw memory area as a view of contiguous bytes. flags can be
   PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
   The memoryview has complete buffer information. */
PyObject *
PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
{
    _PyManagedBufferObject *mbuf;
    PyObject *mv;
    int readonly;

    assert(mem != NULL);
    assert(flags == PyBUF_READ || flags == PyBUF_WRITE);

    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;

    readonly = (flags == PyBUF_WRITE) ? 0 : 1;
    (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
                            PyBUF_FULL_RO);

    mv = mbuf_add_view(mbuf, NULL);
    Py_DECREF(mbuf);

    return mv;
}
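
/* Illustrative sketch (not part of this module): wrapping a static C array
   as a writable memoryview. The caller must keep 'rawbuf' alive for as long
   as the returned view (or any view derived from it) is in use.

       static char rawbuf[256];
       PyObject *view = PyMemoryView_FromMemory(rawbuf, sizeof(rawbuf),
                                                PyBUF_WRITE);
       if (view == NULL) {
           return NULL;   // error already set
       }
*/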

/* Create a memoryview from a given Py_buffer. For simple byte views,
   PyMemoryView_FromMemory() should be used instead.
   This function is the only entry point that can create a master buffer
   without full information. Because of this fact init_shape_strides()
   must be able to reconstruct missing values. */
PyObject *
PyMemoryView_FromBuffer(const Py_buffer *info)
{
    _PyManagedBufferObject *mbuf;
    PyObject *mv;

    if (info->buf == NULL) {
        PyErr_SetString(PyExc_ValueError,
            "PyMemoryView_FromBuffer(): info->buf must not be NULL");
        return NULL;
    }

    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;

    /* info->obj is either NULL or a borrowed reference. This reference
       should not be decremented in PyBuffer_Release(). */
    mbuf->master = *info;
    mbuf->master.obj = NULL;

    mv = mbuf_add_view(mbuf, NULL);
    Py_DECREF(mbuf);

    return mv;
}
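
/* Illustrative sketch (not part of this module): filling a Py_buffer by hand
   with PyBuffer_FillInfo() and wrapping it. The Py_buffer struct itself is
   copied into the managed buffer, but 'data' must outlive the view.

       static char data[64];
       Py_buffer info;
       if (PyBuffer_FillInfo(&info, NULL, data, sizeof(data),
                             0, PyBUF_FULL_RO) < 0) {
           return NULL;
       }
       PyObject *view = PyMemoryView_FromBuffer(&info);
*/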

/* Create a memoryview from an object that implements the buffer protocol.
   If the object is a memoryview, the new memoryview must be registered
   with the same managed buffer. Otherwise, a new managed buffer is created. */
PyObject *
PyMemoryView_FromObject(PyObject *v)
{
    _PyManagedBufferObject *mbuf;

    if (PyMemoryView_Check(v)) {
        PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
        CHECK_RELEASED(mv);
        return mbuf_add_view(mv->mbuf, &mv->view);
    }
    else if (PyObject_CheckBuffer(v)) {
        PyObject *ret;
        mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v);
        if (mbuf == NULL)
            return NULL;
        ret = mbuf_add_view(mbuf, NULL);
        Py_DECREF(mbuf);
        return ret;
    }

    PyErr_Format(PyExc_TypeError,
        "memoryview: a bytes-like object is required, not '%.200s'",
        Py_TYPE(v)->tp_name);
    return NULL;
}

/* Copy the format string from a base object that might vanish. */
static int
mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
{
    if (fmt != NULL) {
        char *cp = PyMem_Malloc(strlen(fmt)+1);
        if (cp == NULL) {
            PyErr_NoMemory();
            return -1;
        }
        mbuf->master.format = strcpy(cp, fmt);
        mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
    }

    return 0;
}

/*
   Return a memoryview that is based on a contiguous copy of src.
   Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.

   Ownership rules:
     1) As usual, the returned memoryview has a private copy
        of src->shape, src->strides and src->suboffsets.
     2) src->format is copied to the master buffer and released
        in mbuf_dealloc(). The releasebufferproc of the bytes
        object is NULL, so it does not matter that mbuf_release()
        passes the altered format pointer to PyBuffer_Release().
*/
static PyObject *
memory_from_contiguous_copy(const Py_buffer *src, char order)
{
    _PyManagedBufferObject *mbuf;
    PyMemoryViewObject *mv;
    PyObject *bytes;
    Py_buffer *dest;
    int i;

    assert(src->ndim > 0);
    assert(src->shape != NULL);

    bytes = PyBytes_FromStringAndSize(NULL, src->len);
    if (bytes == NULL)
        return NULL;

    mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes);
    Py_DECREF(bytes);
    if (mbuf == NULL)
        return NULL;

    if (mbuf_copy_format(mbuf, src->format) < 0) {
        Py_DECREF(mbuf);
        return NULL;
    }

    mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
    Py_DECREF(mbuf);
    if (mv == NULL)
        return NULL;

    dest = &mv->view;

    /* shared values are initialized correctly except for itemsize */
    dest->itemsize = src->itemsize;

    /* shape and strides */
    for (i = 0; i < src->ndim; i++) {
        dest->shape[i] = src->shape[i];
    }
    if (order == 'C' || order == 'A') {
        init_strides_from_shape(dest);
    }
    else {
        init_fortran_strides_from_shape(dest);
    }
    /* suboffsets */
    dest->suboffsets = NULL;

    /* flags */
    init_flags(mv);

    if (copy_buffer(dest, src) < 0) {
        Py_DECREF(mv);
        return NULL;
    }

    return (PyObject *)mv;
}

/*
   Return a new memoryview object based on a contiguous exporter with
   buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
   The logical structure of the input and output buffers is the same
   (i.e. tolist(input) == tolist(output)), but the physical layout in
   memory can be explicitly chosen.

   As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
   otherwise it may be writable or read-only.

   If the exporter is already contiguous with the desired target order,
   the memoryview will be directly based on the exporter.

   Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
   based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
   'F'ortran order otherwise.
*/
PyObject *
PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
{
    PyMemoryViewObject *mv;
    PyObject *ret;
    Py_buffer *view;

    assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
    assert(order == 'C' || order == 'F' || order == 'A');

    mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
    if (mv == NULL)
        return NULL;

    view = &mv->view;
    if (buffertype == PyBUF_WRITE && view->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "underlying buffer is not writable");
        Py_DECREF(mv);
        return NULL;
    }

    if (PyBuffer_IsContiguous(view, order))
        return (PyObject *)mv;

    if (buffertype == PyBUF_WRITE) {
        PyErr_SetString(PyExc_BufferError,
            "writable contiguous buffer requested "
            "for a non-contiguous object.");
        Py_DECREF(mv);
        return NULL;
    }

    ret = memory_from_contiguous_copy(view, order);
    Py_DECREF(mv);
    return ret;
}
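
/* Illustrative sketch (not part of this module): obtaining a read-only,
   C-contiguous view of an arbitrary exporter 'obj'. If 'obj' is already
   C-contiguous, the view aliases its memory; otherwise the data is first
   copied into a private bytes object by memory_from_contiguous_copy().

       PyObject *contig = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
       if (contig == NULL) {
           return NULL;   // BufferError/TypeError already set
       }
*/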


/*[clinic input]
@classmethod
memoryview.__new__

    object: object

Create a new memoryview object which references the given object.
[clinic start generated code]*/

static PyObject *
memoryview_impl(PyTypeObject *type, PyObject *object)
/*[clinic end generated code: output=7de78e184ed66db8 input=f04429eb0bdf8c6e]*/
{
    return PyMemoryView_FromObject(object);
}


/****************************************************************************/
/*                         Previously in abstract.c                         */
/****************************************************************************/

typedef struct {
    Py_buffer view;
    Py_ssize_t array[1];
} Py_buffer_full;

int
PyBuffer_ToContiguous(void *buf, const Py_buffer *src, Py_ssize_t len, char order)
{
    Py_buffer_full *fb = NULL;
    int ret;

    assert(order == 'C' || order == 'F' || order == 'A');

    if (len != src->len) {
        PyErr_SetString(PyExc_ValueError,
            "PyBuffer_ToContiguous: len != view->len");
        return -1;
    }

    if (PyBuffer_IsContiguous(src, order)) {
        memcpy((char *)buf, src->buf, len);
        return 0;
    }

    /* buffer_to_contiguous() assumes PyBUF_FULL */
    fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
    if (fb == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    fb->view.ndim = src->ndim;
    fb->view.shape = fb->array;
    fb->view.strides = fb->array + src->ndim;
    fb->view.suboffsets = fb->array + 2 * src->ndim;

    init_shared_values(&fb->view, src);
    init_shape_strides(&fb->view, src);
    init_suboffsets(&fb->view, src);

    src = &fb->view;

    ret = buffer_to_contiguous(buf, src, order);
    PyMem_Free(fb);
    return ret;
}
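
/* Illustrative sketch (not part of this module): flattening an exporter's
   data into a caller-provided scratch buffer in C order. 'view' is assumed
   to be a Py_buffer previously filled via PyObject_GetBuffer().

       char *scratch = PyMem_Malloc(view.len);
       if (scratch == NULL) {
           return PyErr_NoMemory();
       }
       if (PyBuffer_ToContiguous(scratch, &view, view.len, 'C') < 0) {
           PyMem_Free(scratch);
           return NULL;
       }
*/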


/****************************************************************************/
/*                          Release/GC management                           */
/****************************************************************************/

/* Inform the managed buffer that this particular memoryview will not access
   the underlying buffer again. If no other memoryviews are registered with
   the managed buffer, the underlying buffer is released instantly and
   marked as inaccessible for both the memoryview and the managed buffer.

   This function fails if the memoryview itself has exported buffers. */
static int
_memory_release(PyMemoryViewObject *self)
{
    if (self->flags & _Py_MEMORYVIEW_RELEASED)
        return 0;

    if (self->exports == 0) {
        self->flags |= _Py_MEMORYVIEW_RELEASED;
        assert(self->mbuf->exports > 0);
        if (--self->mbuf->exports == 0)
            mbuf_release(self->mbuf);
        return 0;
    }
    if (self->exports > 0) {
        PyErr_Format(PyExc_BufferError,
            "memoryview has %zd exported buffer%s", self->exports,
            self->exports==1 ? "" : "s");
        return -1;
    }

    PyErr_SetString(PyExc_SystemError,
                    "_memory_release(): negative export count");
    return -1;
}

/*[clinic input]
memoryview.release

Release the underlying buffer exposed by the memoryview object.
[clinic start generated code]*/

static PyObject *
memoryview_release_impl(PyMemoryViewObject *self)
/*[clinic end generated code: output=d0b7e3ba95b7fcb9 input=bc71d1d51f4a52f0]*/
{
    if (_memory_release(self) < 0)
        return NULL;
    Py_RETURN_NONE;
}

static void
memory_dealloc(PyMemoryViewObject *self)
{
    assert(self->exports == 0);
    _PyObject_GC_UNTRACK(self);
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    if (self->weakreflist != NULL)
        PyObject_ClearWeakRefs((PyObject *) self);
    PyObject_GC_Del(self);
}

static int
memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->mbuf);
    return 0;
}

static int
memory_clear(PyMemoryViewObject *self)
{
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    return 0;
}

static PyObject *
memory_enter(PyObject *self, PyObject *args)
{
    CHECK_RELEASED(self);
    Py_INCREF(self);
    return self;
}

static PyObject *
memory_exit(PyObject *self, PyObject *args)
{
    return memoryview_release_impl((PyMemoryViewObject *)self);
}


/****************************************************************************/
/*                        Casting format and shape                          */
/****************************************************************************/

#define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')

static inline Py_ssize_t
get_native_fmtchar(char *result, const char *fmt)
{
    Py_ssize_t size = -1;

    if (fmt[0] == '@') fmt++;

    switch (fmt[0]) {
    case 'c': case 'b': case 'B': size = sizeof(char); break;
    case 'h': case 'H': size = sizeof(short); break;
    case 'i': case 'I': size = sizeof(int); break;
    case 'l': case 'L': size = sizeof(long); break;
    case 'q': case 'Q': size = sizeof(long long); break;
    case 'n': case 'N': size = sizeof(Py_ssize_t); break;
    case 'f': size = sizeof(float); break;
    case 'd': size = sizeof(double); break;
    case '?': size = sizeof(_Bool); break;
    case 'P': size = sizeof(void *); break;
    }

    if (size > 0 && fmt[1] == '\0') {
        *result = fmt[0];
        return size;
    }

    return -1;
}
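
/* Example (illustrative): get_native_fmtchar(&c, "@L") stores 'L' in c and
   returns sizeof(long), whereas get_native_fmtchar(&c, "2i") and
   get_native_fmtchar(&c, "<i") both return -1, because only a single native
   format character with an optional '@' prefix is accepted. */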

static inline const char *
get_native_fmtstr(const char *fmt)
{
    int at = 0;

    if (fmt[0] == '@') {
        at = 1;
        fmt++;
    }
    if (fmt[0] == '\0' || fmt[1] != '\0') {
        return NULL;
    }

#define RETURN(s) do { return at ? "@" s : s; } while (0)

    switch (fmt[0]) {
    case 'c': RETURN("c");
    case 'b': RETURN("b");
    case 'B': RETURN("B");
    case 'h': RETURN("h");
    case 'H': RETURN("H");
    case 'i': RETURN("i");
    case 'I': RETURN("I");
    case 'l': RETURN("l");
    case 'L': RETURN("L");
    case 'q': RETURN("q");
    case 'Q': RETURN("Q");
    case 'n': RETURN("n");
    case 'N': RETURN("N");
    case 'f': RETURN("f");
    case 'd': RETURN("d");
    case '?': RETURN("?");
    case 'P': RETURN("P");
    }

    return NULL;
}


/* Cast a memoryview's data type to 'format'. The input array must be
   C-contiguous. At least one of input-format, output-format must have
   byte size. The output array is 1-D, with the same byte length as the
   input array. Thus, view->len must be a multiple of the new itemsize. */
static int
cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
{
    Py_buffer *view = &mv->view;
    PyObject *asciifmt;
    char srcchar, destchar;
    Py_ssize_t itemsize;
    int ret = -1;

    assert(view->ndim >= 1);
    assert(Py_SIZE(mv) == 3*view->ndim);
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + view->ndim);
    assert(view->suboffsets == mv->ob_array + 2*view->ndim);

    asciifmt = PyUnicode_AsASCIIString(format);
    if (asciifmt == NULL)
        return ret;

    itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
    if (itemsize < 0) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: destination format must be a native single "
            "character format prefixed with an optional '@'");
        goto out;
    }

    if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
         !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast between two non-byte formats");
        goto out;
    }
    if (view->len % itemsize) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: length is not a multiple of itemsize");
        goto out;
    }

    view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
    if (view->format == NULL) {
        /* NOT_REACHED: get_native_fmtchar() already validates the format. */
        PyErr_SetString(PyExc_RuntimeError,
            "memoryview: internal error");
        goto out;
    }
    view->itemsize = itemsize;

    view->ndim = 1;
    view->shape[0] = view->len / view->itemsize;
    view->strides[0] = view->itemsize;
    view->suboffsets = NULL;

    init_flags(mv);

    ret = 0;

out:
    Py_DECREF(asciifmt);
    return ret;
}
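
/* Worked example (illustrative only): casting a 12-byte C-contiguous 'B'
   view to "@i" on a platform where sizeof(int) == 4 produces a 1-D view
   with itemsize 4, shape[0] == 3 and strides[0] == 4; view->len stays 12.
   Casting the same view to "@d" fails, since 12 is not a multiple of
   sizeof(double) == 8. */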
1254
1255 /* The memoryview must have space for 3*len(seq) elements. */
1256 static Py_ssize_t
copy_shape(Py_ssize_t * shape,const PyObject * seq,Py_ssize_t ndim,Py_ssize_t itemsize)1257 copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
1258 Py_ssize_t itemsize)
1259 {
1260 Py_ssize_t x, i;
1261 Py_ssize_t len = itemsize;
1262
1263 for (i = 0; i < ndim; i++) {
1264 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
1265 if (!PyLong_Check(tmp)) {
1266 PyErr_SetString(PyExc_TypeError,
1267 "memoryview.cast(): elements of shape must be integers");
1268 return -1;
1269 }
1270 x = PyLong_AsSsize_t(tmp);
1271 if (x == -1 && PyErr_Occurred()) {
1272 return -1;
1273 }
1274 if (x <= 0) {
1275 /* In general elements of shape may be 0, but not for casting. */
1276 PyErr_Format(PyExc_ValueError,
1277 "memoryview.cast(): elements of shape must be integers > 0");
1278 return -1;
1279 }
1280 if (x > PY_SSIZE_T_MAX / len) {
1281 PyErr_Format(PyExc_ValueError,
1282 "memoryview.cast(): product(shape) > SSIZE_MAX");
1283 return -1;
1284 }
1285 len *= x;
1286 shape[i] = x;
1287 }
1288
1289 return len;
1290 }
1291
1292 /* Cast a 1-D array to a new shape. The result array will be C-contiguous.
1293 If the result array does not have exactly the same byte length as the
1294 input array, raise ValueError. */
1295 static int
cast_to_ND(PyMemoryViewObject * mv,const PyObject * shape,int ndim)1296 cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
1297 {
1298 Py_buffer *view = &mv->view;
1299 Py_ssize_t len;
1300
1301 assert(view->ndim == 1); /* ndim from cast_to_1D() */
1302 assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
1303 assert(view->shape == mv->ob_array);
1304 assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
1305 assert(view->suboffsets == NULL);
1306
1307 view->ndim = ndim;
1308 if (view->ndim == 0) {
1309 view->shape = NULL;
1310 view->strides = NULL;
1311 len = view->itemsize;
1312 }
1313 else {
1314 len = copy_shape(view->shape, shape, ndim, view->itemsize);
1315 if (len < 0)
1316 return -1;
1317 init_strides_from_shape(view);
1318 }
1319
1320 if (view->len != len) {
1321 PyErr_SetString(PyExc_TypeError,
1322 "memoryview: product(shape) * itemsize != buffer size");
1323 return -1;
1324 }
1325
1326 init_flags(mv);
1327
1328 return 0;
1329 }
1330
1331 static int
zero_in_shape(PyMemoryViewObject * mv)1332 zero_in_shape(PyMemoryViewObject *mv)
1333 {
1334 Py_buffer *view = &mv->view;
1335 Py_ssize_t i;
1336
1337 for (i = 0; i < view->ndim; i++)
1338 if (view->shape[i] == 0)
1339 return 1;
1340
1341 return 0;
1342 }
1343
1344 /*
1345 Cast a copy of 'self' to a different view. The input view must
1346 be C-contiguous. The function always casts the input view to a
1347 1-D output according to 'format'. At least one of input-format,
1348 output-format must have byte size.
1349
1350 If 'shape' is given, the 1-D view from the previous step will
1351 be cast to a C-contiguous view with new shape and strides.
1352
1353 All casts must result in views that will have the exact byte
1354 size of the original input. Otherwise, an error is raised.
1355 */
1356 /*[clinic input]
1357 memoryview.cast
1358
1359 format: unicode
1360 shape: object = NULL
1361
1362 Cast a memoryview to a new format or shape.
1363 [clinic start generated code]*/
1364
1365 static PyObject *
memoryview_cast_impl(PyMemoryViewObject * self,PyObject * format,PyObject * shape)1366 memoryview_cast_impl(PyMemoryViewObject *self, PyObject *format,
1367 PyObject *shape)
1368 /*[clinic end generated code: output=bae520b3a389cbab input=138936cc9041b1a3]*/
1369 {
1370 PyMemoryViewObject *mv = NULL;
1371 Py_ssize_t ndim = 1;
1372
1373 CHECK_RELEASED(self);
1374
1375 if (!MV_C_CONTIGUOUS(self->flags)) {
1376 PyErr_SetString(PyExc_TypeError,
1377 "memoryview: casts are restricted to C-contiguous views");
1378 return NULL;
1379 }
1380 if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
1381 PyErr_SetString(PyExc_TypeError,
1382 "memoryview: cannot cast view with zeros in shape or strides");
1383 return NULL;
1384 }
1385 if (shape) {
1386 CHECK_LIST_OR_TUPLE(shape)
1387 ndim = PySequence_Fast_GET_SIZE(shape);
1388 if (ndim > PyBUF_MAX_NDIM) {
1389 PyErr_SetString(PyExc_ValueError,
1390 "memoryview: number of dimensions must not exceed "
1391 Py_STRINGIFY(PyBUF_MAX_NDIM));
1392 return NULL;
1393 }
1394 if (self->view.ndim != 1 && ndim != 1) {
1395 PyErr_SetString(PyExc_TypeError,
1396 "memoryview: cast must be 1D -> ND or ND -> 1D");
1397 return NULL;
1398 }
1399 }
1400
1401 mv = (PyMemoryViewObject *)
1402 mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
1403 if (mv == NULL)
1404 return NULL;
1405
1406 if (cast_to_1D(mv, format) < 0)
1407 goto error;
1408 if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
1409 goto error;
1410
1411 return (PyObject *)mv;
1412
1413 error:
1414 Py_DECREF(mv);
1415 return NULL;
1416 }
1417
1418 /*[clinic input]
1419 memoryview.toreadonly
1420
1421 Return a readonly version of the memoryview.
1422 [clinic start generated code]*/
1423
1424 static PyObject *
memoryview_toreadonly_impl(PyMemoryViewObject * self)1425 memoryview_toreadonly_impl(PyMemoryViewObject *self)
1426 /*[clinic end generated code: output=2c7e056f04c99e62 input=dc06d20f19ba236f]*/
1427 {
1428 CHECK_RELEASED(self);
1429 /* Even if self is already readonly, we still need to create a new
1430 * object for .release() to work correctly.
1431 */
1432 self = (PyMemoryViewObject *) mbuf_add_view(self->mbuf, &self->view);
1433 if (self != NULL) {
1434 self->view.readonly = 1;
1435 };
1436 return (PyObject *) self;
1437 }
1438
1439
1440 /**************************************************************************/
1441 /* getbuffer */
1442 /**************************************************************************/
1443
1444 static int
memory_getbuf(PyMemoryViewObject * self,Py_buffer * view,int flags)1445 memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
1446 {
1447 Py_buffer *base = &self->view;
1448 int baseflags = self->flags;
1449
1450 CHECK_RELEASED_INT(self);
1451
1452 /* start with complete information */
1453 *view = *base;
1454 view->obj = NULL;
1455
1456 if (REQ_WRITABLE(flags) && base->readonly) {
1457 PyErr_SetString(PyExc_BufferError,
1458 "memoryview: underlying buffer is not writable");
1459 return -1;
1460 }
1461 if (!REQ_FORMAT(flags)) {
1462 /* NULL indicates that the buffer's data type has been cast to 'B'.
1463 view->itemsize is the _previous_ itemsize. If shape is present,
1464 the equality product(shape) * itemsize = len still holds at this
1465 point. The equality calcsize(format) = itemsize does _not_ hold
1466 from here on! */
1467 view->format = NULL;
1468 }
1469
1470 if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
1471 PyErr_SetString(PyExc_BufferError,
1472 "memoryview: underlying buffer is not C-contiguous");
1473 return -1;
1474 }
1475 if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
1476 PyErr_SetString(PyExc_BufferError,
1477 "memoryview: underlying buffer is not Fortran contiguous");
1478 return -1;
1479 }
1480 if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
1481 PyErr_SetString(PyExc_BufferError,
1482 "memoryview: underlying buffer is not contiguous");
1483 return -1;
1484 }
1485 if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
1486 PyErr_SetString(PyExc_BufferError,
1487 "memoryview: underlying buffer requires suboffsets");
1488 return -1;
1489 }
1490 if (!REQ_STRIDES(flags)) {
1491 if (!MV_C_CONTIGUOUS(baseflags)) {
1492 PyErr_SetString(PyExc_BufferError,
1493 "memoryview: underlying buffer is not C-contiguous");
1494 return -1;
1495 }
1496 view->strides = NULL;
1497 }
1498 if (!REQ_SHAPE(flags)) {
1499 /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
1500 so base->buf = ndbuf->data. */
1501 if (view->format != NULL) {
1502 /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
1503 not make sense. */
1504 PyErr_Format(PyExc_BufferError,
1505 "memoryview: cannot cast to unsigned bytes if the format flag "
1506 "is present");
1507 return -1;
1508 }
1509 /* product(shape) * itemsize = len and calcsize(format) = itemsize
1510 do _not_ hold from here on! */
1511 view->ndim = 1;
1512 view->shape = NULL;
1513 }
1514
1515
1516 view->obj = (PyObject *)self;
1517 Py_INCREF(view->obj);
1518 self->exports++;
1519
1520 return 0;
1521 }
1522
1523 static void
memory_releasebuf(PyMemoryViewObject * self,Py_buffer * view)1524 memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
1525 {
1526 self->exports--;
1527 return;
1528 /* PyBuffer_Release() decrements view->obj after this function returns. */
1529 }
1530
1531 /* Buffer methods */
1532 static PyBufferProcs memory_as_buffer = {
1533 (getbufferproc)memory_getbuf, /* bf_getbuffer */
1534 (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
1535 };
1536
1537
1538 /****************************************************************************/
1539 /* Optimized pack/unpack for all native format specifiers */
1540 /****************************************************************************/
1541
1542 /*
1543 Fix exceptions:
1544 1) Include format string in the error message.
1545 2) OverflowError -> ValueError.
1546 3) The error message from PyNumber_Index() is not ideal.
1547 */
1548 static int
type_error_int(const char * fmt)1549 type_error_int(const char *fmt)
1550 {
1551 PyErr_Format(PyExc_TypeError,
1552 "memoryview: invalid type for format '%s'", fmt);
1553 return -1;
1554 }
1555
1556 static int
value_error_int(const char * fmt)1557 value_error_int(const char *fmt)
1558 {
1559 PyErr_Format(PyExc_ValueError,
1560 "memoryview: invalid value for format '%s'", fmt);
1561 return -1;
1562 }
1563
1564 static int
fix_error_int(const char * fmt)1565 fix_error_int(const char *fmt)
1566 {
1567 assert(PyErr_Occurred());
1568 if (PyErr_ExceptionMatches(PyExc_TypeError)) {
1569 PyErr_Clear();
1570 return type_error_int(fmt);
1571 }
1572 else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
1573 PyErr_ExceptionMatches(PyExc_ValueError)) {
1574 PyErr_Clear();
1575 return value_error_int(fmt);
1576 }
1577
1578 return -1;
1579 }
1580
1581 /* Accept integer objects or objects with an __index__() method. */
1582 static long
pylong_as_ld(PyObject * item)1583 pylong_as_ld(PyObject *item)
1584 {
1585 PyObject *tmp;
1586 long ld;
1587
1588 tmp = _PyNumber_Index(item);
1589 if (tmp == NULL)
1590 return -1;
1591
1592 ld = PyLong_AsLong(tmp);
1593 Py_DECREF(tmp);
1594 return ld;
1595 }
1596
1597 static unsigned long
pylong_as_lu(PyObject * item)1598 pylong_as_lu(PyObject *item)
1599 {
1600 PyObject *tmp;
1601 unsigned long lu;
1602
1603 tmp = _PyNumber_Index(item);
1604 if (tmp == NULL)
1605 return (unsigned long)-1;
1606
1607 lu = PyLong_AsUnsignedLong(tmp);
1608 Py_DECREF(tmp);
1609 return lu;
1610 }
1611
1612 static long long
pylong_as_lld(PyObject * item)1613 pylong_as_lld(PyObject *item)
1614 {
1615 PyObject *tmp;
1616 long long lld;
1617
1618 tmp = _PyNumber_Index(item);
1619 if (tmp == NULL)
1620 return -1;
1621
1622 lld = PyLong_AsLongLong(tmp);
1623 Py_DECREF(tmp);
1624 return lld;
1625 }
1626
1627 static unsigned long long
pylong_as_llu(PyObject * item)1628 pylong_as_llu(PyObject *item)
1629 {
1630 PyObject *tmp;
1631 unsigned long long llu;
1632
1633 tmp = _PyNumber_Index(item);
1634 if (tmp == NULL)
1635 return (unsigned long long)-1;
1636
1637 llu = PyLong_AsUnsignedLongLong(tmp);
1638 Py_DECREF(tmp);
1639 return llu;
1640 }
1641
1642 static Py_ssize_t
pylong_as_zd(PyObject * item)1643 pylong_as_zd(PyObject *item)
1644 {
1645 PyObject *tmp;
1646 Py_ssize_t zd;
1647
1648 tmp = _PyNumber_Index(item);
1649 if (tmp == NULL)
1650 return -1;
1651
1652 zd = PyLong_AsSsize_t(tmp);
1653 Py_DECREF(tmp);
1654 return zd;
1655 }
1656
1657 static size_t
pylong_as_zu(PyObject * item)1658 pylong_as_zu(PyObject *item)
1659 {
1660 PyObject *tmp;
1661 size_t zu;
1662
1663 tmp = _PyNumber_Index(item);
1664 if (tmp == NULL)
1665 return (size_t)-1;
1666
1667 zu = PyLong_AsSize_t(tmp);
1668 Py_DECREF(tmp);
1669 return zu;
1670 }
1671
1672 /* Timings with the ndarray from _testbuffer.c indicate that using the
1673 struct module is around 15x slower than the two functions below. */
1674
1675 #define UNPACK_SINGLE(dest, ptr, type) \
1676 do { \
1677 type x; \
1678 memcpy((char *)&x, ptr, sizeof x); \
1679 dest = x; \
1680 } while (0)
1681
1682 /* Unpack a single item. 'fmt' can be any native format character in struct
1683 module syntax. This function is very sensitive to small changes. With this
1684 layout gcc automatically generates a fast jump table. */
1685 static inline PyObject *
unpack_single(PyMemoryViewObject * self,const char * ptr,const char * fmt)1686 unpack_single(PyMemoryViewObject *self, const char *ptr, const char *fmt)
1687 {
1688 unsigned long long llu;
1689 unsigned long lu;
1690 size_t zu;
1691 long long lld;
1692 long ld;
1693 Py_ssize_t zd;
1694 double d;
1695 unsigned char uc;
1696 void *p;
1697
1698 CHECK_RELEASED_AGAIN(self);
1699
1700 switch (fmt[0]) {
1701
1702 /* signed integers and fast path for 'B' */
1703 case 'B': uc = *((const unsigned char *)ptr); goto convert_uc;
1704 case 'b': ld = *((const signed char *)ptr); goto convert_ld;
1705 case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
1706 case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
1707 case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
1708
1709 /* boolean */
1710 case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
1711
1712 /* unsigned integers */
1713 case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
1714 case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
1715 case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
1716
1717 /* native 64-bit */
1718 case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
1719 case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;
1720
1721 /* ssize_t and size_t */
1722 case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
1723 case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
1724
1725 /* floats */
1726 case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
1727 case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
1728
1729 /* bytes object */
1730 case 'c': goto convert_bytes;
1731
1732 /* pointer */
1733 case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
1734
1735 /* default */
1736 default: goto err_format;
1737 }
1738
1739 convert_uc:
1740 /* PyLong_FromUnsignedLong() is slower */
1741 return PyLong_FromLong(uc);
1742 convert_ld:
1743 return PyLong_FromLong(ld);
1744 convert_lu:
1745 return PyLong_FromUnsignedLong(lu);
1746 convert_lld:
1747 return PyLong_FromLongLong(lld);
1748 convert_llu:
1749 return PyLong_FromUnsignedLongLong(llu);
1750 convert_zd:
1751 return PyLong_FromSsize_t(zd);
1752 convert_zu:
1753 return PyLong_FromSize_t(zu);
1754 convert_double:
1755 return PyFloat_FromDouble(d);
1756 convert_bool:
1757 return PyBool_FromLong(ld);
1758 convert_bytes:
1759 return PyBytes_FromStringAndSize(ptr, 1);
1760 convert_pointer:
1761 return PyLong_FromVoidPtr(p);
1762 err_format:
1763 PyErr_Format(PyExc_NotImplementedError,
1764 "memoryview: format %s not supported", fmt);
1765 return NULL;
1766 }
1767
1768 #define PACK_SINGLE(ptr, src, type) \
1769 do { \
1770 type x; \
1771 x = (type)src; \
1772 memcpy(ptr, (char *)&x, sizeof x); \
1773 } while (0)
1774
1775 /* Pack a single item. 'fmt' can be any native format character in
1776 struct module syntax. */
1777 static int
pack_single(PyMemoryViewObject * self,char * ptr,PyObject * item,const char * fmt)1778 pack_single(PyMemoryViewObject *self, char *ptr, PyObject *item, const char *fmt)
1779 {
1780 unsigned long long llu;
1781 unsigned long lu;
1782 size_t zu;
1783 long long lld;
1784 long ld;
1785 Py_ssize_t zd;
1786 double d;
1787 void *p;
1788
1789 switch (fmt[0]) {
1790 /* signed integers */
1791 case 'b': case 'h': case 'i': case 'l':
1792 ld = pylong_as_ld(item);
1793 if (ld == -1 && PyErr_Occurred())
1794 goto err_occurred;
1795 CHECK_RELEASED_INT_AGAIN(self);
1796 switch (fmt[0]) {
1797 case 'b':
1798 if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
1799 *((signed char *)ptr) = (signed char)ld; break;
1800 case 'h':
1801 if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
1802 PACK_SINGLE(ptr, ld, short); break;
1803 case 'i':
1804 if (ld < INT_MIN || ld > INT_MAX) goto err_range;
1805 PACK_SINGLE(ptr, ld, int); break;
1806 default: /* 'l' */
1807 PACK_SINGLE(ptr, ld, long); break;
1808 }
1809 break;
1810
1811 /* unsigned integers */
1812 case 'B': case 'H': case 'I': case 'L':
1813 lu = pylong_as_lu(item);
1814 if (lu == (unsigned long)-1 && PyErr_Occurred())
1815 goto err_occurred;
1816 CHECK_RELEASED_INT_AGAIN(self);
1817 switch (fmt[0]) {
1818 case 'B':
1819 if (lu > UCHAR_MAX) goto err_range;
1820 *((unsigned char *)ptr) = (unsigned char)lu; break;
1821 case 'H':
1822 if (lu > USHRT_MAX) goto err_range;
1823 PACK_SINGLE(ptr, lu, unsigned short); break;
1824 case 'I':
1825 if (lu > UINT_MAX) goto err_range;
1826 PACK_SINGLE(ptr, lu, unsigned int); break;
1827 default: /* 'L' */
1828 PACK_SINGLE(ptr, lu, unsigned long); break;
1829 }
1830 break;
1831
1832 /* native 64-bit */
1833 case 'q':
1834 lld = pylong_as_lld(item);
1835 if (lld == -1 && PyErr_Occurred())
1836 goto err_occurred;
1837 CHECK_RELEASED_INT_AGAIN(self);
1838 PACK_SINGLE(ptr, lld, long long);
1839 break;
1840 case 'Q':
1841 llu = pylong_as_llu(item);
1842 if (llu == (unsigned long long)-1 && PyErr_Occurred())
1843 goto err_occurred;
1844 CHECK_RELEASED_INT_AGAIN(self);
1845 PACK_SINGLE(ptr, llu, unsigned long long);
1846 break;
1847
1848 /* ssize_t and size_t */
1849 case 'n':
1850 zd = pylong_as_zd(item);
1851 if (zd == -1 && PyErr_Occurred())
1852 goto err_occurred;
1853 CHECK_RELEASED_INT_AGAIN(self);
1854 PACK_SINGLE(ptr, zd, Py_ssize_t);
1855 break;
1856 case 'N':
1857 zu = pylong_as_zu(item);
1858 if (zu == (size_t)-1 && PyErr_Occurred())
1859 goto err_occurred;
1860 CHECK_RELEASED_INT_AGAIN(self);
1861 PACK_SINGLE(ptr, zu, size_t);
1862 break;
1863
1864 /* floats */
1865 case 'f': case 'd':
1866 d = PyFloat_AsDouble(item);
1867 if (d == -1.0 && PyErr_Occurred())
1868 goto err_occurred;
1869 CHECK_RELEASED_INT_AGAIN(self);
1870 if (fmt[0] == 'f') {
1871 PACK_SINGLE(ptr, d, float);
1872 }
1873 else {
1874 PACK_SINGLE(ptr, d, double);
1875 }
1876 break;
1877
1878 /* bool */
1879 case '?':
1880 ld = PyObject_IsTrue(item);
1881 if (ld < 0)
1882 return -1; /* preserve original error */
1883 CHECK_RELEASED_INT_AGAIN(self);
1884 PACK_SINGLE(ptr, ld, _Bool);
1885 break;
1886
1887 /* bytes object */
1888 case 'c':
1889 if (!PyBytes_Check(item))
1890 return type_error_int(fmt);
1891 if (PyBytes_GET_SIZE(item) != 1)
1892 return value_error_int(fmt);
1893 *ptr = PyBytes_AS_STRING(item)[0];
1894 break;
1895
1896 /* pointer */
1897 case 'P':
1898 p = PyLong_AsVoidPtr(item);
1899 if (p == NULL && PyErr_Occurred())
1900 goto err_occurred;
1901 CHECK_RELEASED_INT_AGAIN(self);
1902 PACK_SINGLE(ptr, p, void *);
1903 break;
1904
1905 /* default */
1906 default: goto err_format;
1907 }
1908
1909 return 0;
1910
1911 err_occurred:
1912 return fix_error_int(fmt);
1913 err_range:
1914 return value_error_int(fmt);
1915 err_format:
1916 PyErr_Format(PyExc_NotImplementedError,
1917 "memoryview: format %s not supported", fmt);
1918 return -1;
1919 }
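
/* Sketch of pack_single() as seen from Python: assigning to an element of a
   writable view packs the value into the buffer, with range checks for the
   sub-long native integer formats (the exact messages come from the
   value_error_int()/type_error_int() helpers).

     >>> import array
     >>> a = array.array('h', [1, 2, 3])
     >>> m = memoryview(a)
     >>> m[0] = 9
     >>> a[0]
     9
     >>> m[1] = 1 << 20
     Traceback (most recent call last):
       ...
     ValueError: memoryview: invalid value for format 'h'
*/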
1920
1921
1922 /****************************************************************************/
1923 /* unpack using the struct module */
1924 /****************************************************************************/
1925
1926 /* For reasonable performance it is necessary to cache all objects required
1927 for unpacking. An unpacker can handle the format passed to unpack_from().
1928 Invariant: All pointer fields of the struct should either be NULL or valid
1929 pointers. */
1930 struct unpacker {
1931 PyObject *unpack_from; /* Struct.unpack_from(format) */
1932 PyObject *mview; /* cached memoryview */
1933 char *item; /* buffer for mview */
1934 Py_ssize_t itemsize; /* len(item) */
1935 };
1936
1937 static struct unpacker *
1938 unpacker_new(void)
1939 {
1940 struct unpacker *x = PyMem_Malloc(sizeof *x);
1941
1942 if (x == NULL) {
1943 PyErr_NoMemory();
1944 return NULL;
1945 }
1946
1947 x->unpack_from = NULL;
1948 x->mview = NULL;
1949 x->item = NULL;
1950 x->itemsize = 0;
1951
1952 return x;
1953 }
1954
1955 static void
1956 unpacker_free(struct unpacker *x)
1957 {
1958 if (x) {
1959 Py_XDECREF(x->unpack_from);
1960 Py_XDECREF(x->mview);
1961 PyMem_Free(x->item);
1962 PyMem_Free(x);
1963 }
1964 }
1965
1966 /* Return a new unpacker for the given format. */
1967 static struct unpacker *
1968 struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
1969 {
1970 PyObject *structmodule; /* XXX cache these two */
1971 PyObject *Struct = NULL; /* XXX in globals? */
1972 PyObject *structobj = NULL;
1973 PyObject *format = NULL;
1974 struct unpacker *x = NULL;
1975
1976 structmodule = PyImport_ImportModule("struct");
1977 if (structmodule == NULL)
1978 return NULL;
1979
1980 Struct = PyObject_GetAttrString(structmodule, "Struct");
1981 Py_DECREF(structmodule);
1982 if (Struct == NULL)
1983 return NULL;
1984
1985 x = unpacker_new();
1986 if (x == NULL)
1987 goto error;
1988
1989 format = PyBytes_FromString(fmt);
1990 if (format == NULL)
1991 goto error;
1992
1993 structobj = PyObject_CallOneArg(Struct, format);
1994 if (structobj == NULL)
1995 goto error;
1996
1997 x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
1998 if (x->unpack_from == NULL)
1999 goto error;
2000
2001 x->item = PyMem_Malloc(itemsize);
2002 if (x->item == NULL) {
2003 PyErr_NoMemory();
2004 goto error;
2005 }
2006 x->itemsize = itemsize;
2007
2008 x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
2009 if (x->mview == NULL)
2010 goto error;
2011
2012
2013 out:
2014 Py_XDECREF(Struct);
2015 Py_XDECREF(format);
2016 Py_XDECREF(structobj);
2017 return x;
2018
2019 error:
2020 unpacker_free(x);
2021 x = NULL;
2022 goto out;
2023 }
2024
2025 /* unpack a single item */
2026 static PyObject *
2027 struct_unpack_single(const char *ptr, struct unpacker *x)
2028 {
2029 PyObject *v;
2030
2031 memcpy(x->item, ptr, x->itemsize);
2032 v = PyObject_CallOneArg(x->unpack_from, x->mview);
2033 if (v == NULL)
2034 return NULL;
2035
2036 if (PyTuple_GET_SIZE(v) == 1) {
2037 PyObject *tmp = PyTuple_GET_ITEM(v, 0);
2038 Py_INCREF(tmp);
2039 Py_DECREF(v);
2040 return tmp;
2041 }
2042
2043 return v;
2044 }
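
/* Roughly, struct_unpack_single() does at the C level what this Python
   sketch does with the struct module, keeping element [0] of the 1-tuple
   that unpack_from() returns:

     >>> import struct
     >>> s = struct.Struct('b')
     >>> s.unpack_from(b'\x2a')
     (42,)
     >>> s.unpack_from(b'\x2a')[0]
     42
*/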
2045
2046
2047 /****************************************************************************/
2048 /* Representations */
2049 /****************************************************************************/
2050
2051 /* allow explicit form of native format */
2052 static inline const char *
2053 adjust_fmt(const Py_buffer *view)
2054 {
2055 const char *fmt;
2056
2057 fmt = (view->format[0] == '@') ? view->format+1 : view->format;
2058 if (fmt[0] && fmt[1] == '\0')
2059 return fmt;
2060
2061 PyErr_Format(PyExc_NotImplementedError,
2062 "memoryview: unsupported format %s", view->format);
2063 return NULL;
2064 }
2065
2066 /* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
2067 static PyObject *
2068 tolist_base(PyMemoryViewObject *self, const char *ptr, const Py_ssize_t *shape,
2069 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2070 const char *fmt)
2071 {
2072 PyObject *lst, *item;
2073 Py_ssize_t i;
2074
2075 lst = PyList_New(shape[0]);
2076 if (lst == NULL)
2077 return NULL;
2078
2079 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2080 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2081 item = unpack_single(self, xptr, fmt);
2082 if (item == NULL) {
2083 Py_DECREF(lst);
2084 return NULL;
2085 }
2086 PyList_SET_ITEM(lst, i, item);
2087 }
2088
2089 return lst;
2090 }
2091
2092 /* Unpack a multi-dimensional array into a nested list.
2093 Assumption: ndim >= 1. */
2094 static PyObject *
2095 tolist_rec(PyMemoryViewObject *self, const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
2096 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2097 const char *fmt)
2098 {
2099 PyObject *lst, *item;
2100 Py_ssize_t i;
2101
2102 assert(ndim >= 1);
2103 assert(shape != NULL);
2104 assert(strides != NULL);
2105
2106 if (ndim == 1)
2107 return tolist_base(self, ptr, shape, strides, suboffsets, fmt);
2108
2109 lst = PyList_New(shape[0]);
2110 if (lst == NULL)
2111 return NULL;
2112
2113 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2114 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2115 item = tolist_rec(self, xptr, ndim-1, shape+1,
2116 strides+1, suboffsets ? suboffsets+1 : NULL,
2117 fmt);
2118 if (item == NULL) {
2119 Py_DECREF(lst);
2120 return NULL;
2121 }
2122 PyList_SET_ITEM(lst, i, item);
2123 }
2124
2125 return lst;
2126 }
2127
2128 /* Return a list representation of the memoryview. Currently only buffers
2129 with native format strings are supported. */
2130 /*[clinic input]
2131 memoryview.tolist
2132
2133 Return the data in the buffer as a list of elements.
2134 [clinic start generated code]*/
2135
2136 static PyObject *
2137 memoryview_tolist_impl(PyMemoryViewObject *self)
2138 /*[clinic end generated code: output=a6cda89214fd5a1b input=21e7d0c1860b211a]*/
2139 {
2140 const Py_buffer *view = &self->view;
2141 const char *fmt;
2142
2143 CHECK_RELEASED(self);
2144
2145 fmt = adjust_fmt(view);
2146 if (fmt == NULL)
2147 return NULL;
2148 if (view->ndim == 0) {
2149 return unpack_single(self, view->buf, fmt);
2150 }
2151 else if (view->ndim == 1) {
2152 return tolist_base(self, view->buf, view->shape,
2153 view->strides, view->suboffsets,
2154 fmt);
2155 }
2156 else {
2157 return tolist_rec(self, view->buf, view->ndim, view->shape,
2158 view->strides, view->suboffsets,
2159 fmt);
2160 }
2161 }
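
/* Examples of the resulting behaviour for one- and multi-dimensional views:

     >>> memoryview(b'abc').tolist()
     [97, 98, 99]
     >>> m = memoryview(bytearray(range(6))).cast('B', shape=[2, 3])
     >>> m.tolist()
     [[0, 1, 2], [3, 4, 5]]
*/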
2162
2163 /*[clinic input]
2164 memoryview.tobytes
2165
2166 order: str(accept={str, NoneType}, c_default="NULL") = 'C'
2167
2168 Return the data in the buffer as a byte string.
2169
2170 Order can be {'C', 'F', 'A'}. When order is 'C' or 'F', the data of the
2171 original array is converted to C or Fortran order. For contiguous views,
2172 'A' returns an exact copy of the physical memory. In particular, in-memory
2173 Fortran order is preserved. For non-contiguous views, the data is converted
2174 to C first. order=None is the same as order='C'.
2175 [clinic start generated code]*/
2176
2177 static PyObject *
2178 memoryview_tobytes_impl(PyMemoryViewObject *self, const char *order)
2179 /*[clinic end generated code: output=1288b62560a32a23 input=0efa3ddaeda573a8]*/
2180 {
2181 Py_buffer *src = VIEW_ADDR(self);
2182 char ord = 'C';
2183 PyObject *bytes;
2184
2185 CHECK_RELEASED(self);
2186
2187 if (order) {
2188 if (strcmp(order, "F") == 0) {
2189 ord = 'F';
2190 }
2191 else if (strcmp(order, "A") == 0) {
2192 ord = 'A';
2193 }
2194 else if (strcmp(order, "C") != 0) {
2195 PyErr_SetString(PyExc_ValueError,
2196 "order must be 'C', 'F' or 'A'");
2197 return NULL;
2198 }
2199 }
2200
2201 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2202 if (bytes == NULL)
2203 return NULL;
2204
2205 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, ord) < 0) {
2206 Py_DECREF(bytes);
2207 return NULL;
2208 }
2209
2210 return bytes;
2211 }
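
/* Example: for a C-contiguous 2x2 view, the default order preserves the
   physical layout while order='F' produces the column-major copy.

     >>> m = memoryview(bytearray(range(4))).cast('B', shape=[2, 2])
     >>> m.tobytes()
     b'\x00\x01\x02\x03'
     >>> m.tobytes(order='F')
     b'\x00\x02\x01\x03'
*/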
2212
2213 /*[clinic input]
2214 memoryview.hex
2215
2216 sep: object = NULL
2217 An optional single character or byte to separate hex bytes.
2218 bytes_per_sep: int = 1
2219 How many bytes between separators. Positive values count from the
2220 right, negative values count from the left.
2221
2222 Return the data in the buffer as a str of hexadecimal numbers.
2223
2224 Example:
2225 >>> value = memoryview(b'\xb9\x01\xef')
2226 >>> value.hex()
2227 'b901ef'
2228 >>> value.hex(':')
2229 'b9:01:ef'
2230 >>> value.hex(':', 2)
2231 'b9:01ef'
2232 >>> value.hex(':', -2)
2233 'b901:ef'
2234 [clinic start generated code]*/
2235
2236 static PyObject *
2237 memoryview_hex_impl(PyMemoryViewObject *self, PyObject *sep,
2238 int bytes_per_sep)
2239 /*[clinic end generated code: output=430ca760f94f3ca7 input=539f6a3a5fb56946]*/
2240 {
2241 Py_buffer *src = VIEW_ADDR(self);
2242 PyObject *bytes;
2243 PyObject *ret;
2244
2245 CHECK_RELEASED(self);
2246
2247 if (MV_C_CONTIGUOUS(self->flags)) {
2248 return _Py_strhex_with_sep(src->buf, src->len, sep, bytes_per_sep);
2249 }
2250
2251 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2252 if (bytes == NULL)
2253 return NULL;
2254
2255 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, 'C') < 0) {
2256 Py_DECREF(bytes);
2257 return NULL;
2258 }
2259
2260 ret = _Py_strhex_with_sep(
2261 PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes),
2262 sep, bytes_per_sep);
2263 Py_DECREF(bytes);
2264
2265 return ret;
2266 }
2267
2268 static PyObject *
2269 memory_repr(PyMemoryViewObject *self)
2270 {
2271 if (self->flags & _Py_MEMORYVIEW_RELEASED)
2272 return PyUnicode_FromFormat("<released memory at %p>", self);
2273 else
2274 return PyUnicode_FromFormat("<memory at %p>", self);
2275 }
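
/* The repr only exposes identity and the released state (addresses vary):

     >>> m = memoryview(b'abc')
     >>> m
     <memory at 0x...>
     >>> m.release()
     >>> m
     <released memory at 0x...>
*/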
2276
2277
2278 /**************************************************************************/
2279 /* Indexing and slicing */
2280 /**************************************************************************/
2281
2282 static char *
2283 lookup_dimension(const Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
2284 {
2285 Py_ssize_t nitems; /* items in the given dimension */
2286
2287 assert(view->shape);
2288 assert(view->strides);
2289
2290 nitems = view->shape[dim];
2291 if (index < 0) {
2292 index += nitems;
2293 }
2294 if (index < 0 || index >= nitems) {
2295 PyErr_Format(PyExc_IndexError,
2296 "index out of bounds on dimension %d", dim + 1);
2297 return NULL;
2298 }
2299
2300 ptr += view->strides[dim] * index;
2301
2302 ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
2303
2304 return ptr;
2305 }
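
/* Negative indices are wrapped before the bounds check, and the dimension
   reported in the error message is 1-based:

     >>> m = memoryview(b'abc')
     >>> m[-1]
     99
     >>> m[3]
     Traceback (most recent call last):
       ...
     IndexError: index out of bounds on dimension 1
*/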
2306
2307 /* Get the pointer to the item at index. */
2308 static char *
2309 ptr_from_index(const Py_buffer *view, Py_ssize_t index)
2310 {
2311 char *ptr = (char *)view->buf;
2312 return lookup_dimension(view, ptr, 0, index);
2313 }
2314
2315 /* Get the pointer to the item at tuple. */
2316 static char *
2317 ptr_from_tuple(const Py_buffer *view, PyObject *tup)
2318 {
2319 char *ptr = (char *)view->buf;
2320 Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
2321
2322 if (nindices > view->ndim) {
2323 PyErr_Format(PyExc_TypeError,
2324 "cannot index %zd-dimension view with %zd-element tuple",
2325 view->ndim, nindices);
2326 return NULL;
2327 }
2328
2329 for (dim = 0; dim < nindices; dim++) {
2330 Py_ssize_t index;
2331 index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
2332 PyExc_IndexError);
2333 if (index == -1 && PyErr_Occurred())
2334 return NULL;
2335 ptr = lookup_dimension(view, ptr, (int)dim, index);
2336 if (ptr == NULL)
2337 return NULL;
2338 }
2339 return ptr;
2340 }
2341
2342 /* Return the item at index. In a one-dimensional view, this is an object
2343 with the type specified by view->format. Otherwise, the item is a sub-view.
2344 The function is used in memory_subscript() and memory_as_sequence. */
2345 static PyObject *
2346 memory_item(PyMemoryViewObject *self, Py_ssize_t index)
2347 {
2348 Py_buffer *view = &(self->view);
2349 const char *fmt;
2350
2351 CHECK_RELEASED(self);
2352
2353 fmt = adjust_fmt(view);
2354 if (fmt == NULL)
2355 return NULL;
2356
2357 if (view->ndim == 0) {
2358 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
2359 return NULL;
2360 }
2361 if (view->ndim == 1) {
2362 char *ptr = ptr_from_index(view, index);
2363 if (ptr == NULL)
2364 return NULL;
2365 return unpack_single(self, ptr, fmt);
2366 }
2367
2368 PyErr_SetString(PyExc_NotImplementedError,
2369 "multi-dimensional sub-views are not implemented");
2370 return NULL;
2371 }
2372
2373 /* Return the item at position *key* (a tuple of indices). */
2374 static PyObject *
2375 memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
2376 {
2377 Py_buffer *view = &(self->view);
2378 const char *fmt;
2379 Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
2380 char *ptr;
2381
2382 CHECK_RELEASED(self);
2383
2384 fmt = adjust_fmt(view);
2385 if (fmt == NULL)
2386 return NULL;
2387
2388 if (nindices < view->ndim) {
2389 PyErr_SetString(PyExc_NotImplementedError,
2390 "sub-views are not implemented");
2391 return NULL;
2392 }
2393 ptr = ptr_from_tuple(view, tup);
2394 if (ptr == NULL)
2395 return NULL;
2396 return unpack_single(self, ptr, fmt);
2397 }
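
/* A multi-dimensional view must be indexed with a full tuple of indices;
   partial indexing would require sub-views, which are not implemented:

     >>> m = memoryview(bytearray(range(6))).cast('B', shape=[2, 3])
     >>> m[1, 2]
     5
     >>> m[1]
     Traceback (most recent call last):
       ...
     NotImplementedError: multi-dimensional sub-views are not implemented
*/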
2398
2399 static inline int
2400 init_slice(Py_buffer *base, PyObject *key, int dim)
2401 {
2402 Py_ssize_t start, stop, step, slicelength;
2403
2404 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
2405 return -1;
2406 }
2407 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
2408
2409
2410 if (base->suboffsets == NULL || dim == 0) {
2411 adjust_buf:
2412 base->buf = (char *)base->buf + base->strides[dim] * start;
2413 }
2414 else {
2415 Py_ssize_t n = dim-1;
2416 while (n >= 0 && base->suboffsets[n] < 0)
2417 n--;
2418 if (n < 0)
2419 goto adjust_buf; /* all suboffsets are negative */
2420 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
2421 }
2422 base->shape[dim] = slicelength;
2423 base->strides[dim] = base->strides[dim] * step;
2424
2425 return 0;
2426 }
2427
2428 static int
2429 is_multislice(PyObject *key)
2430 {
2431 Py_ssize_t size, i;
2432
2433 if (!PyTuple_Check(key))
2434 return 0;
2435 size = PyTuple_GET_SIZE(key);
2436 if (size == 0)
2437 return 0;
2438
2439 for (i = 0; i < size; i++) {
2440 PyObject *x = PyTuple_GET_ITEM(key, i);
2441 if (!PySlice_Check(x))
2442 return 0;
2443 }
2444 return 1;
2445 }
2446
2447 static Py_ssize_t
2448 is_multiindex(PyObject *key)
2449 {
2450 Py_ssize_t size, i;
2451
2452 if (!PyTuple_Check(key))
2453 return 0;
2454 size = PyTuple_GET_SIZE(key);
2455 for (i = 0; i < size; i++) {
2456 PyObject *x = PyTuple_GET_ITEM(key, i);
2457 if (!_PyIndex_Check(x)) {
2458 return 0;
2459 }
2460 }
2461 return 1;
2462 }
2463
2464 /* mv[obj] returns an object holding the data for one element if obj
2465 fully indexes the memoryview or another memoryview object if it
2466 does not.
2467
2468 0-d memoryview objects can be referenced using mv[...] or mv[()]
2469 but not with anything else. */
2470 static PyObject *
2471 memory_subscript(PyMemoryViewObject *self, PyObject *key)
2472 {
2473 Py_buffer *view;
2474 view = &(self->view);
2475
2476 CHECK_RELEASED(self);
2477
2478 if (view->ndim == 0) {
2479 if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
2480 const char *fmt = adjust_fmt(view);
2481 if (fmt == NULL)
2482 return NULL;
2483 return unpack_single(self, view->buf, fmt);
2484 }
2485 else if (key == Py_Ellipsis) {
2486 Py_INCREF(self);
2487 return (PyObject *)self;
2488 }
2489 else {
2490 PyErr_SetString(PyExc_TypeError,
2491 "invalid indexing of 0-dim memory");
2492 return NULL;
2493 }
2494 }
2495
2496 if (_PyIndex_Check(key)) {
2497 Py_ssize_t index;
2498 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2499 if (index == -1 && PyErr_Occurred())
2500 return NULL;
2501 return memory_item(self, index);
2502 }
2503 else if (PySlice_Check(key)) {
2504 PyMemoryViewObject *sliced;
2505
2506 sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
2507 if (sliced == NULL)
2508 return NULL;
2509
2510 if (init_slice(&sliced->view, key, 0) < 0) {
2511 Py_DECREF(sliced);
2512 return NULL;
2513 }
2514 init_len(&sliced->view);
2515 init_flags(sliced);
2516
2517 return (PyObject *)sliced;
2518 }
2519 else if (is_multiindex(key)) {
2520 return memory_item_multi(self, key);
2521 }
2522 else if (is_multislice(key)) {
2523 PyErr_SetString(PyExc_NotImplementedError,
2524 "multi-dimensional slicing is not implemented");
2525 return NULL;
2526 }
2527
2528 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2529 return NULL;
2530 }
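
/* Slicing returns a new view onto the same exporter, so writes through the
   slice are visible in the original buffer:

     >>> m = memoryview(bytearray(b'abcdef'))
     >>> s = m[1:5:2]
     >>> s.tolist()
     [98, 100]
     >>> s[0] = 0
     >>> bytes(m)
     b'a\x00cdef'
*/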
2531
2532 static int
2533 memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
2534 {
2535 Py_buffer *view = &(self->view);
2536 Py_buffer src;
2537 const char *fmt;
2538 char *ptr;
2539
2540 CHECK_RELEASED_INT(self);
2541
2542 fmt = adjust_fmt(view);
2543 if (fmt == NULL)
2544 return -1;
2545
2546 if (view->readonly) {
2547 PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
2548 return -1;
2549 }
2550 if (value == NULL) {
2551 PyErr_SetString(PyExc_TypeError, "cannot delete memory");
2552 return -1;
2553 }
2554 if (view->ndim == 0) {
2555 if (key == Py_Ellipsis ||
2556 (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
2557 ptr = (char *)view->buf;
2558 return pack_single(self, ptr, value, fmt);
2559 }
2560 else {
2561 PyErr_SetString(PyExc_TypeError,
2562 "invalid indexing of 0-dim memory");
2563 return -1;
2564 }
2565 }
2566
2567 if (_PyIndex_Check(key)) {
2568 Py_ssize_t index;
2569 if (1 < view->ndim) {
2570 PyErr_SetString(PyExc_NotImplementedError,
2571 "sub-views are not implemented");
2572 return -1;
2573 }
2574 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2575 if (index == -1 && PyErr_Occurred())
2576 return -1;
2577 ptr = ptr_from_index(view, index);
2578 if (ptr == NULL)
2579 return -1;
2580 return pack_single(self, ptr, value, fmt);
2581 }
2582 /* one-dimensional: fast path */
2583 if (PySlice_Check(key) && view->ndim == 1) {
2584 Py_buffer dest; /* sliced view */
2585 Py_ssize_t arrays[3];
2586 int ret = -1;
2587
2588 /* rvalue must be an exporter */
2589 if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
2590 return ret;
2591
2592 dest = *view;
2593 dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
2594 dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
2595 if (view->suboffsets) {
2596 dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
2597 }
2598
2599 if (init_slice(&dest, key, 0) < 0)
2600 goto end_block;
2601 dest.len = dest.shape[0] * dest.itemsize;
2602
2603 ret = copy_single(self, &dest, &src);
2604
2605 end_block:
2606 PyBuffer_Release(&src);
2607 return ret;
2608 }
2609 if (is_multiindex(key)) {
2610 char *ptr;
2611 if (PyTuple_GET_SIZE(key) < view->ndim) {
2612 PyErr_SetString(PyExc_NotImplementedError,
2613 "sub-views are not implemented");
2614 return -1;
2615 }
2616 ptr = ptr_from_tuple(view, key);
2617 if (ptr == NULL)
2618 return -1;
2619 return pack_single(self, ptr, value, fmt);
2620 }
2621 if (PySlice_Check(key) || is_multislice(key)) {
2622 /* Call memory_subscript() to produce a sliced lvalue, then copy
2623 rvalue into lvalue. This is already implemented in _testbuffer.c. */
2624 PyErr_SetString(PyExc_NotImplementedError,
2625 "memoryview slice assignments are currently restricted "
2626 "to ndim = 1");
2627 return -1;
2628 }
2629
2630 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2631 return -1;
2632 }
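
/* Examples of the assignment paths above: item and one-dimensional slice
   assignment work, while read-only buffers are rejected:

     >>> m = memoryview(bytearray(b'abcdef'))
     >>> m[1:4] = b'XYZ'
     >>> bytes(m)
     b'aXYZef'
     >>> memoryview(b'abc')[0] = 120
     Traceback (most recent call last):
       ...
     TypeError: cannot modify read-only memory
*/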
2633
2634 static Py_ssize_t
2635 memory_length(PyMemoryViewObject *self)
2636 {
2637 CHECK_RELEASED_INT(self);
2638 return self->view.ndim == 0 ? 1 : self->view.shape[0];
2639 }
2640
2641 /* As mapping */
2642 static PyMappingMethods memory_as_mapping = {
2643 (lenfunc)memory_length, /* mp_length */
2644 (binaryfunc)memory_subscript, /* mp_subscript */
2645 (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
2646 };
2647
2648 /* As sequence */
2649 static PySequenceMethods memory_as_sequence = {
2650 (lenfunc)memory_length, /* sq_length */
2651 0, /* sq_concat */
2652 0, /* sq_repeat */
2653 (ssizeargfunc)memory_item, /* sq_item */
2654 };
2655
2656
2657 /**************************************************************************/
2658 /* Comparisons */
2659 /**************************************************************************/
2660
2661 #define MV_COMPARE_EX -1 /* exception */
2662 #define MV_COMPARE_NOT_IMPL -2 /* not implemented */
2663
2664 /* Translate a StructError to "not equal". Preserve other exceptions. */
2665 static int
2666 fix_struct_error_int(void)
2667 {
2668 assert(PyErr_Occurred());
2669 /* XXX Cannot get at StructError directly? */
2670 if (PyErr_ExceptionMatches(PyExc_ImportError) ||
2671 PyErr_ExceptionMatches(PyExc_MemoryError)) {
2672 return MV_COMPARE_EX;
2673 }
2674 /* StructError: invalid or unknown format -> not equal */
2675 PyErr_Clear();
2676 return 0;
2677 }
2678
2679 /* Unpack and compare single items of p and q using the struct module. */
2680 static int
2681 struct_unpack_cmp(const char *p, const char *q,
2682 struct unpacker *unpack_p, struct unpacker *unpack_q)
2683 {
2684 PyObject *v, *w;
2685 int ret;
2686
2687 /* At this point any exception from the struct module should not be
2688 StructError, since both formats have been accepted already. */
2689 v = struct_unpack_single(p, unpack_p);
2690 if (v == NULL)
2691 return MV_COMPARE_EX;
2692
2693 w = struct_unpack_single(q, unpack_q);
2694 if (w == NULL) {
2695 Py_DECREF(v);
2696 return MV_COMPARE_EX;
2697 }
2698
2699 /* MV_COMPARE_EX == -1: exceptions are preserved */
2700 ret = PyObject_RichCompareBool(v, w, Py_EQ);
2701 Py_DECREF(v);
2702 Py_DECREF(w);
2703
2704 return ret;
2705 }
2706
2707 /* Unpack and compare single items of p and q. If both p and q have the same
2708 single element native format, the comparison uses a fast path (gcc creates
2709 a jump table and converts memcpy into simple assignments on x86/x64).
2710
2711 Otherwise, the comparison is delegated to the struct module, which is
2712 30-60x slower. */
2713 #define CMP_SINGLE(p, q, type) \
2714 do { \
2715 type x; \
2716 type y; \
2717 memcpy((char *)&x, p, sizeof x); \
2718 memcpy((char *)&y, q, sizeof y); \
2719 equal = (x == y); \
2720 } while (0)
2721
2722 static inline int
2723 unpack_cmp(const char *p, const char *q, char fmt,
2724 struct unpacker *unpack_p, struct unpacker *unpack_q)
2725 {
2726 int equal;
2727
2728 switch (fmt) {
2729
2730 /* signed integers and fast path for 'B' */
2731 case 'B': return *((const unsigned char *)p) == *((const unsigned char *)q);
2732 case 'b': return *((const signed char *)p) == *((const signed char *)q);
2733 case 'h': CMP_SINGLE(p, q, short); return equal;
2734 case 'i': CMP_SINGLE(p, q, int); return equal;
2735 case 'l': CMP_SINGLE(p, q, long); return equal;
2736
2737 /* boolean */
2738 case '?': CMP_SINGLE(p, q, _Bool); return equal;
2739
2740 /* unsigned integers */
2741 case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
2742 case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
2743 case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
2744
2745 /* native 64-bit */
2746 case 'q': CMP_SINGLE(p, q, long long); return equal;
2747 case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;
2748
2749 /* ssize_t and size_t */
2750 case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
2751 case 'N': CMP_SINGLE(p, q, size_t); return equal;
2752
2753 /* floats */
2754 /* XXX DBL_EPSILON? */
2755 case 'f': CMP_SINGLE(p, q, float); return equal;
2756 case 'd': CMP_SINGLE(p, q, double); return equal;
2757
2758 /* bytes object */
2759 case 'c': return *p == *q;
2760
2761 /* pointer */
2762 case 'P': CMP_SINGLE(p, q, void *); return equal;
2763
2764 /* use the struct module */
2765 case '_':
2766 assert(unpack_p);
2767 assert(unpack_q);
2768 return struct_unpack_cmp(p, q, unpack_p, unpack_q);
2769 }
2770
2771 /* NOT REACHED */
2772 PyErr_SetString(PyExc_RuntimeError,
2773 "memoryview: internal error in richcompare");
2774 return MV_COMPARE_EX;
2775 }
2776
2777 /* Base case for recursive array comparisons. Assumption: ndim == 1. */
2778 static int
2779 cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
2780 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2781 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2782 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2783 {
2784 Py_ssize_t i;
2785 int equal;
2786
2787 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2788 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2789 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2790 equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
2791 if (equal <= 0)
2792 return equal;
2793 }
2794
2795 return 1;
2796 }
2797
2798 /* Recursively compare two multi-dimensional arrays that have the same
2799 logical structure. Assumption: ndim >= 1. */
2800 static int
2801 cmp_rec(const char *p, const char *q,
2802 Py_ssize_t ndim, const Py_ssize_t *shape,
2803 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2804 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2805 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2806 {
2807 Py_ssize_t i;
2808 int equal;
2809
2810 assert(ndim >= 1);
2811 assert(shape != NULL);
2812 assert(pstrides != NULL);
2813 assert(qstrides != NULL);
2814
2815 if (ndim == 1) {
2816 return cmp_base(p, q, shape,
2817 pstrides, psuboffsets,
2818 qstrides, qsuboffsets,
2819 fmt, unpack_p, unpack_q);
2820 }
2821
2822 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2823 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2824 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2825 equal = cmp_rec(xp, xq, ndim-1, shape+1,
2826 pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
2827 qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
2828 fmt, unpack_p, unpack_q);
2829 if (equal <= 0)
2830 return equal;
2831 }
2832
2833 return 1;
2834 }
2835
2836 static PyObject *
2837 memory_richcompare(PyObject *v, PyObject *w, int op)
2838 {
2839 PyObject *res;
2840 Py_buffer wbuf, *vv;
2841 Py_buffer *ww = NULL;
2842 struct unpacker *unpack_v = NULL;
2843 struct unpacker *unpack_w = NULL;
2844 char vfmt, wfmt;
2845 int equal = MV_COMPARE_NOT_IMPL;
2846
2847 if (op != Py_EQ && op != Py_NE)
2848 goto result; /* Py_NotImplemented */
2849
2850 assert(PyMemoryView_Check(v));
2851 if (BASE_INACCESSIBLE(v)) {
2852 equal = (v == w);
2853 goto result;
2854 }
2855 vv = VIEW_ADDR(v);
2856
2857 if (PyMemoryView_Check(w)) {
2858 if (BASE_INACCESSIBLE(w)) {
2859 equal = (v == w);
2860 goto result;
2861 }
2862 ww = VIEW_ADDR(w);
2863 }
2864 else {
2865 if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
2866 PyErr_Clear();
2867 goto result; /* Py_NotImplemented */
2868 }
2869 ww = &wbuf;
2870 }
2871
2872 if (!equiv_shape(vv, ww)) {
2873 PyErr_Clear();
2874 equal = 0;
2875 goto result;
2876 }
2877
2878 /* Use fast unpacking for identical primitive C type formats. */
2879 if (get_native_fmtchar(&vfmt, vv->format) < 0)
2880 vfmt = '_';
2881 if (get_native_fmtchar(&wfmt, ww->format) < 0)
2882 wfmt = '_';
2883 if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
2884 /* Use struct module unpacking. NOTE: Even for equal format strings,
2885 memcmp() cannot be used for item comparison since it would give
2886 incorrect results in the case of NaNs or uninitialized padding
2887 bytes. */
2888 vfmt = '_';
2889 unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
2890 if (unpack_v == NULL) {
2891 equal = fix_struct_error_int();
2892 goto result;
2893 }
2894 unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
2895 if (unpack_w == NULL) {
2896 equal = fix_struct_error_int();
2897 goto result;
2898 }
2899 }
2900
2901 if (vv->ndim == 0) {
2902 equal = unpack_cmp(vv->buf, ww->buf,
2903 vfmt, unpack_v, unpack_w);
2904 }
2905 else if (vv->ndim == 1) {
2906 equal = cmp_base(vv->buf, ww->buf, vv->shape,
2907 vv->strides, vv->suboffsets,
2908 ww->strides, ww->suboffsets,
2909 vfmt, unpack_v, unpack_w);
2910 }
2911 else {
2912 equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
2913 vv->strides, vv->suboffsets,
2914 ww->strides, ww->suboffsets,
2915 vfmt, unpack_v, unpack_w);
2916 }
2917
2918 result:
2919 if (equal < 0) {
2920 if (equal == MV_COMPARE_NOT_IMPL)
2921 res = Py_NotImplemented;
2922 else /* exception */
2923 res = NULL;
2924 }
2925 else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
2926 res = Py_True;
2927 else
2928 res = Py_False;
2929
2930 if (ww == &wbuf)
2931 PyBuffer_Release(ww);
2932
2933 unpacker_free(unpack_v);
2934 unpacker_free(unpack_w);
2935
2936 Py_XINCREF(res);
2937 return res;
2938 }
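
/* Comparison examples.  Because items are unpacked before comparing,
   NaN-containing buffers compare unequal even when their bytes match,
   which is why memcmp() is not used here:

     >>> memoryview(b'abc') == b'abc'
     True
     >>> import array
     >>> a = array.array('d', [float('nan')])
     >>> memoryview(a) == memoryview(a)
     False
*/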
2939
2940 /**************************************************************************/
2941 /* Hash */
2942 /**************************************************************************/
2943
2944 static Py_hash_t
2945 memory_hash(PyMemoryViewObject *self)
2946 {
2947 if (self->hash == -1) {
2948 Py_buffer *view = &self->view;
2949 char *mem = view->buf;
2950 Py_ssize_t ret;
2951 char fmt;
2952
2953 CHECK_RELEASED_INT(self);
2954
2955 if (!view->readonly) {
2956 PyErr_SetString(PyExc_ValueError,
2957 "cannot hash writable memoryview object");
2958 return -1;
2959 }
2960 ret = get_native_fmtchar(&fmt, view->format);
2961 if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
2962 PyErr_SetString(PyExc_ValueError,
2963 "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
2964 return -1;
2965 }
2966 if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
2967 /* Keep the original error message */
2968 return -1;
2969 }
2970
2971 if (!MV_C_CONTIGUOUS(self->flags)) {
2972 mem = PyMem_Malloc(view->len);
2973 if (mem == NULL) {
2974 PyErr_NoMemory();
2975 return -1;
2976 }
2977 if (buffer_to_contiguous(mem, view, 'C') < 0) {
2978 PyMem_Free(mem);
2979 return -1;
2980 }
2981 }
2982
2983 /* Can't fail */
2984 self->hash = _Py_HashBytes(mem, view->len);
2985
2986 if (mem != view->buf)
2987 PyMem_Free(mem);
2988 }
2989
2990 return self->hash;
2991 }
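
/* Hashing is restricted to read-only byte views and is consistent with
   hashing the equivalent bytes object:

     >>> hash(memoryview(b'abc')) == hash(b'abc')
     True
     >>> hash(memoryview(bytearray(b'abc')))
     Traceback (most recent call last):
       ...
     ValueError: cannot hash writable memoryview object
*/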
2992
2993
2994 /**************************************************************************/
2995 /* getters */
2996 /**************************************************************************/
2997
2998 static PyObject *
2999 _IntTupleFromSsizet(int len, Py_ssize_t *vals)
3000 {
3001 int i;
3002 PyObject *o;
3003 PyObject *intTuple;
3004
3005 if (vals == NULL)
3006 return PyTuple_New(0);
3007
3008 intTuple = PyTuple_New(len);
3009 if (!intTuple)
3010 return NULL;
3011 for (i=0; i<len; i++) {
3012 o = PyLong_FromSsize_t(vals[i]);
3013 if (!o) {
3014 Py_DECREF(intTuple);
3015 return NULL;
3016 }
3017 PyTuple_SET_ITEM(intTuple, i, o);
3018 }
3019 return intTuple;
3020 }
3021
3022 static PyObject *
3023 memory_obj_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3024 {
3025 Py_buffer *view = &self->view;
3026
3027 CHECK_RELEASED(self);
3028 if (view->obj == NULL) {
3029 Py_RETURN_NONE;
3030 }
3031 Py_INCREF(view->obj);
3032 return view->obj;
3033 }
3034
3035 static PyObject *
3036 memory_nbytes_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3037 {
3038 CHECK_RELEASED(self);
3039 return PyLong_FromSsize_t(self->view.len);
3040 }
3041
3042 static PyObject *
3043 memory_format_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3044 {
3045 CHECK_RELEASED(self);
3046 return PyUnicode_FromString(self->view.format);
3047 }
3048
3049 static PyObject *
3050 memory_itemsize_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3051 {
3052 CHECK_RELEASED(self);
3053 return PyLong_FromSsize_t(self->view.itemsize);
3054 }
3055
3056 static PyObject *
3057 memory_shape_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3058 {
3059 CHECK_RELEASED(self);
3060 return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
3061 }
3062
3063 static PyObject *
3064 memory_strides_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3065 {
3066 CHECK_RELEASED(self);
3067 return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
3068 }
3069
3070 static PyObject *
3071 memory_suboffsets_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3072 {
3073 CHECK_RELEASED(self);
3074 return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
3075 }
3076
3077 static PyObject *
3078 memory_readonly_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3079 {
3080 CHECK_RELEASED(self);
3081 return PyBool_FromLong(self->view.readonly);
3082 }
3083
3084 static PyObject *
3085 memory_ndim_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3086 {
3087 CHECK_RELEASED(self);
3088 return PyLong_FromLong(self->view.ndim);
3089 }
3090
3091 static PyObject *
3092 memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3093 {
3094 CHECK_RELEASED(self);
3095 return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
3096 }
3097
3098 static PyObject *
3099 memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3100 {
3101 CHECK_RELEASED(self);
3102 return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
3103 }
3104
3105 static PyObject *
3106 memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3107 {
3108 CHECK_RELEASED(self);
3109 return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
3110 }
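
/* Getter examples for a 2-D cast of a byte buffer:

     >>> m = memoryview(bytearray(range(6))).cast('B', shape=[2, 3])
     >>> m.ndim, m.shape, m.strides, m.itemsize, m.nbytes
     (2, (2, 3), (3, 1), 1, 6)
     >>> m.c_contiguous, m.f_contiguous, m.contiguous
     (True, False, True)
*/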
3111
3112 PyDoc_STRVAR(memory_obj_doc,
3113 "The underlying object of the memoryview.");
3114 PyDoc_STRVAR(memory_nbytes_doc,
3115 "The amount of space in bytes that the array would use in\n"
3116 " a contiguous representation.");
3117 PyDoc_STRVAR(memory_readonly_doc,
3118 "A bool indicating whether the memory is read only.");
3119 PyDoc_STRVAR(memory_itemsize_doc,
3120 "The size in bytes of each element of the memoryview.");
3121 PyDoc_STRVAR(memory_format_doc,
3122 "A string containing the format (in struct module style)\n"
3123 " for each element in the view.");
3124 PyDoc_STRVAR(memory_ndim_doc,
3125 "An integer indicating how many dimensions of a multi-dimensional\n"
3126 " array the memory represents.");
3127 PyDoc_STRVAR(memory_shape_doc,
3128 "A tuple of ndim integers giving the shape of the memory\n"
3129 " as an N-dimensional array.");
3130 PyDoc_STRVAR(memory_strides_doc,
3131 "A tuple of ndim integers giving the size in bytes to access\n"
3132 " each element for each dimension of the array.");
3133 PyDoc_STRVAR(memory_suboffsets_doc,
3134 "A tuple of integers used internally for PIL-style arrays.");
3135 PyDoc_STRVAR(memory_c_contiguous_doc,
3136 "A bool indicating whether the memory is C contiguous.");
3137 PyDoc_STRVAR(memory_f_contiguous_doc,
3138 "A bool indicating whether the memory is Fortran contiguous.");
3139 PyDoc_STRVAR(memory_contiguous_doc,
3140 "A bool indicating whether the memory is contiguous.");
3141
3142
3143 static PyGetSetDef memory_getsetlist[] = {
3144 {"obj", (getter)memory_obj_get, NULL, memory_obj_doc},
3145 {"nbytes", (getter)memory_nbytes_get, NULL, memory_nbytes_doc},
3146 {"readonly", (getter)memory_readonly_get, NULL, memory_readonly_doc},
3147 {"itemsize", (getter)memory_itemsize_get, NULL, memory_itemsize_doc},
3148 {"format", (getter)memory_format_get, NULL, memory_format_doc},
3149 {"ndim", (getter)memory_ndim_get, NULL, memory_ndim_doc},
3150 {"shape", (getter)memory_shape_get, NULL, memory_shape_doc},
3151 {"strides", (getter)memory_strides_get, NULL, memory_strides_doc},
3152 {"suboffsets", (getter)memory_suboffsets_get, NULL, memory_suboffsets_doc},
3153 {"c_contiguous", (getter)memory_c_contiguous, NULL, memory_c_contiguous_doc},
3154 {"f_contiguous", (getter)memory_f_contiguous, NULL, memory_f_contiguous_doc},
3155 {"contiguous", (getter)memory_contiguous, NULL, memory_contiguous_doc},
3156 {NULL, NULL, NULL, NULL},
3157 };
3158
3159
3160 static PyMethodDef memory_methods[] = {
3161 MEMORYVIEW_RELEASE_METHODDEF
3162 MEMORYVIEW_TOBYTES_METHODDEF
3163 MEMORYVIEW_HEX_METHODDEF
3164 MEMORYVIEW_TOLIST_METHODDEF
3165 MEMORYVIEW_CAST_METHODDEF
3166 MEMORYVIEW_TOREADONLY_METHODDEF
3167 {"__enter__", memory_enter, METH_NOARGS, NULL},
3168 {"__exit__", memory_exit, METH_VARARGS, NULL},
3169 {NULL, NULL}
3170 };
3171
3172 /**************************************************************************/
3173 /* Memoryview Iterator */
3174 /**************************************************************************/
3175
3176 PyTypeObject _PyMemoryIter_Type;
3177
3178 typedef struct {
3179 PyObject_HEAD
3180 Py_ssize_t it_index;
3181 PyMemoryViewObject *it_seq; // Set to NULL when iterator is exhausted
3182 Py_ssize_t it_length;
3183 const char *it_fmt;
3184 } memoryiterobject;
3185
3186 static void
3187 memoryiter_dealloc(memoryiterobject *it)
3188 {
3189 _PyObject_GC_UNTRACK(it);
3190 Py_XDECREF(it->it_seq);
3191 PyObject_GC_Del(it);
3192 }
3193
3194 static int
3195 memoryiter_traverse(memoryiterobject *it, visitproc visit, void *arg)
3196 {
3197 Py_VISIT(it->it_seq);
3198 return 0;
3199 }
3200
3201 static PyObject *
3202 memoryiter_next(memoryiterobject *it)
3203 {
3204 PyMemoryViewObject *seq;
3205 seq = it->it_seq;
3206 if (seq == NULL) {
3207 return NULL;
3208 }
3209
3210 if (it->it_index < it->it_length) {
3211 CHECK_RELEASED(seq);
3212 Py_buffer *view = &(seq->view);
3213 char *ptr = (char *)seq->view.buf;
3214
3215 ptr += view->strides[0] * it->it_index++;
3216 ptr = ADJUST_PTR(ptr, view->suboffsets, 0);
3217 if (ptr == NULL) {
3218 return NULL;
3219 }
3220 return unpack_single(seq, ptr, it->it_fmt);
3221 }
3222
3223 it->it_seq = NULL;
3224 Py_DECREF(seq);
3225 return NULL;
3226 }
3227
3228 static PyObject *
3229 memory_iter(PyObject *seq)
3230 {
3231 if (!PyMemoryView_Check(seq)) {
3232 PyErr_BadInternalCall();
3233 return NULL;
3234 }
3235 PyMemoryViewObject *obj = (PyMemoryViewObject *)seq;
3236 int ndims = obj->view.ndim;
3237 if (ndims == 0) {
3238 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
3239 return NULL;
3240 }
3241 if (ndims != 1) {
3242 PyErr_SetString(PyExc_NotImplementedError,
3243 "multi-dimensional sub-views are not implemented");
3244 return NULL;
3245 }
3246
3247 const char *fmt = adjust_fmt(&obj->view);
3248 if (fmt == NULL) {
3249 return NULL;
3250 }
3251
3252 memoryiterobject *it;
3253 it = PyObject_GC_New(memoryiterobject, &_PyMemoryIter_Type);
3254 if (it == NULL) {
3255 return NULL;
3256 }
3257 it->it_fmt = fmt;
3258 it->it_length = memory_length(obj);
3259 it->it_index = 0;
3260 Py_INCREF(seq);
3261 it->it_seq = obj;
3262 _PyObject_GC_TRACK(it);
3263 return (PyObject *)it;
3264 }
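
/* Iteration is only supported for one-dimensional views and yields the same
   objects as indexing does:

     >>> list(memoryview(b'abc'))
     [97, 98, 99]
     >>> m = memoryview(bytearray(range(4))).cast('B', shape=[2, 2])
     >>> iter(m)
     Traceback (most recent call last):
       ...
     NotImplementedError: multi-dimensional sub-views are not implemented
*/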
3265
3266 PyTypeObject _PyMemoryIter_Type = {
3267 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3268 .tp_name = "memory_iterator",
3269 .tp_basicsize = sizeof(memoryiterobject),
3270 // methods
3271 .tp_dealloc = (destructor)memoryiter_dealloc,
3272 .tp_getattro = PyObject_GenericGetAttr,
3273 .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
3274 .tp_traverse = (traverseproc)memoryiter_traverse,
3275 .tp_iter = PyObject_SelfIter,
3276 .tp_iternext = (iternextfunc)memoryiter_next,
3277 };
3278
3279 PyTypeObject PyMemoryView_Type = {
3280 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3281 "memoryview", /* tp_name */
3282 offsetof(PyMemoryViewObject, ob_array), /* tp_basicsize */
3283 sizeof(Py_ssize_t), /* tp_itemsize */
3284 (destructor)memory_dealloc, /* tp_dealloc */
3285 0, /* tp_vectorcall_offset */
3286 0, /* tp_getattr */
3287 0, /* tp_setattr */
3288 0, /* tp_as_async */
3289 (reprfunc)memory_repr, /* tp_repr */
3290 0, /* tp_as_number */
3291 &memory_as_sequence, /* tp_as_sequence */
3292 &memory_as_mapping, /* tp_as_mapping */
3293 (hashfunc)memory_hash, /* tp_hash */
3294 0, /* tp_call */
3295 0, /* tp_str */
3296 PyObject_GenericGetAttr, /* tp_getattro */
3297 0, /* tp_setattro */
3298 &memory_as_buffer, /* tp_as_buffer */
3299 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3300 Py_TPFLAGS_SEQUENCE, /* tp_flags */
3301 memoryview__doc__, /* tp_doc */
3302 (traverseproc)memory_traverse, /* tp_traverse */
3303 (inquiry)memory_clear, /* tp_clear */
3304 memory_richcompare, /* tp_richcompare */
3305 offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
3306 memory_iter, /* tp_iter */
3307 0, /* tp_iternext */
3308 memory_methods, /* tp_methods */
3309 0, /* tp_members */
3310 memory_getsetlist, /* tp_getset */
3311 0, /* tp_base */
3312 0, /* tp_dict */
3313 0, /* tp_descr_get */
3314 0, /* tp_descr_set */
3315 0, /* tp_dictoffset */
3316 0, /* tp_init */
3317 0, /* tp_alloc */
3318 memoryview, /* tp_new */
3319 };
3320