OLD | NEW |
(Empty) | |
| 1 ////////// MemviewSliceStruct.proto ////////// |
| 2 |
| 3 /* memoryview slice struct */ |
| 4 struct {{memview_struct_name}}; |
| 5 |
| 6 typedef struct { |
| 7 struct {{memview_struct_name}} *memview; |
| 8 char *data; |
| 9 Py_ssize_t shape[{{max_dims}}]; |
| 10 Py_ssize_t strides[{{max_dims}}]; |
| 11 Py_ssize_t suboffsets[{{max_dims}}]; |
| 12 } {{memviewslice_name}}; |
| 13 |
| 14 |
| 15 /////////// Atomics.proto ///////////// |
| 16 |
| 17 #include <pythread.h> |
| 18 |
| 19 #ifndef CYTHON_ATOMICS |
| 20 #define CYTHON_ATOMICS 1 |
| 21 #endif |
| 22 |
| 23 #define __pyx_atomic_int_type int |
| 24 // todo: Portland pgcc, maybe OS X's OSAtomicIncrement32, |
| 25 // libatomic + autotools-like distutils support? Such a pain... |
| 26 #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \ |
| 27 (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \ |
| 28 !defined(__i386__) |
| 29 /* gcc >= 4.1.2 */ |
| 30 #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) |
| 31 #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) |
| 32 |
| 33 #ifdef __PYX_DEBUG_ATOMICS |
| 34 #warning "Using GNU atomics" |
| 35 #endif |
| 36 #elif CYTHON_ATOMICS && defined(_MSC_VER) |
| 37 /* msvc */ |
| 38 #include <Windows.h> |
| 39 #define __pyx_atomic_int_type LONG |
| 40 #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) |
| 41 #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) |
| 42 |
| 43 #ifdef __PYX_DEBUG_ATOMICS |
| 44 #warning "Using MSVC atomics" |
| 45 #endif |
| 46 #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 |
| 47 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) |
| 48 #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) |
| 49 |
| 50 #ifdef __PYX_DEBUG_ATOMICS |
| 51 #warning "Using Intel atomics" |
| 52 #endif |
| 53 #else |
| 54 #undef CYTHON_ATOMICS |
| 55 #define CYTHON_ATOMICS 0 |
| 56 |
| 57 #ifdef __PYX_DEBUG_ATOMICS |
| 58 #warning "Not using atomics" |
| 59 #endif |
| 60 #endif |
| 61 |
| 62 typedef volatile __pyx_atomic_int_type __pyx_atomic_int; |
| 63 |
| 64 #if CYTHON_ATOMICS |
| 65 #define __pyx_add_acquisition_count(memview) \ |
| 66 __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) |
| 67 #define __pyx_sub_acquisition_count(memview) \ |
| 68 __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) |
| 69 #else |
| 70 #define __pyx_add_acquisition_count(memview) \ |
| 71 __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) |
| 72 #define __pyx_sub_acquisition_count(memview) \ |
| 73 __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) |
| 74 #endif |
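|     /* The acquisition count records how many slices currently reference a memoryview. */ |
|     /* Only the 0 -> 1 and 1 -> 0 transitions take or drop a Python reference to the */ |
|     /* memoryview object (see __Pyx_INC_MEMVIEW / __Pyx_XDEC_MEMVIEW further down). */ |
|     /* With a capable compiler the count is updated through the atomic intrinsics above; */ |
|     /* otherwise the *_locked fallbacks serialise the update with a PyThread lock. */ |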
| 75 |
| 76 |
| 77 /////////////// ObjectToMemviewSlice.proto /////////////// |
| 78 |
| 79 static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *); |
| 80 |
| 81 |
| 82 ////////// MemviewSliceInit.proto ////////// |
| 83 |
| 84 #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d |
| 85 |
| 86 #define __Pyx_MEMVIEW_DIRECT 1 |
| 87 #define __Pyx_MEMVIEW_PTR 2 |
| 88 #define __Pyx_MEMVIEW_FULL 4 |
| 89 #define __Pyx_MEMVIEW_CONTIG 8 |
| 90 #define __Pyx_MEMVIEW_STRIDED 16 |
| 91 #define __Pyx_MEMVIEW_FOLLOW 32 |
| 92 |
| 93 #define __Pyx_IS_C_CONTIG 1 |
| 94 #define __Pyx_IS_F_CONTIG 2 |
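|     /* Per-axis access specs: DIRECT (plain data), PTR (dereference through a suboffset */ |
|     /* pointer) and FULL (either, decided at runtime) describe how an axis is addressed; */ |
|     /* CONTIG, STRIDED and FOLLOW describe its packing. The __Pyx_IS_*_CONTIG flags */ |
|     /* request whole-buffer contiguity and are enforced by __pyx_verify_contig() in */ |
|     /* MemviewSliceValidateAndInit below. */ |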
| 95 |
| 96 static int __Pyx_init_memviewslice( |
| 97 struct __pyx_memoryview_obj *memview, |
| 98 int ndim, |
| 99 __Pyx_memviewslice *memviewslice, |
| 100 int memview_is_new_reference); |
| 101 |
| 102 static CYTHON_INLINE int __pyx_add_acquisition_count_locked( |
| 103 __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); |
| 104 static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( |
| 105 __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); |
| 106 |
| 107 #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) |
| 108 #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) |
| 109 #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) |
| 110 #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) |
| 111 static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *, int, int); |
| 112 static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *, int, int); |
| 113 |
| 114 |
| 115 /////////////// MemviewSliceIndex.proto /////////////// |
| 116 |
| 117 static CYTHON_INLINE char *__pyx_memviewslice_index_full( |
| 118 const char *bufp, Py_ssize_t idx, Py_ssize_t stride, Py_ssize_t suboffset); |
| 119 |
| 120 |
| 121 /////////////// ObjectToMemviewSlice /////////////// |
| 122 //@requires: MemviewSliceValidateAndInit |
| 123 |
| 124 static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj) { |
| 125 {{memviewslice_name}} result = {{memslice_init}}; |
| 126 __Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}]; |
| 127 int axes_specs[] = { {{axes_specs}} }; |
| 128 int retcode; |
| 129 |
| 130 if (obj == Py_None) { |
| 131 /* We don't bother to refcount None */ |
| 132 result.memview = (struct __pyx_memoryview_obj *) Py_None; |
| 133 return result; |
| 134 } |
| 135 |
| 136 retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, {{c_or_f_flag}}, |
| 137 {{buf_flag}}, {{ndim}}, |
| 138 &{{dtype_typeinfo}}, stack, |
| 139 &result, obj); |
| 140 |
| 141 if (unlikely(retcode == -1)) |
| 142 goto __pyx_fail; |
| 143 |
| 144 return result; |
| 145 __pyx_fail: |
| 146 result.memview = NULL; |
| 147 result.data = NULL; |
| 148 return result; |
| 149 } |
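|     /* A hedged sketch of how generated code might call the converter above; the */ |
|     /* function and variable names are illustrative, not defined in this file: */ |
|     /*     __Pyx_memviewslice v = __Pyx_PyObject_to_MemoryviewSlice_dc_double(obj); */ |
|     /*     if (unlikely(!v.memview)) goto error;   (on failure memview/data are NULL) */ |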
| 150 |
| 151 |
| 152 /////////////// MemviewSliceValidateAndInit.proto /////////////// |
| 153 |
| 154 static int __Pyx_ValidateAndInit_memviewslice( |
| 155 int *axes_specs, |
| 156 int c_or_f_flag, |
| 157 int buf_flags, |
| 158 int ndim, |
| 159 __Pyx_TypeInfo *dtype, |
| 160 __Pyx_BufFmt_StackElem stack[], |
| 161 __Pyx_memviewslice *memviewslice, |
| 162 PyObject *original_obj); |
| 163 |
| 164 /////////////// MemviewSliceValidateAndInit /////////////// |
| 165 //@requires: Buffer.c::TypeInfoCompare |
| 166 |
| 167 static int |
| 168 __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) |
| 169 { |
| 170 if (buf->shape[dim] <= 1) |
| 171 return 1; |
| 172 |
| 173 if (buf->strides) { |
| 174 if (spec & __Pyx_MEMVIEW_CONTIG) { |
| 175 if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { |
| 176 if (buf->strides[dim] != sizeof(void *)) { |
| 177 PyErr_Format(PyExc_ValueError, |
| 178 "Buffer is not indirectly contiguous " |
| 179 "in dimension %d.", dim); |
| 180 goto fail; |
| 181 } |
| 182 } else if (buf->strides[dim] != buf->itemsize) { |
| 183 PyErr_SetString(PyExc_ValueError, |
| 184 "Buffer and memoryview are not contiguous " |
| 185 "in the same dimension."); |
| 186 goto fail; |
| 187 } |
| 188 } |
| 189 |
| 190 if (spec & __Pyx_MEMVIEW_FOLLOW) { |
| 191 Py_ssize_t stride = buf->strides[dim]; |
| 192 if (stride < 0) |
| 193 stride = -stride; |
| 194 if (stride < buf->itemsize) { |
| 195 PyErr_SetString(PyExc_ValueError, |
| 196 "Buffer and memoryview are not contiguous " |
| 197 "in the same dimension."); |
| 198 goto fail; |
| 199 } |
| 200 } |
| 201 } else { |
| 202 if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { |
| 203 PyErr_Format(PyExc_ValueError, |
| 204 "C-contiguous buffer is not contiguous in " |
| 205 "dimension %d", dim); |
| 206 goto fail; |
| 207 } else if (spec & (__Pyx_MEMVIEW_PTR)) { |
| 208 PyErr_Format(PyExc_ValueError, |
| 209 "C-contiguous buffer is not indirect in " |
| 210 "dimension %d", dim); |
| 211 goto fail; |
| 212 } else if (buf->suboffsets) { |
| 213 PyErr_SetString(PyExc_ValueError, |
| 214 "Buffer exposes suboffsets but no strides"); |
| 215 goto fail; |
| 216 } |
| 217 } |
| 218 |
| 219 return 1; |
| 220 fail: |
| 221 return 0; |
| 222 } |
| 223 |
| 224 static int |
| 225 __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) |
| 226 { |
| 227 // Todo: without PyBUF_INDIRECT we may not have suboffset information, i.e., the |
| 228 // ptr may not be set to NULL but may be uninitialized? |
| 229 if (spec & __Pyx_MEMVIEW_DIRECT) { |
| 230 if (buf->suboffsets && buf->suboffsets[dim] >= 0) { |
| 231 PyErr_Format(PyExc_ValueError, |
| 232 "Buffer not compatible with direct access " |
| 233 "in dimension %d.", dim); |
| 234 goto fail; |
| 235 } |
| 236 } |
| 237 |
| 238 if (spec & __Pyx_MEMVIEW_PTR) { |
| 239 if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { |
| 240 PyErr_Format(PyExc_ValueError, |
| 241 "Buffer is not indirectly accessible " |
| 242 "in dimension %d.", dim); |
| 243 goto fail; |
| 244 } |
| 245 } |
| 246 |
| 247 return 1; |
| 248 fail: |
| 249 return 0; |
| 250 } |
| 251 |
| 252 static int |
| 253 __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) |
| 254 { |
| 255 int i; |
| 256 |
| 257 if (c_or_f_flag & __Pyx_IS_F_CONTIG) { |
| 258 Py_ssize_t stride = 1; |
| 259 for (i = 0; i < ndim; i++) { |
| 260 if (stride * buf->itemsize != buf->strides[i] && |
| 261 buf->shape[i] > 1) |
| 262 { |
| 263 PyErr_SetString(PyExc_ValueError, |
| 264 "Buffer not fortran contiguous."); |
| 265 goto fail; |
| 266 } |
| 267 stride = stride * buf->shape[i]; |
| 268 } |
| 269 } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { |
| 270 Py_ssize_t stride = 1; |
| 271 for (i = ndim - 1; i > -1; i--) { |
| 272 if (stride * buf->itemsize != buf->strides[i] && |
| 273 buf->shape[i] > 1) { |
| 274 PyErr_SetString(PyExc_ValueError, |
| 275 "Buffer not C contiguous."); |
| 276 goto fail; |
| 277 } |
| 278 stride = stride * buf->shape[i]; |
| 279 } |
| 280 } |
| 281 |
| 282 return 1; |
| 283 fail: |
| 284 return 0; |
| 285 } |
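|     /* Worked example: a C-contiguous float64 buffer of shape (3, 4) has strides */ |
|     /* (32, 8). Walking the dimensions right to left, the expected byte stride starts */ |
|     /* at the itemsize (8), matches strides[1], grows by shape[1] to 32 and matches */ |
|     /* strides[0], so the check passes; a mismatch on any axis of extent > 1 fails. */ |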
| 286 |
| 287 static int __Pyx_ValidateAndInit_memviewslice( |
| 288 int *axes_specs, |
| 289 int c_or_f_flag, |
| 290 int buf_flags, |
| 291 int ndim, |
| 292 __Pyx_TypeInfo *dtype, |
| 293 __Pyx_BufFmt_StackElem stack[], |
| 294 __Pyx_memviewslice *memviewslice, |
| 295 PyObject *original_obj) |
| 296 { |
| 297 struct __pyx_memoryview_obj *memview, *new_memview; |
| 298 __Pyx_RefNannyDeclarations |
| 299 Py_buffer *buf; |
| 300 int i, spec = 0, retval = -1; |
| 301 __Pyx_BufFmt_Context ctx; |
| 302 int from_memoryview = __pyx_memoryview_check(original_obj); |
| 303 |
| 304 __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); |
| 305 |
| 306 if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) |
| 307 original_obj)->typeinfo)) { |
| 308 /* We have a matching dtype, skip format parsing */ |
| 309 memview = (struct __pyx_memoryview_obj *) original_obj; |
| 310 new_memview = NULL; |
| 311 } else { |
| 312 memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( |
| 313 original_obj, buf_flags, 0, dtype); |
| 314 new_memview = memview; |
| 315 if (unlikely(!memview)) |
| 316 goto fail; |
| 317 } |
| 318 |
| 319 buf = &memview->view; |
| 320 if (buf->ndim != ndim) { |
| 321 PyErr_Format(PyExc_ValueError, |
| 322 "Buffer has wrong number of dimensions (expected %d, got %d)", |
| 323 ndim, buf->ndim); |
| 324 goto fail; |
| 325 } |
| 326 |
| 327 if (new_memview) { |
| 328 __Pyx_BufFmt_Init(&ctx, stack, dtype); |
| 329 if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; |
| 330 } |
| 331 |
| 332 if ((unsigned) buf->itemsize != dtype->size) { |
| 333 PyErr_Format(PyExc_ValueError, |
| 334 "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " |
| 335 "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", |
| 336 buf->itemsize, |
| 337 (buf->itemsize > 1) ? "s" : "", |
| 338 dtype->name, |
| 339 dtype->size, |
| 340 (dtype->size > 1) ? "s" : ""); |
| 341 goto fail; |
| 342 } |
| 343 |
| 344 /* Check axes */ |
| 345 for (i = 0; i < ndim; i++) { |
| 346 spec = axes_specs[i]; |
| 347 if (!__pyx_check_strides(buf, i, ndim, spec)) |
| 348 goto fail; |
| 349 if (!__pyx_check_suboffsets(buf, i, ndim, spec)) |
| 350 goto fail; |
| 351 } |
| 352 |
| 353 /* Check contiguity */ |
| 354 if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) |
| 355 goto fail; |
| 356 |
| 357 /* Initialize */ |
| 358 if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, |
| 359 new_memview != NULL) == -1)) { |
| 360 goto fail; |
| 361 } |
| 362 |
| 363 retval = 0; |
| 364 goto no_fail; |
| 365 |
| 366 fail: |
| 367 Py_XDECREF(new_memview); |
| 368 retval = -1; |
| 369 |
| 370 no_fail: |
| 371 __Pyx_RefNannyFinishContext(); |
| 372 return retval; |
| 373 } |
| 374 |
| 375 |
| 376 ////////// MemviewSliceInit ////////// |
| 377 |
| 378 static int |
| 379 __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, |
| 380 int ndim, |
| 381 {{memviewslice_name}} *memviewslice, |
| 382 int memview_is_new_reference) |
| 383 { |
| 384 __Pyx_RefNannyDeclarations |
| 385 int i, retval=-1; |
| 386 Py_buffer *buf = &memview->view; |
| 387 __Pyx_RefNannySetupContext("init_memviewslice", 0); |
| 388 |
| 389 if (!buf) { |
| 390 PyErr_SetString(PyExc_ValueError, |
| 391 "buf is NULL."); |
| 392 goto fail; |
| 393 } else if (memviewslice->memview || memviewslice->data) { |
| 394 PyErr_SetString(PyExc_ValueError, |
| 395 "memviewslice is already initialized!"); |
| 396 goto fail; |
| 397 } |
| 398 |
| 399 if (buf->strides) { |
| 400 for (i = 0; i < ndim; i++) { |
| 401 memviewslice->strides[i] = buf->strides[i]; |
| 402 } |
| 403 } else { |
| 404 Py_ssize_t stride = buf->itemsize; |
| 405 for (i = ndim - 1; i >= 0; i--) { |
| 406 memviewslice->strides[i] = stride; |
| 407 stride *= buf->shape[i]; |
| 408 } |
| 409 } |
| 410 |
| 411 for (i = 0; i < ndim; i++) { |
| 412 memviewslice->shape[i] = buf->shape[i]; |
| 413 if (buf->suboffsets) { |
| 414 memviewslice->suboffsets[i] = buf->suboffsets[i]; |
| 415 } else { |
| 416 memviewslice->suboffsets[i] = -1; |
| 417 } |
| 418 } |
| 419 |
| 420 memviewslice->memview = memview; |
| 421 memviewslice->data = (char *)buf->buf; |
| 422 if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { |
| 423 Py_INCREF(memview); |
| 424 } |
| 425 retval = 0; |
| 426 goto no_fail; |
| 427 |
| 428 fail: |
| 429 /* Don't decref, the memoryview may be borrowed. Let the caller do the cleanup */ |
| 430 /* __Pyx_XDECREF(memviewslice->memview); */ |
| 431 memviewslice->memview = 0; |
| 432 memviewslice->data = 0; |
| 433 retval = -1; |
| 434 no_fail: |
| 435 __Pyx_RefNannyFinishContext(); |
| 436 return retval; |
| 437 } |
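|     /* If the exporter supplies no strides (PyBUF_ND-style, C-contiguous buffers), */ |
|     /* the slice synthesises C-contiguous strides from itemsize and shape, and missing */ |
|     /* suboffsets are recorded as -1, i.e. every axis is treated as direct. */ |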
| 438 |
| 439 |
| 440 static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) { |
| 441 va_list vargs; |
| 442 char msg[200]; |
| 443 |
| 446 #ifdef HAVE_STDARG_PROTOTYPES |
| 447 va_start(vargs, fmt); |
| 448 #else |
| 449 va_start(vargs); |
| 450 #endif |
| 451 |
| 452 vsnprintf(msg, 200, fmt, vargs); |
| 453 Py_FatalError(msg); |
| 454 |
| 455 va_end(vargs); |
| 456 } |
| 457 |
| 458 static CYTHON_INLINE int |
| 459 __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, |
| 460 PyThread_type_lock lock) |
| 461 { |
| 462 int result; |
| 463 PyThread_acquire_lock(lock, 1); |
| 464 result = (*acquisition_count)++; |
| 465 PyThread_release_lock(lock); |
| 466 return result; |
| 467 } |
| 468 |
| 469 static CYTHON_INLINE int |
| 470 __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, |
| 471 PyThread_type_lock lock) |
| 472 { |
| 473 int result; |
| 474 PyThread_acquire_lock(lock, 1); |
| 475 result = (*acquisition_count)--; |
| 476 PyThread_release_lock(lock); |
| 477 return result; |
| 478 } |
| 479 |
| 480 |
| 481 static CYTHON_INLINE void |
| 482 __Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil, int lineno) |
| 483 { |
| 484 int first_time; |
| 485 struct {{memview_struct_name}} *memview = memslice->memview; |
| 486 if (!memview || (PyObject *) memview == Py_None) |
| 487 return; /* allow uninitialized memoryview assignment */ |
| 488 |
| 489 if (__pyx_get_slice_count(memview) < 0) |
| 490 __pyx_fatalerror("Acquisition count is %d (line %d)", |
| 491 __pyx_get_slice_count(memview), lineno); |
| 492 |
| 493 first_time = __pyx_add_acquisition_count(memview) == 0; |
| 494 |
| 495 if (first_time) { |
| 496 if (have_gil) { |
| 497 Py_INCREF((PyObject *) memview); |
| 498 } else { |
| 499 PyGILState_STATE _gilstate = PyGILState_Ensure(); |
| 500 Py_INCREF((PyObject *) memview); |
| 501 PyGILState_Release(_gilstate); |
| 502 } |
| 503 } |
| 504 } |
| 505 |
| 506 static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice, |
| 507 int have_gil, int lineno) { |
| 508 int last_time; |
| 509 struct {{memview_struct_name}} *memview = memslice->memview; |
| 510 |
| 511 if (!memview) { |
| 512 return; |
| 513 } else if ((PyObject *) memview == Py_None) { |
| 514 memslice->memview = NULL; |
| 515 return; |
| 516 } |
| 517 |
| 518 if (__pyx_get_slice_count(memview) <= 0) |
| 519 __pyx_fatalerror("Acquisition count is %d (line %d)", |
| 520 __pyx_get_slice_count(memview), lineno); |
| 521 |
| 522 last_time = __pyx_sub_acquisition_count(memview) == 1; |
| 523 memslice->data = NULL; |
| 524 if (last_time) { |
| 525 if (have_gil) { |
| 526 Py_CLEAR(memslice->memview); |
| 527 } else { |
| 528 PyGILState_STATE _gilstate = PyGILState_Ensure(); |
| 529 Py_CLEAR(memslice->memview); |
| 530 PyGILState_Release(_gilstate); |
| 531 } |
| 532 } else { |
| 533 memslice->memview = NULL; |
| 534 } |
| 535 } |
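|     /* INC/XDEC mirror Py_INCREF/Py_XDECREF at the slice level: only the first */ |
|     /* acquisition and the last release touch the Python reference count of the */ |
|     /* memoryview, and the GIL is taken on demand so both helpers remain usable from */ |
|     /* nogil sections. */ |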
| 536 |
| 537 |
| 538 ////////// MemviewSliceCopyTemplate.proto ////////// |
| 539 |
| 540 static {{memviewslice_name}} |
| 541 __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, |
| 542 const char *mode, int ndim, |
| 543 size_t sizeof_dtype, int contig_flag, |
| 544 int dtype_is_object); |
| 545 |
| 546 |
| 547 ////////// MemviewSliceCopyTemplate ////////// |
| 548 |
| 549 static {{memviewslice_name}} |
| 550 __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, |
| 551 const char *mode, int ndim, |
| 552 size_t sizeof_dtype, int contig_flag, |
| 553 int dtype_is_object) |
| 554 { |
| 555 __Pyx_RefNannyDeclarations |
| 556 int i; |
| 557 __Pyx_memviewslice new_mvs = {{memslice_init}}; |
| 558 struct __pyx_memoryview_obj *from_memview = from_mvs->memview; |
| 559 Py_buffer *buf = &from_memview->view; |
| 560 PyObject *shape_tuple = NULL; |
| 561 PyObject *temp_int = NULL; |
| 562 struct __pyx_array_obj *array_obj = NULL; |
| 563 struct __pyx_memoryview_obj *memview_obj = NULL; |
| 564 |
| 565 __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); |
| 566 |
| 567 for (i = 0; i < ndim; i++) { |
| 568 if (from_mvs->suboffsets[i] >= 0) { |
| 569 PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " |
| 570 "indirect dimensions (axis %d)", i); |
| 571 goto fail; |
| 572 } |
| 573 } |
| 574 |
| 575 shape_tuple = PyTuple_New(ndim); |
| 576 if (unlikely(!shape_tuple)) { |
| 577 goto fail; |
| 578 } |
| 579 __Pyx_GOTREF(shape_tuple); |
| 580 |
| 581 |
| 582 for(i = 0; i < ndim; i++) { |
| 583 temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); |
| 584 if(unlikely(!temp_int)) { |
| 585 goto fail; |
| 586 } else { |
| 587 PyTuple_SET_ITEM(shape_tuple, i, temp_int); |
| 588 temp_int = NULL; |
| 589 } |
| 590 } |
| 591 |
| 592 array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); |
| 593 if (unlikely(!array_obj)) { |
| 594 goto fail; |
| 595 } |
| 596 __Pyx_GOTREF(array_obj); |
| 597 |
| 598 memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( |
| 599 (PyObject *) array_obj, contig_flag, |
| 600 dtype_is_object, |
| 601 from_mvs->memview->typeinfo); |
| 602 if (unlikely(!memview_obj)) |
| 603 goto fail; |
| 604 |
| 605 /* initialize new_mvs */ |
| 606 if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) |
| 607 goto fail; |
| 608 |
| 609 if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, |
| 610 dtype_is_object) < 0)) |
| 611 goto fail; |
| 612 |
| 613 goto no_fail; |
| 614 |
| 615 fail: |
| 616 __Pyx_XDECREF(new_mvs.memview); |
| 617 new_mvs.memview = NULL; |
| 618 new_mvs.data = NULL; |
| 619 no_fail: |
| 620 __Pyx_XDECREF(shape_tuple); |
| 621 __Pyx_XDECREF(temp_int); |
| 622 __Pyx_XDECREF(array_obj); |
| 623 __Pyx_RefNannyFinishContext(); |
| 624 return new_mvs; |
| 625 } |
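|     /* The copy allocates a fresh contiguous __pyx_array of the same shape, wraps it */ |
|     /* in a new memoryview and delegates the element-wise copy to */ |
|     /* __pyx_memoryview_copy_contents(); slices with indirect (suboffset) axes are */ |
|     /* rejected up front. */ |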
| 626 |
| 627 |
| 628 ////////// CopyContentsUtility.proto ///////// |
| 629 |
| 630 #define {{func_cname}}(slice) \ |
| 631 __pyx_memoryview_copy_new_contig(&slice, "{{mode}}", {{ndim}}, \ |
| 632 sizeof({{dtype_decl}}), {{contig_flag}}, \ |
| 633 {{dtype_is_object}}) |
| 634 |
| 635 |
| 636 ////////// OverlappingSlices.proto ////////// |
| 637 |
| 638 static int __pyx_slices_overlap({{memviewslice_name}} *slice1, |
| 639 {{memviewslice_name}} *slice2, |
| 640 int ndim, size_t itemsize); |
| 641 |
| 642 |
| 643 ////////// OverlappingSlices ////////// |
| 644 |
| 645 /* Based on numpy's core/src/multiarray/array_assign.c */ |
| 646 |
| 647 /* Gets a half-open range [start, end) which contains the array data */ |
| 648 static void |
| 649 __pyx_get_array_memory_extents({{memviewslice_name}} *slice, |
| 650 void **out_start, void **out_end, |
| 651 int ndim, size_t itemsize) |
| 652 { |
| 653 char *start, *end; |
| 654 int i; |
| 655 |
| 656 start = end = slice->data; |
| 657 |
| 658 for (i = 0; i < ndim; i++) { |
| 659 Py_ssize_t stride = slice->strides[i]; |
| 660 Py_ssize_t extent = slice->shape[i]; |
| 661 |
| 662 if (extent == 0) { |
| 663 *out_start = *out_end = start; |
| 664 return; |
| 665 } else { |
| 666 if (stride > 0) |
| 667 end += stride * (extent - 1); |
| 668 else |
| 669 start += stride * (extent - 1); |
| 670 } |
| 671 } |
| 672 |
| 673 /* Return a half-open range */ |
| 674 *out_start = start; |
| 675 *out_end = end + itemsize; |
| 676 } |
| 677 |
| 678 /* Returns 1 if the arrays have overlapping data, 0 otherwise */ |
| 679 static int |
| 680 __pyx_slices_overlap({{memviewslice_name}} *slice1, |
| 681 {{memviewslice_name}} *slice2, |
| 682 int ndim, size_t itemsize) |
| 683 { |
| 684 void *start1, *end1, *start2, *end2; |
| 685 |
| 686 __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); |
| 687 __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); |
| 688 |
| 689 return (start1 < end2) && (start2 < end1); |
| 690 } |
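|     /* Example: two 1-D slices over the same base pointer p, covering bytes [p, p+80) */ |
|     /* and [p+40, p+120), yield overlapping extents, so the function returns 1 and a */ |
|     /* slice assignment between them would have to go through a temporary copy. */ |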
| 691 |
| 692 |
| 693 ////////// MemviewSliceIsCContig.proto ////////// |
| 694 |
| 695 #define __pyx_memviewslice_is_c_contig{{ndim}}(slice) \ |
| 696 __pyx_memviewslice_is_contig(&slice, 'C', {{ndim}}) |
| 697 |
| 698 |
| 699 ////////// MemviewSliceIsFContig.proto ////////// |
| 700 |
| 701 #define __pyx_memviewslice_is_f_contig{{ndim}}(slice) \ |
| 702 __pyx_memviewslice_is_contig(&slice, 'F', {{ndim}}) |
| 703 |
| 704 |
| 705 ////////// MemviewSliceIsContig.proto ////////// |
| 706 |
| 707 static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} *mvs, |
| 708 char order, int ndim); |
| 709 |
| 710 |
| 711 ////////// MemviewSliceIsContig ////////// |
| 712 |
| 713 static int |
| 714 __pyx_memviewslice_is_contig(const {{memviewslice_name}} *mvs, |
| 715 char order, int ndim) |
| 716 { |
| 717 int i, index, step, start; |
| 718 Py_ssize_t itemsize = mvs->memview->view.itemsize; |
| 719 |
| 720 if (order == 'F') { |
| 721 step = 1; |
| 722 start = 0; |
| 723 } else { |
| 724 step = -1; |
| 725 start = ndim - 1; |
| 726 } |
| 727 |
| 728 for (i = 0; i < ndim; i++) { |
| 729 index = start + step * i; |
| 730 if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize) |
| 731 return 0; |
| 732 |
| 733 itemsize *= mvs->shape[index]; |
| 734 } |
| 735 |
| 736 return 1; |
| 737 } |
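|     /* Example: for order 'C', shape (3, 4) and itemsize 8 the loop requires */ |
|     /* strides[1] == 8 and strides[0] == 32; an axis with suboffset >= 0 (an indirect */ |
|     /* axis) immediately disqualifies the slice. */ |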
| 738 |
| 739 |
| 740 /////////////// MemviewSliceIndex /////////////// |
| 741 |
| 742 static CYTHON_INLINE char * |
| 743 __pyx_memviewslice_index_full(const char *bufp, Py_ssize_t idx, |
| 744 Py_ssize_t stride, Py_ssize_t suboffset) |
| 745 { |
| 746 bufp = bufp + idx * stride; |
| 747 if (suboffset >= 0) { |
| 748 bufp = *((char **) bufp) + suboffset; |
| 749 } |
| 750 return (char *) bufp; |
| 751 } |
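|     /* With suboffset < 0 this is ordinary strided addressing. With a non-negative */ |
|     /* suboffset the computed position holds a pointer which is dereferenced and then */ |
|     /* offset, following the PEP 3118 suboffset convention for indirect (PIL-style) */ |
|     /* buffers. */ |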
| 752 |
| 753 |
| 754 /////////////// MemviewDtypeToObject.proto /////////////// |
| 755 |
| 756 {{if to_py_function}} |
| 757 static PyObject *{{get_function}}(const char *itemp); /* proto */ |
| 758 {{endif}} |
| 759 |
| 760 {{if from_py_function}} |
| 761 static int {{set_function}}(const char *itemp, PyObject *obj); /* proto */ |
| 762 {{endif}} |
| 763 |
| 764 /////////////// MemviewDtypeToObject /////////////// |
| 765 |
| 766 {{#__pyx_memview_<dtype_name>_to_object}} |
| 767 |
| 768 /* Convert a dtype to or from a Python object */ |
| 769 |
| 770 {{if to_py_function}} |
| 771 static PyObject *{{get_function}}(const char *itemp) { |
| 772 return (PyObject *) {{to_py_function}}(*({{dtype}} *) itemp); |
| 773 } |
| 774 {{endif}} |
| 775 |
| 776 {{if from_py_function}} |
| 777 static int {{set_function}}(const char *itemp, PyObject *obj) { |
| 778 {{dtype}} value = {{from_py_function}}(obj); |
| 779 if ({{error_condition}}) |
| 780 return 0; |
| 781 *({{dtype}} *) itemp = value; |
| 782 return 1; |
| 783 } |
| 784 {{endif}} |
| 785 |
| 786 |
| 787 /////////////// MemviewObjectToObject.proto /////////////// |
| 788 |
| 789 /* Function callbacks (for memoryview object) for dtype object */ |
| 790 static PyObject *{{get_function}}(const char *itemp); /* proto */ |
| 791 static int {{set_function}}(const char *itemp, PyObject *obj); /* proto */ |
| 792 |
| 793 |
| 794 /////////////// MemviewObjectToObject /////////////// |
| 795 |
| 796 static PyObject *{{get_function}}(const char *itemp) { |
| 797 PyObject *result = *(PyObject **) itemp; |
| 798 Py_INCREF(result); |
| 799 return result; |
| 800 } |
| 801 |
| 802 static int {{set_function}}(const char *itemp, PyObject *obj) { |
| 803 Py_INCREF(obj); |
| 804 Py_DECREF(*(PyObject **) itemp); |
| 805 *(PyObject **) itemp = obj; |
| 806 return 1; |
| 807 } |
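|     /* Note the INCREF-before-DECREF order: it keeps the stored object alive even when */ |
|     /* obj and *itemp refer to the same object (self-assignment). */ |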
| 808 |
| 809 /////////// ToughSlice ////////// |
| 810 |
| 811 /* Dimension is indexed with 'start:stop:step' */ |
| 812 |
| 813 if (unlikely(__pyx_memoryview_slice_memviewslice( |
| 814 &{{dst}}, |
| 815 {{src}}.shape[{{dim}}], {{src}}.strides[{{dim}}], {{src}}.suboffsets[{{dim}}], |
| 816 {{dim}}, |
| 817 {{new_ndim}}, |
| 818 &{{suboffset_dim}}, |
| 819 {{start}}, |
| 820 {{stop}}, |
| 821 {{step}}, |
| 822 {{int(have_start)}}, |
| 823 {{int(have_stop)}}, |
| 824 {{int(have_step)}}, |
| 825 1) < 0)) |
| 826 { |
| 827 {{error_goto}} |
| 828 } |
| 829 |
| 830 |
| 831 ////////// SimpleSlice ////////// |
| 832 |
| 833 /* Dimension is indexed with ':' only */ |
| 834 |
| 835 {{dst}}.shape[{{new_ndim}}] = {{src}}.shape[{{dim}}]; |
| 836 {{dst}}.strides[{{new_ndim}}] = {{src}}.strides[{{dim}}]; |
| 837 |
| 838 {{if access == 'direct'}} |
| 839 {{dst}}.suboffsets[{{new_ndim}}] = -1; |
| 840 {{else}} |
| 841 {{dst}}.suboffsets[{{new_ndim}}] = {{src}}.suboffsets[{{dim}}]; |
| 842 if ({{src}}.suboffsets[{{dim}}] >= 0) |
| 843 {{suboffset_dim}} = {{new_ndim}}; |
| 844 {{endif}} |
| 845 |
| 846 |
| 847 ////////// SliceIndex ////////// |
| 848 |
| 849 // Dimension is indexed with an integer; we could use the ToughSlice |
| 850 // approach, but this is faster |
| 851 |
| 852 { |
| 853 Py_ssize_t __pyx_tmp_idx = {{idx}}; |
| 854 Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}]; |
| 855 Py_ssize_t __pyx_tmp_stride = {{src}}.strides[{{dim}}]; |
| 856 if ({{wraparound}} && (__pyx_tmp_idx < 0)) |
| 857 __pyx_tmp_idx += __pyx_tmp_shape; |
| 858 |
| 859 if ({{boundscheck}} && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { |
| 860 {{if not have_gil}} |
| 861 #ifdef WITH_THREAD |
| 862 PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); |
| 863 #endif |
| 864 {{endif}} |
| 865 |
| 866 PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis {{dim}})"); |
| 867 |
| 868 {{if not have_gil}} |
| 869 #ifdef WITH_THREAD |
| 870 PyGILState_Release(__pyx_gilstate_save); |
| 871 #endif |
| 872 {{endif}} |
| 873 |
| 874 {{error_goto}} |
| 875 } |
| 876 |
| 877 {{if all_dimensions_direct}} |
| 878 {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride; |
| 879 {{else}} |
| 880 if ({{suboffset_dim}} < 0) { |
| 881 {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride; |
| 882 |
| 883 /* This dimension is the first dimension, or is preceded by */ |
| 884 /* direct or indirect dimensions that are indexed away. */ |
| 885 /* Hence suboffset_dim must be less than zero, and we can have */ |
| 886 /* our data pointer refer to another block by dereferencing. */ |
| 887 /* slice.data -> B -> C becomes slice.data -> C */ |
| 888 |
| 889 {{if indirect}} |
| 890 { |
| 891 Py_ssize_t __pyx_tmp_suboffset = {{src}}.suboffsets[{{dim}}]; |
| 892 |
| 893 {{if generic}} |
| 894 if (__pyx_tmp_suboffset >= 0) |
| 895 {{endif}} |
| 896 |
| 897 {{dst}}.data = *((char **) {{dst}}.data) + __pyx_tmp_suboffset; |
| 898 } |
| 899 {{endif}} |
| 900 |
| 901 } else { |
| 902 {{dst}}.suboffsets[{{suboffset_dim}}] += __pyx_tmp_idx * __pyx_tmp_stride; |
| 903 |
| 904 /* Note: dimension can not be indirect, the compiler will have */ |
| 905 /* issued an error */ |
| 906 } |
| 907 |
| 908 {{endif}} |
| 909 } |
| 910 |
| 911 |
| 912 ////////// FillStrided1DScalar.proto ////////// |
| 913 |
| 914 static void |
| 915 __pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride, |
| 916 size_t itemsize, void *itemp); |
| 917 |
| 918 ////////// FillStrided1DScalar ////////// |
| 919 |
| 920 /* Fill a slice with a scalar value. The dimension is direct and strided or contiguous */ |
| 921 /* This can be used as a callback for the memoryview object to efficiently assign a scalar */ |
| 922 /* Currently unused */ |
| 923 static void |
| 924 __pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride, |
| 925 size_t itemsize, void *itemp) |
| 926 { |
| 927 Py_ssize_t i; |
| 928 {{type_decl}} item = *(({{type_decl}} *) itemp); |
| 929 {{type_decl}} *endp; |
| 930 |
| 931 stride /= sizeof({{type_decl}}); |
| 932 endp = p + stride * extent; |
| 933 |
| 934 while (p < endp) { |
| 935 *p = item; |
| 936 p += stride; |
| 937 } |
| 938 } |
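|     /* Example: filling a slice of doubles whose byte stride is 16 converts the stride */ |
|     /* to 2 elements, so the loop performs `extent` writes, touching every second */ |
|     /* element starting at p. */ |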