eigenpy 3.12.0
Bindings between Numpy and Eigen using Boost.Python
Loading...
Searching...
No Matches
eigen-allocator.hpp
1//
2// Copyright (c) 2014-2023 CNRS INRIA
3//
4
5#ifndef __eigenpy_eigen_allocator_hpp__
6#define __eigenpy_eigen_allocator_hpp__
7
8#include "eigenpy/fwd.hpp"
9#include "eigenpy/numpy-map.hpp"
10#include "eigenpy/register.hpp"
11#include "eigenpy/scalar-conversion.hpp"
12#include "eigenpy/utils/is-aligned.hpp"
13
14namespace eigenpy {
15
16namespace details {
// Allocates an Eigen matrix/array whose dimensions are taken from a numpy
// array, optionally placement-constructing it into caller-provided storage.
// NOTE(review): the struct opening line (original line 19, presumably
// `struct init_matrix_or_array {`) is missing from this extracted listing.
17template <typename MatType,
18 bool IsVectorAtCompileTime = MatType::IsVectorAtCompileTime>
// Construct with explicit dimensions; placement-new when `storage` is given.
20 static MatType *run(int rows, int cols, void *storage) {
21 if (storage)
22 return new (storage) MatType(rows, cols);
23 else
24 return new MatType(rows, cols);
25 }
26
// Deduce dimensions from the numpy array: a 1-D array is treated as a
// column vector (cols == 1); 2-D arrays map directly to (rows, cols).
27 static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
28 assert(PyArray_NDIM(pyArray) == 1 || PyArray_NDIM(pyArray) == 2);
29
30 int rows = -1, cols = -1;
31 const int ndim = PyArray_NDIM(pyArray);
32 if (ndim == 2) {
33 rows = (int)PyArray_DIMS(pyArray)[0];
34 cols = (int)PyArray_DIMS(pyArray)[1];
35 } else if (ndim == 1) {
36 rows = (int)PyArray_DIMS(pyArray)[0];
37 cols = 1;
38 }
39
40 return run(rows, cols, storage);
41 }
42};
43
44template <typename MatType>
45struct init_matrix_or_array<MatType, true> {
46 static MatType *run(int rows, int cols, void *storage) {
47 if (storage)
48 return new (storage) MatType(rows, cols);
49 else
50 return new MatType(rows, cols);
51 }
52
53 static MatType *run(int size, void *storage) {
54 if (storage)
55 return new (storage) MatType(size);
56 else
57 return new MatType(size);
58 }
59
60 static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
61 const int ndim = PyArray_NDIM(pyArray);
62 if (ndim == 1) {
63 const int size = (int)PyArray_DIMS(pyArray)[0];
64 return run(size, storage);
65 } else {
66 const int rows = (int)PyArray_DIMS(pyArray)[0];
67 const int cols = (int)PyArray_DIMS(pyArray)[1];
68 return run(rows, cols, storage);
69 }
70 }
71};
72
73#ifdef EIGENPY_WITH_TENSOR_SUPPORT
74template <typename Tensor>
75struct init_tensor {
76 static Tensor *run(PyArrayObject *pyArray, void *storage = NULL) {
77 enum { Rank = Tensor::NumDimensions };
78 assert(PyArray_NDIM(pyArray) == Rank);
79 typedef typename Tensor::Index Index;
80
81 Eigen::array<Index, Rank> dimensions;
82 for (int k = 0; k < PyArray_NDIM(pyArray); ++k)
83 dimensions[k] = PyArray_DIMS(pyArray)[k];
84
85 if (storage)
86 return new (storage) Tensor(dimensions);
87 else
88 return new Tensor(dimensions);
89 }
90};
91#endif
92
// Forward declaration of the matrix-specific swap checker.
// NOTE(review): the declaration line itself (original line 94, presumably
// `struct check_swap_impl_matrix;`) is missing from this extracted listing.
93template <typename MatType>
95
// Primary dispatcher, selected on the Eigen base type of EigenType.
// NOTE(review): original line 98 (presumably `struct check_swap_impl;`)
// is missing from this listing.
96template <typename EigenType,
97 typename BaseType = typename get_eigen_base_type<EigenType>::type>
99
// Matrices dispatch to the matrix implementation.
100template <typename MatType>
101struct check_swap_impl<MatType, Eigen::MatrixBase<MatType>>
102 : check_swap_impl_matrix<MatType> {};
103
// Returns true when the numpy leading dimension does not match the matrix
// row count, i.e. rows/cols must be swapped before mapping.
// NOTE(review): the struct opening line (original line 105) is missing
// from this listing.
104template <typename MatType>
106 static bool run(PyArrayObject *pyArray,
107 const Eigen::MatrixBase<MatType> &mat) {
// A 0-d array carries no leading dimension to compare against.
108 if (PyArray_NDIM(pyArray) == 0) return false;
109 if (mat.rows() == PyArray_DIMS(pyArray)[0])
110 return false;
111 else
112 return true;
113 }
114};
115
116template <typename EigenType>
117bool check_swap(PyArrayObject *pyArray, const EigenType &mat) {
118 return check_swap_impl<EigenType>::run(pyArray, mat);
119}
120
121#ifdef EIGENPY_WITH_TENSOR_SUPPORT
// Tensor counterpart of the swap check: tensors never require a
// rows/cols swap, so this always answers false.
122template <typename TensorType>
123struct check_swap_impl_tensor {
124 static bool run(PyArrayObject * /*pyArray*/, const TensorType & /*tensor*/) {
125 return false;
126 }
127};
128
// Tensors dispatch to the tensor implementation above.
129template <typename TensorType>
130struct check_swap_impl<TensorType, Eigen::TensorBase<TensorType>>
131 : check_swap_impl_tensor<TensorType> {};
132#endif
133
134// template <typename MatType>
135// struct cast_impl_matrix;
136//
137// template <typename EigenType,
138// typename BaseType = typename get_eigen_base_type<EigenType>::type>
139// struct cast_impl;
140//
141// template <typename MatType>
142// struct cast_impl<MatType, Eigen::MatrixBase<MatType> >
143// : cast_impl_matrix<MatType> {};
144//
145// template <typename MatType>
146// struct cast_impl_matrix
147//{
148// template <typename NewScalar, typename MatrixIn, typename MatrixOut>
149// static void run(const Eigen::MatrixBase<MatrixIn> &input,
150// const Eigen::MatrixBase<MatrixOut> &dest) {
151// dest.const_cast_derived() = input.template cast<NewScalar>();
152// }
153// };
154
// Scalar-converting copy between Eigen expressions. The `cast_is_valid`
// flag (computed from FromTypeToType) selects this primary template when
// the Scalar -> NewScalar conversion is allowed.
155template <typename Scalar, typename NewScalar,
156 template <typename D> class EigenBase = Eigen::MatrixBase,
157 bool cast_is_valid = FromTypeToType<Scalar, NewScalar>::value>
158struct cast {
// Assigns `input`, cast to NewScalar, into `dest` (const_cast_derived
// strips the const added by taking dest as a const MatrixBase ref).
159 template <typename MatrixIn, typename MatrixOut>
160 static void run(const Eigen::MatrixBase<MatrixIn> &input,
161 const Eigen::MatrixBase<MatrixOut> &dest) {
162 dest.const_cast_derived() = input.template cast<NewScalar>();
163 }
164};
165
166#ifdef EIGENPY_WITH_TENSOR_SUPPORT
// Tensor specialization of the converting copy: plain assignment of the
// scalar-cast input expression into the destination tensor.
167template <typename Scalar, typename NewScalar>
168struct cast<Scalar, NewScalar, Eigen::TensorRef, true> {
169 template <typename TensorIn, typename TensorOut>
170 static void run(const TensorIn &input, TensorOut &dest) {
171 dest = input.template cast<NewScalar>();
172 }
173};
174#endif
175
176template <typename Scalar, typename NewScalar,
177 template <typename D> class EigenBase>
178struct cast<Scalar, NewScalar, EigenBase, false> {
179 template <typename MatrixIn, typename MatrixOut>
180 static void run(const MatrixIn /*input*/, const MatrixOut /*dest*/) {
181 // do nothing
182 assert(false && "Must never happened");
183 }
184};
185
186} // namespace details
187
// Copies a numpy array into an Eigen matrix, converting Scalar -> NewScalar,
// swapping dimensions when check_swap says the layouts differ.
188#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, Scalar, NewScalar, \
189 pyArray, mat) \
190 details::cast<Scalar, NewScalar>::run( \
191 NumpyMap<MatType, Scalar>::map(pyArray, \
192 details::check_swap(pyArray, mat)), \
193 mat)
194
// Mirror of the macro above: copies an Eigen matrix into a numpy array,
// converting Scalar -> NewScalar.
195#define EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, NewScalar, \
196 mat, pyArray) \
197 details::cast<Scalar, NewScalar>::run( \
198 mat, NumpyMap<MatType, NewScalar>::map( \
199 pyArray, details::check_swap(pyArray, mat)))
200
201// Define specific cast for Windows and Mac
// Extra switch cases for numpy type codes that only exist as distinct
// dtypes on a given platform; expands to nothing elsewhere.
202#if defined _WIN32 || defined __CYGWIN__
203// Manage NPY_INT on Windows (NPY_INT32 is NPY_LONG).
204// See https://github.com/stack-of-tasks/eigenpy/pull/455
205#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
206 MatType, Scalar, pyArray, mat, CAST_MACRO) \
207 case NPY_INT: \
208 CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat); \
209 break; \
210 case NPY_UINT: \
211 CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat); \
212 break;
213#elif defined __APPLE__
214// Manage NPY_LONGLONG on Mac (NPY_INT64 is NPY_LONG).
215// long long and long are both the same type
216// but NPY_LONGLONG and NPY_LONG are different dtypes.
217// See https://github.com/stack-of-tasks/eigenpy/pull/455
218#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
219 MatType, Scalar, pyArray, mat, CAST_MACRO) \
220 case NPY_LONGLONG: \
221 CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat); \
222 break; \
223 case NPY_ULONGLONG: \
224 CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat); \
225 break;
226#else
227#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
228 MatType, Scalar, pyArray, mat, CAST_MACRO)
229#endif
230
// Dispatches on the numpy dtype code and invokes CAST_MACRO with the
// matching C++ scalar type; throws eigenpy::Exception for unsupported
// dtypes. Platform-only codes are handled by the OS_SPECIFIC macro above.
232#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH( \
233 pyArray_type_code, MatType, Scalar, pyArray, mat, CAST_MACRO) \
234 switch (pyArray_type_code) { \
235 case NPY_BOOL: \
236 CAST_MACRO(MatType, bool, Scalar, pyArray, mat); \
237 break; \
238 case NPY_INT8: \
239 CAST_MACRO(MatType, int8_t, Scalar, pyArray, mat); \
240 break; \
241 case NPY_INT16: \
242 CAST_MACRO(MatType, int16_t, Scalar, pyArray, mat); \
243 break; \
244 case NPY_INT32: \
245 CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat); \
246 break; \
247 case NPY_INT64: \
248 CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat); \
249 break; \
250 case NPY_UINT8: \
251 CAST_MACRO(MatType, uint8_t, Scalar, pyArray, mat); \
252 break; \
253 case NPY_UINT16: \
254 CAST_MACRO(MatType, uint16_t, Scalar, pyArray, mat); \
255 break; \
256 case NPY_UINT32: \
257 CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat); \
258 break; \
259 case NPY_UINT64: \
260 CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat); \
261 break; \
262 case NPY_FLOAT: \
263 CAST_MACRO(MatType, float, Scalar, pyArray, mat); \
264 break; \
265 case NPY_CFLOAT: \
266 CAST_MACRO(MatType, std::complex<float>, Scalar, pyArray, mat); \
267 break; \
268 case NPY_DOUBLE: \
269 CAST_MACRO(MatType, double, Scalar, pyArray, mat); \
270 break; \
271 case NPY_CDOUBLE: \
272 CAST_MACRO(MatType, std::complex<double>, Scalar, pyArray, mat); \
273 break; \
274 case NPY_LONGDOUBLE: \
275 CAST_MACRO(MatType, long double, Scalar, pyArray, mat); \
276 break; \
277 case NPY_CLONGDOUBLE: \
278 CAST_MACRO(MatType, std::complex<long double>, Scalar, pyArray, mat); \
279 break; \
280 EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
281 MatType, Scalar, pyArray, mat, CAST_MACRO) \
282 default: \
283 throw Exception("You asked for a conversion which is not implemented."); \
284 }
285
// Public allocator front-end, defined at the end of this header.
286template <typename EigenType>
287struct EigenAllocator;
288
// Implementation dispatcher, selected on the Eigen base type.
// NOTE(review): original line 291 (presumably `struct eigen_allocator_impl;`)
// is missing from this extracted listing.
289template <typename EigenType,
290 typename BaseType = typename get_eigen_base_type<EigenType>::type>
292
// NOTE(review): original line 294 (presumably the forward declaration
// `struct eigen_allocator_impl_matrix;`) is missing from this listing.
293template <typename MatType>
295
// Matrices (and const matrices) dispatch to the matrix implementation.
296template <typename MatType>
297struct eigen_allocator_impl<MatType, Eigen::MatrixBase<MatType>>
298 : eigen_allocator_impl_matrix<MatType> {};
299
300template <typename MatType>
301struct eigen_allocator_impl<const MatType, const Eigen::MatrixBase<MatType>>
302 : eigen_allocator_impl_matrix<const MatType> {};
303
// Allocator for plain Eigen matrix types: constructs the C++ object inside
// the Boost.Python rvalue storage, then copies the numpy content into it.
// NOTE(review): the struct opening line (original line 305, presumably
// `struct eigen_allocator_impl_matrix {`) is missing from this listing.
304template <typename MatType>
306 typedef MatType Type;
307 typedef typename MatType::Scalar Scalar;
308
// Placement-construct a MatType sized from pyArray inside the converter's
// preallocated buffer, then fill it with the array's contents.
309 static void allocate(
310 PyArrayObject *pyArray,
311 boost::python::converter::rvalue_from_python_storage<MatType> *storage) {
312 void *raw_ptr = storage->storage.bytes;
313 assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
314 "The pointer is not aligned.");
315
316 Type *mat_ptr = details::init_matrix_or_array<Type>::run(pyArray, raw_ptr);
317 Type &mat = *mat_ptr;
318
319 copy(pyArray, mat);
320 }
321
// Copy the numpy array into mat; a direct map-assignment when dtypes match,
// otherwise a scalar-converting copy via the dispatch switch.
// NOTE(review): original line 331 (the start of the map-assignment
// statement, presumably `mat = NumpyMap<MatType, Scalar>::map(`) is
// missing from this listing.
323 template <typename MatrixDerived>
324 static void copy(PyArrayObject *pyArray,
325 const Eigen::MatrixBase<MatrixDerived> &mat_) {
326 MatrixDerived &mat = mat_.const_cast_derived();
327 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
328 const int Scalar_type_code = Register::getTypeCode<Scalar>();
329
330 if (pyArray_type_code == Scalar_type_code) {
332 pyArray, details::check_swap(pyArray, mat)); // avoid useless cast
333 return;
334 }
335 EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
336 pyArray_type_code, MatType, Scalar, pyArray, mat,
337 EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX);
338 }
339
// Copy mat into the numpy array; only the same-dtype path is supported,
// any other dtype raises eigenpy::Exception.
// NOTE(review): original line 351 (the map-expression start, presumably
// `NumpyMap<MatType, Scalar>::map(pyArray,`) is missing from this listing.
341 template <typename MatrixDerived>
342 static void copy(const Eigen::MatrixBase<MatrixDerived> &mat_,
343 PyArrayObject *pyArray) {
344 const MatrixDerived &mat =
345 const_cast<const MatrixDerived &>(mat_.derived());
346 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
347 const int Scalar_type_code = Register::getTypeCode<Scalar>();
348
349 if (pyArray_type_code == Scalar_type_code) // no cast needed
350 {
352 details::check_swap(pyArray, mat)) = mat;
353 return;
354 }
355 throw Exception(
356 "Scalar conversion from Eigen to Numpy is not implemented.");
357 }
358};
359
360#ifdef EIGENPY_WITH_TENSOR_SUPPORT
// Tensor counterpart of the matrix allocator above.
361template <typename TensorType>
362struct eigen_allocator_impl_tensor;
363
364template <typename TensorType>
365struct eigen_allocator_impl<TensorType, Eigen::TensorBase<TensorType>>
366 : eigen_allocator_impl_tensor<TensorType> {};
367
368template <typename TensorType>
369struct eigen_allocator_impl<const TensorType,
370 const Eigen::TensorBase<TensorType>>
371 : eigen_allocator_impl_tensor<const TensorType> {};
372
373template <typename TensorType>
374struct eigen_allocator_impl_tensor {
375 typedef typename TensorType::Scalar Scalar;
// Placement-construct a TensorType sized from pyArray inside the
// converter's buffer, then fill it with the array's contents.
376 static void allocate(
377 PyArrayObject *pyArray,
378 boost::python::converter::rvalue_from_python_storage<TensorType>
379 *storage) {
380 void *raw_ptr = storage->storage.bytes;
381 assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
382 "The pointer is not aligned.");
383
384 TensorType *tensor_ptr =
385 details::init_tensor<TensorType>::run(pyArray, raw_ptr);
386 TensorType &tensor = *tensor_ptr;
387
388 copy(pyArray, tensor);
389 }
390
// Tensor variant of the numpy-to-Eigen converting copy, used as the
// CAST_MACRO argument of the dtype dispatch switch below.
391#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, Scalar, \
392 NewScalar, pyArray, tensor) \
393 { \
394 typename NumpyMap<TensorType, Scalar>::EigenMap pyArray_map = \
395 NumpyMap<TensorType, Scalar>::map( \
396 pyArray, details::check_swap(pyArray, tensor)); \
397 details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(pyArray_map, \
398 tensor); \
399 }
400
// Copy the numpy array into the tensor; direct map-assignment when dtypes
// match, converting copy otherwise.
402 template <typename TensorDerived>
403 static void copy(PyArrayObject *pyArray, TensorDerived &tensor) {
404 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
405 const int Scalar_type_code = Register::getTypeCode<Scalar>();
406
407 if (pyArray_type_code == Scalar_type_code) {
408 tensor = NumpyMap<TensorType, Scalar>::map(
409 pyArray, details::check_swap(pyArray, tensor)); // avoid useless cast
410 return;
411 }
412
413 EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
414 pyArray_type_code, TensorType, Scalar, pyArray, tensor,
415 EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR);
416 }
417
// Reverse direction: Eigen tensor into numpy array (same-dtype case only;
// note the copy() below throws for any other dtype).
418#define EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar, \
419 NewScalar, tensor, pyArray) \
420 { \
421 typename NumpyMap<TensorType, NewScalar>::EigenMap pyArray_map = \
422 NumpyMap<TensorType, NewScalar>::map( \
423 pyArray, details::check_swap(pyArray, tensor)); \
424 details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(tensor, \
425 pyArray_map); \
426 }
427
429 static void copy(const TensorType &tensor, PyArrayObject *pyArray) {
430 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
431 const int Scalar_type_code = Register::getTypeCode<Scalar>();
432
433 if (pyArray_type_code == Scalar_type_code) // no cast needed
434 {
435 NumpyMap<TensorType, Scalar>::map(
436 pyArray, details::check_swap(pyArray, tensor)) = tensor;
437 return;
438 }
439
440 throw Exception(
441 "Scalar conversion from Eigen to Numpy is not implemented.");
442 }
443};
444#endif
445
446#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
454template <typename MatType>
455inline bool is_arr_layout_compatible_with_mat_type(PyArrayObject *pyArray) {
456 bool is_array_C_cont = PyArray_IS_C_CONTIGUOUS(pyArray);
457 bool is_array_F_cont = PyArray_IS_F_CONTIGUOUS(pyArray);
458 return (MatType::IsRowMajor && is_array_C_cont) ||
459 (!MatType::IsRowMajor && is_array_F_cont) ||
460 (MatType::IsVectorAtCompileTime &&
461 (is_array_C_cont || is_array_F_cont));
462}
463
// Allocator for Eigen::Ref<MatType>: maps the numpy memory directly when
// possible, otherwise allocates a backing MatType and copies into it.
464template <typename MatType, int Options, typename Stride>
465struct eigen_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride>> {
466 typedef Eigen::Ref<MatType, Options, Stride> RefType;
467 typedef typename MatType::Scalar Scalar;
468
469 typedef
470 typename ::boost::python::detail::referent_storage<RefType &>::StorageType
471 StorageType;
472
473 static void allocate(
474 PyArrayObject *pyArray,
475 ::boost::python::converter::rvalue_from_python_storage<RefType>
476 *storage) {
477 typedef typename StrideType<
478 MatType,
479 Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
480 Eigen::internal::traits<RefType>::StrideType::
481 OuterStrideAtCompileTime>::type NumpyMapStride;
482
// A backing copy is required when the dtype differs, the storage order is
// incompatible, or (for aligned Refs) the data is misaligned/segmented.
483 bool need_to_allocate = false;
484 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
485 const int Scalar_type_code = Register::getTypeCode<Scalar>();
486 if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
487 bool incompatible_layout =
488 !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
489 need_to_allocate |= incompatible_layout;
490 if (Options !=
491 Eigen::Unaligned) // we need to check whether the memory is correctly
492 // aligned and composed of a continuous segment
493 {
494 void *data_ptr = PyArray_DATA(pyArray);
495 if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
496 need_to_allocate |= true;
497 }
498
499 void *raw_ptr = storage->storage.bytes;
500 if (need_to_allocate) {
// Heap-allocate a MatType (ownership passed to StorageType), reference
// it through the Ref, and fill it from the numpy array.
501 MatType *mat_ptr;
502 mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
503 RefType mat_ref(*mat_ptr);
504
505 new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);
506
507 RefType &mat = *reinterpret_cast<RefType *>(raw_ptr);
508 EigenAllocator<MatType>::copy(pyArray, mat);
509 } else {
// Zero-copy path: bind the Ref directly onto the numpy memory.
510 assert(pyArray_type_code == Scalar_type_code);
511 typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
512 numpyMap =
513 NumpyMap<MatType, Scalar, Options, NumpyMapStride>::map(pyArray);
514 RefType mat_ref(numpyMap);
515 new (raw_ptr) StorageType(mat_ref, pyArray);
516 }
517 }
518
// Copy the referenced matrix back into the numpy array.
519 static void copy(RefType const &ref, PyArrayObject *pyArray) {
520 EigenAllocator<MatType>::copy(ref, pyArray);
521 }
522};
523
// Const-Ref counterpart of the allocator above.
// NOTE(review): the specialization's opening line (original line 525,
// presumably `struct eigen_allocator_impl_matrix<`) is missing from this
// extracted listing.
524template <typename MatType, int Options, typename Stride>
526 const Eigen::Ref<const MatType, Options, Stride>> {
527 typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
528 typedef typename MatType::Scalar Scalar;
529
530 typedef
531 typename ::boost::python::detail::referent_storage<RefType &>::StorageType
532 StorageType;
533
534 static void allocate(
535 PyArrayObject *pyArray,
536 ::boost::python::converter::rvalue_from_python_storage<RefType>
537 *storage) {
538 typedef typename StrideType<
539 MatType,
540 Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
541 Eigen::internal::traits<RefType>::StrideType::
542 OuterStrideAtCompileTime>::type NumpyMapStride;
543
// Same backing-copy criteria as the mutable Ref allocator: dtype
// mismatch, incompatible storage order, or misaligned/segmented data.
544 bool need_to_allocate = false;
545 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
546 const int Scalar_type_code = Register::getTypeCode<Scalar>();
547
548 if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
549 bool incompatible_layout =
550 !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
551 need_to_allocate |= incompatible_layout;
552 if (Options !=
553 Eigen::Unaligned) // we need to check whether the memory is correctly
554 // aligned and composed of a continuous segment
555 {
556 void *data_ptr = PyArray_DATA(pyArray);
557 if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
558 need_to_allocate |= true;
559 }
560
561 void *raw_ptr = storage->storage.bytes;
562 if (need_to_allocate) {
// Fill the backing MatType through a mutable reference, since RefType
// itself is const here.
563 MatType *mat_ptr;
564 mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
565 RefType mat_ref(*mat_ptr);
566
567 new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);
568
569 MatType &mat = *mat_ptr;
570 EigenAllocator<MatType>::copy(pyArray, mat);
571 } else {
572 assert(pyArray_type_code == Scalar_type_code);
573 typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
574 numpyMap =
575 NumpyMap<MatType, Scalar, Options, NumpyMapStride>::map(pyArray);
576 RefType mat_ref(numpyMap);
577 new (raw_ptr) StorageType(mat_ref, pyArray);
578 }
579 }
580
581 static void copy(RefType const &ref, PyArrayObject *pyArray) {
582 EigenAllocator<MatType>::copy(ref, pyArray);
583 }
584};
585#endif
586
587#ifdef EIGENPY_WITH_TENSOR_SUPPORT
588
// Allocator for Eigen::TensorRef<TensorType> (and its const variant).
589template <typename TensorType, typename TensorRef>
590struct eigen_allocator_impl_tensor_ref;
591
592template <typename TensorType>
593struct eigen_allocator_impl_tensor<Eigen::TensorRef<TensorType>>
594 : eigen_allocator_impl_tensor_ref<TensorType,
595 Eigen::TensorRef<TensorType>> {};
596
597template <typename TensorType>
598struct eigen_allocator_impl_tensor<const Eigen::TensorRef<const TensorType>>
599 : eigen_allocator_impl_tensor_ref<
600 const TensorType, const Eigen::TensorRef<const TensorType>> {};
601
602template <typename TensorType, typename RefType>
603struct eigen_allocator_impl_tensor_ref {
604 typedef typename TensorType::Scalar Scalar;
605
606 typedef
607 typename ::boost::python::detail::referent_storage<RefType &>::StorageType
608 StorageType;
609
610 static void allocate(
611 PyArrayObject *pyArray,
612 ::boost::python::converter::rvalue_from_python_storage<RefType>
613 *storage) {
614 // typedef typename StrideType<
615 // MatType,
616 // Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
617 // Eigen::internal::traits<RefType>::StrideType::
618 // OuterStrideAtCompileTime>::type NumpyMapStride;
619
620 static const int Options = Eigen::internal::traits<TensorType>::Options;
621
// Unlike the matrix Ref allocator, only a dtype mismatch forces a
// backing copy here; the layout/alignment checks are commented out.
622 bool need_to_allocate = false;
623 const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
624 const int Scalar_type_code = Register::getTypeCode<Scalar>();
625 if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
626 // bool incompatible_layout =
627 // !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
628 // need_to_allocate |= incompatible_layout;
629 // if (Options !=
630 // Eigen::Unaligned) // we need to check whether the memory is
631 // correctly
632 // // aligned and composed of a continuous segment
633 // {
634 // void *data_ptr = PyArray_DATA(pyArray);
635 // if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr,
636 // Options))
637 // need_to_allocate |= true;
638 // }
639
640 void *raw_ptr = storage->storage.bytes;
641 if (need_to_allocate) {
// The backing tensor must be non-const so it can be filled, even when
// RefType refers to a const tensor.
642 typedef typename boost::remove_const<TensorType>::type TensorTypeNonConst;
643 TensorTypeNonConst *tensor_ptr;
644 tensor_ptr = details::init_tensor<TensorTypeNonConst>::run(pyArray);
645 RefType tensor_ref(*tensor_ptr);
646
647 new (raw_ptr) StorageType(tensor_ref, pyArray, tensor_ptr);
648
649 TensorTypeNonConst &tensor = *tensor_ptr;
650 EigenAllocator<TensorTypeNonConst>::copy(pyArray, tensor);
651 } else {
// Zero-copy path: bind the TensorRef directly onto the numpy memory.
652 assert(pyArray_type_code == Scalar_type_code);
653 typename NumpyMap<TensorType, Scalar, Options>::EigenMap numpyMap =
654 NumpyMap<TensorType, Scalar, Options>::map(pyArray);
655 RefType tensor_ref(numpyMap);
656 new (raw_ptr) StorageType(tensor_ref, pyArray);
657 }
658 }
659
660 static void copy(RefType const &ref, PyArrayObject *pyArray) {
661 EigenAllocator<TensorType>::copy(ref, pyArray);
662 }
663};
664
665#endif
666
// Public entry point: inherits the implementation selected by the
// eigen_allocator_impl dispatcher for the given Eigen type.
667template <typename EigenType>
668struct EigenAllocator : eigen_allocator_impl<EigenType> {};
669
670} // namespace eigenpy
671
672#endif // __eigenpy_eigen_allocator_hpp__
static void copy(PyArrayObject *pyArray, const Eigen::MatrixBase< MatrixDerived > &mat_)
Copy Python array into the input matrix mat.
static void copy(const Eigen::MatrixBase< MatrixDerived > &mat_, PyArrayObject *pyArray)
Copy mat into the Python array using Eigen::Map.