5#ifndef __eigenpy_numpy_allocator_hpp__ 6#define __eigenpy_numpy_allocator_hpp__ 8#include "eigenpy/fwd.hpp" 9#include "eigenpy/eigen-allocator.hpp" 10#include "eigenpy/numpy-type.hpp" 11#include "eigenpy/register.hpp" 15template <
typename EigenType,
typename BaseType>
/// Allocator implementation for Eigen matrix types; specializations for
/// value, lvalue-reference and Eigen::Ref variants are defined below.
template <typename EigenType>
struct numpy_allocator_impl_matrix;
21template <
typename MatType>
23 MatType, Eigen::MatrixBase<typename remove_const_reference<MatType>::type>>
26template <
typename MatType>
29 const Eigen::MatrixBase<typename remove_const_reference<MatType>::type>>
37template <
typename MatType>
41template <
typename EigenType,
42 typename BaseType =
typename get_eigen_base_type<EigenType>::type>
45template <
typename MatType>
47 template <
typename SimilarMatrixType>
48 static PyArrayObject *allocate(
49 const Eigen::MatrixBase<SimilarMatrixType> &mat, npy_intp nd,
51 typedef typename SimilarMatrixType::Scalar Scalar;
53 const int code = Register::getTypeCode<Scalar>();
54 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
55 static_cast<int>(nd), shape, code);
64#ifdef EIGENPY_WITH_TENSOR_SUPPORT 66template <
typename TensorType>
67struct numpy_allocator_impl_tensor;
69template <
typename TensorType>
71 : numpy_allocator_impl_tensor<TensorType> {};
73template <
typename TensorType>
74struct numpy_allocator_impl<const TensorType,
75 const Eigen::TensorBase<TensorType>>
76 : numpy_allocator_impl_tensor<const TensorType> {};
78template <
typename TensorType>
79struct numpy_allocator_impl_tensor {
80 template <
typename TensorDerived>
81 static PyArrayObject *allocate(
const TensorDerived &tensor, npy_intp nd,
83 const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
84 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
85 static_cast<int>(nd), shape, code);
88 EigenAllocator<TensorDerived>::copy(
89 static_cast<const TensorDerived &
>(tensor), pyArray);
96template <
typename MatType>
98 template <
typename SimilarMatrixType>
99 static PyArrayObject *allocate(Eigen::PlainObjectBase<SimilarMatrixType> &mat,
100 npy_intp nd, npy_intp *shape) {
101 typedef typename SimilarMatrixType::Scalar Scalar;
103 NPY_ARRAY_MEMORY_CONTIGUOUS =
104 SimilarMatrixType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
107 if (NumpyType::sharedMemory()) {
108 const int Scalar_type_code = Register::getTypeCode<Scalar>();
109 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
110 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code,
111 mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
120#if EIGEN_VERSION_AT_LEAST(3, 2, 0) 122template <
typename MatType,
int Options,
typename Str
ide>
124 typedef Eigen::Ref<MatType, Options, Stride> RefType;
126 static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
127 typedef typename RefType::Scalar Scalar;
129 NPY_ARRAY_MEMORY_CONTIGUOUS =
130 RefType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
133 if (NumpyType::sharedMemory()) {
134 const int Scalar_type_code = Register::getTypeCode<Scalar>();
135 const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
136 Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
138 outer_stride = reverse_strides ? mat.innerStride()
141#if NPY_ABI_VERSION < 0x02000000 142 const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
145 PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
147 npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
149 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
150 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code,
151 strides, mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
155 return NumpyAllocator<MatType>::allocate(mat, nd, shape);
162template <
typename MatType>
164 template <
typename SimilarMatrixType>
165 static PyArrayObject *allocate(
166 const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
168 typedef typename SimilarMatrixType::Scalar Scalar;
170 NPY_ARRAY_MEMORY_CONTIGUOUS_RO = SimilarMatrixType::IsRowMajor
171 ? NPY_ARRAY_CARRAY_RO
172 : NPY_ARRAY_FARRAY_RO
175 if (NumpyType::sharedMemory()) {
176 const int Scalar_type_code = Register::getTypeCode<Scalar>();
177 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
178 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code,
179 const_cast<Scalar *
>(mat.data()),
180 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
189#if EIGEN_VERSION_AT_LEAST(3, 2, 0) 191template <
typename MatType,
int Options,
typename Str
ide>
193 const Eigen::Ref<const MatType, Options, Stride>> {
194 typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
196 static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
197 typedef typename RefType::Scalar Scalar;
199 NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
200 RefType::IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
203 if (NumpyType::sharedMemory()) {
204 const int Scalar_type_code = Register::getTypeCode<Scalar>();
206 const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
207 Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
209 outer_stride = reverse_strides ? mat.innerStride()
212#if NPY_ABI_VERSION < 0x02000000 213 const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
216 PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
218 npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
220 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
221 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code,
222 strides,
const_cast<Scalar *
>(mat.data()),
223 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
227 return NumpyAllocator<MatType>::allocate(mat, nd, shape);
234#ifdef EIGENPY_WITH_TENSOR_SUPPORT 235template <
typename TensorType>
236struct numpy_allocator_impl_tensor<Eigen::TensorRef<TensorType>> {
237 typedef Eigen::TensorRef<TensorType> RefType;
239 static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
241 typedef typename RefType::Scalar Scalar;
242 static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
244 NPY_ARRAY_MEMORY_CONTIGUOUS =
245 IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
248 if (NumpyType::sharedMemory()) {
249 const int Scalar_type_code = Register::getTypeCode<Scalar>();
256 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
257 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code, NULL,
258 const_cast<Scalar *
>(tensor.data()),
259 NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
263 return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
268template <
typename TensorType>
269struct numpy_allocator_impl_tensor<const Eigen::TensorRef<const TensorType>> {
270 typedef const Eigen::TensorRef<const TensorType> RefType;
272 static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
274 typedef typename RefType::Scalar Scalar;
275 static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
277 NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
278 IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
281 if (NumpyType::sharedMemory()) {
282 const int Scalar_type_code = Register::getTypeCode<Scalar>();
284 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
285 getPyArrayType(),
static_cast<int>(nd), shape, Scalar_type_code, NULL,
286 const_cast<Scalar *
>(tensor.data()),
287 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
291 return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);