// eigenpy 3.12.0 — Bindings between Numpy and Eigen using Boost.Python
// File: numpy-allocator.hpp
/*
 * Copyright 2020-2023 INRIA
 */

#ifndef __eigenpy_numpy_allocator_hpp__
#define __eigenpy_numpy_allocator_hpp__

#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/numpy-type.hpp"
#include "eigenpy/register.hpp"

namespace eigenpy {

/// \brief Trait selecting the NumPy allocation strategy for a given Eigen
/// type. Specializations below dispatch matrix and tensor types to the
/// matrix/tensor allocator backends.
template <typename EigenType, typename BaseType>
struct numpy_allocator_impl;
/// \brief Allocator backend for Eigen matrix types (defined further below).
template <typename EigenType>
struct numpy_allocator_impl_matrix;
21template <typename MatType>
23 MatType, Eigen::MatrixBase<typename remove_const_reference<MatType>::type>>
24 : numpy_allocator_impl_matrix<MatType> {};
25
26template <typename MatType>
28 const MatType,
29 const Eigen::MatrixBase<typename remove_const_reference<MatType>::type>>
30 : numpy_allocator_impl_matrix<const MatType> {};
31
// template <typename MatType>
// struct numpy_allocator_impl<MatType &, Eigen::MatrixBase<MatType> > :
// numpy_allocator_impl_matrix<MatType &>
//{};

37template <typename MatType>
38struct numpy_allocator_impl<const MatType &, const Eigen::MatrixBase<MatType>>
40
41template <typename EigenType,
42 typename BaseType = typename get_eigen_base_type<EigenType>::type>
43struct NumpyAllocator : numpy_allocator_impl<EigenType, BaseType> {};
44
45template <typename MatType>
47 template <typename SimilarMatrixType>
48 static PyArrayObject *allocate(
49 const Eigen::MatrixBase<SimilarMatrixType> &mat, npy_intp nd,
50 npy_intp *shape) {
51 typedef typename SimilarMatrixType::Scalar Scalar;
52
53 const int code = Register::getTypeCode<Scalar>();
54 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
55 static_cast<int>(nd), shape, code);
56
57 // Copy data
59
60 return pyArray;
61 }
62};
63
#ifdef EIGENPY_WITH_TENSOR_SUPPORT

/// \brief Allocator backend for Eigen tensor types.
template <typename TensorType>
struct numpy_allocator_impl_tensor;

/// \brief Dispatch non-const tensor types to the tensor backend.
template <typename TensorType>
struct numpy_allocator_impl<TensorType, Eigen::TensorBase<TensorType>>
    : numpy_allocator_impl_tensor<TensorType> {};

/// \brief Dispatch const tensor types to the tensor backend.
template <typename TensorType>
struct numpy_allocator_impl<const TensorType,
                            const Eigen::TensorBase<TensorType>>
    : numpy_allocator_impl_tensor<const TensorType> {};

/// \brief Default tensor allocator: copies the tensor content into a
/// freshly allocated NumPy array (no memory sharing).
template <typename TensorType>
struct numpy_allocator_impl_tensor {
  template <typename TensorDerived>
  static PyArrayObject *allocate(const TensorDerived &tensor, npy_intp nd,
                                 npy_intp *shape) {
    const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
    PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
        static_cast<int>(nd), shape, code);

    // Copy data
    EigenAllocator<TensorDerived>::copy(
        static_cast<const TensorDerived &>(tensor), pyArray);

    return pyArray;
  }
};
#endif

96template <typename MatType>
97struct numpy_allocator_impl_matrix<MatType &> {
98 template <typename SimilarMatrixType>
99 static PyArrayObject *allocate(Eigen::PlainObjectBase<SimilarMatrixType> &mat,
100 npy_intp nd, npy_intp *shape) {
101 typedef typename SimilarMatrixType::Scalar Scalar;
102 enum {
103 NPY_ARRAY_MEMORY_CONTIGUOUS =
104 SimilarMatrixType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
105 };
106
107 if (NumpyType::sharedMemory()) {
108 const int Scalar_type_code = Register::getTypeCode<Scalar>();
109 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
110 getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
111 mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
112
113 return pyArray;
114 } else {
115 return NumpyAllocator<MatType>::allocate(mat, nd, shape);
116 }
117 }
118};
119
120#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
121
122template <typename MatType, int Options, typename Stride>
123struct numpy_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride>> {
124 typedef Eigen::Ref<MatType, Options, Stride> RefType;
125
126 static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
127 typedef typename RefType::Scalar Scalar;
128 enum {
129 NPY_ARRAY_MEMORY_CONTIGUOUS =
130 RefType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
131 };
132
133 if (NumpyType::sharedMemory()) {
134 const int Scalar_type_code = Register::getTypeCode<Scalar>();
135 const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
136 Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
137 : mat.innerStride(),
138 outer_stride = reverse_strides ? mat.innerStride()
139 : mat.outerStride();
140
141#if NPY_ABI_VERSION < 0x02000000
142 const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
143#else
144 const int elsize =
145 PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
146#endif
147 npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
148
149 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
150 getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
151 strides, mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
152
153 return pyArray;
154 } else {
155 return NumpyAllocator<MatType>::allocate(mat, nd, shape);
156 }
157 }
158};
159
160#endif
161
162template <typename MatType>
163struct numpy_allocator_impl_matrix<const MatType &> {
164 template <typename SimilarMatrixType>
165 static PyArrayObject *allocate(
166 const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
167 npy_intp *shape) {
168 typedef typename SimilarMatrixType::Scalar Scalar;
169 enum {
170 NPY_ARRAY_MEMORY_CONTIGUOUS_RO = SimilarMatrixType::IsRowMajor
171 ? NPY_ARRAY_CARRAY_RO
172 : NPY_ARRAY_FARRAY_RO
173 };
174
175 if (NumpyType::sharedMemory()) {
176 const int Scalar_type_code = Register::getTypeCode<Scalar>();
177 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
178 getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
179 const_cast<Scalar *>(mat.data()),
180 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
181
182 return pyArray;
183 } else {
184 return NumpyAllocator<MatType>::allocate(mat, nd, shape);
185 }
186 }
187};
188
189#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
190
191template <typename MatType, int Options, typename Stride>
193 const Eigen::Ref<const MatType, Options, Stride>> {
194 typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
195
196 static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
197 typedef typename RefType::Scalar Scalar;
198 enum {
199 NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
200 RefType::IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
201 };
202
203 if (NumpyType::sharedMemory()) {
204 const int Scalar_type_code = Register::getTypeCode<Scalar>();
205
206 const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
207 Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
208 : mat.innerStride(),
209 outer_stride = reverse_strides ? mat.innerStride()
210 : mat.outerStride();
211
212#if NPY_ABI_VERSION < 0x02000000
213 const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
214#else
215 const int elsize =
216 PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
217#endif
218 npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
219
220 PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
221 getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
222 strides, const_cast<Scalar *>(mat.data()),
223 NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
224
225 return pyArray;
226 } else {
227 return NumpyAllocator<MatType>::allocate(mat, nd, shape);
228 }
229 }
230};
231
232#endif
233
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
/// \brief Allocator for Eigen::TensorRef: shares the referenced memory when
/// NumpyType::sharedMemory() is enabled, otherwise copies.
template <typename TensorType>
struct numpy_allocator_impl_tensor<Eigen::TensorRef<TensorType>> {
  typedef Eigen::TensorRef<TensorType> RefType;

  static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
                                 npy_intp *shape) {
    typedef typename RefType::Scalar Scalar;
    static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
    enum {
      NPY_ARRAY_MEMORY_CONTIGUOUS =
          IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
    };

    if (NumpyType::sharedMemory()) {
      const int Scalar_type_code = Register::getTypeCode<Scalar>();
      // static const Index NumIndices = TensorType::NumIndices;

      // const int elsize =
      // call_PyArray_DescrFromType(Scalar_type_code)->elsize; npy_intp
      // strides[NumIndices];

      // NULL strides: NumPy derives them from the contiguity flags.
      PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
          getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
          const_cast<Scalar *>(tensor.data()),
          NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);

      return pyArray;
    } else {
      return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
    }
  }
};

/// \brief Allocator for const Eigen::TensorRef: shares memory read-only
/// when NumpyType::sharedMemory() is enabled, otherwise copies.
template <typename TensorType>
struct numpy_allocator_impl_tensor<const Eigen::TensorRef<const TensorType>> {
  typedef const Eigen::TensorRef<const TensorType> RefType;

  static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
                                 npy_intp *shape) {
    typedef typename RefType::Scalar Scalar;
    static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
    enum {
      NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
          IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
    };

    if (NumpyType::sharedMemory()) {
      const int Scalar_type_code = Register::getTypeCode<Scalar>();

      PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
          getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
          const_cast<Scalar *>(tensor.data()),
          NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);

      return pyArray;
    } else {
      return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
    }
  }
};

#endif
}  // namespace eigenpy

#endif  // ifndef __eigenpy_numpy_allocator_hpp__