11#include <initializer_list>
// NOTE(review): fragment — presumably the function-local static backing
// alignedAllocCallCount() (documented below as the process-global counter of
// non-empty AlignedAllocator::allocate calls); enclosing function elided — TODO confirm.
45 static std::atomic<std::uint64_t> counter{0};
55template <
class T, std::
size_t Align>
class AlignedAllocator :
public std::allocator<T> {
58 using size_type = std::size_t;
60 using const_pointer =
const T *;
64 using is_always_equal = std::true_type;
65 using propagate_on_container_move_assignment = std::true_type;
66 using propagate_on_container_copy_assignment = std::true_type;
67 using propagate_on_container_swap = std::true_type;
69 template <
typename U>
struct rebind {
70 using other = AlignedAllocator<U, Align>;
73 AlignedAllocator() noexcept = default;
75 template <typename U> AlignedAllocator(const AlignedAllocator<U, Align> & ) noexcept {}
77 pointer allocate(size_type n) {
82 const size_type bytes = n *
sizeof(T);
83 const size_type aligned_bytes = (bytes + Align - 1) / Align * Align;
85 if (
auto *ptr =
static_cast<pointer
>(std::aligned_alloc(Align, aligned_bytes))) {
88 throw std::bad_alloc();
91 void deallocate(pointer ptr, size_type )
noexcept { std::free(ptr); }
94template <
class T,
class U, std::
size_t Align>
95bool operator==(
const AlignedAllocator<T, Align> &,
const AlignedAllocator<U, Align> &)
noexcept {
99template <
class T,
class U, std::
size_t Align>
100bool operator!=(
const AlignedAllocator<T, Align> &,
const AlignedAllocator<U, Align> &)
noexcept {
// NOTE(review): extraction-garbled region — original line numbers are fused
// into the text and interior lines are elided. Comments below annotate the
// visible pieces only; do not treat this fragment as compilable.
//
// NDArray<T, N, L>: rank-N array of arithmetic T; L is a compile-time layout
// tag (Contig by default). Owns storage via m_vec or borrows via m_data/m_base.
136template <
class T, std::
size_t N, Layout L = Layout::Contig>
class NDArray {
137 static_assert(N >= 1,
"NDArray rank must be >= 1");
143 static_assert(std::is_arithmetic_v<T> && !std::is_same_v<T, bool>,
144 "NDArray element type must be arithmetic and not bool");
// All NDArray instantiations befriend each other so rank/layout-changing
// views (row(), view<M>(), ...) can call the private borrowed-tag ctor.
148 template <
class U, std::
size_t M, Layout LL>
friend class NDArray;
// Fragment of an Accessor method (enclosing definition elided): writes
// through an accessor assert the target array is mutable.
265 assert(this->
m_ndarray->m_mutable &&
"write to read-only borrow");
// Ctor from initializer_list of extents; owned, contiguous storage.
// Elided lines presumably accumulate `size` and size m_vec — TODO confirm.
279 template <Layout L2 = L>
281 NDArray(std::initializer_list<std::size_t> dims)
284 assert(dims.size() == N);
286 std::size_t size = 1;
287 for (
auto d : dims) {
291 m_strides = computeContiguousStrides(m_shape);
293 m_data = m_vec.data();
// Ctor from a runtime shape array; same owned-contiguous setup.
306 template <Layout L2 = L>
308 explicit NDArray(std::array<std::size_t, N> shape)
309 : m_data(
nullptr), m_base(
nullptr), m_shape(shape), m_offset(0),
311 std::size_t size = 1;
312 for (std::size_t k = 0; k < N; ++k) {
315 m_strides = computeContiguousStrides(m_shape);
317 m_data = m_vec.data();
// Copy ctor: deep-copies m_vec; body (elided) presumably re-seats
// m_data/m_base against the new m_vec for owned storage — TODO confirm.
326 : m_vec(other.m_vec), m_shape(other.m_shape), m_strides(other.m_strides),
327 m_offset(other.m_offset), m_storage(other.m_storage), m_mutable(other.m_mutable) {
// Move ctor: steals m_vec, then nulls the source's raw pointers so the
// moved-from object holds no dangling aliases.
334 : m_vec(std::move(other.m_vec)), m_shape(other.m_shape), m_strides(other.m_strides),
335 m_offset(other.m_offset), m_storage(other.m_storage), m_mutable(other.m_mutable) {
338 other.m_data =
nullptr;
339 other.m_base =
nullptr;
// Copy assignment: self-assignment guard, then member-wise copy.
344 if (
this == &other) {
348 m_shape = other.m_shape;
349 m_strides = other.m_strides;
350 m_offset = other.m_offset;
351 m_storage = other.m_storage;
352 m_mutable = other.m_mutable;
// Move assignment: self-assignment guard, steal m_vec, copy metadata,
// then null the source's raw pointers (same hygiene as the move ctor).
360 if (
this == &other) {
363 m_vec = std::move(other.m_vec);
364 m_shape = other.m_shape;
365 m_strides = other.m_strides;
366 m_offset = other.m_offset;
367 m_storage = other.m_storage;
368 m_mutable = other.m_mutable;
371 other.m_data =
nullptr;
372 other.m_base =
nullptr;
// Borrowed-tag ctor: wraps external memory without owning it. `data` is
// the (possibly advanced) element pointer, `base` the original allocation
// pointer kept for sameStorage() identity checks. For the Contig layout the
// ctor asserts zero offset and row-major contiguous strides.
377 NDArray(clustering::detail::BorrowedTag, T *
data, T *base, std::array<std::size_t, N> shape,
378 std::array<std::ptrdiff_t, N> strides, std::ptrdiff_t offset,
bool isMutable) noexcept
379 : m_data(
data), m_base(base), m_vec(), m_shape(shape), m_strides(strides), m_offset(offset),
382 assert(offset == 0 && strides == computeContiguousStrides(shape) &&
383 "Contig NDArray requires contiguous strides and zero offset");
// NOTE(review): garbled fragment — signatures for several members here are
// elided; comments annotate the visible pieces only.
//
// operator[] pair (mutable/const, signatures elided): both bounds-check the
// leading dimension before building an Accessor/ConstAccessor.
397 template <Layout L2 = L>
400 assert(
index < m_shape[0]);
410 template <Layout L2 = L>
413 assert(
index < m_shape[0]);
// Mutable operator()(Ix...): exactly N indices, write requires m_mutable;
// element located via the stride dot product in computeElementOffset.
425 static_assert(
sizeof...(Ix) == N,
"operator() requires exactly N indices");
426 assert(m_mutable &&
"write to read-only borrow");
427 return m_data[computeElementOffset(std::index_sequence_for<Ix...>{}, ix...)];
// Const operator()(Ix...): same lookup, no mutability check needed.
431 template <
class... Ix>
const T &
operator()(Ix... ix)
const noexcept {
432 static_assert(
sizeof...(Ix) == N,
"operator() requires exactly N indices");
433 return m_data[computeElementOffset(std::index_sequence_for<Ix...>{}, ix...)];
// flatIndex (mutable overload, signature elided): raw flat access into
// m_data; writes gated on m_mutable.
443 assert(m_mutable &&
"write to read-only borrow");
444 return m_data[
index];
// isContiguous(): runtime check — zero offset and exactly row-major strides.
478 return m_offset == 0 && m_strides == computeContiguousStrides(m_shape);
488 [[nodiscard]]
bool isMutable() const noexcept {
return m_mutable; }
// data() const: read-only pointer to the first addressed element.
503 const T *
data() const noexcept {
return m_data; }
// data() mutable overload (signature elided): asserts m_mutable first.
514 assert(m_mutable &&
"write to read-only borrow");
// baseData(): original allocation pointer, used for storage-identity
// comparisons (see sameStorage()); returned non-const even from const this.
525 [[nodiscard]] T *
baseData() const noexcept {
return m_base; }
// isAligned<A>(): true when the current data pointer is A-byte aligned.
533 template <std::
size_t A>
bool isAligned() const noexcept {
534 return (
reinterpret_cast<std::uintptr_t
>(m_data) % A) == 0;
// alignedData<A>() mutable/const (signatures elided): asserts A-byte
// alignment, then attaches the hint via GCC/Clang __builtin_assume_aligned
// so the optimizer may vectorize accesses through the returned pointer.
549 assert(
isAligned<A>() &&
"alignedData<A>() requires A-byte aligned data");
550 return static_cast<T *
>(__builtin_assume_aligned(m_data, A));
555 assert(
isAligned<A>() &&
"alignedData<A>() requires A-byte aligned data");
556 return static_cast<const T *
>(__builtin_assume_aligned(m_data, A));
// NOTE(review): garbled fragment — factory/view signatures partially elided;
// comments annotate the visible pieces only.
//
// borrow(T*, shape): mutable contiguous borrow — base == data, zero offset.
568 template <Layout L2 = L>
571 return NDArray(clustering::detail::BorrowedTag{}, ptr, ptr, shape,
572 computeContiguousStrides(shape), 0,
true);
// borrow(const T*, shape): read-only borrow; const_cast is confined here and
// the view is created with isMutable == false so writes assert.
581 template <Layout L2 = L>
583 static NDArray borrow(
const T *ptr, std::array<std::size_t, N> shape)
noexcept {
584 auto *mutPtr =
const_cast<T *
>(ptr);
585 return NDArray(clustering::detail::BorrowedTag{}, mutPtr, mutPtr, shape,
586 computeContiguousStrides(shape), 0,
false);
// Strided borrows (mutable / read-only): caller supplies element strides.
595 template <Layout L2 = L>
598 std::array<std::ptrdiff_t, N> strides)
noexcept {
599 return NDArray(clustering::detail::BorrowedTag{}, ptr, ptr, shape, strides, 0,
true);
603 template <Layout L2 = L>
606 std::array<std::ptrdiff_t, N> strides)
noexcept {
607 auto *mutPtr =
const_cast<T *
>(ptr);
608 return NDArray(clustering::detail::BorrowedTag{}, mutPtr, mutPtr, shape, strides, 0,
false);
// borrow1D convenience pair: wraps a raw pointer + length as a rank-1 borrow.
614 template <std::
size_t M = N>
617 return borrow(ptr, std::array<std::size_t, 1>{n});
621 template <std::
size_t M = N>
624 return borrow(ptr, std::array<std::size_t, 1>{n});
// borrowBytes: NumPy-convention byte strides; each must be divisible by
// sizeof(T), then is converted to an element stride.
640 template <Layout L2 = L>
643 std::array<std::ptrdiff_t, N> stridesInBytes,
645 std::array<std::ptrdiff_t, N> element_strides{};
646 for (std::size_t k = 0; k < N; ++k) {
647 assert(stridesInBytes[k] %
static_cast<std::ptrdiff_t
>(
sizeof(T)) == 0 &&
648 "borrowBytes requires byte strides divisible by sizeof(T)");
649 element_strides[k] = stridesInBytes[k] /
static_cast<std::ptrdiff_t
>(
sizeof(T));
651 return NDArray(clustering::detail::BorrowedTag{}, ptr, ptr, shape, element_strides, 0,
// fromSpan pair: std::span adapters delegating to the rank-1 borrow.
661 template <std::
size_t M = N>
664 return borrow(s.data(), std::array<std::size_t, 1>{s.size()});
668 template <std::
size_t M = N>
671 return borrow(s.data(), std::array<std::size_t, 1>{s.size()});
// t(): rank-2 transpose as a borrowed view — swaps both shape and strides,
// no data movement; const overload flips m_mutable off.
681 template <std::
size_t M = N>
685 clustering::detail::BorrowedTag{}, m_data, m_base,
686 std::array<std::size_t, 2>{m_shape[1], m_shape[0]},
687 std::array<std::ptrdiff_t, 2>{m_strides[1], m_strides[0]}, m_offset, m_mutable);
691 template <std::
size_t M = N>
695 clustering::detail::BorrowedTag{},
const_cast<T *
>(m_data),
const_cast<T *
>(m_base),
696 std::array<std::size_t, 2>{m_shape[1], m_shape[0]},
697 std::array<std::ptrdiff_t, 2>{m_strides[1], m_strides[0]}, m_offset,
false);
// row(i): drops the leading axis; data pointer is advanced by i * stride[0]
// (plus m_offset), so the child view carries offset 0 itself.
706 template <std::
size_t M = N>
709 assert(i < m_shape[0]);
710 std::array<std::size_t, N - 1> new_shape{};
711 std::array<std::ptrdiff_t, N - 1> new_strides{};
712 for (std::size_t k = 0; k + 1 < N; ++k) {
713 new_shape[k] = m_shape[k + 1];
714 new_strides[k] = m_strides[k + 1];
716 return NDArray<T, N - 1, L>(clustering::detail::BorrowedTag{},
717 m_data + m_offset + (
static_cast<std::ptrdiff_t
>(i) * m_strides[0]),
718 m_base, new_shape, new_strides, 0, m_mutable);
// row(i) const: identical addressing, view forced read-only.
722 template <std::
size_t M = N>
725 assert(i < m_shape[0]);
726 std::array<std::size_t, N - 1> new_shape{};
727 std::array<std::ptrdiff_t, N - 1> new_strides{};
728 for (std::size_t k = 0; k + 1 < N; ++k) {
729 new_shape[k] = m_shape[k + 1];
730 new_strides[k] = m_strides[k + 1];
732 return NDArray<T, N - 1, L>(clustering::detail::BorrowedTag{},
733 const_cast<T *
>(m_data) + m_offset +
734 (
static_cast<std::ptrdiff_t
>(i) * m_strides[0]),
735 const_cast<T *
>(m_base), new_shape, new_strides, 0,
false);
// col(j): rank-1 view of column j of a rank-2 array — length m_shape[0],
// stride m_strides[0]; const overload forced read-only.
744 template <std::
size_t M = N>
747 assert(j < m_shape[1]);
749 clustering::detail::BorrowedTag{},
750 m_data + m_offset + (
static_cast<std::ptrdiff_t
>(j) * m_strides[1]), m_base,
751 std::array<std::size_t, 1>{m_shape[0]}, std::array<std::ptrdiff_t, 1>{m_strides[0]}, 0,
756 template <std::
size_t M = N>
759 assert(j < m_shape[1]);
761 clustering::detail::BorrowedTag{},
762 const_cast<T *
>(m_data) + m_offset + (
static_cast<std::ptrdiff_t
>(j) * m_strides[1]),
763 const_cast<T *
>(m_base), std::array<std::size_t, 1>{m_shape[0]},
764 std::array<std::ptrdiff_t, 1>{m_strides[0]}, 0,
false);
// NOTE(review): garbled fragment — slice/permute signatures partially elided;
// comments annotate the visible pieces only.
//
// slice(axis, begin, end): half-open single-axis slice. Shape shrinks on
// `axis`; strides unchanged; data pointer advanced by begin * stride[axis].
774 std::size_t end)
noexcept {
775 assert(axis < N && begin <= end && end <= m_shape[axis]);
776 std::array<std::size_t, N> new_shape = m_shape;
777 new_shape[axis] = end - begin;
779 clustering::detail::BorrowedTag{},
780 m_data + m_offset + (
static_cast<std::ptrdiff_t
>(begin) * m_strides[axis]), m_base,
781 new_shape, m_strides, 0, m_mutable);
// slice(axis, begin, end) const: same addressing, view forced read-only.
786 std::size_t end)
const noexcept {
787 assert(axis < N && begin <= end && end <= m_shape[axis]);
788 std::array<std::size_t, N> new_shape = m_shape;
789 new_shape[axis] = end - begin;
791 clustering::detail::BorrowedTag{},
792 const_cast<T *
>(m_data) + m_offset + (
static_cast<std::ptrdiff_t
>(begin) * m_strides[axis]),
793 const_cast<T *
>(m_base), new_shape, m_strides, 0,
false);
// slice(ranges): per-axis {begin, end, step}. `end` is clamped to the axis
// extent; new extent is ceil((end - begin) / step); stride scaled by step;
// `advance` accumulates the pointer shift for all begins.
803 std::array<std::size_t, N> new_shape{};
804 std::array<std::ptrdiff_t, N> new_strides{};
805 std::ptrdiff_t advance = 0;
806 for (std::size_t k = 0; k < N; ++k) {
807 const std::size_t end = std::min(ranges[k].end, m_shape[k]);
808 const std::size_t begin = ranges[k].begin;
809 const std::ptrdiff_t step = ranges[k].step;
810 assert(begin <= end && step > 0);
811 new_shape[k] = step == 1 ? (end - begin)
812 : (end - begin +
static_cast<std::size_t
>(step) - 1) /
813 static_cast<std::size_t
>(step);
814 new_strides[k] = m_strides[k] * step;
815 advance +=
static_cast<std::ptrdiff_t
>(begin) * m_strides[k];
818 m_data + m_offset + advance, m_base, new_shape,
819 new_strides, 0, m_mutable);
// slice(ranges) const: identical computation, view forced read-only.
824 std::array<std::size_t, N> new_shape{};
825 std::array<std::ptrdiff_t, N> new_strides{};
826 std::ptrdiff_t advance = 0;
827 for (std::size_t k = 0; k < N; ++k) {
828 const std::size_t end = std::min(ranges[k].end, m_shape[k]);
829 const std::size_t begin = ranges[k].begin;
830 const std::ptrdiff_t step = ranges[k].step;
831 assert(begin <= end && step > 0);
832 new_shape[k] = step == 1 ? (end - begin)
833 : (end - begin +
static_cast<std::size_t
>(step) - 1) /
834 static_cast<std::size_t
>(step);
835 new_strides[k] = m_strides[k] * step;
836 advance +=
static_cast<std::ptrdiff_t
>(begin) * m_strides[k];
839 clustering::detail::BorrowedTag{},
const_cast<T *
>(m_data) + m_offset + advance,
840 const_cast<T *
>(m_base), new_shape, new_strides, 0,
false);
// permute(perm): borrowed view with axes reordered — axis k of the view maps
// to source axis perm[k] for both shape and stride; offset preserved.
849 std::array<std::size_t, N> new_shape{};
850 std::array<std::ptrdiff_t, N> new_strides{};
851 for (std::size_t k = 0; k < N; ++k) {
853 new_shape[k] = m_shape[perm[k]];
854 new_strides[k] = m_strides[perm[k]];
857 new_shape, new_strides, m_offset, m_mutable);
// permute(perm) const: same reordering, view forced read-only.
862 permute(
const std::array<std::size_t, N> &perm)
const noexcept {
863 std::array<std::size_t, N> new_shape{};
864 std::array<std::ptrdiff_t, N> new_strides{};
865 for (std::size_t k = 0; k < N; ++k) {
867 new_shape[k] = m_shape[perm[k]];
868 new_strides[k] = m_strides[perm[k]];
871 const_cast<T *
>(m_data),
const_cast<T *
>(m_base),
872 new_shape, new_strides, m_offset,
false);
// NOTE(review): garbled fragment — view/reshape/contiguous/debugDump bodies
// partially elided; comments annotate the visible pieces only.
//
// view<M>(shape): zero-copy rank change. Requires a contiguous source and an
// element-count-preserving shape; result borrows the same buffer.
885 template <std::
size_t M>
887 assert(
isContiguous() &&
"view<M> requires a contiguous source");
888 assert(productOfShape(shape) == numel() &&
"view<M> must preserve element count");
890 clustering::detail::BorrowedTag{}, m_data, m_base, shape,
891 NDArray<T, M, Layout::Contig>::computeContiguousStrides(shape), 0, m_mutable);
// view<M>(shape) const: same aliasing view, forced read-only.
895 template <std::
size_t M>
897 assert(
isContiguous() &&
"view<M> requires a contiguous source");
898 assert(productOfShape(shape) == numel() &&
"view<M> must preserve element count");
900 clustering::detail::BorrowedTag{},
const_cast<T *
>(m_data),
const_cast<T *
>(m_base), shape,
901 NDArray<T, M, Layout::Contig>::computeContiguousStrides(shape), 0,
false);
// reshape<M>(shape): aliases when already contiguous (elided branch —
// TODO confirm); otherwise materializes via copyToContiguous into `result`.
915 assert(productOfShape(shape) == numel() &&
"reshape<M> must preserve element count");
918 clustering::detail::BorrowedTag{}, m_data, m_base, shape,
919 NDArray<T, M, Layout::Contig>::computeContiguousStrides(shape), 0, m_mutable);
922 copyToContiguous(result.
data());
// reshape<M>(shape) const: alias path forced read-only; copy path as above.
927 template <std::
size_t M>
929 assert(productOfShape(shape) == numel() &&
"reshape<M> must preserve element count");
932 clustering::detail::BorrowedTag{},
const_cast<T *
>(m_data),
const_cast<T *
>(m_base),
933 shape, NDArray<T, M, Layout::Contig>::computeContiguousStrides(shape), 0,
false);
936 copyToContiguous(result.
data());
// contiguous() (mutable / const): same alias-or-copy pattern with the
// source's own shape; const alias path forced read-only.
950 clustering::detail::BorrowedTag{}, m_data, m_base, m_shape,
951 NDArray<T, N, Layout::Contig>::computeContiguousStrides(m_shape), 0, m_mutable);
954 copyToContiguous(result.
data());
962 clustering::detail::BorrowedTag{},
const_cast<T *
>(m_data),
const_cast<T *
>(m_base),
963 m_shape, NDArray<T, N, Layout::Contig>::computeContiguousStrides(m_shape), 0,
false);
966 copyToContiguous(result.
data());
// clone() fragment (per the API docs: freshly-allocated deep copy).
979 copyToContiguous(result.
data());
// debugDump(): textual dump — header "NDarray<type, N>(dims)" (sic: the
// lowercase 'a' is in the runtime string as written), then every element in
// row-major order via odometer-style index increment, then the total size.
994 std::stringstream ss;
995 ss <<
"NDarray<" <<
typeid(T).name() <<
", " << N <<
">(";
996 for (
auto d : m_shape) {
1001 const std::size_t total = numel();
1003 std::array<std::size_t, N> idx{};
1004 for (std::size_t flat = 0; flat < total; ++flat) {
1005 std::ptrdiff_t off = m_offset;
1006 for (std::size_t k = 0; k < N; ++k) {
1007 off +=
static_cast<std::ptrdiff_t
>(idx[k]) * m_strides[k];
1009 ss << m_data[off] <<
", ";
// Odometer increment: bump the last axis, carrying into earlier axes.
1010 for (std::size_t k = N; k-- > 0;) {
1011 if (++idx[k] < m_shape[k]) {
1019 ss <<
"size: " << total <<
"\n";
1031 static std::array<std::ptrdiff_t, N>
1032 computeContiguousStrides(
const std::array<std::size_t, N> &shape) {
1033 std::array<std::ptrdiff_t, N> s{};
1035 for (std::size_t k = N - 1; k > 0; --k) {
1036 s[k - 1] = s[k] *
static_cast<std::ptrdiff_t
>(shape[k]);
// NOTE(review): garbled fragment — comments annotate the visible pieces only.
//
// productOfShape<M>: product of all extents; loop body (`size *= shape[k];`)
// and the return are elided — TODO confirm.
1041 template <std::
size_t M>
1042 static std::size_t productOfShape(
const std::array<std::size_t, M> &shape)
noexcept {
1043 std::size_t size = 1;
1044 for (std::size_t k = 0; k < M; ++k) {
// numel(): total element count of this array.
1050 std::size_t numel() const noexcept {
return productOfShape(m_shape); }
// copyToContiguous(dst): densifies into a row-major buffer. Fast path
// (guard elided, presumably isContiguous() — TODO confirm) is one memcpy;
// the general path walks all indices odometer-style through the strides.
1055 void copyToContiguous(T *dst)
const noexcept {
1056 const std::size_t total = numel();
1061 std::memcpy(dst, m_data + m_offset, total *
sizeof(T));
1064 std::array<std::size_t, N> idx{};
1065 for (std::size_t flat = 0; flat < total; ++flat) {
1066 std::ptrdiff_t off = m_offset;
1067 for (std::size_t k = 0; k < N; ++k) {
1068 off +=
static_cast<std::ptrdiff_t
>(idx[k]) * m_strides[k];
1070 dst[flat] = m_data[off];
// Odometer increment over the multi-index, last axis fastest.
1071 for (std::size_t k = N; k-- > 0;) {
1072 if (++idx[k] < m_shape[k]) {
1080 template <std::size_t... Ks,
class... Ix>
1081 std::size_t computeElementOffset(std::index_sequence<Ks...>, Ix... ix)
const noexcept {
1082 std::ptrdiff_t off = m_offset;
1083 ((off +=
static_cast<std::ptrdiff_t
>(ix) * m_strides[Ks]), ...);
1084 return static_cast<std::size_t
>(off);
// Data members (fragment; m_data/m_base/m_storage/m_mutable declarations
// elided from this view):
// m_vec — owned storage, 32-byte aligned via the project allocator; empty
// for borrowed arrays.
1089 std::vector<T, clustering::detail::AlignedAllocator<T, 32>> m_vec;
// m_shape — extent of each of the N axes.
1090 std::array<std::size_t, N> m_shape;
// m_strides — per-axis element strides (may be negative/non-contiguous).
1091 std::array<std::ptrdiff_t, N> m_strides;
// m_offset — element offset added before every stride dot product.
1092 std::ptrdiff_t m_offset;
1105template <
class T, std::
size_t NA, Layout LA, std::
size_t NB, Layout LB>
1107 return a.baseData() == b.baseData();
Provides read-write access to NDArray elements.
Accessor(NDArray< T, N, Layout::Contig > &ndarray, std::size_t index, std::size_t dim)
Constructs an Accessor for an NDArray.
Accessor & operator=(T value) noexcept
Assigns a value to the element at the accessor's position.
Accessor operator[](std::size_t index) noexcept
Provides access to the next dimension of the NDArray.
std::size_t m_index
Index in the flat representation of the array.
NDArray< T, N, Layout::Contig > * m_ndarray
Pointer to the NDArray.
std::size_t m_dim
Current dimension of the accessor.
BaseAccessor(NDArray< T, N, Layout::Contig > *ndarray, std::size_t index, std::size_t dim)
Constructs a BaseAccessor for a given NDArray, index, and dimension.
const T * data() const
Returns a pointer to the element data.
Provides read-only access to NDArray elements.
ConstAccessor(const NDArray< T, N, Layout::Contig > &ndarray, std::size_t index, std::size_t dim)
Constructs a ConstAccessor for a constant NDArray.
ConstAccessor operator[](std::size_t index) const noexcept
Provides access to the next dimension of the NDArray.
ConstAccessor(const ConstAccessor &other)=default
Defaulted copy constructor; accessors are lightweight and trivially copyable.
size_t index() const noexcept
Returns the flat index in the NDArray corresponding to the accessor.
Represents a multidimensional array (NDArray) of a fixed number of dimensions N and element type T.
T & flatIndex(std::size_t index) noexcept
Provides direct access to the flat underlying array at a specific index.
NDArray< T, N, Layout::Contig > clone() const
Returns a freshly-allocated owned contiguous array with deep-copied contents.
const T & flatIndex(std::size_t index) const noexcept
Provides read-only access to the flat underlying array at a specific index.
const T * alignedData() const noexcept
Read-only overload of alignedData<A>; attaches the same alignment hint to the pointer.
NDArray< T, N, Layout::MaybeStrided > slice(const std::array< Range, N > &ranges) noexcept
Borrowed multi-axis slice; each Range applies to its corresponding axis.
NDArray< T, N - 1, L > row(std::size_t i) const noexcept
Read-only row view; mirrors the mutable overload and flips m_mutable off.
NDArray(std::initializer_list< std::size_t > dims)
Constructs a contiguous owned NDArray with specified dimensions.
static NDArray borrow(const T *ptr, std::array< std::size_t, N > shape) noexcept
Borrows a read-only contiguous buffer as an NDArray.
NDArray< T, M, Layout::Contig > view(std::array< std::size_t, M > shape) noexcept
Returns a borrowed contiguous rank-M view over the same buffer with shape shape.
bool isContiguous() const noexcept
Reports whether the array's runtime layout is row-major contiguous with zero offset.
T * data() noexcept
Provides read-write access to the internal data array.
size_t dim(std::size_t index) const noexcept
Returns the size of a specific dimension of the NDArray.
NDArray< T, 2, Layout::MaybeStrided > t() noexcept
Transposes a rank-2 NDArray into a borrowed view with swapped axes.
NDArray< T, 1, Layout::MaybeStrided > col(std::size_t j) noexcept
Returns a borrowed rank-1 view of column j of a rank-2 array.
std::ptrdiff_t strideAt(std::size_t index) const noexcept
Returns the stride (in elements) for dimension index.
static NDArray fromSpan(std::span< T > s) noexcept
Explicit std::span adapter for rank-1 borrows.
T & operator()(Ix... ix) noexcept
Direct multi-index element access via strides.
NDArray< T, 1, Layout::MaybeStrided > col(std::size_t j) const noexcept
Read-only column view; mirrors the mutable overload and flips m_mutable off.
static NDArray borrowBytes(T *ptr, std::array< std::size_t, N > shape, std::array< std::ptrdiff_t, N > stridesInBytes, bool isMutable) noexcept
Borrow a buffer whose strides are expressed in bytes (NumPy's convention).
NDArray< T, M, Layout::Contig > reshape(std::array< std::size_t, M > shape)
Returns a contiguous rank-M array with shape shape, copying only when needed.
bool isOwned() const noexcept
Reports whether the array owns its underlying buffer.
static NDArray borrow1D(const T *ptr, std::size_t n) noexcept
Read-only rank-1 convenience borrow; mirrors the mutable borrow1D.
NDArray< T, N, Layout::MaybeStrided > slice(std::size_t axis, std::size_t begin, std::size_t end) noexcept
Borrowed half-open slice along a single axis.
NDArray< T, 2, Layout::MaybeStrided > t() const noexcept
Read-only transpose; the returned view carries m_mutable = false.
NDArray< T, N, Layout::MaybeStrided > slice(const std::array< Range, N > &ranges) const noexcept
Read-only multi-axis slice; mirrors the mutable overload with m_mutable = false.
friend bool operator==(const NDArray &, const NDArray &)=delete
NDArray< T, N - 1, L > row(std::size_t i) noexcept
Returns a borrowed view of row i with the leading dimension dropped.
friend bool operator!=(const NDArray &, const NDArray &)=delete
NDArray(NDArray &&other) noexcept
Move constructor; steals m_vec and re-seats m_data for owned storage.
NDArray(std::array< std::size_t, N > shape)
Constructs a contiguous owned NDArray from a runtime std::array of dimensions.
static NDArray borrow1D(T *ptr, std::size_t n) noexcept
Rank-1 convenience borrow; avoids the std::array<size_t, 1>{n} boilerplate.
T * baseData() const noexcept
Returns the original (non-advanced) base pointer for storage-identity comparisons.
static NDArray fromSpan(std::span< const T > s) noexcept
Read-only span adapter; delegates to the read-only borrow overload.
static NDArray borrow(const T *ptr, std::array< std::size_t, N > shape, std::array< std::ptrdiff_t, N > strides) noexcept
Read-only strided borrow; flips m_mutable off so writes through the view assert.
NDArray< T, N, Layout::MaybeStrided > slice(std::size_t axis, std::size_t begin, std::size_t end) const noexcept
Read-only single-axis slice; mirrors the mutable overload with m_mutable = false.
static NDArray borrow(T *ptr, std::array< std::size_t, N > shape) noexcept
Borrows a contiguous buffer as an NDArray without taking ownership.
bool isAligned() const noexcept
Tests whether data() is aligned to A bytes.
NDArray & operator=(NDArray &&other) noexcept
Move assignment; steals m_vec and re-seats m_data for owned storage.
NDArray(const NDArray &other)
Copy constructor; re-seats m_data against m_vec for owned storage.
NDArray< T, M, Layout::Contig > reshape(std::array< std::size_t, M > shape) const
Read-only rank-M reshape; aliases on contiguous sources, copies otherwise.
std::string debugDump() const
Returns a formatted string representing the contents of the NDArray.
const T & operator()(Ix... ix) const noexcept
Read-only multi-index element access via strides; mirrors the mutable overload.
NDArray & operator=(const NDArray &other)
Copy assignment; re-seats m_data against m_vec for owned storage.
NDArray< T, M, Layout::Contig > view(std::array< std::size_t, M > shape) const noexcept
Read-only rank-M view; mirrors the mutable overload with m_mutable = false.
NDArray< T, N, Layout::MaybeStrided > permute(const std::array< std::size_t, N > &perm) const noexcept
Read-only permuted view; mirrors the mutable overload with m_mutable = false.
static NDArray borrow(T *ptr, std::array< std::size_t, N > shape, std::array< std::ptrdiff_t, N > strides) noexcept
Borrows a strided buffer as an NDArray without taking ownership.
NDArray< T, N, Layout::Contig > contiguous()
Returns a contiguous rank-N array with the same shape, copying only when needed.
NDArray< T, N, Layout::Contig > contiguous() const
Read-only contiguous view; aliases on contiguous sources, copies otherwise.
NDArray< T, N, Layout::MaybeStrided > permute(const std::array< std::size_t, N > &perm) noexcept
Borrowed view with axes reordered by perm.
const T * data() const noexcept
Provides read-only access to the internal data array.
T * alignedData() noexcept
Returns data() with an alignment hint of A bytes applied.
bool isMutable() const noexcept
Reports whether writes through operator(), Accessor, or flatIndex are allowed.
bool operator!=(const AlignedAllocator< T, Align > &, const AlignedAllocator< U, Align > &) noexcept
std::atomic< std::uint64_t > & alignedAllocCallCount() noexcept
Process-global counter of non-empty AlignedAllocator::allocate calls.
bool operator==(const AlignedAllocator< T, Align > &, const AlignedAllocator< U, Align > &) noexcept
bool sameStorage(const NDArray< T, NA, LA > &a, const NDArray< T, NB, LB > &b) noexcept
Returns true when a and b share the same underlying allocation.
NDArrayStorage
Tag indicating whether an NDArray owns its buffer or borrows memory from elsewhere.
Layout
Compile-time layout tag for NDArray.