Clustering
C++20 header-only: DBSCAN, HDBSCAN, k-means.
Loading...
Searching...
No Matches
kdtree.h
Go to the documentation of this file.
1#pragma once
2
3#include <algorithm>
4#include <cassert>
5#include <cstddef>
6#include <cstdint>
7#include <limits>
8#include <numeric>
9#include <span>
10#include <utility>
11#include <vector>
12
15#include "clustering/math/detail/avx2_helpers.h"
16#include "clustering/math/detail/radius_scan.h"
17#include "clustering/math/detail/sq_distances_block.h"
18#include "clustering/math/detail/top_k_neighbors.h"
21#include "clustering/ndarray.h"
22
23namespace clustering {
24
/// Node in a KDTree; internal nodes and leaves share one struct shape.
struct KDTreeNode {
  /// Internal: pivot slot in the tree's reordered point buffer.
  /// Leaf: offset of the leaf's first entry in that buffer / in m_indices.
  std::size_t m_index;
  /// Internal: split dimension. Leaf: point count packed at m_index.
  std::size_t m_dim;
  /// Left child, or nullptr on a leaf.
  KDTreeNode *m_left;
  /// Right child, or nullptr on a leaf.
  KDTreeNode *m_right;
  /// Monotonic identifier assigned at construction; keys into the owning
  /// tree's per-node bounds buffer.
  std::uint32_t m_id;
};
54
/// Distance metric a KDTree builds pruning bounds under.
enum class KDTreeDistanceType : std::uint8_t {
  /// Squared Euclidean; radii and comparisons run on squared distances.
  // NOTE(review): the enumerator misspells "Euclidean"; it is public API,
  // so it is kept verbatim rather than renamed.
  kEucledian,
};
64
90template <class T, KDTreeDistanceType distanceType = KDTreeDistanceType::kEucledian,
91 std::size_t LeafSize = 16, class AllocT = LinearAllocator<KDTreeNode>>
92class KDTree {
93public:
94 using value_type = T;
95
108 KDTree(const NDArray<T, 2> &points)
109 : m_allocator(calculatePoolSize(points.dim(0))), m_points(points), m_dim(points.dim(1)) {
// NOTE(review): the batch query/knn paths stride points.data() row-major by m_dim, which
// presumes a contiguous source layout — presumably asserted (points.isContiguous()) on the
// line elided from this listing; confirm against the full header.
111 const std::size_t n = points.dim(0);
112 m_indices.resize(n);
113 std::iota(m_indices.begin(), m_indices.end(), 0);
114 m_root = build(0, n, 0);
115 // Materialize points in tree-build order. After @ref build rewrites @c m_indices into a
116 // permutation matching the tree layout, a leaf's points live at @c m_points_reordered slots
117 // `[leaf.m_index, leaf.m_index + leaf.m_dim)`. Contiguous access there replaces the
118 // scatter-indirection `m_points[m_indices[k]]` had, which at low d was the cache-miss
119 // ceiling: every leaf-brute-force iteration landed on a random row of @c m_points.
120 m_points_reordered.resize(n * m_dim);
121 const T *src = points.data();
122 T *dst = m_points_reordered.data();
123 for (std::size_t k = 0; k < n; ++k) {
124 const std::size_t src_row = m_indices[k];
125 const T *s = src + (src_row * m_dim);
126 T *d = dst + (k * m_dim);
127 for (std::size_t j = 0; j < m_dim; ++j) {
128 d[j] = s[j];
129 }
130 }
131 // Populate per-node axis-aligned bounding boxes once the tree is built and the reordered
132 // points buffer is materialized. Layout is `(numNodes * 2 * d)` flat: row @c 2*id holds the
133 // min-coords vector, row @c 2*id + 1 holds the max-coords vector. Dual-tree walkers consume
134 // this through @ref nodeBounds as a pair of @c std::span views; leaving @ref KDTreeNode's
135 // size unchanged past the monotonic @c m_id keeps leaf-scan cache behaviour stable at
136 // high @c d.
137 m_nodeBounds.assign(static_cast<std::size_t>(m_nextNodeId) * 2 * m_dim, T{});
138 if (m_root != nullptr) {
139 populateBounds(m_root);
140 }
141 }
142
155 std::vector<std::size_t> query(const NDArray<T, 1> &query_point, T radius,
156 std::int64_t limit = -1) const {
157 std::vector<std::size_t> indices;
158 const T radius_sq = radius * radius;
159 std::vector<KDTreeNode *> stack;
160 stack.reserve(kDefaultStackReserve);
161 // Copy the borrowed row into a scratch buffer so the core walker can assume a contiguous
162 // `d`-element block regardless of the source layout. The copy is d stores -- free at d=2.
163 std::vector<T> qbuf(m_dim);
164 for (std::size_t k = 0; k < m_dim; ++k) {
165 qbuf[k] = query_point[k];
166 }
167 queryImpl(m_root, qbuf.data(), radius_sq, indices, stack, limit);
168 return indices;
169 }
170
183 [[nodiscard]] std::vector<std::vector<std::int32_t>> query(T radius, math::Pool pool) const {
184 const std::size_t n = m_points.dim(0);
185 std::vector<std::vector<std::int32_t>> adj(n);
186 if (n == 0) {
187 return adj;
188 }
189
190 const T radius_sq = radius * radius;
191
192 auto runRange = [&](std::size_t lo, std::size_t hi) {
193 // Reuse the traversal stack across every query in this chunk. Tree depth stays below
194 // log2(n / LeafSize) + a few for spilled internal pushes, so kDefaultStackReserve rarely
195 // grows past its initial capacity; clearing keeps the allocation alive between queries.
196 std::vector<KDTreeNode *> stack;
197 stack.reserve(kDefaultStackReserve);
198 const T *sourceData = m_points.data();
199 for (std::size_t i = lo; i < hi; ++i) {
200 // @p i iterates the source ordering; @c sourceData + i*m_dim is the caller's row.
201 const T *qp = sourceData + (i * m_dim);
202 queryImpl(m_root, qp, radius_sq, adj[i], stack, /*limit=*/-1);
203 }
204 };
205
206 if (pool.shouldParallelize(n, 4, 2) && pool.pool != nullptr) {
207 pool.pool
208 ->submit_blocks(std::size_t{0}, n,
209 [&](std::size_t lo, std::size_t hi) { runRange(lo, hi); })
210 .wait();
211 } else {
212 runRange(0, n);
213 }
214 return adj;
215 }
216
241 [[nodiscard]] std::pair<NDArray<std::int32_t, 2>, NDArray<T, 2>> knnQuery(std::int32_t k,
242 math::Pool pool) const {
243 const std::size_t n = m_points.dim(0);
// Precondition: k < n, enforced in every build configuration by the release-active assert
// below. NOTE(review): std::cmp_less alone also admits negative k (signed-aware compare),
// which would blow up the size_t cast — presumably a k >= 0 assert sits on the line elided
// from this listing; confirm against the full header.
245 CLUSTERING_ALWAYS_ASSERT(std::cmp_less(k, n));
246
247 const auto kSz = static_cast<std::size_t>(k);
248 NDArray<std::int32_t, 2> indices({n, kSz});
249 NDArray<T, 2> sqDists({n, kSz});
250
251 auto runRange = [&](std::size_t lo, std::size_t hi) {
252 // Reuse the traversal stack and top-k tracker across every query in this chunk; both are
253 // reset at the head of each per-point walk.
254 std::vector<KDTreeNode *> stack;
255 stack.reserve(kDefaultStackReserve);
256 math::detail::TopKNeighbors<T, std::int32_t> topK(kSz);
257 const T *sourceData = m_points.data();
258 std::int32_t *idxOut = indices.data();
259 T *distOut = sqDists.data();
260 for (std::size_t i = lo; i < hi; ++i) {
261 const auto iOriginal = static_cast<std::int32_t>(i);
262 const T *qp = sourceData + (i * m_dim);
263 topK.clear();
264 knnQueryImpl(m_root, qp, iOriginal, topK, stack);
// Results for row i land in the i-th k-wide slice of both outputs, sorted by distance.
265 topK.drainAscending(distOut + (i * kSz), idxOut + (i * kSz));
266 }
267 };
268
269 if (pool.shouldParallelize(n, 4, 2) && pool.pool != nullptr) {
270 pool.pool
271 ->submit_blocks(std::size_t{0}, n,
272 [&](std::size_t lo, std::size_t hi) { runRange(lo, hi); })
273 .wait();
274 } else {
275 runRange(0, n);
276 }
277 return {std::move(indices), std::move(sqDists)};
278 }
279
292 [[nodiscard]] std::pair<std::span<const T>, std::span<const T>>
293 nodeBounds(const KDTreeNode *node) const noexcept {
294 assert(node != nullptr && "KDTree::nodeBounds on null node");
295 const std::size_t base = static_cast<std::size_t>(node->m_id) * 2 * m_dim;
296 const T *bounds = m_nodeBounds.data() + base;
297 return {std::span<const T>(bounds, m_dim), std::span<const T>(bounds + m_dim, m_dim)};
298 }
299
310 [[nodiscard]] std::span<const std::size_t> indexPermutation() const noexcept {
311 return {m_indices.data(), m_indices.size()};
312 }
313
325 [[nodiscard]] std::span<const T> reorderedPoints() const noexcept {
326 return {m_points_reordered.data(), m_points_reordered.size()};
327 }
328
335 [[nodiscard]] std::size_t nodeCount() const noexcept {
336 return static_cast<std::size_t>(m_nextNodeId);
337 }
338
340 [[nodiscard]] const KDTreeNode *root() const noexcept { return m_root; }
341
343 [[nodiscard]] std::size_t dim() const noexcept { return m_dim; }
344
353 if (m_allocator.isDeallocSupported()) {
354 doRecDealloc(m_root);
355 }
356 }
357
358private:
/// Upper bound on the node count for @p numPoints indexed points.
///
/// Every internal node consumes exactly one pivot point and every leaf
/// batches at least one point, so the tree never holds more than
/// numPoints nodes — tighter than the classic 2*numPoints - 1 bound of
/// a one-point-per-leaf kd-tree.
///
/// @param numPoints Number of points the tree will index.
/// @return Pool capacity, in nodes (0 for an empty point set).
static size_t calculatePoolSize(size_t numPoints) {
  return numPoints;
}
378
387 static void doRecDealloc(KDTreeNode *node) {
388 if (node == nullptr) {
389 return;
390 }
391
392 doRecDealloc(node->m_left);
393 doRecDealloc(node->m_right);
394
395 delete node;
396 }
397
412 KDTreeNode *build(std::size_t start, std::size_t end, std::size_t depth) {
413 if (start >= end) {
414 return nullptr;
415 }
416
417 // Leaf node: store range into m_indices, brute-force at query time
418 if (end - start <= LeafSize) {
419 KDTreeNode *node = m_allocator.allocate();
420 *node = {.m_index = start, // offset into m_indices
421 .m_dim = end - start, // count of points
422 .m_left = nullptr,
423 .m_right = nullptr,
424 .m_id = m_nextNodeId++};
425 return node;
426 }
427
428 // Internal node: split on median
429 const std::size_t dim = depth % m_points.dim(1);
430 const std::size_t median = start + (((end - start) - 1) / 2);
431
432 using diff_t = std::vector<std::size_t>::difference_type;
433 std::nth_element(m_indices.begin() + static_cast<diff_t>(start),
434 m_indices.begin() + static_cast<diff_t>(median),
435 m_indices.begin() + static_cast<diff_t>(end),
436 [this, dim](std::size_t lhs, std::size_t rhs) {
437 return m_points[lhs][dim] < m_points[rhs][dim];
438 });
439
440 KDTreeNode *node = m_allocator.allocate();
441 *node = {.m_index = median, // reordered slot of the pivot
442 .m_dim = dim,
443 .m_left = build(start, median, depth + 1),
444 .m_right = build(median + 1, end, depth + 1),
445 .m_id = m_nextNodeId++};
446
447 return node;
448 }
449
471 template <class OutIdx>
472 void queryImpl(KDTreeNode *root, const T *qp, T radius_sq, std::vector<OutIdx> &indices,
473 std::vector<KDTreeNode *> &stack, std::int64_t limit = -1) const {
474 if (root == nullptr) {
475 return;
476 }
477
478 stack.clear();
479 stack.push_back(root);
480
481 const T *reorderedBase = m_points_reordered.data();
482
483 while (!stack.empty()) {
484 const KDTreeNode *node = stack.back();
485 stack.pop_back();
486
487 if (node == nullptr) {
488 continue;
489 }
490
491 if (limit != -1 && indices.size() == static_cast<std::size_t>(limit)) {
492 break;
493 }
494
495 // Leaf node: brute-force all points in the range. @c m_points_reordered lays points in
496 // tree-build order, so a leaf's entries are a contiguous @c count x d block -- one or
497 // two cache lines at small @c d. @ref math::detail::radiusScan dispatches into a SIMD
498 // kernel when @c d matches a batched width (today @c f32, `d == 2`) and otherwise
499 // falls back to the scalar @ref sqEuclideanRowPtr per-row primitive.
500 if (node->m_left == nullptr && node->m_right == nullptr) {
501 const std::size_t base = node->m_index;
502 const std::size_t count = node->m_dim;
503 const T *leafPts = reorderedBase + (base * m_dim);
504 const bool isBounded = (limit != -1);
505 const std::size_t cap =
506 isBounded ? static_cast<std::size_t>(limit) : std::numeric_limits<std::size_t>::max();
507 math::detail::radiusScan(qp, leafPts, count, m_dim, radius_sq, [&](std::size_t i) noexcept {
508 if (indices.size() >= cap) {
509 return;
510 }
511 indices.push_back(static_cast<OutIdx>(m_indices[base + i]));
512 });
513 if (isBounded && indices.size() >= cap) {
514 break;
515 }
516 continue;
517 }
518
519 // Internal node: check the split point and traverse children. `node->m_index` is the
520 // pivot's slot in @c m_points_reordered, so the pivot's row lives at
521 // @c reorderedBase + slot * m_dim. The original point index (needed for @c indices)
522 // comes from `m_indices[slot]`.
523 const std::size_t pivotSlot = node->m_index;
524 const std::size_t splitDim = node->m_dim;
525 const T *pivotRow = reorderedBase + (pivotSlot * m_dim);
526 const T dist_sq = math::detail::sqEuclideanRowPtr(qp, pivotRow, m_dim);
527 if (dist_sq <= radius_sq) {
528 indices.push_back(static_cast<OutIdx>(m_indices[pivotSlot]));
529 }
530
531 const T pivotCoord = pivotRow[splitDim];
532 const T diff = qp[splitDim] - pivotCoord;
533 if (diff < 0) {
534 if (node->m_left != nullptr) {
535 stack.push_back(node->m_left);
536 }
537 if (diff * diff <= radius_sq && node->m_right != nullptr) {
538 stack.push_back(node->m_right);
539 }
540 } else {
541 if (node->m_right != nullptr) {
542 stack.push_back(node->m_right);
543 }
544 if (diff * diff <= radius_sq && node->m_left != nullptr) {
545 stack.push_back(node->m_left);
546 }
547 }
548 }
549 }
550
568 void knnQueryImpl(KDTreeNode *root, const T *qp, std::int32_t selfIndex,
569 math::detail::TopKNeighbors<T, std::int32_t> &topK,
570 std::vector<KDTreeNode *> &stack) const {
571 if (root == nullptr) {
572 return;
573 }
574
575 stack.clear();
576 stack.push_back(root);
577
578 const T *reorderedBase = m_points_reordered.data();
579
580 while (!stack.empty()) {
581 const KDTreeNode *node = stack.back();
582 stack.pop_back();
583
584 if (node == nullptr) {
585 continue;
586 }
587
588 // Current pruning bound. Until the tracker fills, accept everything; once full, the
589 // retained worst-key serves as the upper bound.
// The bound is sampled once per node pop: admissions made while processing this node do
// not refresh the local copy; the next pop re-reads the tightened tracker bound.
590 const T bound = topK.boundKey();
591
592 // AABB gap prune: the minimum possible squared distance from the query to any point
593 // under @c node is @c pointAabbGapSq against the subtree's bounding box. The parent's
594 // single-axis prune is a strictly weaker lower bound; at d>=4 the full-AABB gap skips
595 // subtrees the axis prune lets through, which at d=8 is the dominant share of the kNN
596 // walker's remaining internal-node visits.
597 if (bound != std::numeric_limits<T>::max()) {
598 auto [nmin, nmax] = this->nodeBounds(node);
599 const T gapSq = math::pointAabbGapSq<T>(qp, nmin, nmax);
600 if (gapSq >= bound) {
601 continue;
602 }
603 }
604
605 if (node->m_left == nullptr && node->m_right == nullptr) {
606 // Leaf: walk the contiguous count x d block and admit each candidate. Self-exclusion
607 // is by original index so a candidate is skipped iff it is the query row. Distances
608 // are computed in blocks of four so the horizontal-sum epilogue is shared across
609 // neighbours; the top-k admit then runs over the precomputed dsq vector.
610 const std::size_t base = node->m_index;
611 const std::size_t count = node->m_dim;
612 const T *leafPts = reorderedBase + (base * m_dim);
613 std::array<T, LeafSize> dsqBuf{};
614 math::detail::sqDistancesAosBlock<T>(qp, leafPts, count, m_dim, dsqBuf.data());
615 // Batch-level prune: if every distance in the leaf exceeds the current worst retained
616 // bound, no candidate can enter the tracker. Scan for the minimum once; the common
617 // case at larger LeafSize tunings (e.g. 64, with k in [4, 32]; this template defaults
618 // to 16) is that every distance falls above the bound and the per-entry admit loop
619 // (self-exclude cmp, index load, topK.push) is skipped
620 // entirely. The scan runs scalar because LeafSize is known at compile time and the
621 // auto-vectoriser produces a tight min-reduce with no hsum epilogue of its own.
// Reading dsqBuf[0] is safe: build never emits an empty leaf, so count >= 1 here.
621 if (topK.full()) {
622 T minDsq = dsqBuf[0];
623 for (std::size_t i = 1; i < count; ++i) {
624 if (dsqBuf[i] < minDsq) {
625 minDsq = dsqBuf[i];
626 }
627 }
628 if (minDsq >= bound) {
629 continue;
630 }
631 }
632 for (std::size_t i = 0; i < count; ++i) {
633 const auto pointIdx = static_cast<std::int32_t>(m_indices[base + i]);
634 if (pointIdx == selfIndex) {
635 continue;
636 }
637 topK.push(dsqBuf[i], pointIdx);
638 }
639 continue;
640 }
641
642 // Internal: test the pivot, then descend near child first so the bound tightens before
643 // the far-child prune test. Compute the split-axis delta squared first so both the pivot
644 // admit and the descend-far gate can share it: the full d-wide pivot distance is at least
645 // the split-axis contribution, so a larger delta squared proves the pivot cannot enter
646 // the retained set and the d-wide hsum is skipped entirely.
647 const std::size_t pivotSlot = node->m_index;
648 const std::size_t splitDim = node->m_dim;
649 const T *pivotRow = reorderedBase + (pivotSlot * m_dim);
650 const auto pivotIdx = static_cast<std::int32_t>(m_indices[pivotSlot]);
651 const T pivotCoord = pivotRow[splitDim];
652 const T diff = qp[splitDim] - pivotCoord;
653 const T diffSq = diff * diff;
654 if (pivotIdx != selfIndex && diffSq <= bound) {
655 const T dist_sq = math::detail::sqEuclideanRowPtr(qp, pivotRow, m_dim);
656 topK.push(dist_sq, pivotIdx);
657 }
658 // DFS descends near-first, far-second. Stack is LIFO, so push far before near.
659 if (diff < 0) {
660 if (diffSq <= bound && node->m_right != nullptr) {
661 stack.push_back(node->m_right);
662 }
663 if (node->m_left != nullptr) {
664 stack.push_back(node->m_left);
665 }
666 } else {
667 if (diffSq <= bound && node->m_left != nullptr) {
668 stack.push_back(node->m_left);
669 }
670 if (node->m_right != nullptr) {
671 stack.push_back(node->m_right);
672 }
673 }
674 }
675 }
676
687 void populateBounds(KDTreeNode *node) noexcept {
688 T *minOut = m_nodeBounds.data() + (static_cast<std::size_t>(node->m_id) * 2 * m_dim);
689 T *maxOut = minOut + m_dim;
690
691 if (node->m_left == nullptr && node->m_right == nullptr) {
692 // Leaf: seed from the first stored row, then fold the remaining rows into min/max.
693 const std::size_t base = node->m_index;
694 const std::size_t count = node->m_dim;
695 const T *leafPts = m_points_reordered.data() + (base * m_dim);
696 for (std::size_t j = 0; j < m_dim; ++j) {
697 minOut[j] = leafPts[j];
698 maxOut[j] = leafPts[j];
699 }
700 for (std::size_t i = 1; i < count; ++i) {
701 const T *row = leafPts + (i * m_dim);
702 for (std::size_t j = 0; j < m_dim; ++j) {
703 if (row[j] < minOut[j]) {
704 minOut[j] = row[j];
705 }
706 if (row[j] > maxOut[j]) {
707 maxOut[j] = row[j];
708 }
709 }
710 }
711 return;
712 }
713
714 // Internal: recurse first so children's bounds are ready, then take their union. The tree
715 // invariant guarantees every internal node has at least one child -- the build routine only
716 // returns @c nullptr when @p start >= @p end, and the internal-node branch is only entered
717 // when the range exceeds @c LeafSize, which is `>= 1`.
718 if (node->m_left != nullptr) {
719 populateBounds(node->m_left);
720 }
721 if (node->m_right != nullptr) {
722 populateBounds(node->m_right);
723 }
724
725 const KDTreeNode *const seed = (node->m_left != nullptr) ? node->m_left : node->m_right;
726 const T *seedMin = m_nodeBounds.data() + (static_cast<std::size_t>(seed->m_id) * 2 * m_dim);
727 const T *seedMax = seedMin + m_dim;
728 for (std::size_t j = 0; j < m_dim; ++j) {
729 minOut[j] = seedMin[j];
730 maxOut[j] = seedMax[j];
731 }
732
733 if (node->m_left != nullptr && node->m_right != nullptr) {
734 const T *otherMin =
735 m_nodeBounds.data() + (static_cast<std::size_t>(node->m_right->m_id) * 2 * m_dim);
736 const T *otherMax = otherMin + m_dim;
737 for (std::size_t j = 0; j < m_dim; ++j) {
738 if (otherMin[j] < minOut[j]) {
739 minOut[j] = otherMin[j];
740 }
741 if (otherMax[j] > maxOut[j]) {
742 maxOut[j] = otherMax[j];
743 }
744 }
745 }
746
747 // Union-in the pivot's own row so the internal-node box encloses the pivot point too.
748 const std::size_t pivotSlot = node->m_index;
749 const T *pivotRow = m_points_reordered.data() + (pivotSlot * m_dim);
750 for (std::size_t j = 0; j < m_dim; ++j) {
751 if (pivotRow[j] < minOut[j]) {
752 minOut[j] = pivotRow[j];
753 }
754 if (pivotRow[j] > maxOut[j]) {
755 maxOut[j] = pivotRow[j];
756 }
757 }
758 }
759
763 static constexpr std::size_t kDefaultStackReserve = 64;
764
765 AllocT m_allocator;
766
767 KDTreeNode *m_root = nullptr;
768 const NDArray<T, 2> &m_points;
769 std::size_t m_dim = 0;
770 std::vector<std::size_t> m_indices;
772 std::vector<T> m_points_reordered;
777 std::uint32_t m_nextNodeId = 0;
780 std::vector<T> m_nodeBounds;
781};
782
783} // namespace clustering
#define CLUSTERING_ALWAYS_ASSERT(cond)
Release-active assertion: evaluates cond in every build configuration.
std::pair< NDArray< std::int32_t, 2 >, NDArray< T, 2 > > knnQuery(std::int32_t k, math::Pool pool) const
Returns the k nearest neighbours of every indexed point, self-excluded.
Definition kdtree.h:241
std::vector< std::size_t > query(const NDArray< T, 1 > &query_point, T radius, std::int64_t limit=-1) const
Finds points within a specified radius of a query point.
Definition kdtree.h:155
std::vector< std::vector< std::int32_t > > query(T radius, math::Pool pool) const
Returns the full radius-neighborhood adjacency over the indexed point cloud.
Definition kdtree.h:183
std::span< const std::size_t > indexPermutation() const noexcept
Permutation from reordered-slot index to original point index.
Definition kdtree.h:310
~KDTree()
Destroys the KDTree, deallocating its nodes.
Definition kdtree.h:352
std::pair< std::span< const T >, std::span< const T > > nodeBounds(const KDTreeNode *node) const noexcept
Axis-aligned bounding box of the points routed through node.
Definition kdtree.h:293
std::size_t nodeCount() const noexcept
Total node count, equal to one past the largest m_id assigned during construction.
Definition kdtree.h:335
KDTree(const NDArray< T, 2 > &points)
Constructs a KDTree using a given set of points.
Definition kdtree.h:108
std::span< const T > reorderedPoints() const noexcept
Points in reordered (tree-build) order as a flat row-major buffer.
Definition kdtree.h:325
const KDTreeNode * root() const noexcept
Root of the tree; nullptr for an empty point set.
Definition kdtree.h:340
Represents a multidimensional array (NDArray) of a fixed number of dimensions N and element type T.
Definition ndarray.h:136
bool isContiguous() const noexcept
Reports whether the array's runtime layout is row-major contiguous with zero offset.
Definition ndarray.h:474
size_t dim(std::size_t index) const noexcept
Returns the size of a specific dimension of the NDArray.
Definition ndarray.h:461
const T * data() const noexcept
Provides read-only access to the internal data array.
Definition ndarray.h:503
T pointAabbGapSq(const T *point, std::span< const T > boxMin, std::span< const T > boxMax) noexcept
Squared gap distance between a point and an axis-aligned bounding box.
Definition aabb.h:44
KDTreeDistanceType
Distance metric a KDTree builds pruning bounds under.
Definition kdtree.h:61
@ kEucledian
Squared Euclidean; radii and comparisons run on squared distances.
Definition kdtree.h:62
Node in a KDTree, sharing the same struct shape between internals and leaves.
Definition kdtree.h:38
std::size_t m_dim
Internal: split dimension. Leaf: point count packed at m_index.
Definition kdtree.h:43
KDTreeNode * m_right
Right child, or nullptr on a leaf.
Definition kdtree.h:47
std::size_t m_index
Internal: pivot slot in the tree's reordered point buffer.
Definition kdtree.h:41
KDTreeNode * m_left
Left child, or nullptr on a leaf.
Definition kdtree.h:45
std::uint32_t m_id
Monotonic identifier assigned at construction; keys into the owning tree's per-node bounds buffer.
Definition kdtree.h:52
Thin injection wrapper around a BS::light_thread_pool.
Definition thread.h:63
BS::light_thread_pool * pool
Underlying pool, or nullptr to force serial execution.
Definition thread.h:65
bool shouldParallelize(std::size_t totalWork, std::size_t minChunk, std::size_t minTasksPerWorker=2) const noexcept
Decide whether totalWork warrants parallel dispatch.
Definition thread.h:98