mirror of https://github.com/koide3/small_gicp.git
Replace nanoflann (#42)
* replace nanoflann with original kdtree
* replace KdTreeOMP in python binding
* include array to fix build error on windows
* add kdtree test with synthetic data
* add nowait to see if it fixes error on win
* update README
* use openmp atomic
* revert. MSVC does not support openmp very well...
* disable parallel kdtree construction on windows
* update README
* rephrase
* avoid knn result copy
* refactoring
This commit is contained in:
parent
640dcb6279
commit
0d31edaa74
@@ -267,13 +267,13 @@ if(BUILD_TESTS)
    TBB::tbb
    TBB::tbbmalloc
    PCL::PCL
    OpenMP::OpenMP_CXX
  )

  gtest_discover_tests(${TEST_NAME} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
endforeach()
endif()


#############
## Install ##
#############

README.md (14 changes)

@@ -21,7 +21,7 @@ This library uses some C++17 features. The PCL interface is not compatible with

 ## Dependencies

-- [Mandatory] [Eigen](https://eigen.tuxfamily.org/), [nanoflann](https://github.com/jlblancoc/nanoflann) ([bundled1](include/small_gicp/ann/nanoflann.hpp), [bundled2](include/small_gicp/ann/nanoflann_omp.hpp), [bundled3](include/small_gicp/ann/nanoflann_tbb.hpp)), [Sophus](https://github.com/strasdat/Sophus) ([bundled](include/small_gicp/util/lie.hpp))
+- [Mandatory] [Eigen](https://eigen.tuxfamily.org/), [nanoflann](https://github.com/jlblancoc/nanoflann) ([bundled](include/small_gicp/ann/kdtree.hpp)), [Sophus](https://github.com/strasdat/Sophus) ([bundled](include/small_gicp/util/lie.hpp))
 - [Optional] [OpenMP](https://www.openmp.org/), [Intel TBB](https://www.intel.com/content/www/us/en/developer/tools/oneapi/onetbb.html), [PCL](https://pointclouds.org/), [Iridescence](https://github.com/koide3/iridescence)

 ## Installation

@@ -171,8 +171,8 @@ estimate_covariances_omp(*target, num_neighbors, num_threads);
 estimate_covariances_omp(*source, num_neighbors, num_threads);

 // Create KdTree for target and source.
-auto target_tree = std::make_shared<KdTreeOMP<pcl::PointCloud<pcl::PointCovariance>>>(target, num_threads);
-auto source_tree = std::make_shared<KdTreeOMP<pcl::PointCloud<pcl::PointCovariance>>>(source, num_threads);
+auto target_tree = std::make_shared<KdTree<pcl::PointCloud<pcl::PointCovariance>>>(target, KdTreeBuilderOMP(num_threads));
+auto source_tree = std::make_shared<KdTree<pcl::PointCloud<pcl::PointCovariance>>>(source, KdTreeBuilderOMP(num_threads));

 Registration<GICPFactor, ParallelReductionOMP> registration;
 registration.reduction.num_threads = num_threads;

@@ -214,8 +214,8 @@ target = voxelgrid_sampling_omp(*target, downsampling_resolution, num_threads);
 source = voxelgrid_sampling_omp(*source, downsampling_resolution, num_threads);

 // Create KdTree
-auto target_tree = std::make_shared<KdTreeOMP<PointCloud>>(target, num_threads);
-auto source_tree = std::make_shared<KdTreeOMP<PointCloud>>(source, num_threads);
+auto target_tree = std::make_shared<KdTree<PointCloud>>(target, KdTreeBuilderOMP(num_threads));
+auto source_tree = std::make_shared<KdTree<PointCloud>>(source, KdTreeBuilderOMP(num_threads));

 // Estimate point covariances
 estimate_covariances_omp(*target, *target_tree, num_neighbors, num_threads);

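The two README hunks above capture the user-facing API change: `KdTreeOMP<T>(cloud, num_threads)` becomes `KdTree<T>(cloud, KdTreeBuilderOMP(num_threads))`. For context, here is a minimal end-to-end sketch assembled from those snippets. The header paths, `PointCloud::Ptr`, and the `align()` call are assumptions based on the rest of the README, not part of this diff.

```cpp
#include <memory>
#include <Eigen/Geometry>
#include <small_gicp/points/point_cloud.hpp>
#include <small_gicp/ann/kdtree_omp.hpp>
#include <small_gicp/util/downsampling_omp.hpp>
#include <small_gicp/util/normal_estimation_omp.hpp>
#include <small_gicp/factors/gicp_factor.hpp>
#include <small_gicp/registration/reduction_omp.hpp>
#include <small_gicp/registration/registration.hpp>

using namespace small_gicp;

// Hypothetical helper assembled from the README snippets above (header paths assumed).
Eigen::Isometry3d align_clouds(PointCloud::Ptr target, PointCloud::Ptr source) {
  const int num_threads = 4;
  const int num_neighbors = 10;
  const double downsampling_resolution = 0.25;

  // Downsample both clouds.
  target = voxelgrid_sampling_omp(*target, downsampling_resolution, num_threads);
  source = voxelgrid_sampling_omp(*source, downsampling_resolution, num_threads);

  // New API: the tree type stays KdTree, the parallelism is chosen via the builder argument.
  auto target_tree = std::make_shared<KdTree<PointCloud>>(target, KdTreeBuilderOMP(num_threads));
  auto source_tree = std::make_shared<KdTree<PointCloud>>(source, KdTreeBuilderOMP(num_threads));

  // Estimate per-point covariances for GICP.
  estimate_covariances_omp(*target, *target_tree, num_neighbors, num_threads);
  estimate_covariances_omp(*source, *source_tree, num_neighbors, num_threads);

  Registration<GICPFactor, ParallelReductionOMP> registration;
  registration.reduction.num_threads = num_threads;

  const auto result = registration.align(*target, *source, *target_tree, Eigen::Isometry3d::Identity());
  return result.T_target_source;
}
```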
@@ -334,8 +334,8 @@ Example D: Example with Open3D
 target_o3d = open3d.io.read_point_cloud('small_gicp/data/target.ply').paint_uniform_color([0, 1, 0])
 source_o3d = open3d.io.read_point_cloud('small_gicp/data/source.ply').paint_uniform_color([0, 0, 1])

-target, target_tree = small_gicp.preprocess_points(points_numpy=numpy.asarray(target_o3d.points), downsampling_resolution=0.25)
-source, source_tree = small_gicp.preprocess_points(points_numpy=numpy.asarray(source_o3d.points), downsampling_resolution=0.25)
+target, target_tree = small_gicp.preprocess_points(numpy.asarray(target_o3d.points), downsampling_resolution=0.25)
+source, source_tree = small_gicp.preprocess_points(numpy.asarray(source_o3d.points), downsampling_resolution=0.25)
 result = small_gicp.align(target, source, target_tree)

 source_o3d.transform(result.T_target_source)

@@ -0,0 +1,105 @@
// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
// SPDX-License-Identifier: MIT
#pragma once

#include <memory>
#include <Eigen/Core>
#include <small_gicp/points/traits.hpp>
#include <small_gicp/ann/traits.hpp>
#include <small_gicp/ann/nanoflann.hpp>

namespace small_gicp {

/// @brief Unsafe KdTree with arbitrary nanoflann Adaptor.
/// @note This class does not hold the ownership of the input points.
/// You must keep the input points along with this class.
template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
class UnsafeKdTreeGeneric {
public:
  using Ptr = std::shared_ptr<UnsafeKdTreeGeneric>;
  using ConstPtr = std::shared_ptr<const UnsafeKdTreeGeneric>;
  using ThisClass = UnsafeKdTreeGeneric<PointCloud, Adaptor>;
  using Index = Adaptor<nanoflann::L2_Simple_Adaptor<double, ThisClass, double>, ThisClass, 3, size_t>;

  /// @brief Constructor
  /// @param points Input points
  explicit UnsafeKdTreeGeneric(const PointCloud& points) : points(points), index(3, *this, nanoflann::KDTreeSingleIndexAdaptorParams(10)) { index.buildIndex(); }

  /// @brief Constructor
  /// @param points Input points
  /// @params num_threads Number of threads used for building the index (This argument is only valid for OMP implementation)
  explicit UnsafeKdTreeGeneric(const PointCloud& points, int num_threads) : points(points), index(3, *this, nanoflann::KDTreeSingleIndexAdaptorParams(10)) {
    index.buildIndex(num_threads);
  }

  ~UnsafeKdTreeGeneric() {}

  // Interfaces for nanoflann
  size_t kdtree_get_point_count() const { return traits::size(points); }
  double kdtree_get_pt(const size_t idx, const size_t dim) const { return traits::point(points, idx)[dim]; }

  template <class BBox>
  bool kdtree_get_bbox(BBox&) const {
    return false;
  }

  /// @brief Find k-nearest neighbors
  size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const { return index.knnSearch(pt.data(), k, k_indices, k_sq_dists); }

private:
  const PointCloud& points;  ///< Input points
  Index index;               ///< KdTree index
};

/// @brief KdTree with arbitrary nanoflann Adaptor
template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
class KdTreeGeneric {
public:
  using Ptr = std::shared_ptr<KdTreeGeneric>;
  using ConstPtr = std::shared_ptr<const KdTreeGeneric>;

  /// @brief Constructor
  /// @param points Input points
  explicit KdTreeGeneric(const std::shared_ptr<const PointCloud>& points) : points(points), tree(*points) {}

  /// @brief Constructor
  /// @param points Input points
  explicit KdTreeGeneric(const std::shared_ptr<const PointCloud>& points, int num_threads) : points(points), tree(*points, num_threads) {}

  ~KdTreeGeneric() {}

  /// @brief Find k-nearest neighbors
  size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const { return tree.knn_search(pt, k, k_indices, k_sq_dists); }

private:
  const std::shared_ptr<const PointCloud> points;       ///< Input points
  const UnsafeKdTreeGeneric<PointCloud, Adaptor> tree;  ///< KdTree
};

/// @brief Standard KdTree (unsafe)
template <class PointCloud>
using UnsafeKdTree = UnsafeKdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptor>;

/// @brief Standard KdTree
template <class PointCloud>
using KdTree = KdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptor>;

namespace traits {

template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
struct Traits<UnsafeKdTreeGeneric<PointCloud, Adaptor>> {
  static size_t knn_search(const UnsafeKdTreeGeneric<PointCloud, Adaptor>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
    return tree.knn_search(point, k, k_indices, k_sq_dists);
  }
};

template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
struct Traits<KdTreeGeneric<PointCloud, Adaptor>> {
  static size_t knn_search(const KdTreeGeneric<PointCloud, Adaptor>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
    return tree.knn_search(point, k, k_indices, k_sq_dists);
  }
};

} // namespace traits

} // namespace small_gicp

@@ -0,0 +1,22 @@
// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
// SPDX-License-Identifier: MIT
#pragma once

#include <Eigen/Core>
#include <small_gicp/points/traits.hpp>
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/nanoflann_omp.hpp>

namespace small_gicp {

/// @brief Unsafe KdTree with multi-thread tree construction with OpenMP backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using UnsafeKdTreeOMP = UnsafeKdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorOMP>;

/// @brief KdTree with multi-thread tree construction with OpenMP backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using KdTreeOMP = KdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorOMP>;

} // namespace small_gicp

@@ -0,0 +1,22 @@
// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
// SPDX-License-Identifier: MIT
#pragma once

#include <Eigen/Core>
#include <small_gicp/points/traits.hpp>
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/nanoflann_tbb.hpp>

namespace small_gicp {

/// @brief Unsafe KdTree with multi-thread tree construction with TBB backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using UnsafeKdTreeTBB = UnsafeKdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorTBB>;

/// @brief KdTree with multi-thread tree construction with TBB backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using KdTreeTBB = KdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorTBB>;

} // namespace small_gicp

@@ -1,101 +1,305 @@
// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
// SPDX-License-Identifier: MIT

// While the following KdTree code is written from scratch, it is heavily inspired by the nanoflann library.
// Thus, the following original license of nanoflann is included to be sure.

// https://github.com/jlblancoc/nanoflann/blob/master/include/nanoflann.hpp
/***********************************************************************
 * Software License Agreement (BSD License)
 *
 * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
 * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
 * Copyright 2011-2024 Jose Luis Blanco (joseluisblancoc@gmail.com).
 * All rights reserved.
 *
 * THE BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *************************************************************************/
#pragma once

#include <memory>
#include <numeric>
#include <Eigen/Core>

#include <small_gicp/points/traits.hpp>
#include <small_gicp/ann/traits.hpp>
#include <small_gicp/ann/nanoflann.hpp>
#include <small_gicp/ann/projection.hpp>
#include <small_gicp/ann/knn_result.hpp>

namespace small_gicp {

/// @brief Unsafe KdTree with arbitrary nanoflann Adaptor.
using NodeIndexType = std::uint32_t;
static constexpr NodeIndexType INVALID_NODE = std::numeric_limits<NodeIndexType>::max();

/// @brief KdTree node.
template <typename Projection>
struct KdTreeNode {
  union {
    struct Leaf {
      NodeIndexType first;  ///< First point index in the leaf node.
      NodeIndexType last;   ///< Last point index in the leaf node.
    } lr;                   ///< Leaf node.
    struct NonLeaf {
      Projection proj;  ///< Projection axis.
      double thresh;    ///< Threshold value.
    } sub;              ///< Non-leaf node.
  } node_type;

  NodeIndexType left = INVALID_NODE;   ///< Left child node index.
  NodeIndexType right = INVALID_NODE;  ///< Right child node index.
};

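The node layout above packs the leaf and non-leaf payloads into one union and distinguishes them by the `INVALID_NODE` sentinel in `left`, rather than by a separate tag field. A standalone toy illustrating just that pattern (not library code):

```cpp
#include <cstdint>
#include <iostream>
#include <limits>

// Toy version of the sentinel-tagged union used by KdTreeNode.
using NodeIndex = std::uint32_t;
constexpr NodeIndex kInvalid = std::numeric_limits<NodeIndex>::max();

struct ToyNode {
  union {
    struct { NodeIndex first, last; } leaf;     // leaf: point index range [first, last)
    struct { int axis; double thresh; } split;  // non-leaf: splitting plane
  } payload;
  NodeIndex left = kInvalid;  // kInvalid means "no child", i.e., this node is a leaf
  NodeIndex right = kInvalid;
};

bool is_leaf(const ToyNode& n) { return n.left == kInvalid; }

int main() {
  ToyNode leaf;
  leaf.payload.leaf = {0, 8};

  ToyNode split;
  split.payload.split = {2, 0.5};
  split.left = 0;
  split.right = 1;

  std::cout << std::boolalpha << is_leaf(leaf) << " " << is_leaf(split) << std::endl;  // true false
}
```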
/// @brief Single thread Kd-tree builder.
struct KdTreeBuilder {
public:
  /// @brief Build KdTree
  /// @param kdtree Kd-tree to build
  /// @param points Point cloud
  template <typename KdTree, typename PointCloud>
  void build_tree(KdTree& kdtree, const PointCloud& points) const {
    kdtree.indices.resize(traits::size(points));
    std::iota(kdtree.indices.begin(), kdtree.indices.end(), 0);

    size_t node_count = 0;
    kdtree.nodes.resize(traits::size(points));
    kdtree.root = create_node(kdtree, node_count, points, kdtree.indices.begin(), kdtree.indices.begin(), kdtree.indices.end());
    kdtree.nodes.resize(node_count);
  }

  /// @brief Create a Kd-tree node from the given point indices.
  /// @param global_first Global first point index iterator (i.e., this->indices.begin()).
  /// @param first First point index iterator to be scanned.
  /// @param last Last point index iterator to be scanned.
  /// @return Index of the created node.
  template <typename PointCloud, typename KdTree, typename IndexConstIterator>
  NodeIndexType create_node(KdTree& kdtree, size_t& node_count, const PointCloud& points, IndexConstIterator global_first, IndexConstIterator first, IndexConstIterator last)
    const {
    const size_t N = std::distance(first, last);
    // Create a leaf node.
    if (N <= max_leaf_size) {
      const NodeIndexType node_index = node_count++;
      auto& node = kdtree.nodes[node_index];

      // std::sort(first, last);
      node.node_type.lr.first = std::distance(global_first, first);
      node.node_type.lr.last = std::distance(global_first, last);

      return node_index;
    }

    // Find the best axis to split the input points.
    using Projection = typename KdTree::Projection;
    const auto proj = Projection::find_axis(points, first, last, projection_setting);
    const auto median_itr = first + N / 2;
    std::nth_element(first, median_itr, last, [&](size_t i, size_t j) { return proj(traits::point(points, i)) < proj(traits::point(points, j)); });

    // Create a non-leaf node.
    const NodeIndexType node_index = node_count++;
    auto& node = kdtree.nodes[node_index];
    node.node_type.sub.proj = proj;
    node.node_type.sub.thresh = proj(traits::point(points, *median_itr));

    // Create left and right child nodes.
    node.left = create_node(kdtree, node_count, points, global_first, first, median_itr);
    node.right = create_node(kdtree, node_count, points, global_first, median_itr, last);

    return node_index;
  }

public:
  int max_leaf_size = 20;                ///< Maximum number of points in a leaf node.
  ProjectionSetting projection_setting;  ///< Projection setting.
};

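`create_node()` above splits a range by partially ordering the point indices around the median of the projected coordinate with `std::nth_element`, which is linear time per level instead of a full sort. The same step in isolation, on plain doubles (illustrative only):

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Projected coordinates of 8 points along the chosen split axis.
  const std::vector<double> proj = {0.9, 0.1, 0.5, 0.7, 0.3, 0.8, 0.2, 0.6};

  // Indices into the point set, as kdtree.indices in the builder.
  std::vector<size_t> indices(proj.size());
  std::iota(indices.begin(), indices.end(), 0);

  // Partition the indices so the median element lands in place; the lower half
  // becomes the left child range, the upper half the right child range.
  const auto median_itr = indices.begin() + indices.size() / 2;
  std::nth_element(indices.begin(), median_itr, indices.end(), [&](size_t i, size_t j) { return proj[i] < proj[j]; });

  std::cout << "split threshold = " << proj[*median_itr] << std::endl;  // 0.6
}
```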
/// @brief "Unsafe" KdTree.
/// @note This class does not hold the ownership of the input points.
/// You must keep the input points along with this class.
template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
class UnsafeKdTreeGeneric {
template <typename PointCloud, typename Projection_ = AxisAlignedProjection>
struct UnsafeKdTree {
public:
  using Ptr = std::shared_ptr<UnsafeKdTreeGeneric>;
  using ConstPtr = std::shared_ptr<const UnsafeKdTreeGeneric>;
  using ThisClass = UnsafeKdTreeGeneric<PointCloud, Adaptor>;
  using Index = Adaptor<nanoflann::L2_Simple_Adaptor<double, ThisClass, double>, ThisClass, 3, size_t>;
  using Projection = Projection_;
  using Node = KdTreeNode<Projection>;

  /// @brief Constructor
  /// @param points Input points
  explicit UnsafeKdTreeGeneric(const PointCloud& points) : points(points), index(3, *this, nanoflann::KDTreeSingleIndexAdaptorParams(10)) { index.buildIndex(); }
  /// @param points Point cloud
  /// @param builder Kd-tree builder
  template <typename Builder = KdTreeBuilder>
  explicit UnsafeKdTree(const PointCloud& points, const Builder& builder = KdTreeBuilder()) : points(points) {
    if (traits::size(points) == 0) {
      std::cerr << "warning: Empty point cloud" << std::endl;
      return;
    }

  /// @brief Constructor
  /// @param points Input points
  /// @params num_threads Number of threads used for building the index (This argument is only valid for OMP implementation)
  explicit UnsafeKdTreeGeneric(const PointCloud& points, int num_threads) : points(points), index(3, *this, nanoflann::KDTreeSingleIndexAdaptorParams(10)) {
    index.buildIndex(num_threads);
    builder.build_tree(*this, points);
  }

  ~UnsafeKdTreeGeneric() {}

  // Interfaces for nanoflann
  size_t kdtree_get_point_count() const { return traits::size(points); }
  double kdtree_get_pt(const size_t idx, const size_t dim) const { return traits::point(points, idx)[dim]; }

  template <class BBox>
  bool kdtree_get_bbox(BBox&) const {
    return false;
  /// @brief Find the nearest neighbor.
  /// @param query Query point
  /// @param k_indices Index of the nearest neighbor
  /// @param k_sq_dists Squared distance to the nearest neighbor
  /// @param setting KNN search setting
  /// @return Number of found neighbors (0 or 1)
  size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
    return knn_search<1>(query, k_indices, k_sq_dists, setting);
  }

  /// @brief Find k-nearest neighbors
  size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const { return index.knnSearch(pt.data(), k, k_indices, k_sq_dists); }
  /// @brief Find k-nearest neighbors. This method uses dynamic memory allocation.
  /// @param query Query point
  /// @param k Number of neighbors
  /// @param k_indices Indices of neighbors
  /// @param k_sq_dists Squared distances to neighbors
  /// @param setting KNN search setting
  /// @return Number of found neighbors
  size_t knn_search(const Eigen::Vector4d& query, int k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
    KnnResult<-1> result(k_indices, k_sq_dists, k);
    knn_search(query, root, result, setting);
    return result.num_found();
  }

  /// @brief Find k-nearest neighbors. This method uses fixed and static memory allocation. Might be faster for small k.
  /// @param query Query point
  /// @param k_indices Indices of neighbors
  /// @param k_sq_dists Squared distances to neighbors
  /// @param setting KNN search setting
  /// @return Number of found neighbors
  template <int N>
  size_t knn_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
    KnnResult<N> result(k_indices, k_sq_dists);
    knn_search(query, root, result, setting);
    return result.num_found();
  }

private:
  const PointCloud& points;  ///< Input points
  Index index;               ///< KdTree index
};
  /// @brief Find k-nearest neighbors.
  template <typename Result>
  bool knn_search(const Eigen::Vector4d& query, NodeIndexType node_index, Result& result, const KnnSetting& setting) const {
    const auto& node = nodes[node_index];

    // Check if it's a leaf node.
    if (node.left == INVALID_NODE) {
      // Compare the query point with all points in the leaf node.
      for (size_t i = node.node_type.lr.first; i < node.node_type.lr.last; i++) {
        const double sq_dist = (traits::point(points, indices[i]) - query).squaredNorm();
        result.push(indices[i], sq_dist);
      }
      return !setting.fulfilled(result);
    }

    const double val = node.node_type.sub.proj(query);
    const double diff = val - node.node_type.sub.thresh;
    const double cut_sq_dist = diff * diff;

    NodeIndexType best_child;
    NodeIndexType other_child;

    if (diff < 0.0) {
      best_child = node.left;
      other_child = node.right;
    } else {
      best_child = node.right;
      other_child = node.left;
    }

    // Check the best child node first.
    if (!knn_search(query, best_child, result, setting)) {
      return false;
    }

    // Check if the other child node needs to be tested.
    if (result.worst_distance() > cut_sq_dist) {
      return knn_search(query, other_child, result, setting);
    }

    return true;
  }

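A note on the pruning test in the traversal above (this reasoning is not spelled out in the code): the far child is revisited only when `result.worst_distance() > cut_sq_dist`. This is sound because the projection is 1-Lipschitz (a coordinate pick for `AxisAlignedProjection`, a unit-normal dot product for `NormalProjection`), and every point $p$ stored on the far side of the split lies on the other side of the threshold from the query $q$, so

$$\|p - q\|^2 \;\ge\; \bigl(\mathrm{proj}(p) - \mathrm{proj}(q)\bigr)^2 \;\ge\; \bigl(\mathrm{thresh} - \mathrm{proj}(q)\bigr)^2 = \texttt{cut\_sq\_dist}.$$

Once the current worst candidate distance is no larger than this bound, nothing in that subtree can enter the result, and the branch is skipped.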
/// @brief KdTree with arbitrary nanoflann Adaptor
template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
class KdTreeGeneric {
public:
  using Ptr = std::shared_ptr<KdTreeGeneric>;
  using ConstPtr = std::shared_ptr<const KdTreeGeneric>;
  const PointCloud& points;     ///< Input points
  std::vector<size_t> indices;  ///< Point indices referred by nodes

  /// @brief Constructor
  /// @param points Input points
  explicit KdTreeGeneric(const std::shared_ptr<const PointCloud>& points) : points(points), tree(*points) {}

  /// @brief Constructor
  /// @param points Input points
  explicit KdTreeGeneric(const std::shared_ptr<const PointCloud>& points, int num_threads) : points(points), tree(*points, num_threads) {}

  ~KdTreeGeneric() {}

  /// @brief Find k-nearest neighbors
  size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const { return tree.knn_search(pt, k, k_indices, k_sq_dists); }

private:
  const std::shared_ptr<const PointCloud> points;       ///< Input points
  const UnsafeKdTreeGeneric<PointCloud, Adaptor> tree;  ///< KdTree
  NodeIndexType root;                                   ///< Root node index (should be zero)
  std::vector<Node> nodes;                              ///< Kd-tree nodes
};

/// @brief Standard KdTree (unsafe)
template <class PointCloud>
using UnsafeKdTree = UnsafeKdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptor>;
/// @brief "Safe" KdTree that holds the ownership of the input points.
template <typename PointCloud, typename Projection = AxisAlignedProjection>
struct KdTree {
public:
  using Ptr = std::shared_ptr<KdTree<PointCloud, Projection>>;
  using ConstPtr = std::shared_ptr<const KdTree<PointCloud, Projection>>;

  /// @brief Standard KdTree
  template <class PointCloud>
  using KdTree = KdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptor>;
  template <typename Builder = KdTreeBuilder>
  explicit KdTree(std::shared_ptr<const PointCloud> points, const Builder& builder = Builder()) : points(points), kdtree(*points, builder) {}

  /// @brief Find the nearest neighbor.
  /// @param query Query point
  /// @param k_indices Indices of neighbors
  /// @param k_sq_dists Squared distances to neighbors
  /// @param setting KNN search setting
  /// @return Number of found neighbors
  size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
    return kdtree.nearest_neighbor_search(query, k_indices, k_sq_dists, setting);
  }

  /// @brief Find k-nearest neighbors. This method uses dynamic memory allocation.
  /// @param query Query point
  /// @param k Number of neighbors
  /// @param k_indices Indices of neighbors
  /// @param k_sq_dists Squared distances to neighbors
  /// @param setting KNN search setting
  /// @return Number of found neighbors
  size_t knn_search(const Eigen::Vector4d& query, size_t k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
    return kdtree.knn_search(query, k, k_indices, k_sq_dists, setting);
  }

public:
  const std::shared_ptr<const PointCloud> points;     ///< Points
  const UnsafeKdTree<PointCloud, Projection> kdtree;  ///< KdTree
};

namespace traits {

template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
struct Traits<UnsafeKdTreeGeneric<PointCloud, Adaptor>> {
  static size_t knn_search(const UnsafeKdTreeGeneric<PointCloud, Adaptor>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
template <typename PointCloud, typename Projection>
struct Traits<UnsafeKdTree<PointCloud, Projection>> {
  static size_t nearest_neighbor_search(const UnsafeKdTree<PointCloud, Projection>& tree, const Eigen::Vector4d& point, size_t* k_indices, double* k_sq_dists) {
    return tree.nearest_neighbor_search(point, k_indices, k_sq_dists);
  }

  static size_t knn_search(const UnsafeKdTree<PointCloud, Projection>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
    return tree.knn_search(point, k, k_indices, k_sq_dists);
  }
};

template <class PointCloud, template <typename, typename, int, typename> class Adaptor>
struct Traits<KdTreeGeneric<PointCloud, Adaptor>> {
  static size_t knn_search(const KdTreeGeneric<PointCloud, Adaptor>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
template <typename PointCloud, typename Projection>
struct Traits<KdTree<PointCloud, Projection>> {
  static size_t nearest_neighbor_search(const KdTree<PointCloud, Projection>& tree, const Eigen::Vector4d& point, size_t* k_indices, double* k_sq_dists) {
    return tree.nearest_neighbor_search(point, k_indices, k_sq_dists);
  }

  static size_t knn_search(const KdTree<PointCloud, Projection>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
    return tree.knn_search(point, k, k_indices, k_sq_dists);
  }
};

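For completeness, a small usage sketch of the rewritten tree through the public interface listed above. The `knn_search`/`nearest_neighbor_search` signatures come from this hunk; the header paths and the assumption that `small_gicp::PointCloud` can be constructed from a `std::vector` of Eigen points follow the library's examples and are not part of this diff.

```cpp
#include <iostream>
#include <memory>
#include <vector>
#include <Eigen/Core>
#include <small_gicp/points/point_cloud.hpp>  // assumed path for small_gicp::PointCloud
#include <small_gicp/ann/kdtree.hpp>

int main() {
  using namespace small_gicp;

  // Assumption: PointCloud is constructible from a vector of Eigen points (w = 1).
  std::vector<Eigen::Vector4d> raw = {{0, 0, 0, 1}, {1, 0, 0, 1}, {0, 2, 0, 1}, {0, 0, 3, 1}};
  auto points = std::make_shared<PointCloud>(raw);

  // Single-threaded build; KdTreeBuilder is the default builder argument.
  KdTree<PointCloud> tree(points);

  // 2-nearest-neighbor query; the caller provides buffers of size k.
  std::vector<size_t> k_indices(2);
  std::vector<double> k_sq_dists(2);
  const size_t found = tree.knn_search(Eigen::Vector4d(0.1, 0.0, 0.0, 1.0), 2, k_indices.data(), k_sq_dists.data());

  for (size_t i = 0; i < found; i++) {
    std::cout << k_indices[i] << " : " << k_sq_dists[i] << std::endl;
  }
}
```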
@@ -2,21 +2,102 @@
// SPDX-License-Identifier: MIT
#pragma once

#include <Eigen/Core>
#include <small_gicp/points/traits.hpp>
#include <atomic>
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/nanoflann_omp.hpp>

#ifdef _MSC_VER
#pragma message("warning: Task-based OpenMP parallelism causes run-time memory errors with Eigen matrices.")
#pragma message("warning: Thus, OpenMP-based multi-threading for KdTree construction is disabled on MSVC.")
#endif

namespace small_gicp {

/// @brief Unsafe KdTree with multi-thread tree construction with OpenMP backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using UnsafeKdTreeOMP = UnsafeKdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorOMP>;
/// @brief Kd-tree builder with OpenMP.
struct KdTreeBuilderOMP {
public:
  /// @brief Constructor
  /// @param num_threads Number of threads
  KdTreeBuilderOMP(int num_threads = 4) : num_threads(num_threads), max_leaf_size(20) {}

/// @brief KdTree with multi-thread tree construction with OpenMP backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using KdTreeOMP = KdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorOMP>;
  /// @brief Build KdTree
  template <typename KdTree, typename PointCloud>
  void build_tree(KdTree& kdtree, const PointCloud& points) const {
    kdtree.indices.resize(traits::size(points));
    std::iota(kdtree.indices.begin(), kdtree.indices.end(), 0);

    std::atomic_uint64_t node_count = 0;
    kdtree.nodes.resize(traits::size(points));

#ifndef _MSC_VER
#pragma omp parallel num_threads(num_threads)
    {
#pragma omp single nowait
      { kdtree.root = create_node(kdtree, node_count, points, kdtree.indices.begin(), kdtree.indices.begin(), kdtree.indices.end()); }
    }
#else
    kdtree.root = create_node(kdtree, node_count, points, kdtree.indices.begin(), kdtree.indices.begin(), kdtree.indices.end());
#endif

    kdtree.nodes.resize(node_count);
  }

  /// @brief Create a Kd-tree node from the given point indices.
  /// @param global_first Global first point index iterator (i.e., this->indices.begin()).
  /// @param first First point index iterator to be scanned.
  /// @param last Last point index iterator to be scanned.
  /// @return Index of the created node.
  template <typename PointCloud, typename KdTree, typename IndexConstIterator>
  NodeIndexType create_node(
    KdTree& kdtree,
    std::atomic_uint64_t& node_count,
    const PointCloud& points,
    IndexConstIterator global_first,
    IndexConstIterator first,
    IndexConstIterator last) const {
    const size_t N = std::distance(first, last);
    // Create a leaf node.
    if (N <= max_leaf_size) {
      const NodeIndexType node_index = node_count++;
      auto& node = kdtree.nodes[node_index];

      // std::sort(first, last);
      node.node_type.lr.first = std::distance(global_first, first);
      node.node_type.lr.last = std::distance(global_first, last);

      return node_index;
    }

    // Find the best axis to split the input points.
    using Projection = typename KdTree::Projection;
    const auto proj = Projection::find_axis(points, first, last, projection_setting);
    const auto median_itr = first + N / 2;
    std::nth_element(first, median_itr, last, [&](size_t i, size_t j) { return proj(traits::point(points, i)) < proj(traits::point(points, j)); });

    // Create a non-leaf node.
    const NodeIndexType node_index = node_count++;
    auto& node = kdtree.nodes[node_index];
    node.node_type.sub.proj = proj;
    node.node_type.sub.thresh = proj(traits::point(points, *median_itr));

    // Create left and right child nodes.
#ifndef _MSC_VER
#pragma omp task default(shared) if (N > 512)
    node.left = create_node(kdtree, node_count, points, global_first, first, median_itr);
#pragma omp task default(shared) if (N > 512)
    node.right = create_node(kdtree, node_count, points, global_first, median_itr, last);
#pragma omp taskwait
#else
    node.left = create_node(kdtree, node_count, points, global_first, first, median_itr);
    node.right = create_node(kdtree, node_count, points, global_first, median_itr, last);
#endif

    return node_index;
  }

public:
  int num_threads;                       ///< Number of threads
  int max_leaf_size;                     ///< Maximum number of points in a leaf node.
  ProjectionSetting projection_setting;  ///< Projection setting.
};

} // namespace small_gicp

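The OMP builder above starts the recursion under `omp parallel` + `omp single nowait` and forks `omp task`s only for ranges larger than 512 indices, falling back to plain recursion on MSVC. A standalone toy using the same task pattern on a recursive sum (illustrative only, not library code):

```cpp
#include <cstdio>
#include <numeric>
#include <vector>

// Toy recursive range sum using the same OpenMP task pattern as KdTreeBuilderOMP:
// tasks are only worth deferring for large ranges (if(n > 512)); small ranges run inline.
long long range_sum(const std::vector<int>& v, size_t first, size_t last) {
  const size_t n = last - first;
  if (n <= 64) {  // "leaf" case, analogous to max_leaf_size
    return std::accumulate(v.begin() + first, v.begin() + last, 0LL);
  }

  const size_t mid = first + n / 2;
  long long left = 0, right = 0;
#pragma omp task shared(left) if (n > 512)
  left = range_sum(v, first, mid);
#pragma omp task shared(right) if (n > 512)
  right = range_sum(v, mid, last);
#pragma omp taskwait
  return left + right;
}

int main() {
  std::vector<int> v(100000, 1);
  long long total = 0;

  // Same launch pattern as build_tree(): one thread starts the recursion,
  // the other threads in the team pick up the generated tasks.
#pragma omp parallel
  {
#pragma omp single nowait
    total = range_sum(v, 0, v.size());
  }
  std::printf("%lld\n", total);  // 100000
}
```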
@@ -2,21 +2,81 @@
// SPDX-License-Identifier: MIT
#pragma once

#include <Eigen/Core>
#include <small_gicp/points/traits.hpp>
#include <atomic>
#include <tbb/tbb.h>
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/nanoflann_tbb.hpp>

namespace small_gicp {

/// @brief Unsafe KdTree with multi-thread tree construction with TBB backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using UnsafeKdTreeTBB = UnsafeKdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorTBB>;
/// @brief Kd-tree builder with TBB.
struct KdTreeBuilderTBB {
public:
  /// @brief Build KdTree
  template <typename KdTree, typename PointCloud>
  void build_tree(KdTree& kdtree, const PointCloud& points) const {
    kdtree.indices.resize(traits::size(points));
    std::iota(kdtree.indices.begin(), kdtree.indices.end(), 0);

/// @brief KdTree with multi-thread tree construction with TBB backend.
/// @note This class only parallelizes the tree construction. The search is still single-threaded as in the normal KdTree.
template <class PointCloud>
using KdTreeTBB = KdTreeGeneric<PointCloud, nanoflann::KDTreeSingleIndexAdaptorTBB>;
    std::atomic_uint64_t node_count = 0;
    kdtree.nodes.resize(traits::size(points));
    kdtree.root = create_node(kdtree, node_count, points, kdtree.indices.begin(), kdtree.indices.begin(), kdtree.indices.end());
    kdtree.nodes.resize(node_count);
  }

  /// @brief Create a Kd-tree node from the given point indices.
  /// @param global_first Global first point index iterator (i.e., this->indices.begin()).
  /// @param first First point index iterator to be scanned.
  /// @param last Last point index iterator to be scanned.
  /// @return Index of the created node.
  template <typename PointCloud, typename KdTree, typename IndexConstIterator>
  NodeIndexType create_node(
    KdTree& kdtree,
    std::atomic_uint64_t& node_count,
    const PointCloud& points,
    IndexConstIterator global_first,
    IndexConstIterator first,
    IndexConstIterator last) const {
    const size_t N = std::distance(first, last);
    // Create a leaf node.
    if (N <= max_leaf_size) {
      const NodeIndexType node_index = node_count++;
      auto& node = kdtree.nodes[node_index];

      // std::sort(first, last);
      node.node_type.lr.first = std::distance(global_first, first);
      node.node_type.lr.last = std::distance(global_first, last);

      return node_index;
    }

    // Find the best axis to split the input points.
    using Projection = typename KdTree::Projection;
    const auto proj = Projection::find_axis(points, first, last, projection_setting);
    const auto median_itr = first + N / 2;
    std::nth_element(first, median_itr, last, [&](size_t i, size_t j) { return proj(traits::point(points, i)) < proj(traits::point(points, j)); });

    // Create a non-leaf node.
    const NodeIndexType node_index = node_count++;
    auto& node = kdtree.nodes[node_index];
    node.node_type.sub.proj = proj;
    node.node_type.sub.thresh = proj(traits::point(points, *median_itr));

    // Create left and right child nodes.
    if (N > 512) {
      tbb::parallel_invoke(
        [&] { node.left = create_node(kdtree, node_count, points, global_first, first, median_itr); },
        [&] { node.right = create_node(kdtree, node_count, points, global_first, median_itr, last); });
    } else {
      node.left = create_node(kdtree, node_count, points, global_first, first, median_itr);
      node.right = create_node(kdtree, node_count, points, global_first, median_itr, last);
    }

    return node_index;
  }

public:
  int max_leaf_size = 20;                ///< Maximum number of points in a leaf node.
  ProjectionSetting projection_setting;  ///< Projection setting.
};

} // namespace small_gicp

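`KdTreeBuilderTBB` uses the same recursion but hands the two child builds to `tbb::parallel_invoke` when the range is large enough. The same fork pattern on a toy recursive sum (illustrative only, not library code):

```cpp
#include <cstdio>
#include <numeric>
#include <vector>
#include <tbb/parallel_invoke.h>

// Toy recursive range sum using the same fork pattern as KdTreeBuilderTBB::create_node():
// recurse in parallel for large ranges, serially below a cutoff.
long long range_sum(const std::vector<int>& v, size_t first, size_t last) {
  const size_t n = last - first;
  if (n <= 64) {  // "leaf" case, analogous to max_leaf_size
    return std::accumulate(v.begin() + first, v.begin() + last, 0LL);
  }

  const size_t mid = first + n / 2;
  long long left = 0, right = 0;
  if (n > 512) {
    tbb::parallel_invoke([&] { left = range_sum(v, first, mid); }, [&] { right = range_sum(v, mid, last); });
  } else {
    left = range_sum(v, first, mid);
    right = range_sum(v, mid, last);
  }
  return left + right;
}

int main() {
  std::vector<int> v(100000, 1);
  std::printf("%lld\n", range_sum(v, 0, v.size()));  // 100000
}
```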
@@ -0,0 +1,95 @@
#pragma once

#include <array>
#include <limits>
#include <iostream>
#include <algorithm>

namespace small_gicp {

/// @brief K-nearest neighbor search setting.
struct KnnSetting {
public:
  /// @brief Check if the result satisfies the early termination condition.
  template <typename Result>
  bool fulfilled(const Result& result) const {
    return result.worst_distance() < epsilon;
  }

public:
  double epsilon = 0.0;  ///< Early termination threshold
};

/// @brief K-nearest neighbor search result container.
/// @tparam N Number of neighbors to search. If N == -1, the number of neighbors is dynamically determined.
template <int N>
struct KnnResult {
public:
  static constexpr size_t INVALID = std::numeric_limits<size_t>::max();

  /// @brief Constructor
  /// @param indices Buffer to store indices (must be larger than k=max(N, num_neighbors))
  /// @param distances Buffer to store distances (must be larger than k=max(N, num_neighbors))
  /// @param num_neighbors Number of neighbors to search (must be -1 for static case N > 0)
  explicit KnnResult(size_t* indices, double* distances, int num_neighbors = -1) : num_neighbors(num_neighbors), indices(indices), distances(distances) {
    if constexpr (N > 0) {
      if (num_neighbors >= 0) {
        std::cerr << "warning: Specifying dynamic num_neighbors=" << num_neighbors << " for a static KNN result container (N=" << N << ")" << std::endl;
        abort();
      }
    } else {
      if (num_neighbors <= 0) {
        std::cerr << "error: Specifying invalid num_neighbors=" << num_neighbors << " for a dynamic KNN result container" << std::endl;
        abort();
      }
    }

    std::fill(this->indices, this->indices + buffer_size(), INVALID);
    std::fill(this->distances, this->distances + buffer_size(), std::numeric_limits<double>::max());
  }

  /// @brief Buffer size (i.e., Maximum number of neighbors)
  size_t buffer_size() const {
    if constexpr (N > 0) {
      return N;
    } else {
      return num_neighbors;
    }
  }

  /// @brief Number of found neighbors.
  size_t num_found() const { return std::distance(indices, std::find(indices, indices + buffer_size(), INVALID)); }

  /// @brief Worst distance in the result.
  double worst_distance() const { return distances[buffer_size() - 1]; }

  /// @brief Push a pair of point index and distance to the result.
  void push(size_t index, double distance) {
    if (distance >= worst_distance()) {
      return;
    }

    if constexpr (N == 1) {
      indices[0] = index;
      distances[0] = distance;
    } else {
      for (int i = buffer_size() - 1; i >= 0; i--) {
        if (i == 0 || distance >= distances[i - 1]) {
          indices[i] = index;
          distances[i] = distance;
          break;
        }

        indices[i] = indices[i - 1];
        distances[i] = distances[i - 1];
      }
    }
  }

public:
  const int num_neighbors;  ///< Maximum number of neighbors to search
  size_t* indices;          ///< Indices of neighbors
  double* distances;        ///< Distances to neighbors
};

} // namespace small_gicp

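Behaviour of the container above in isolation: `push()` keeps the two caller-provided buffers sorted by distance and silently drops candidates that are not better than the current worst entry, so the caller never copies results out of an internal buffer. A small sketch using only the header listed above:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>
#include <small_gicp/ann/knn_result.hpp>

int main() {
  using namespace small_gicp;

  const int k = 3;
  std::vector<size_t> indices(k);
  std::vector<double> sq_dists(k);

  // Dynamic-size result (N == -1), as used by the dynamic knn_search() overload.
  KnnResult<-1> result(indices.data(), sq_dists.data(), k);

  result.push(10, 4.0);
  result.push(11, 1.0);
  result.push(12, 9.0);
  result.push(13, 0.25);  // evicts the current worst entry (index 12, distance 9.0)

  std::cout << "found=" << result.num_found() << " worst=" << result.worst_distance() << std::endl;  // found=3 worst=4
  for (size_t i = 0; i < result.num_found(); i++) {
    std::cout << indices[i] << " : " << sq_dists[i] << std::endl;  // (13, 0.25), (11, 1), (10, 4)
  }
}
```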
@@ -0,0 +1,99 @@
// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
// SPDX-License-Identifier: MIT
#pragma once

#include <array>
#include <Eigen/Core>
#include <Eigen/Eigen>

namespace small_gicp {

/// @brief Parameters to control the projection axis search.
struct ProjectionSetting {
  int max_scan_count = 128;  ///< Maximum number of points to use for the axis search.
};

/// @brief Conventional axis-aligned projection (i.e., selecting any of XYZ axes with the largest variance).
struct AxisAlignedProjection {
public:
  /// @brief Project the point to the selected axis.
  /// @param pt Point to project
  /// @return Projected value
  double operator()(const Eigen::Vector4d& pt) const { return pt[axis]; }

  /// @brief Find the axis with the largest variance.
  /// @param points Point cloud
  /// @param first First point index iterator
  /// @param last Last point index iterator
  /// @param setting Search setting
  /// @return Projection with the largest variance axis
  template <typename PointCloud, typename IndexConstIterator>
  static AxisAlignedProjection find_axis(const PointCloud& points, IndexConstIterator first, IndexConstIterator last, const ProjectionSetting& setting) {
    const size_t N = std::distance(first, last);
    Eigen::Vector4d sum_pt = Eigen::Vector4d::Zero();
    Eigen::Vector4d sum_sq = Eigen::Vector4d::Zero();

    const size_t step = N < setting.max_scan_count ? 1 : N / setting.max_scan_count;
    const size_t num_steps = N / step;
    for (int i = 0; i < num_steps; i++) {
      const auto itr = first + step * i;
      const Eigen::Vector4d pt = traits::point(points, *itr);
      sum_pt += pt;
      sum_sq += pt.cwiseProduct(pt);
    }

    const Eigen::Vector4d mean = sum_pt / sum_pt.w();
    const Eigen::Vector4d var = (sum_sq - mean.cwiseProduct(sum_pt));

    return AxisAlignedProjection{var[0] > var[1] ? (var[0] > var[2] ? 0 : 2) : (var[1] > var[2] ? 1 : 2)};
  }

public:
  int axis;  ///< Axis index (0: X, 1: Y, 2: Z)
};

/// @brief Normal projection (i.e., selecting the 3D direction with the largest variance of the points).
struct NormalProjection {
public:
  /// @brief Project the point to the normal direction.
  /// @param pt Point to project
  /// @return Projected value
  double operator()(const Eigen::Vector4d& pt) const { return Eigen::Map<const Eigen::Vector3d>(normal.data()).dot(pt.head<3>()); }

  /// @brief Find the direction with the largest variance.
  /// @param points Point cloud
  /// @param first First point index iterator
  /// @param last Last point index iterator
  /// @param setting Search setting
  /// @return Projection with the largest variance direction
  template <typename PointCloud, typename IndexConstIterator>
  static NormalProjection find_axis(const PointCloud& points, IndexConstIterator first, IndexConstIterator last, const ProjectionSetting& setting) {
    const size_t N = std::distance(first, last);
    Eigen::Vector4d sum_pt = Eigen::Vector4d::Zero();
    Eigen::Matrix4d sum_sq = Eigen::Matrix4d::Zero();

    const size_t step = N < setting.max_scan_count ? 1 : N / setting.max_scan_count;
    const size_t num_steps = N / step;
    for (int i = 0; i < num_steps; i++) {
      const auto itr = first + step * i;
      const Eigen::Vector4d pt = traits::point(points, *itr);
      sum_pt += pt;
      sum_sq += pt * pt.transpose();
    }

    const Eigen::Vector4d mean = sum_pt / sum_pt.w();
    const Eigen::Matrix4d cov = (sum_sq - mean * sum_pt.transpose()) / sum_pt.w();

    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eig;
    eig.computeDirect(cov.block<3, 3>(0, 0));

    NormalProjection p;
    Eigen::Map<Eigen::Vector3d>(p.normal.data()) = eig.eigenvectors().col(2);
    return p;
  }

public:
  std::array<double, 3> normal;  ///< Projection direction
};

} // namespace small_gicp

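`AxisAlignedProjection::find_axis()` above picks the split axis from a one-pass, subsampled variance estimate per axis (Σx² − mean·Σx, left unnormalized because only the argmax matters). The same computation in a self-contained Eigen snippet (illustrative only):

```cpp
#include <iostream>
#include <vector>
#include <Eigen/Core>

int main() {
  // Toy points in homogeneous form (w = 1), spread mostly along Y.
  std::vector<Eigen::Vector4d> pts = {{0, -4, 1, 1}, {1, 0, 0, 1}, {0, 5, 1, 1}, {-1, 2, 0, 1}};

  // One-pass sums, as in AxisAlignedProjection::find_axis().
  Eigen::Vector4d sum_pt = Eigen::Vector4d::Zero();
  Eigen::Vector4d sum_sq = Eigen::Vector4d::Zero();
  for (const auto& pt : pts) {
    sum_pt += pt;
    sum_sq += pt.cwiseProduct(pt);
  }

  const Eigen::Vector4d mean = sum_pt / sum_pt.w();                // sum of w == number of points
  const Eigen::Vector4d var = sum_sq - mean.cwiseProduct(sum_pt);  // unnormalized per-axis variance

  const int axis = var[0] > var[1] ? (var[0] > var[2] ? 0 : 2) : (var[1] > var[2] ? 1 : 2);
  std::cout << "split axis = " << axis << std::endl;  // 1 (Y has the largest spread)
}
```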
@@ -16,7 +16,7 @@ struct NullFactor {
 /// @param source Source point cloud
 /// @param target_tree Nearest neighbor search for the target point cloud
 /// @param T Linearization point
-/// @param H [in/out] Linearized precision matrix.
+/// @param H [in/out] Linearized information matrix.
 /// @param b [in/out] Linearized information vector.
 /// @param e [in/out] Error at the linearization point.
 template <typename TargetPointCloud, typename SourcePointCloud, typename TargetTree>

@@ -22,7 +22,7 @@ struct GICPFactor {
 /// @param T Linearization point
 /// @param source_index Source point index
 /// @param rejector Correspondence rejector
-/// @param H Linearized precision matrix
+/// @param H Linearized information matrix
 /// @param b Linearized information vector
 /// @param e Error at the linearization point
 /// @return

@@ -99,8 +99,8 @@ protected:
 std::string registration_type_; ///< Registration type ("GICP" or "VGICP").
 bool verbose_;                  ///< Verbosity flag.

-std::shared_ptr<KdTreeOMP<pcl::PointCloud<PointSource>>> target_tree_; ///< KdTree for target point cloud.
-std::shared_ptr<KdTreeOMP<pcl::PointCloud<PointSource>>> source_tree_; ///< KdTree for source point cloud.
+std::shared_ptr<KdTree<pcl::PointCloud<PointSource>>> target_tree_; ///< KdTree for target point cloud.
+std::shared_ptr<KdTree<pcl::PointCloud<PointSource>>> source_tree_; ///< KdTree for source point cloud.

 std::shared_ptr<GaussianVoxelMap> target_voxelmap_; ///< VoxelMap for target point cloud.
 std::shared_ptr<GaussianVoxelMap> source_voxelmap_; ///< VoxelMap for source point cloud.

@@ -44,7 +44,7 @@ void RegistrationPCL<PointSource, PointTarget>::setInputSource(const PointCloudS
 }

 pcl::Registration<PointSource, PointTarget, Scalar>::setInputSource(cloud);
-source_tree_ = std::make_shared<KdTreeOMP<pcl::PointCloud<PointSource>>>(input_, num_threads_);
+source_tree_ = std::make_shared<KdTree<pcl::PointCloud<PointSource>>>(input_, KdTreeBuilderOMP(num_threads_));
 source_covs_.clear();
 source_voxelmap_.reset();
 }

@@ -56,7 +56,7 @@ void RegistrationPCL<PointSource, PointTarget>::setInputTarget(const PointCloudT
 }

 pcl::Registration<PointSource, PointTarget, Scalar>::setInputTarget(cloud);
-target_tree_ = std::make_shared<KdTreeOMP<pcl::PointCloud<PointTarget>>>(target_, num_threads_);
+target_tree_ = std::make_shared<KdTree<pcl::PointCloud<PointTarget>>>(target_, KdTreeBuilderOMP(num_threads_));
 target_covs_.clear();
 target_voxelmap_.reset();
 }

@@ -16,7 +16,7 @@ struct SerialReduction {
 /// @param rejector Correspondence rejector
 /// @param T Linearization point
 /// @param factors Factors to be linearized
-/// @return Sum of the linearized systems (Precision matrix, information vector, and error)
+/// @return Sum of the linearized systems (information matrix, information vector, and error)
 template <typename TargetPointCloud, typename SourcePointCloud, typename TargetTree, typename CorrespondenceRejector, typename Factor>
 std::tuple<Eigen::Matrix<double, 6, 6>, Eigen::Matrix<double, 6, 1>, double> linearize(
   const TargetPointCloud& target,

@@ -24,7 +24,7 @@ struct RegistrationResult {
 size_t iterations;  ///< Number of optimization iterations
 size_t num_inliers; ///< Number of inlier points

-Eigen::Matrix<double, 6, 6> H; ///< Final precision matrix
+Eigen::Matrix<double, 6, 6> H; ///< Final information matrix
 Eigen::Matrix<double, 6, 1> b; ///< Final information vector
 double error;                  ///< Final error
 };

@@ -108,11 +108,11 @@ int main(int argc, char** argv) {
 if (method == "small") {
   UnsafeKdTree<PointCloud> tree(*downsampled);
 } else if (method == "omp") {
-  UnsafeKdTreeOMP<PointCloud> tree(*downsampled, num_threads);
+  UnsafeKdTree<PointCloud> tree(*downsampled, KdTreeBuilderOMP(num_threads));
 }
 #ifdef BUILD_WITH_TBB
 else if (method == "tbb") {
-  UnsafeKdTreeTBB<PointCloud> tree(*downsampled);
+  UnsafeKdTree<PointCloud> tree(*downsampled, KdTreeBuilderTBB());
 }
 #endif
 }

@@ -139,7 +139,7 @@ int main(int argc, char** argv) {
 Summarizer kdtree_omp_times(true);
 sw.start();
 for (size_t j = 0; j < num_trials; j++) {
-  UnsafeKdTreeOMP<PointCloud> tree(*downsampled[i], num_threads);
+  UnsafeKdTree<PointCloud> tree(*downsampled[i], KdTreeBuilderOMP(num_threads));
   sw.lap();
   kdtree_omp_times.push(sw.msec());
 }

@@ -153,7 +153,7 @@ int main(int argc, char** argv) {
 Summarizer kdtree_tbb_times(true);
 sw.start();
 for (size_t j = 0; j < num_trials; j++) {
-  UnsafeKdTreeTBB<PointCloud> tree(*downsampled[i]);
+  UnsafeKdTree<PointCloud> tree(*downsampled[i], KdTreeBuilderTBB());
   sw.lap();
   kdtree_tbb_times.push(sw.msec());
 }

@@ -19,7 +19,7 @@ public:

 // Preprocess input points (kdtree construction & covariance estimation)
 // Note that input points here is already downsampled
-auto tree = std::make_shared<KdTreeOMP<PointCloud>>(points, params.num_threads);
+auto tree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(params.num_threads));
 estimate_covariances_omp(*points, *tree, params.num_neighbors, params.num_threads);

 if (target_points == nullptr) {

@@ -55,8 +55,8 @@ public:
 private:
 Summarizer reg_times;

-PointCloud::Ptr target_points; // Last point cloud to be registration target
-KdTreeOMP<PointCloud>::Ptr target_tree; // KdTree of the last point cloud
+PointCloud::Ptr target_points; // Last point cloud to be registration target
+KdTree<PointCloud>::Ptr target_tree; // KdTree of the last point cloud

 Eigen::Isometry3d T_world_lidar; // T_world_lidar
 };

@@ -23,7 +23,7 @@ public:
 Stopwatch sw;
 sw.start();

-auto tree = std::make_shared<KdTreeTBB<PointCloud>>(points);
+auto tree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderTBB());
 estimate_covariances_tbb(*points, *tree, params.num_neighbors);

 if (target_points == nullptr) {

@@ -57,7 +57,7 @@ private:
 Summarizer reg_times;

 PointCloud::Ptr target_points;
-KdTreeTBB<PointCloud>::Ptr target_tree;
+KdTree<PointCloud>::Ptr target_tree;

 Eigen::Isometry3d T;
 };

@@ -18,7 +18,7 @@ public:
 Stopwatch sw;
 sw.start();

-auto tree = std::make_shared<KdTreeOMP<PointCloud>>(points, params.num_threads);
+auto tree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(params.num_threads));
 estimate_covariances_omp(*points, *tree, params.num_neighbors, params.num_threads);

 auto voxelmap = std::make_shared<GaussianVoxelMap>(params.voxel_resolution);

@@ -56,8 +56,8 @@ void example2(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& raw_target, const
 estimate_covariances_omp(*source, num_neighbors, num_threads);

 // Create KdTree for target and source.
-auto target_tree = std::make_shared<KdTreeOMP<pcl::PointCloud<pcl::PointCovariance>>>(target, num_threads);
-auto source_tree = std::make_shared<KdTreeOMP<pcl::PointCloud<pcl::PointCovariance>>>(source, num_threads);
+auto target_tree = std::make_shared<KdTree<pcl::PointCloud<pcl::PointCovariance>>>(target, KdTreeBuilderOMP(num_threads));
+auto source_tree = std::make_shared<KdTree<pcl::PointCloud<pcl::PointCovariance>>>(source, KdTreeBuilderOMP(num_threads));

 Registration<GICPFactor, ParallelReductionOMP> registration;
 registration.reduction.num_threads = num_threads;

@@ -33,8 +33,8 @@ void example1(const std::vector<Eigen::Vector4f>& target_points, const std::vect
 source = voxelgrid_sampling_omp(*source, downsampling_resolution, num_threads);

 // Create KdTree
-auto target_tree = std::make_shared<KdTreeOMP<PointCloud>>(target, num_threads);
-auto source_tree = std::make_shared<KdTreeOMP<PointCloud>>(source, num_threads);
+auto target_tree = std::make_shared<KdTree<PointCloud>>(target, KdTreeBuilderOMP(num_threads));
+auto source_tree = std::make_shared<KdTree<PointCloud>>(source, KdTreeBuilderOMP(num_threads));

 // Estimate point covariances
 estimate_covariances_omp(*target, *target_tree, num_neighbors, num_threads);

@@ -219,7 +219,7 @@ struct MyGeneralFactor {
 /// @param source Source point cloud
 /// @param target_tree Nearest neighbor search for the target point cloud
 /// @param T Linearization point
-/// @param H [in/out] Linearized precision matrix.
+/// @param H [in/out] Linearized information matrix.
 /// @param b [in/out] Linearized information vector.
 /// @param e [in/out] Error at the linearization point.
 template <typename TargetPointCloud, typename SourcePointCloud, typename TargetTree>

@@ -99,7 +99,7 @@ void define_align(py::module& m) {
 [](
   const PointCloud::ConstPtr& target,
   const PointCloud::ConstPtr& source,
-  KdTreeOMP<PointCloud>::ConstPtr target_tree,
+  KdTree<PointCloud>::ConstPtr target_tree,
   const Eigen::Matrix4d& init_T_target_source,
   double max_correspondence_distance,
   int num_threads,

@@ -110,7 +110,7 @@ void define_align(py::module& m) {
 registration.optimizer.max_iterations = max_iterations;

 if (target_tree == nullptr) {
-  target_tree = std::make_shared<KdTreeOMP<PointCloud>>(target, num_threads);
+  target_tree = std::make_shared<KdTree<PointCloud>>(target, KdTreeBuilderOMP(num_threads));
 }
 return registration.align(*target, *source, *target_tree, Eigen::Isometry3d(init_T_target_source));
 },

@@ -33,7 +33,7 @@ void define_factors(py::module& m) {
ICPFactor& factor,
const PointCloud& target,
const PointCloud& source,
const KdTreeOMP<PointCloud>& kdtree,
const KdTree<PointCloud>& kdtree,
const Eigen::Matrix4d& T,
size_t source_index,
const DistanceRejector& rejector) -> std::tuple<bool, Eigen::Matrix<double, 6, 6>, Eigen::Matrix<double, 6, 1>, double> {
@@ -53,7 +53,7 @@ void define_factors(py::module& m) {
PointToPlaneICPFactor& factor,
const PointCloud& target,
const PointCloud& source,
const KdTreeOMP<PointCloud>& kdtree,
const KdTree<PointCloud>& kdtree,
const Eigen::Matrix4d& T,
size_t source_index,
const DistanceRejector& rejector) -> std::tuple<bool, Eigen::Matrix<double, 6, 6>, Eigen::Matrix<double, 6, 1>, double> {
@@ -73,7 +73,7 @@ void define_factors(py::module& m) {
GICPFactor& factor,
const PointCloud& target,
const PointCloud& source,
const KdTreeOMP<PointCloud>& kdtree,
const KdTree<PointCloud>& kdtree,
const Eigen::Matrix4d& T,
size_t source_index,
const DistanceRejector& rejector) -> std::tuple<bool, Eigen::Matrix<double, 6, 6>, Eigen::Matrix<double, 6, 1>, double> {
@@ -16,17 +16,20 @@ using namespace small_gicp;

void define_kdtree(py::module& m) {
// KdTree
py::class_<KdTreeOMP<PointCloud>, std::shared_ptr<KdTreeOMP<PointCloud>>>(m, "KdTree") //
.def(py::init<PointCloud::ConstPtr, int>(), py::arg("points"), py::arg("num_threads") = 1)
py::class_<KdTree<PointCloud>, std::shared_ptr<KdTree<PointCloud>>>(m, "KdTree") //
.def(
py::init([](const PointCloud::ConstPtr& points, int num_threads) { return std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(num_threads)); }),
py::arg("points"),
py::arg("num_threads") = 1)
.def(
"nearest_neighbor_search",
[](const KdTreeOMP<PointCloud>& kdtree, const Eigen::Vector3d& pt) {
[](const KdTree<PointCloud>& kdtree, const Eigen::Vector3d& pt) {
size_t k_index = -1;
double k_sq_dist = std::numeric_limits<double>::max();
const size_t found = traits::nearest_neighbor_search(kdtree, Eigen::Vector4d(pt.x(), pt.y(), pt.z(), 1.0), &k_index, &k_sq_dist);
return std::make_tuple(found, k_index, k_sq_dist);
})
.def("knn_search", [](const KdTreeOMP<PointCloud>& kdtree, const Eigen::Vector3d& pt, int k) {
.def("knn_search", [](const KdTree<PointCloud>& kdtree, const Eigen::Vector3d& pt, int k) {
std::vector<size_t> k_indices(k, -1);
std::vector<double> k_sq_dists(k, std::numeric_limits<double>::max());
const size_t found = traits::knn_search(kdtree, Eigen::Vector4d(pt.x(), pt.y(), pt.z(), 1.0), k, k_indices.data(), k_sq_dists.data());
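Not part of the commit: the lambdas above wrap the traits-layer search functions, which can also be called directly from C++. A sketch assuming the signatures exactly as they appear in this hunk:

```cpp
#include <limits>
#include <vector>

#include <Eigen/Core>
#include <small_gicp/points/point_cloud.hpp>
#include <small_gicp/ann/kdtree.hpp>

using namespace small_gicp;

void query_examples(const KdTree<PointCloud>& kdtree, const Eigen::Vector4d& pt, int k) {
  // Single nearest neighbor; the return value is the number of neighbors found (0 or 1).
  size_t index = 0;
  double sq_dist = std::numeric_limits<double>::max();
  const size_t found = traits::nearest_neighbor_search(kdtree, pt, &index, &sq_dist);

  // k-nearest neighbors; results are written into caller-provided buffers and the
  // return value is how many of the k slots were actually filled.
  std::vector<size_t> k_indices(k, 0);
  std::vector<double> k_sq_dists(k, std::numeric_limits<double>::max());
  const size_t num_found = traits::knn_search(kdtree, pt, k, k_indices.data(), k_sq_dists.data());

  (void)found;
  (void)num_found;
}
```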
@@ -57,9 +57,9 @@ void define_preprocess(py::module& m) {
// estimate_normals
m.def(
"estimate_normals",
[](PointCloud::Ptr points, std::shared_ptr<KdTreeOMP<PointCloud>> tree, int num_neighbors, int num_threads) {
[](PointCloud::Ptr points, std::shared_ptr<KdTree<PointCloud>> tree, int num_neighbors, int num_threads) {
if (tree == nullptr) {
tree = std::make_shared<KdTreeOMP<PointCloud>>(points, num_threads);
tree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(num_threads));
}

if (num_threads == 1) {
@@ -76,9 +76,9 @@ void define_preprocess(py::module& m) {
// estimate_covariances
m.def(
"estimate_covariances",
[](PointCloud::Ptr points, std::shared_ptr<KdTreeOMP<PointCloud>> tree, int num_neighbors, int num_threads) {
[](PointCloud::Ptr points, std::shared_ptr<KdTree<PointCloud>> tree, int num_neighbors, int num_threads) {
if (tree == nullptr) {
tree = std::make_shared<KdTreeOMP<PointCloud>>(points, num_threads);
tree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(num_threads));
}

if (num_threads == 1) {
@@ -95,9 +95,9 @@ void define_preprocess(py::module& m) {
// estimate_normals_covariances
m.def(
"estimate_normals_covariances",
[](PointCloud::Ptr points, std::shared_ptr<KdTreeOMP<PointCloud>> tree, int num_neighbors, int num_threads) {
[](PointCloud::Ptr points, std::shared_ptr<KdTree<PointCloud>> tree, int num_neighbors, int num_threads) {
if (tree == nullptr) {
tree = std::make_shared<KdTreeOMP<PointCloud>>(points, num_threads);
tree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(num_threads));
}

if (num_threads == 1) {
@@ -114,7 +114,7 @@ void define_preprocess(py::module& m) {
// preprocess_points (numpy)
m.def(
"preprocess_points",
[](const Eigen::MatrixXd& points_numpy, double downsampling_resolution, int num_neighbors, int num_threads) -> std::pair<PointCloud::Ptr, KdTreeOMP<PointCloud>::Ptr> {
[](const Eigen::MatrixXd& points_numpy, double downsampling_resolution, int num_neighbors, int num_threads) -> std::pair<PointCloud::Ptr, KdTree<PointCloud>::Ptr> {
if (points_numpy.cols() != 3 && points_numpy.cols() != 4) {
std::cerr << "points_numpy must be Nx3 or Nx4" << std::endl;
return {nullptr, nullptr};
@@ -131,7 +131,7 @@ void define_preprocess(py::module& m) {
}

auto downsampled = voxelgrid_sampling_omp(*points, downsampling_resolution, num_threads);
auto kdtree = std::make_shared<KdTreeOMP<PointCloud>>(downsampled, num_threads);
auto kdtree = std::make_shared<KdTree<PointCloud>>(downsampled, KdTreeBuilderOMP(num_threads));
estimate_normals_covariances_omp(*downsampled, *kdtree, num_neighbors, num_threads);
return {downsampled, kdtree};
},
@@ -143,14 +143,14 @@ void define_preprocess(py::module& m) {
// preprocess_points
m.def(
"preprocess_points",
[](const PointCloud& points, double downsampling_resolution, int num_neighbors, int num_threads) -> std::pair<PointCloud::Ptr, KdTreeOMP<PointCloud>::Ptr> {
[](const PointCloud& points, double downsampling_resolution, int num_neighbors, int num_threads) -> std::pair<PointCloud::Ptr, KdTree<PointCloud>::Ptr> {
if (points.empty()) {
std::cerr << "warning: points is empty" << std::endl;
return {nullptr, nullptr};
}

auto downsampled = voxelgrid_sampling_omp(points, downsampling_resolution, num_threads);
auto kdtree = std::make_shared<KdTreeOMP<PointCloud>>(downsampled, num_threads);
auto kdtree = std::make_shared<KdTree<PointCloud>>(downsampled, KdTreeBuilderOMP(num_threads));
estimate_normals_covariances_omp(*downsampled, *kdtree, num_neighbors, num_threads);
return {downsampled, kdtree};
},
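Not part of the commit: the same preprocessing pipeline, sketched as standalone C++. Function and type names are taken verbatim from the hunk above; the two utility header paths are assumptions and may differ in the actual source tree:

```cpp
#include <memory>
#include <utility>

#include <small_gicp/points/point_cloud.hpp>
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/kdtree_omp.hpp>
#include <small_gicp/util/downsampling_omp.hpp>       // assumed header for voxelgrid_sampling_omp
#include <small_gicp/util/normal_estimation_omp.hpp>  // assumed header for estimate_normals_covariances_omp

using namespace small_gicp;

std::pair<PointCloud::Ptr, KdTree<PointCloud>::Ptr>
preprocess(const PointCloud& points, double resolution, int num_neighbors, int num_threads) {
  // Downsample, build the KdTree with the OpenMP builder, then estimate normals and covariances in place.
  auto downsampled = voxelgrid_sampling_omp(points, resolution, num_threads);
  auto kdtree = std::make_shared<KdTree<PointCloud>>(downsampled, KdTreeBuilderOMP(num_threads));
  estimate_normals_covariances_omp(*downsampled, *kdtree, num_neighbors, num_threads);
  return {downsampled, kdtree};
}
```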
@@ -0,0 +1,262 @@
#include <algorithm>
#include <queue>
#include <random>
#include <sstream>

#include <gtest/gtest.h>
#include <small_gicp/points/point_cloud.hpp>
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/kdtree_omp.hpp>
#include <small_gicp/ann/kdtree_tbb.hpp>

using namespace small_gicp;

struct Problem {
  using Ptr = std::shared_ptr<Problem>;
  using ConstPtr = std::shared_ptr<const Problem>;

  std::string target_name;
  std::string query_name;
  PointCloud::Ptr target;
  PointCloud::Ptr query;

  std::vector<std::vector<size_t>> indices;
  std::vector<std::vector<double>> dists;
};

class KdTreeSyntheticTest : public testing::Test, public testing::WithParamInterface<std::string> {
public:
  void SetUp() override {
    std::mt19937 mt;

    std::vector<Eigen::Vector4d> points(256);

    // Generate point sets with different distributions
    // Uniform real [-1.0, 1.0]
    auto udist = std::uniform_real_distribution<>(-1.0, 1.0);
    std::generate(points.begin(), points.end(), [&]() { return Eigen::Vector4d(udist(mt), udist(mt), udist(mt), 1.0); });
    names.emplace_back("r[-1.0, 1.0]");
    targets.emplace_back(std::make_shared<PointCloud>(points));

    // Uniform real with a wide band [-1e6, 1e6]
    udist = std::uniform_real_distribution<>(-1e6, 1e6);
    std::generate(points.begin(), points.end(), [&]() { return Eigen::Vector4d(udist(mt), udist(mt), udist(mt), 1.0); });
    names.emplace_back("r[-1e6, 1e6]");
    targets.emplace_back(std::make_shared<PointCloud>(points));

    // Two separate uniform real distributions [-1.0, -0.5] + [0.5, 1.0]
    auto udist_l = std::uniform_real_distribution<>(-1.0, -0.5);
    auto udist_r = std::uniform_real_distribution<>(0.5, 1.0);
    std::generate(points.begin(), points.begin() + points.size() / 2, [&]() { return Eigen::Vector4d(udist_l(mt), udist_l(mt), udist_l(mt), 1.0); });
    std::generate(points.begin() + points.size() / 2, points.end(), [&]() { return Eigen::Vector4d(udist_r(mt), udist_r(mt), udist_r(mt), 1.0); });
    names.emplace_back("r[-1.0, -0.5]+R[0.5, 1.0]");
    targets.emplace_back(std::make_shared<PointCloud>(points));

    // Uniform integer [-3, 3]
    auto idist = std::uniform_int_distribution<>(-3, 3);
    std::generate(points.begin(), points.end(), [&]() { return Eigen::Vector4d(idist(mt), idist(mt), idist(mt), 1.0); });
    names.emplace_back("i[-3, 3]");
    targets.emplace_back(std::make_shared<PointCloud>(points));

    // Normal distribution (mean=0.0, std=1.0)
    auto ndist = std::normal_distribution<>(0.0, 1.0);
    std::generate(points.begin(), points.end(), [&]() { return Eigen::Vector4d(ndist(mt), ndist(mt), ndist(mt), 1.0); });
    names.emplace_back("n(0.0,1.0)");
    targets.emplace_back(std::make_shared<PointCloud>(points));

    const int N = targets.size();
    for (int i = 0; i < N; i++) {
      // Create point sets with fewer points
      auto points = std::make_shared<PointCloud>(targets[i]->points);
      points->resize(10);
      names.emplace_back(names[i] + "(10pts)");
      targets.emplace_back(points);

      points = std::make_shared<PointCloud>(targets[i]->points);
      points->resize(5);
      names.emplace_back(names[i] + "(5pts)");
      targets.emplace_back(points);
    }

    // Generate problems and groundtruth
    for (int i = 0; i < targets.size(); i++) {
      for (int j = 0; j < targets.size(); j++) {
        auto problem = std::make_shared<Problem>();
        problem->target_name = names[i];
        problem->query_name = names[j];
        problem->target = targets[i];
        problem->query = targets[j];

        auto [indices, dists] = bruteforce_knn(*problem->target, *problem->query);
        problem->indices = std::move(indices);
        problem->dists = std::move(dists);

        problems.emplace_back(problem);
      }
    }
  }

  // Brute-force k-nearest neighbor search
  std::pair<std::vector<std::vector<size_t>>, std::vector<std::vector<double>>> bruteforce_knn(const PointCloud& target, const PointCloud& queries) {
    const int k = 20;
    std::vector<std::vector<size_t>> k_indices(queries.size());
    std::vector<std::vector<double>> k_sq_dists(queries.size());

    struct IndexDist {
      bool operator<(const IndexDist& rhs) const { return dist < rhs.dist; }
      int index;
      double dist;
    };

    for (int i = 0; i < queries.size(); i++) {
      const auto& query = queries.point(i);

      // Max-heap of the k closest candidates seen so far (the farthest candidate sits on top).
      std::priority_queue<IndexDist> queue;
      for (int j = 0; j < target.size(); j++) {
        const double sq_dist = (target.point(j) - query).squaredNorm();
        if (queue.size() < k) {
          queue.push(IndexDist{j, sq_dist});
        } else if (sq_dist < queue.top().dist) {
          queue.pop();
          queue.push(IndexDist{j, sq_dist});
        }
      }

      // Pop from farthest to nearest and write the results in ascending distance order.
      std::vector<size_t> indices(queue.size(), 0);
      std::vector<double> dists(queue.size(), 0.0);
      while (!queue.empty()) {
        indices[queue.size() - 1] = queue.top().index;
        dists[queue.size() - 1] = queue.top().dist;
        queue.pop();
      }

      k_indices[i] = std::move(indices);
      k_sq_dists[i] = std::move(dists);
    }

    return {k_indices, k_sq_dists};
  }

  template <typename KdTree>
  std::pair<std::vector<std::vector<size_t>>, std::vector<std::vector<double>>> nearest_neighbor_search(const KdTree& kdtree, const PointCloud& target, const PointCloud& queries) {
    std::vector<std::vector<size_t>> k_indices(queries.size());
    std::vector<std::vector<double>> k_sq_dists(queries.size());

    for (int i = 0; i < queries.size(); i++) {
      std::vector<size_t> indices(1, 0);
      std::vector<double> sq_dists(1, 0.0);

      const size_t num_results = traits::nearest_neighbor_search(kdtree, queries.point(i), indices.data(), sq_dists.data());
      indices.resize(num_results);
      sq_dists.resize(num_results);

      k_indices[i] = std::move(indices);
      k_sq_dists[i] = std::move(sq_dists);
    }

    return {k_indices, k_sq_dists};
  }

  template <typename KdTree>
  std::pair<std::vector<std::vector<size_t>>, std::vector<std::vector<double>>> knn_search(const KdTree& kdtree, const PointCloud& target, const PointCloud& queries, int k) {
    std::vector<std::vector<size_t>> k_indices(queries.size());
    std::vector<std::vector<double>> k_sq_dists(queries.size());

    for (int i = 0; i < queries.size(); i++) {
      std::vector<size_t> indices(k, 0);
      std::vector<double> sq_dists(k, 0.0);

      const size_t num_results = traits::knn_search(kdtree, queries.point(i), k, indices.data(), sq_dists.data());
      indices.resize(num_results);
      sq_dists.resize(num_results);

      k_indices[i] = std::move(indices);
      k_sq_dists[i] = std::move(sq_dists);
    }

    return {k_indices, k_sq_dists};
  }

  void
  validate(const Problem::Ptr& prob, const std::vector<std::vector<size_t>>& k_indices, const std::vector<std::vector<double>>& k_sq_dists, int k, const std::string& test_name) {
    EXPECT_EQ(k_indices.size(), prob->query->size()) << test_name;
    EXPECT_EQ(k_sq_dists.size(), prob->query->size()) << test_name;

    for (int i = 0; i < prob->query->size(); i++) {
      const int expected_n = std::min<int>(k, prob->target->size());
      EXPECT_EQ(k_indices[i].size(), expected_n) << test_name;
      EXPECT_EQ(k_sq_dists[i].size(), expected_n) << test_name;

      for (int j = 0; j < expected_n; j++) {
        const double sq_dist = (prob->target->point(k_indices[i][j]) - prob->query->point(i)).squaredNorm();
        EXPECT_NEAR(k_sq_dists[i][j], sq_dist, 1e-3) << test_name;
        EXPECT_NEAR(k_sq_dists[i][j], prob->dists[i][j], 1e-3) << test_name;
      }
    }
  }

protected:
  std::vector<std::string> names;
  std::vector<PointCloud::Ptr> targets;

  std::vector<std::shared_ptr<Problem>> problems;
};

// Check if the data is synthesized correctly
TEST_F(KdTreeSyntheticTest, LoadCheck) {
  EXPECT_EQ(names.size(), targets.size());
  EXPECT_NE(problems.size(), 0);

  for (const auto& prob : problems) {
    EXPECT_EQ(prob->indices.size(), prob->query->size());
    EXPECT_EQ(prob->dists.size(), prob->query->size());

    for (int i = 0; i < prob->query->size(); i++) {
      EXPECT_EQ(prob->indices[i].size(), prob->dists[i].size());
      EXPECT_EQ(prob->indices[i].size(), std::min<int>(20, prob->target->size()));
      EXPECT_TRUE(std::is_sorted(prob->dists[i].begin(), prob->dists[i].end()));
    }
  }
}

INSTANTIATE_TEST_SUITE_P(KdTreeSyntheticTest, KdTreeSyntheticTest, testing::Values("SMALL", "TBB", "OMP"), [](const auto& info) { return info.param; });

// Check if kdtree works correctly
TEST_P(KdTreeSyntheticTest, KnnTest) {
  const auto method = GetParam();

  for (const auto& prob : problems) {
    KdTree<PointCloud>::Ptr kdtree;
    KdTree<PointCloud, NormalProjection>::Ptr kdtree_normal;

    if (method == "SMALL") {
      kdtree = std::make_shared<KdTree<PointCloud>>(prob->target);
      kdtree_normal = std::make_shared<KdTree<PointCloud, NormalProjection>>(prob->target);
    } else if (method == "TBB") {
      kdtree = std::make_shared<KdTree<PointCloud>>(prob->target, KdTreeBuilderTBB());
      kdtree_normal = std::make_shared<KdTree<PointCloud, NormalProjection>>(prob->target, KdTreeBuilderTBB());
    } else if (method == "OMP") {
      kdtree = std::make_shared<KdTree<PointCloud>>(prob->target, KdTreeBuilderOMP());
      kdtree_normal = std::make_shared<KdTree<PointCloud, NormalProjection>>(prob->target, KdTreeBuilderOMP());
    }

    std::vector<int> ks = {1, 2, 3, 5, 10, 20};
    for (auto k : ks) {
      std::stringstream test_name;
      test_name << prob->target_name << "(" << prob->target_name << "," << prob->query_name << "," << k << ")";

      const auto [k_indices, k_sq_dists] = knn_search(*kdtree, *prob->target, *prob->query, k);
      validate(prob, k_indices, k_sq_dists, k, test_name.str());

      test_name << "_normal";
      const auto [k_indices2, k_sq_dists2] = knn_search(*kdtree_normal, *prob->target, *prob->query, k);
      validate(prob, k_indices2, k_sq_dists2, k, test_name.str());
    }

    std::stringstream test_name;
    test_name << prob->target_name << "(" << prob->target_name << "," << prob->query_name << ",nn)";
    const auto [k_indices, k_sq_dists] = nearest_neighbor_search(*kdtree, *prob->target, *prob->query);
    validate(prob, k_indices, k_sq_dists, 1, test_name.str());

    test_name << "_normal";
    const auto [k_indices2, k_sq_dists2] = nearest_neighbor_search(*kdtree_normal, *prob->target, *prob->query);
    validate(prob, k_indices2, k_sq_dists2, 1, test_name.str());
  }
}

@@ -185,16 +185,16 @@ TEST_P(KdTreeTest, KnnTest) {
auto kdtree_pcl = std::make_shared<KdTree<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl);
test_kdtree(*points_pcl, *kdtree_pcl);
} else if (method == "TBB") {
auto kdtree = std::make_shared<KdTreeTBB<PointCloud>>(points);
auto kdtree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderTBB());
test_kdtree(*points, *kdtree);

auto kdtree_pcl = std::make_shared<KdTreeTBB<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl);
auto kdtree_pcl = std::make_shared<KdTree<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl, KdTreeBuilderTBB());
test_kdtree(*points_pcl, *kdtree_pcl);
} else if (method == "OMP") {
auto kdtree = std::make_shared<KdTreeOMP<PointCloud>>(points, 4);
auto kdtree = std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(4));
test_kdtree(*points, *kdtree);

auto kdtree_pcl = std::make_shared<KdTreeOMP<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl, 4);
auto kdtree_pcl = std::make_shared<KdTree<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl, KdTreeBuilderOMP(4));
test_kdtree(*points_pcl, *kdtree_pcl);
} else if (method == "IVOX") {
auto voxelmap = std::make_shared<IncrementalVoxelMap<FlatContainerNormalCov>>(1.0);