* increase the amount of test data to exercise the multi-threaded sort

* test for cached_kdtree

* test for flat_voxelmap
koide3 2024-04-03 11:49:50 +09:00 committed by GitHub
parent 3cc60c0187
commit 088cc972bc
4 changed files with 60 additions and 25 deletions

View File

@@ -21,16 +21,18 @@ public:
/// @brief Constructor
/// @param points Input points
/// @param leaf_size Cache voxel resolution
- CachedKdTree(const PointCloud& points, double leaf_size) : inv_leaf_size(1.0 / leaf_size), kdtree(points) {}
+ CachedKdTree(const std::shared_ptr<const PointCloud>& points, double leaf_size) : inv_leaf_size(1.0 / leaf_size), kdtree(points) {}
size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const {
const Eigen::Vector3i coord = fast_floor(pt * inv_leaf_size).head<3>();
CacheTable::const_accessor ca;
if (cache.find(ca, coord)) {
- std::copy(ca->second.first.begin(), ca->second.first.end(), k_indices);
- std::copy(ca->second.second.begin(), ca->second.second.end(), k_sq_dists);
- return ca->second.first.size();
+ const size_t n = std::min(ca->second.first.size(), k);
+ std::copy(ca->second.first.begin(), ca->second.first.begin() + n, k_indices);
+ std::copy(ca->second.second.begin(), ca->second.second.begin() + n, k_sq_dists);
+ return n;
}
const size_t n = kdtree.knn_search(pt, k, k_indices, k_sq_dists);
@@ -53,7 +55,7 @@ public:
using CacheTable = tbb::concurrent_hash_map<Eigen::Vector3i, KnnResult, XORVector3iHash>;
mutable CacheTable cache;
- UnsafeKdTree<PointCloud> kdtree;
+ KdTree<PointCloud> kdtree;
};
namespace traits {

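Note: the cache-insertion path is cut off in the hunk above, so the following standalone sketch shows the overall find-or-compute pattern that CachedKdTree appears to implement: queries are keyed by the voxel containing the query point, a cache hit returns at most k stored neighbors (which is why the new test treats this method as approximate), and a miss falls back to the exact k-d tree search and stores the result. The names Vector3iHash, cached_knn, and the exact_search callable are illustrative assumptions, not small_gicp's API.

// Standalone sketch (assumed API, not small_gicp's): voxel-keyed KNN cache on top of an exact search.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>
#include <Eigen/Core>
#include <tbb/concurrent_hash_map.h>

struct Vector3iHash {
  static size_t hash(const Eigen::Vector3i& c) {
    return (static_cast<size_t>(c[0]) * 73856093) ^ (static_cast<size_t>(c[1]) * 19349669) ^ (static_cast<size_t>(c[2]) * 83492791);
  }
  static bool equal(const Eigen::Vector3i& a, const Eigen::Vector3i& b) { return a == b; }
};

using KnnResult = std::pair<std::vector<size_t>, std::vector<double>>;  // (indices, squared distances)
using CacheTable = tbb::concurrent_hash_map<Eigen::Vector3i, KnnResult, Vector3iHash>;

// ExactSearch is any callable with the signature size_t(pt, k, k_indices, k_sq_dists).
template <typename ExactSearch>
size_t cached_knn(CacheTable& cache, double inv_leaf_size, const ExactSearch& exact_search,
                  const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) {
  // Discretize the query point into a voxel coordinate (the cache key).
  const Eigen::Vector3i coord = (pt.head<3>() * inv_leaf_size).array().floor().cast<int>().matrix();

  CacheTable::const_accessor ca;
  if (cache.find(ca, coord)) {
    // Cache hit: copy at most k stored neighbors (the cached entry may hold fewer than k).
    const size_t n = std::min(ca->second.first.size(), k);
    std::copy(ca->second.first.begin(), ca->second.first.begin() + n, k_indices);
    std::copy(ca->second.second.begin(), ca->second.second.begin() + n, k_sq_dists);
    return n;
  }

  // Cache miss: run the exact search and store the result for later queries in this voxel.
  const size_t n = exact_search(pt, k, k_indices, k_sq_dists);
  CacheTable::accessor a;
  if (cache.insert(a, coord)) {
    a->second.first.assign(k_indices, k_indices + n);
    a->second.second.assign(k_sq_dists, k_sq_dists + n);
  }
  return n;
}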
View File

@@ -35,9 +35,9 @@ public:
using Ptr = std::shared_ptr<FlatVoxelMap>;
using ConstPtr = std::shared_ptr<const FlatVoxelMap>;
- FlatVoxelMap(double leaf_size, const PointCloud& points) : inv_leaf_size(1.0 / leaf_size), seek_count(2), points(points) {
+ FlatVoxelMap(const std::shared_ptr<const PointCloud>& points, double leaf_size) : inv_leaf_size(1.0 / leaf_size), seek_count(2), points(points) {
set_offset_pattern(7);
- create_table(points);
+ create_table(*points);
}
~FlatVoxelMap() {}
@@ -101,7 +101,7 @@ public:
const auto index_begin = indices.data() + voxel->index_loc;
for (auto index_itr = index_begin; index_itr != index_begin + voxel->num_indices; index_itr++) {
- const double sq_dist = (traits::point(points, *index_itr) - pt).squaredNorm();
+ const double sq_dist = (traits::point(*points, *index_itr) - pt).squaredNorm();
if (queue.size() < k) {
queue.push(IndexDistance{*index_itr, sq_dist});
} else if (sq_dist < queue.top().distance) {
@@ -125,11 +125,16 @@ public:
private:
void create_table(const PointCloud& points) {
+ // Here, we assume that the data structure of std::atomic_int64_t is the same as that of std::int64_t.
+ // This is a dangerous assumption. If C++20 is available, should use std::atomic_ref<std::int64_t> instead.
+ static_assert(sizeof(std::atomic_int64_t) == sizeof(std::int64_t), "We assume that std::atomic_int64_t is the same as std::int64_t.");
const double min_sq_dist_in_cell = 0.05 * 0.05;
const int max_points_per_cell = 10;
const size_t buckets_size = traits::size(points);
- std::vector<std::int64_t> assignment_table(max_points_per_cell * buckets_size, -1);
+ std::vector<std::atomic_int64_t> assignment_table(max_points_per_cell * buckets_size);
+ memset(assignment_table.data(), -1, sizeof(std::atomic_int64_t) * max_points_per_cell * buckets_size);
std::vector<Eigen::Vector3i> coords(traits::size(points));
tbb::parallel_for(static_cast<size_t>(0), static_cast<size_t>(traits::size(points)), [&](size_t i) {
@@ -140,10 +145,9 @@ private:
const size_t hash = XORVector3iHash::hash(coord);
for (size_t bucket_index = hash; bucket_index < hash + seek_count; bucket_index++) {
auto slot_begin = assignment_table.data() + (bucket_index % buckets_size) * max_points_per_cell;
- std::atomic_ref<std::int64_t> slot_begin_a(*slot_begin);
std::int64_t expected = -1;
- if (slot_begin_a.compare_exchange_strong(expected, static_cast<std::int64_t>(i))) {
+ if (slot_begin->compare_exchange_strong(expected, static_cast<std::int64_t>(i))) {
// Succeeded to insert the point in the first slot
break;
}
@@ -160,8 +164,7 @@ private:
for (auto slot = slot_begin + 1; slot != slot_begin + max_points_per_cell; slot++) {
std::int64_t expected = -1;
- std::atomic_ref<std::int64_t> slot_a(*slot);
- if (slot_a.compare_exchange_strong(expected, static_cast<std::int64_t>(i))) {
+ if (slot->compare_exchange_strong(expected, static_cast<std::int64_t>(i))) {
// Succeeded to insert the point
break;
}
@@ -206,7 +209,7 @@ public:
const int seek_count;
std::vector<Eigen::Vector3i> offsets;
- const PointCloud& points;
+ std::shared_ptr<const PointCloud> points;
std::vector<FlatVoxelInfo> voxels;
std::vector<size_t> indices;
};
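Note: the switch from std::atomic_ref (C++20) to a std::vector<std::atomic_int64_t> initialized via memset keeps the same lock-free insertion scheme: each voxel bucket owns a fixed number of slots, empty slots hold -1, and a point index is inserted by whichever thread wins a compare-and-swap on a free slot. The following is a minimal, self-contained illustration of that pattern under those assumptions; the function names and the main() driver are invented for the example and are not small_gicp's code.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Try to insert point_index into the bucket's slot array; returns false if the bucket is full.
bool try_insert(std::vector<std::atomic_int64_t>& table, size_t bucket, int max_points_per_cell, std::int64_t point_index) {
  std::atomic_int64_t* slot_begin = table.data() + bucket * max_points_per_cell;
  for (auto slot = slot_begin; slot != slot_begin + max_points_per_cell; slot++) {
    std::int64_t expected = -1;
    // Only one thread can flip a slot from -1 to a valid index; losers see the winner's value in `expected` and move on.
    if (slot->compare_exchange_strong(expected, point_index)) {
      return true;
    }
  }
  return false;
}

int main() {
  const size_t buckets_size = 4;
  const int max_points_per_cell = 10;
  // std::atomic is not copyable, so the table is sized first and then filled.
  // memset to -1 mirrors the diff's initialization and relies on the same layout
  // assumption that the static_assert in create_table documents.
  std::vector<std::atomic_int64_t> assignment_table(buckets_size * max_points_per_cell);
  std::memset(assignment_table.data(), -1, sizeof(std::atomic_int64_t) * assignment_table.size());

  try_insert(assignment_table, 1, max_points_per_cell, 42);
  return 0;
}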

View File

@@ -7,6 +7,8 @@
#include <small_gicp/ann/kdtree.hpp>
#include <small_gicp/ann/kdtree_omp.hpp>
#include <small_gicp/ann/kdtree_tbb.hpp>
+ #include <small_gicp/ann/cached_kdtree.hpp>
+ #include <small_gicp/ann/flat_voxelmap.hpp>
#include <small_gicp/util/downsampling.hpp>
#include <small_gicp/points/point_cloud.hpp>
#include <small_gicp/pcl/pcl_point_traits.hpp>
@@ -75,26 +77,32 @@ public:
}
template <typename PointCloud, typename KdTree>
- void test_knn_(const PointCloud& points, const KdTree& tree) {
+ void test_knn_(const PointCloud& points, const KdTree& tree, bool strict = true) {
for (size_t i = 0; i < queries.size(); i++) {
// k-nearest neighbors search
const auto& query = queries[i];
std::vector<size_t> indices(k);
std::vector<double> sq_dists(k);
const size_t num_results = traits::knn_search(tree, query, k, indices.data(), sq_dists.data());
- EXPECT_EQ(num_results, k) << "num_neighbors must be k";
- for (size_t j = 0; j < k; j++) {
-   EXPECT_EQ(indices[j], k_indices[i][j]);
-   EXPECT_NEAR(sq_dists[j], k_sq_dists[i][j], 1e-3);
+ if (strict) {
+   EXPECT_EQ(num_results, k) << "num_neighbors must be k";
+   for (size_t j = 0; j < k; j++) {
+     EXPECT_EQ(indices[j], k_indices[i][j]);
+     EXPECT_NEAR(sq_dists[j], k_sq_dists[i][j], 1e-3);
+   }
+ }
// Nearest neighbor search
size_t k_index;
double k_sq_dist;
const size_t found = traits::nearest_neighbor_search(tree, query, &k_index, &k_sq_dist);
- EXPECT_EQ(found, 1) << "num_neighbors must be 1";
- EXPECT_EQ(k_index, k_indices[i][0]);
- EXPECT_NEAR(k_sq_dist, k_sq_dists[i][0], 1e-3);
+ if (strict) {
+   EXPECT_EQ(found, 1) << "num_neighbors must be 1";
+   EXPECT_EQ(k_index, k_indices[i][0]);
+   EXPECT_NEAR(k_sq_dist, k_sq_dists[i][0], 1e-3);
+ }
}
}
@@ -123,7 +131,7 @@ TEST_F(KdTreeTest, LoadCheck) {
}
}
- INSTANTIATE_TEST_SUITE_P(KdTreeTest, KdTreeTest, testing::Values("SMALL", "TBB", "OMP"), [](const auto& info) { return info.param; });
+ INSTANTIATE_TEST_SUITE_P(KdTreeTest, KdTreeTest, testing::Values("SMALL", "TBB", "OMP", "CACHED", "FLAT"), [](const auto& info) { return info.param; });
// Check if kdtree works correctly for empty points
TEST_P(KdTreeTest, EmptyTest) {
@@ -155,6 +163,28 @@ TEST_P(KdTreeTest, KnnTest) {
auto kdtree_pcl = std::make_shared<KdTreeOMP<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl, 4);
test_knn_(*points_pcl, *kdtree_pcl);
+ } else if (method == "CACHED") {
+   // This is approximated and no guarantee about the search result
+   const bool strict = false;
+   auto kdtree = std::make_shared<CachedKdTree<PointCloud>>(points, 0.025);
+   test_knn_(*points, *kdtree, strict);
+   auto kdtree_pcl = std::make_shared<CachedKdTree<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl, 0.025);
+   test_knn_(*points_pcl, *kdtree_pcl, strict);
+ } else if (method == "FLAT") {
+   // This is approximated and no guarantee about the search result
+   const bool strict = false;
+   auto kdtree = std::make_shared<FlatVoxelMap<PointCloud>>(points, 0.5);
+   kdtree->set_offset_pattern(1);
+   kdtree->set_offset_pattern(7);
+   kdtree->set_offset_pattern(27);
+   test_knn_(*points, *kdtree, strict);
+   auto kdtree_pcl = std::make_shared<FlatVoxelMap<pcl::PointCloud<pcl::PointXYZ>>>(points_pcl, 0.5);
+   kdtree_pcl->set_offset_pattern(27);
+   test_knn_(*points_pcl, *kdtree_pcl, strict);
} else {
throw std::runtime_error("Invalid method: " + method);
}
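Note: the FLAT branch exercises set_offset_pattern with 1, 7, and 27. Assuming these follow the usual voxel-neighborhood convention (1 = only the voxel containing the query, 7 = that voxel plus its six face neighbors, 27 = the full 3x3x3 block), the sketch below shows how such an offset table could be generated; the actual pattern in small_gicp may differ, and the function name is hypothetical. Larger patterns cost more lookups per query but reduce misses for points near voxel boundaries.

#include <stdexcept>
#include <vector>
#include <Eigen/Core>

// Hypothetical generator for the three common voxel-neighborhood patterns.
std::vector<Eigen::Vector3i> make_offsets(int pattern) {
  std::vector<Eigen::Vector3i> offsets;
  switch (pattern) {
    case 1:  // center voxel only
      offsets = {Eigen::Vector3i(0, 0, 0)};
      break;
    case 7:  // center + 6 face neighbors
      offsets = {
        Eigen::Vector3i(0, 0, 0),
        Eigen::Vector3i(1, 0, 0), Eigen::Vector3i(-1, 0, 0),
        Eigen::Vector3i(0, 1, 0), Eigen::Vector3i(0, -1, 0),
        Eigen::Vector3i(0, 0, 1), Eigen::Vector3i(0, 0, -1)};
      break;
    case 27:  // full 3x3x3 neighborhood
      for (int x = -1; x <= 1; x++) {
        for (int y = -1; y <= 1; y++) {
          for (int z = -1; z <= 1; z++) {
            offsets.emplace_back(x, y, z);
          }
        }
      }
      break;
    default:
      throw std::invalid_argument("pattern must be 1, 7, or 27");
  }
  return offsets;
}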

View File

@@ -26,7 +26,7 @@ bool identical(const std::vector<T>& arr1, const std::vector<T>& arr2) {
TEST(SortOMP, MergeSortTest) {
std::mt19937 mt;
- std::uniform_int_distribution<> size_dist(0, 1024);
+ std::uniform_int_distribution<> size_dist(0, 8192);
// int
for (int i = 0; i < 100; i++) {
@@ -68,7 +68,7 @@ TEST(SortOMP, MergeSortTest) {
TEST(SortOMP, QuickSortTest) {
std::mt19937 mt;
- std::uniform_int_distribution<> size_dist(0, 1024);
+ std::uniform_int_distribution<> size_dist(0, 8192);
// int
for (int i = 0; i < 100; i++) {
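Note: raising the maximum generated array size from 1024 to 8192 matters because task-parallel sorts typically fall back to a sequential sort below a fixed cutoff, so small arrays never reach the multi-threaded code path. The sketch below is a generic illustration of that structure, not small_gicp's merge_sort_omp; the cutoff value and function names are assumptions.

#include <algorithm>
#include <vector>
#include <omp.h>

// Recursive task-parallel merge sort with a sequential cutoff.
template <typename T>
void parallel_merge_sort(T* first, T* last, long cutoff = 1024) {
  const long n = last - first;
  if (n <= cutoff) {
    std::sort(first, last);  // small ranges are sorted sequentially and never spawn tasks
    return;
  }
  T* mid = first + n / 2;
#pragma omp task
  parallel_merge_sort(first, mid, cutoff);  // left half in a child task
  parallel_merge_sort(mid, last, cutoff);   // right half in the current task
#pragma omp taskwait
  std::inplace_merge(first, mid, last);     // merge once both halves are sorted
}

template <typename T>
void sort_parallel(std::vector<T>& data) {
#pragma omp parallel
#pragma omp single nowait
  parallel_merge_sort(data.data(), data.data() + data.size());
}

With sizes drawn up to 8192, a meaningful fraction of the generated arrays exceeds such a cutoff, so the parallel branch is actually covered by the test.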