k.koide 2024-04-01 13:56:34 +09:00
parent c998bbb8b2
commit cf5de1e6b6
6 changed files with 341 additions and 43 deletions

.gitignore

@@ -2,4 +2,7 @@
build/*
imgui.ini
dist/*
small_gicp.egg-info/*
scripts/results/*

CMakeLists.txt

@@ -25,6 +25,7 @@ option(BUILD_WITH_FAST_GICP "Build with fast_gicp (required for benchmark and te
option(BUILD_WITH_IRIDESCENCE "Build with Iridescence (required for benchmark)" OFF)
option(BUILD_WITH_MARCH_NATIVE "Build with -march=native" OFF)
option(ENABLE_COVERAGE "Enable coverage" OFF)
option(BUILD_PYTHON_BINDINGS "Build python bindings" OFF)
if(BUILD_WITH_MARCH_NATIVE)
add_compile_options(-march=native)

README.md

@@ -35,13 +35,22 @@ sudo make install
### Python
Coming soon.
```bash
cd small_gicp
python3 setup.py build
python3 setup.py install --user
# [Optional] Install stubs for autocomplete (If you know a better way, let me know...)
pip install pybind11-stubgen
cd ~/.local/lib/python3.10/site-packages
pybind11-stubgen -o . --ignore-invalid=all small_gicp
```
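After installation, you can quickly check that the module is importable (a minimal sketch; the listed functions are the entry points used in the examples below):

```python
# Quick check that the bindings are importable after installation.
import small_gicp

# These are the entry points used in the examples below.
assert hasattr(small_gicp, "align_points")
assert hasattr(small_gicp, "preprocess_points")
assert hasattr(small_gicp, "KdTree")
print("small_gicp imported successfully")
```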
## Usage (C++)
The following examples assume `using namespace small_gicp` is placed somewhere.
### Using helper library ([01_basic_registration.cpp](https://github.com/koide3/small_gicp/blob/master/src/example/01_basic_registration.cpp))
### Using helper library ([01_basic_registration.cpp](src/example/01_basic_registration.cpp))
The helper library (`registration_helper.hpp`) makes it easy to process point clouds represented as `std::vector<Eigen::Vector(3|4)(f|d)>`.
<details><summary>Expand</summary>
@@ -97,7 +106,7 @@ Eigen::Matrix<double, 6, 6> H = result.H; // Final Hessian matrix (6x6)
</details>
### Using with PCL interface ([02_basic_resigtration_pcl.cpp](https://github.com/koide3/small_gicp/blob/master/src/example/02_basic_resigtration_pcl.cpp))
### Using with PCL interface ([02_basic_resigtration_pcl.cpp](src/example/02_basic_resigtration_pcl.cpp))
The PCL interface allows using small_gicp as a drop-in replacement for `pcl::GeneralizedIterativeClosestPoint`. It is also possible to directly feed `pcl::PointCloud` to algorithms implemented in small_gicp.
@@ -168,7 +177,7 @@ auto result = registration.align(*target, *source, *target_tree, Eigen::Isometry
</details>
### Using `Registration` template ([03_registration_template.cpp](https://github.com/koide3/small_gicp/blob/master/src/example/03_registration_template.cpp))
### Using `Registration` template ([03_registration_template.cpp](src/example/03_registration_template.cpp))
If you want to finely control and customize the registration process, use the `small_gicp::Registration` template, which allows modifying the inner algorithms and parameters.
<details><summary>Expand</summary>
@@ -219,13 +228,89 @@ size_t num_inliers = result.num_inliers; // Number of inlier source points
Eigen::Matrix<double, 6, 6> H = result.H; // Final Hessian matrix (6x6)
```
See [03_registration_template.cpp](https://github.com/koide3/small_gicp/blob/master/src/example/03_registration_template.cpp) for a more detailed customization example.
See [03_registration_template.cpp](src/example/03_registration_template.cpp) for a more detailed customization example.
</details>
## Usage (Python)
Coming soon.
[basic_registration.py](src/example/basic_registration.py)
<details><summary>Expand</summary>
Example A : Perform registration with numpy arrays
```python
# Arguments
# - target_points : Nx4 or Nx3 numpy array of the target point cloud
# - source_points : Nx4 or Nx3 numpy array of the source point cloud
# Optional arguments
# - init_T_target_source : Initial guess of the transformation matrix (4x4 numpy array)
# - registration_type : Registration type ("ICP", "PLANE_ICP", "GICP", "VGICP")
# - voxel_resolution : Voxel resolution for VGICP
# - downsampling_resolution : Downsampling resolution
# - max_correspondence_distance : Maximum correspondence distance
# - num_threads : Number of threads
result = small_gicp.align_points(target_raw_numpy, source_raw_numpy, downsampling_resolution=0.25)
result.T_target_source # Estimated transformation (4x4 numpy array)
result.converged # If true, the optimization converged successfully
result.iterations # Number of iterations the optimization took
result.num_inliers # Number of inlier points
result.H # Final Hessian matrix (6x6 matrix)
result.b # Final information vector (6D vector)
result.e # Final error (float)
```
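As a quick sanity check of `result` (a minimal sketch, assuming a ground-truth transformation `gt_T_target_source` is available, e.g. loaded from the bundled `data/T_target_source.txt`; this mirrors the `verify_result` helper in [basic_registration.py](src/example/basic_registration.py)):

```python
import numpy
from scipy.spatial.transform import Rotation

assert result.converged

# Compare the estimate against the ground truth: the residual transform
# should be close to identity.
error = numpy.linalg.inv(result.T_target_source) @ gt_T_target_source
error_trans = numpy.linalg.norm(error[:3, 3])                # translation error [m]
error_rot = Rotation.from_matrix(error[:3, :3]).magnitude()  # rotation error [rad]
print('error_trans={:.4f}, error_rot={:.4f}'.format(error_trans, error_rot))
```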
Example B : Perform preprocessing and registration separately
```python
# Preprocess point clouds
# Arguments
# - points_numpy : Nx4 or Nx3 numpy array of the input point cloud
# Optional arguments
# - downsampling_resolution : Downsampling resolution
# - num_neighbors : Number of neighbors for normal and covariance estimation
# - num_threads : Number of threads
target, target_tree = small_gicp.preprocess_points(points_numpy=target_raw_numpy, downsampling_resolution=0.25)
source, source_tree = small_gicp.preprocess_points(points_numpy=source_raw_numpy, downsampling_resolution=0.25)
# Align point clouds
# Arguments
# - target : Target point cloud (small_gicp.PointCloud)
# - source : Source point cloud (small_gicp.PointCloud)
# - target_tree : KD-tree of the target point cloud (small_gicp.KdTree)
# Optional arguments
# - init_T_target_source : Initial guess of the transformation matrix (4x4 numpy array)
# - max_correspondence_distance : Maximum correspondence distance
# - num_threads : Number of threads
result = small_gicp.align(target, source, target_tree)
```
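Because the preprocessed cloud and its KdTree are ordinary Python objects, they can be reused across alignments. A minimal odometry-style sketch (assuming a hypothetical list `scans` of Nx4 numpy arrays; not part of the example script):

```python
import numpy
import small_gicp

# `scans` is assumed to be a list of Nx4 numpy arrays (consecutive scans).
T_world_lidar = numpy.eye(4)
target, target_tree = small_gicp.preprocess_points(points_numpy=scans[0], downsampling_resolution=0.25)

for scan in scans[1:]:
    source, source_tree = small_gicp.preprocess_points(points_numpy=scan, downsampling_resolution=0.25)
    result = small_gicp.align(target, source, target_tree)   # T_target_source of consecutive scans
    T_world_lidar = T_world_lidar @ result.T_target_source   # accumulate scan-to-scan motion
    target, target_tree = source, source_tree                # current scan becomes the next target
```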
Example C : Perform each of preprocessing steps one-by-one
```python
# Convert numpy arrays (Nx3 or Nx4) to small_gicp.PointCloud
target_raw = small_gicp.PointCloud(target_raw_numpy)
source_raw = small_gicp.PointCloud(source_raw_numpy)
# Downsampling
target = small_gicp.voxelgrid_sampling(target_raw, 0.25)
source = small_gicp.voxelgrid_sampling(source_raw, 0.25)
# KdTree construction
target_tree = small_gicp.KdTree(target)
source_tree = small_gicp.KdTree(source)
# Estimate covariances
small_gicp.estimate_covariances(target, target_tree)
small_gicp.estimate_covariances(source, source_tree)
# Align point clouds
result = small_gicp.align(target, source, target_tree)
```
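To actually use the estimate, the 4x4 matrix can be applied to the raw source points. A small sketch assuming `source_raw_numpy` is an Nx4 homogeneous array (w = 1), as returned by `PointCloud.points()`:

```python
# Transform the raw source points into the target frame using the estimate.
source_aligned = (result.T_target_source @ source_raw_numpy.T).T
```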
</details>
## Benchmark
@@ -250,7 +335,7 @@ Coming soon.
- Single-thread `small_gicp::GICP` is about **2.4x and 1.9x faster** than `pcl::GICP` and `fast_gicp::GICP`, respectively.
- `small_gicp::(GICP|VGICP)` shows better multi-thread scalability than `fast_gicp::(GICP|VGICP)`.
- `small_gicp::GICP` parallelized with [TBB flow graph](https://github.com/koide3/small_gicp/blob/master/src/odometry_benchmark_small_gicp_tbb_flow.cpp) shows excellent scalability in many-thread situations (**~128 threads**), but with latency degradation.
- `small_gicp::GICP` parallelized with [TBB flow graph](src/odometry_benchmark_small_gicp_tbb_flow.cpp) shows excellent scalability in many-thread situations (**~128 threads**), but with latency degradation.
![odometry_time](docs/assets/odometry_time.png)

setup.py (new executable file)

@@ -0,0 +1,140 @@
import os
import re
import subprocess
import sys
from pathlib import Path
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
def __init__(self, name: str, sourcedir: str = "") -> None:
super().__init__(name, sources=[])
self.sourcedir = os.fspath(Path(sourcedir).resolve())
class CMakeBuild(build_ext):
def build_extension(self, ext: CMakeExtension) -> None:
# Must be in this form due to bug in .resolve() only fixed in Python 3.10+
ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name)
extdir = ext_fullpath.parent.resolve()
# Using this requires trailing slash for auto-detection & inclusion of
# auxiliary "native" libs
debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
cfg = "Debug" if debug else "Release"
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
# Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
# EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
# from Python.
cmake_args = [
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}",
f"-DPYTHON_EXECUTABLE={sys.executable}",
f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
]
build_args = []
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSx on conda-forge)
if "CMAKE_ARGS" in os.environ:
cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
# In this example, we pass in the version to C++. You might not need to.
cmake_args += [f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}"]
if self.compiler.compiler_type != "msvc":
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator or cmake_generator == "Ninja":
try:
import ninja
ninja_executable_path = Path(ninja.BIN_DIR) / "ninja"
cmake_args += [
"-GNinja",
f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}",
]
except ImportError:
pass
else:
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
]
build_args += ["--config", cfg]
if sys.platform.startswith("darwin"):
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
if archs:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += [f"-j{self.parallel}"]
build_temp = Path(self.build_temp) / ext.name
if not build_temp.exists():
build_temp.mkdir(parents=True)
subprocess.run(
["cmake", ext.sourcedir, *cmake_args], cwd=build_temp, check=True
)
subprocess.run(
["cmake", "--build", ".", *build_args], cwd=build_temp, check=True
)
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
name="small_gicp",
version="0.0.1",
author="Kenji Koide",
author_email="k.koide@aist.go.jp",
description="Efficient and parallelized algorithms for fine point cloud registration",
long_description="",
ext_modules=[CMakeExtension("small_gicp")],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
extras_require={"test": ["pytest>=6.0"]},
python_requires=">=3.7",
)

src/example/basic_registration.py (Normal file → Executable file)

@@ -5,19 +5,9 @@ from scipy.spatial.transform import Rotation
import small_gicp
from pyridescence import *
# Verify the estimated transformation matrix (for testing)
def verify_result(T_target_source, gt_T_target_source):
error = numpy.linalg.inv(T_target_source) @ gt_T_target_source
error_trans = numpy.linalg.norm(error[:3, 3])
error_rot = Rotation.from_matrix(error[:3, :3]).magnitude()
if error_trans > 0.1 or error_rot > 0.1:
print('error_trans={:.4f}, error_rot={:.4f}'.format(error_trans, error_rot))
exit(1)
# Basic registration example with numpy arrays
def example1(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray, gt_T_target_source : numpy.ndarray):
def example_numpy1(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray):
# Example A : Perform registration with numpy arrays
# Arguments
# - target_points : Nx4 or Nx3 numpy array of the target point cloud
@@ -26,15 +16,17 @@ def example1(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray,
# - init_T_target_source : Initial guess of the transformation matrix (4x4 numpy array)
# - registration_type : Registration type ("ICP", "PLANE_ICP", "GICP", "VGICP")
# - voxel_resolution : Voxel resolution for VGICP
# - downsampling_resolution : Downsampling resolution
# - max_correspondence_distance : Maximum correspondence distance
# - max_iterations : Maximum number of iterations
result = small_gicp.align_points(target_raw_numpy, source_raw_numpy)
# Verify the estimated transformation matrix
verify_result(result.T_target_source, gt_T_target_source)
# - num_threads : Number of threads
result = small_gicp.align_points(target_raw_numpy, source_raw_numpy, downsampling_resolution=0.25)
return result.T_target_source
# Example to perform preprocessing and registration separately
def example_numpy2(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray):
# Example B : Perform preprocessing and registration separately
# Preprocess point clouds
# Arguments
# - points_numpy : Nx4 or Nx3 numpy array of the input point cloud
@@ -56,33 +48,110 @@ def example1(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray,
# - num_threads : Number of threads
result = small_gicp.align(target, source, target_tree)
# Verify the estimated transformation matrix
verify_result(result.T_target_source, gt_T_target_source)
return result.T_target_source
# Basic registration example with small_gicp.PointCloud
def example2(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray, gt_T_target_source : numpy.ndarray):
# Convert numpy arrays to small_gicp.PointCloud
def example_small1(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray):
# Convert numpy arrays (Nx3 or Nx4) to small_gicp.PointCloud
target_raw = small_gicp.PointCloud(target_raw_numpy)
source_raw = small_gicp.PointCloud(source_raw_numpy)
pass
# Preprocess point clouds
target, target_tree = small_gicp.preprocess_points(target_raw, downsampling_resolution=0.25)
source, source_tree = small_gicp.preprocess_points(source_raw, downsampling_resolution=0.25)
result = small_gicp.align(target, source, target_tree)
return result.T_target_source
# Example to perform each preprocessing and registration separately
def example_small2(target_raw_numpy : numpy.ndarray, source_raw_numpy : numpy.ndarray):
# Convert numpy arrays (Nx3 or Nx4) to small_gicp.PointCloud
target_raw = small_gicp.PointCloud(target_raw_numpy)
source_raw = small_gicp.PointCloud(source_raw_numpy)
# Downsampling
target = small_gicp.voxelgrid_sampling(target_raw, 0.25)
source = small_gicp.voxelgrid_sampling(source_raw, 0.25)
# KdTree construction
target_tree = small_gicp.KdTree(target)
source_tree = small_gicp.KdTree(source)
# Estimate covariances
small_gicp.estimate_covariances(target, target_tree)
small_gicp.estimate_covariances(source, source_tree)
# Align point clouds
result = small_gicp.align(target, source, target_tree)
return result.T_target_source
def main():
gt_T_target_source = numpy.loadtxt('../data/T_target_source.txt') # Load the ground truth transformation matrix
### Following functions are for testing ###
# Verify the estimated transformation matrix (for testing)
def verify_result(T_target_source, gt_T_target_source):
error = numpy.linalg.inv(T_target_source) @ gt_T_target_source
error_trans = numpy.linalg.norm(error[:3, 3])
error_rot = Rotation.from_matrix(error[:3, :3]).magnitude()
assert error_trans < 0.01
assert error_rot < 0.01
import pytest
# Load the point clouds and the ground truth transformation matrix
@pytest.fixture(scope='module', autouse=True)
def load_points():
gt_T_target_source = numpy.loadtxt('data/T_target_source.txt') # Load the ground truth transformation matrix
print('--- gt_T_target_source ---')
print(gt_T_target_source)
target_raw = small_gicp.read_ply('../data/target.ply') # Read the target point cloud (small_gicp.PointCloud)
source_raw = small_gicp.read_ply('../data/source.ply') # Read the source point cloud (small_gicp.PointCloud)
target_raw = small_gicp.read_ply('data/target.ply') # Read the target point cloud (small_gicp.PointCloud)
source_raw = small_gicp.read_ply('data/source.ply') # Read the source point cloud (small_gicp.PointCloud)
target_raw_numpy = target_raw.points() # Nx4 numpy array of the target point cloud
source_raw_numpy = source_raw.points() # Nx4 numpy array of the source point cloud
example1(target_raw_numpy, source_raw_numpy, gt_T_target_source)
example2(target_raw_numpy, source_raw_numpy, gt_T_target_source)
return
yield (gt_T_target_source, target_raw_numpy, source_raw_numpy)
# Check if the point clouds are loaded correctly
def test_load_points(load_points):
gt_T_target_source, target_raw_numpy, source_raw_numpy = load_points
assert gt_T_target_source.shape[0] == 4 and gt_T_target_source.shape[1] == 4
assert len(target_raw_numpy) > 0
assert len(source_raw_numpy) > 0
def test_example_numpy1(load_points):
gt_T_target_source, target_raw_numpy, source_raw_numpy = load_points
T_target_source = example_numpy1(target_raw_numpy, source_raw_numpy)
verify_result(T_target_source, gt_T_target_source)
def test_example_numpy2(load_points):
gt_T_target_source, target_raw_numpy, source_raw_numpy = load_points
T_target_source = example_numpy2(target_raw_numpy, source_raw_numpy)
verify_result(T_target_source, gt_T_target_source)
def test_example_small1(load_points):
gt_T_target_source, target_raw_numpy, source_raw_numpy = load_points
T_target_source = example_small1(target_raw_numpy, source_raw_numpy)
verify_result(T_target_source, gt_T_target_source)
def test_example_small2(load_points):
gt_T_target_source, target_raw_numpy, source_raw_numpy = load_points
T_target_source = example_small2(target_raw_numpy, source_raw_numpy)
verify_result(T_target_source, gt_T_target_source)
if __name__ == "__main__":
main()
target_raw = small_gicp.read_ply('data/target.ply') # Read the target point cloud (small_gicp.PointCloud)
source_raw = small_gicp.read_ply('data/source.ply') # Read the source point cloud (small_gicp.PointCloud)
target_raw_numpy = target_raw.points() # Nx4 numpy array of the target point cloud
source_raw_numpy = source_raw.points() # Nx4 numpy array of the source point cloud
T_target_source = example_numpy1(target_raw_numpy, source_raw_numpy)
T_target_source = example_numpy2(target_raw_numpy, source_raw_numpy)
T_target_source = example_small1(target_raw_numpy, source_raw_numpy)
T_target_source = example_small2(target_raw_numpy, source_raw_numpy)

(Python bindings source)

@@ -131,8 +131,8 @@ PYBIND11_MODULE(small_gicp, m) {
return voxelgrid_sampling_omp(points, resolution, num_threads);
},
"Voxelgrid sampling",
py::arg("points (Nx3) or (Nx4)"),
py::arg("resolution"),
py::arg("points"),
py::arg("downsampling_resolution"),
py::arg("num_threads") = 1);
// estimate_normals
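With the renamed keyword arguments above, the Python call reads naturally (a minimal sketch; `target_raw` is a `small_gicp.PointCloud` as constructed in Example C of the README, and `num_threads=2` is just an illustrative value):

```python
# Voxel grid downsampling via the renamed keyword arguments
# ("downsampling_resolution" replaces the old "resolution" name).
downsampled = small_gicp.voxelgrid_sampling(target_raw, downsampling_resolution=0.25, num_threads=2)
```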