mirror of https://github.com/koide3/small_gicp.git
Compare commits
28 Commits
| SHA1 |
|---|
| 1d8cce8add |
| 08bc50beff |
| 9befefb198 |
| 3466ea263a |
| df145cbb15 |
| db2f8e6646 |
| 2c5e9e6092 |
| ff63d5ef76 |
| 13e0a75cc1 |
| 8e665814a9 |
| 4e8e745800 |
| e669301de3 |
| f127ae7a51 |
| d6b0cb8e99 |
| fd29d8cf94 |
| 8cbec29610 |
| ad72715259 |
| 0a2617d035 |
| aec35190eb |
| 76b2f004fa |
| fccb6195a8 |
| 765da6f68d |
| 45b0b29af3 |
| e95cb19007 |
| f48faf0790 |
| be01905b04 |
| 0d3f5e6315 |
| 7e42a90d27 |
@@ -13,7 +13,7 @@ on:
 
 jobs:
   coverage:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
 
     steps:
     - uses: actions/checkout@v2
@@ -23,7 +23,7 @@ jobs:
     - name: Install Dependencies
      run: |
        sudo apt-get -y update
-        sudo apt-get install --no-install-recommends -y build-essential cmake python3-pip pybind11-dev libeigen3-dev libfmt-dev libtbb-dev libomp-dev libpcl-dev libgtest-dev lcov
+        sudo apt-get install --no-install-recommends -y build-essential cmake python3-pip pybind11-dev libeigen3-dev libfmt-dev libtbb-dev libomp-dev libpcl-dev libgtest-dev lcov ninja-build
        pip install -U setuptools pytest pytest-cov numpy scipy
 
    - name: Build (C++)
@@ -0,0 +1,38 @@
+name: gendoc
+
+on:
+  push:
+    tags:
+      - '*'
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  gendoc:
+    name: Generate documentation
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Configure Git Credentials
+        run: |
+          git config user.name github-actions[bot]
+          git config user.email 41898282+github-actions[bot]@users.noreply.github.com
+
+      - name: Install dependencies
+        run: sudo apt-get install -y git cmake build-essential doxygen graphviz python3-dev python3-pip pybind11-dev libeigen3-dev libomp-dev python3-numpy python3-sphinx python3-sphinx-rtd-theme
+
+      - name: Install python dependencies
+        run: python -m pip install mkdocs mkdocs-material
+
+      - name: Generate documentation
+        run: cd docs && make all
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: site
+          path: ./site/*
+
+      - name: Deploy documentation
+        run: cd docs && make deploy
@@ -0,0 +1,21 @@
+name: license
+
+on:
+  push:
+    branches: [ master ]
+    paths-ignore: '**.md'
+  pull_request:
+    branches: [ master ]
+    paths-ignore: '**.md'
+
+jobs:
+  license_check:
+    name: License check
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Check license
+        run: |
+          find include/small_gicp src/small_gicp -type f | xargs -I filename bash -c 'if ! grep -q SPDX-License-Identifier filename; then echo filename : license not found; exit 1; fi'
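For reference, the check above only passes when every file under include/small_gicp and src/small_gicp carries an SPDX tag. The header that later commits in this comparison add to the library headers (see the flat_container.hpp and knn_result.hpp hunks further down) is simply:

```cpp
// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
// SPDX-License-Identifier: MIT
#pragma once
```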
@@ -0,0 +1,29 @@
+name: Draft PDF
+on:
+  push:
+    branches: [ paper ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  paper:
+    runs-on: ubuntu-latest
+    name: Paper Draft
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Build draft PDF
+        uses: openjournals/openjournals-draft-action@master
+        with:
+          journal: joss
+          # This should be the path to the paper within your repo.
+          paper-path: docs/paper.md
+      - name: Upload
+        uses: actions/upload-artifact@v3
+        with:
+          name: paper
+          # This is the output path where Pandoc will write the compiled
+          # PDF. Note, this should be the same directory as the input
+          # paper.md
+          path: docs/paper.pdf
@@ -13,7 +13,7 @@ on:
 
 jobs:
   test:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
 
     steps:
     - uses: actions/checkout@v2
@@ -23,7 +23,7 @@ jobs:
     - name: Install Dependencies
      run: |
        sudo apt-get -y update
-        sudo apt-get install --no-install-recommends -y build-essential cmake python3-pip pybind11-dev libeigen3-dev libfmt-dev libtbb-dev libomp-dev libpcl-dev libgtest-dev
+        sudo apt-get install --no-install-recommends -y build-essential cmake python3-pip pybind11-dev libeigen3-dev libfmt-dev libtbb-dev libomp-dev libpcl-dev libgtest-dev ninja-build
        pip install -U setuptools pytest numpy scipy
 
    - name: Build
@@ -1,5 +1,11 @@
 # Benchmark
 
+## Dataset
+
+The following benchmark requires the KITTI odometry evaluation dataset. You can download the full dataset (80GB) from [the official dataset page](https://www.cvlibs.net/datasets/kitti/eval_odometry.php) or a part of the dataset (500 frames in 00 sequence, 622MB) from [google drive (KITTI00.tar.gz)](https://drive.google.com/file/d/1h9tARKvX6BwLfc_vfMfdxuP3bSbmgjmd/view?usp=sharing).
+
+Note that because the original KITTI dataset is distributed under the CC BY-NC-SA 3.0 license, the derived dataset (KITTI00.tar.gz) must not be used for commercial purposes.
+
 ## Build
 
 ```bash
@@ -123,5 +129,5 @@ small_vgicp : APE=5.956 +- 2.725 RPE(100)=1.315 +- 0.762 RPE(400)=6.8
 
 [Code](https://github.com/koide3/small_gicp/blob/pybench/src/benchmark/odometry_benchmark.py)
 
-Processing speed comparison between small_gicp and Open3D ([youtube]((https://youtu.be/LNESzGXPr4c?feature=shared))).
+Processing speed comparison between small_gicp and Open3D ([youtube](https://youtu.be/LNESzGXPr4c?feature=shared)).
 [](https://youtu.be/LNESzGXPr4c?feature=shared)
@@ -0,0 +1,32 @@
+cff-version: "1.2.0"
+authors:
+- family-names: Koide
+  given-names: Kenji
+  orcid: "https://orcid.org/0000-0001-5361-1428"
+contact:
+- family-names: Koide
+  given-names: Kenji
+  orcid: "https://orcid.org/0000-0001-5361-1428"
+doi: 10.5281/zenodo.13283012
+message: If you use this software, please cite our article in the
+  Journal of Open Source Software.
+preferred-citation:
+  authors:
+  - family-names: Koide
+    given-names: Kenji
+    orcid: "https://orcid.org/0000-0001-5361-1428"
+  date-published: 2024-08-10
+  doi: 10.21105/joss.06948
+  issn: 2475-9066
+  issue: 100
+  journal: Journal of Open Source Software
+  publisher:
+    name: Open Journals
+  start: 6948
+  title: "small_gicp: Efficient and parallel algorithms for point cloud
+    registration"
+  type: article
+  url: "https://joss.theoj.org/papers/10.21105/joss.06948"
+  volume: 9
+title: "small_gicp: Efficient and parallel algorithms for point cloud
+  registration"
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.16)
-project(small_gicp VERSION 0.1.0 LANGUAGES C CXX)
+project(small_gicp VERSION 1.0.0 LANGUAGES C CXX)
 
 set(CMAKE_CXX_STANDARD 17)
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake")
@@ -22,7 +22,7 @@ option(BUILD_PYTHON_BINDINGS "Build python bindings" OFF)
 option(ENABLE_COVERAGE "Enable coverage" OFF)
 
 # Dependency options
-set(BUILD_WITH_OPENMP CACHE STRING "Build with OpenMP" "auto")
+set(BUILD_WITH_OPENMP "auto" CACHE STRING "Build with OpenMP")
 option(BUILD_WITH_TBB "Build with TBB" OFF)
 option(BUILD_WITH_PCL "Build with PCL (required for benchmark and test only)" OFF)
 option(BUILD_WITH_FAST_GICP "Build with fast_gicp (required for benchmark and test only)" OFF)
@@ -97,7 +97,10 @@ endif()
 
 if(MSVC)
   add_compile_definitions(_USE_MATH_DEFINES)
-  # add_compile_options(/openmp:llvm)
+  add_compile_options(/bigobj)
+  if(BUILD_WITH_OPENMP)
+    add_compile_options(/openmp:llvm)
+  endif()
 endif()
 
 ##############
@@ -111,8 +114,8 @@ if(ENABLE_COVERAGE)
   find_program(GENHTML genhtml REQUIRED)
 
   add_custom_target(coverage
-    COMMAND ${LCOV} --directory . --capture --output-file coverage.info
-    COMMAND ${LCOV} --remove coverage.info -o coverage.info '/usr/*'
+    COMMAND ${LCOV} --directory . --capture --output-file coverage.info --ignore-errors mismatch
+    COMMAND ${LCOV} --remove coverage.info -o coverage.info '/usr/*' --ignore-errors mismatch
     COMMAND ${GENHTML} --demangle-cpp -o coverage coverage.info
     WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 endif()
@@ -187,7 +190,9 @@ if(BUILD_BENCHMARKS)
     src/benchmark/odometry_benchmark_small_gicp_tbb.cpp
     src/benchmark/odometry_benchmark_small_vgicp_tbb.cpp
     src/benchmark/odometry_benchmark_small_gicp_model_tbb.cpp
+    src/benchmark/odometry_benchmark_small_gicp_model_omp.cpp
     src/benchmark/odometry_benchmark_small_vgicp_model_tbb.cpp
+    src/benchmark/odometry_benchmark_small_vgicp_model_omp.cpp
     src/benchmark/odometry_benchmark_small_gicp_tbb_flow.cpp
     src/benchmark/odometry_benchmark.cpp
   )
@@ -245,6 +250,12 @@ if(BUILD_EXAMPLES)
       TBB::tbbmalloc
       PCL::PCL
     )
+    if(MSVC)
+      set_target_properties(${EXAMPLE_NAME}
+        PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY
+        "${CMAKE_SOURCE_DIR}"
+      )
+    endif()
   endforeach()
 endif()
 
@@ -271,6 +282,13 @@ if(BUILD_TESTS)
     )
 
     gtest_discover_tests(${TEST_NAME} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
+
+    if(MSVC)
+      set_target_properties(${TEST_NAME}
+        PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY
+        "${CMAKE_SOURCE_DIR}"
+      )
+    endif()
   endforeach()
 endif()
 
README.md (22 changed lines)
@@ -10,10 +10,10 @@
 
 Note that GPU-based implementations are NOT included in this package.
 
-If you find this package useful for your project, please consider leaving a comment [here](https://github.com/koide3/small_gicp/issues/3). It would help the author receive recognition in his organization and keep working on this project.
+If you find this package useful for your project, please consider leaving a comment [here](https://github.com/koide3/small_gicp/issues/3). It would help the author receive recognition in his organization and keep working on this project. Please also cite [the corresponding software paper](https://joss.theoj.org/papers/10.21105/joss.06948) if you use this software in an academic work.
 
 
-[](https://github.com/koide3/small_gicp/actions/workflows/build-linux.yml) [](https://github.com/koide3/small_gicp/actions/workflows/build-macos.yml) [](https://github.com/koide3/small_gicp/actions/workflows/build-windows.yml) [](https://github.com/koide3/small_gicp/actions/workflows/test.yml) [](https://codecov.io/gh/koide3/small_gicp)
+[](https://joss.theoj.org/papers/059b017c823ed9fd211fc91373cdc2cc) [](https://github.com/koide3/small_gicp/actions/workflows/build-linux.yml) [](https://github.com/koide3/small_gicp/actions/workflows/build-macos.yml) [](https://github.com/koide3/small_gicp/actions/workflows/build-windows.yml) [](https://github.com/koide3/small_gicp/actions/workflows/test.yml) [](https://codecov.io/gh/koide3/small_gicp)
 
 ## Requirements
 
|
||||||
|
|
||||||
## [Benchmark](BENCHMARK.md)
|
## [Benchmark](BENCHMARK.md)
|
||||||
|
|
||||||
Processing speed comparison between small_gicp and Open3D ([youtube]((https://youtu.be/LNESzGXPr4c?feature=shared))).
|
Processing speed comparison between small_gicp and Open3D ([youtube](https://youtu.be/LNESzGXPr4c?feature=shared)).
|
||||||
[](https://youtu.be/LNESzGXPr4c?feature=shared)
|
[](https://youtu.be/LNESzGXPr4c?feature=shared)
|
||||||
|
|
||||||
### Downsampling
|
### Downsampling
|
||||||
|
|
@ -465,6 +465,22 @@ This package is released under the MIT license.
|
||||||
|
|
||||||
If you find this package useful for your project, please consider leaving a comment [here](https://github.com/koide3/small_gicp/issues/3). It would help the author receive recognition in his organization and keep working on this project.
|
If you find this package useful for your project, please consider leaving a comment [here](https://github.com/koide3/small_gicp/issues/3). It would help the author receive recognition in his organization and keep working on this project.
|
||||||
|
|
||||||
|
Please also cite [the following article](https://joss.theoj.org/papers/10.21105/joss.06948) if you use this software in an academic work.
|
||||||
|
|
||||||
|
```
|
||||||
|
@article{small_gicp,
|
||||||
|
author = {Kenji Koide},
|
||||||
|
title = {{small\_gicp: Efficient and parallel algorithms for point cloud registration}},
|
||||||
|
journal = {Journal of Open Source Software},
|
||||||
|
month = aug,
|
||||||
|
number = {100},
|
||||||
|
pages = {6948},
|
||||||
|
volume = {9},
|
||||||
|
year = {2024},
|
||||||
|
doi = {10.21105/joss.06948}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Contact
|
## Contact
|
||||||
|
|
||||||
[Kenji Koide](https://staff.aist.go.jp/k.koide/), National Institute of Advanced Industrial Science and Technology (AIST)
|
[Kenji Koide](https://staff.aist.go.jp/k.koide/), National Institute of Advanced Industrial Science and Technology (AIST)
|
||||||
|
|
|
||||||
|
|
@@ -1,21 +0,0 @@
-find_path(Iridescence_INCLUDE_DIRS glk/drawable.hpp
-  HINTS /usr/local/include/iridescence /usr/include/iridescence
-  DOC "Iridescence include directories")
-
-find_library(Iridescence_LIBRARY NAMES iridescence
-  HINTS /usr/local/lib /usr/lib
-  DOC "Iridescence libraries")
-
-find_library(gl_imgui_LIBRARY NAMES gl_imgui
-  HINTS /usr/local/lib /usr/lib
-  DOC "Iridescence libraries")
-
-set(Iridescence_LIBRARIES ${Iridescence_LIBRARY} ${gl_imgui_LIBRARY})
-
-add_library(Iridescence::Iridescence INTERFACE IMPORTED GLOBAL)
-set_target_properties(Iridescence::Iridescence PROPERTIES
-  INTERFACE_INCLUDE_DIRECTORIES "${Iridescence_INCLUDE_DIRS}"
-  INTERFACE_LINK_LIBRARIES "${Iridescence_LIBRARIES}")
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(Iridescence DEFAULT_MSG Iridescence_INCLUDE_DIRS Iridescence_LIBRARIES)
@@ -7,7 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
 RUN apt-get update && apt-get install --no-install-recommends -y \
   && apt-get install --no-install-recommends -y \
     wget nano build-essential git cmake python3-dev python3-pip pybind11-dev \
-    libeigen3-dev libomp-dev
+    libeigen3-dev libomp-dev ninja-build
 
 RUN mkdir -p ~/.config/pip
 RUN echo "[global]\nbreak-system-packages=true" > ~/.config/pip/pip.conf
@@ -7,7 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
 RUN apt-get update && apt-get install --no-install-recommends -y \
   && apt-get install --no-install-recommends -y \
     wget nano build-essential git cmake python3-dev python3-pip pybind11-dev \
-    libeigen3-dev libomp-dev
+    libeigen3-dev libomp-dev ninja-build
 
 RUN apt-get update && apt-get install --no-install-recommends -y \
   && apt-get install --no-install-recommends -y \
@@ -25,4 +25,4 @@ all: cpp py mkdocs
 .PHONY: deploy
 deploy:
 	@echo "Deploying documentation..."
-	cd .. && mkdocs gh-deploy
+	cd .. && mkdocs gh-deploy --force
@@ -9,6 +9,7 @@
 project = 'small_gicp'
 copyright = '2024, k.koide'
 author = 'k.koide'
+version = '1.0.0'
 
 import os
 import sys
@@ -22,6 +23,7 @@ extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
 templates_path = ['_templates']
 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
 
+autoclass_content = 'both'
 
 # -- Options for HTML output -------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
@@ -0,0 +1,120 @@
+@inproceedings{Rusu,
+  author    = {Radu Bogdan Rusu and Steve Cousins},
+  title     = {{3D is here: Point Cloud Library (PCL)}},
+  booktitle = {{IEEE International Conference on Robotics and Automation (ICRA2011)}},
+  month     = {May},
+  year      = {2011},
+  address   = {Shanghai, China},
+  doi       = {10.1109/ICRA.2011.5980567}
+}
+
+@article{Zhou,
+  author  = {Qian-Yi Zhou and Jaesik Park and Vladlen Koltun},
+  title   = {{Open3D}: {A} Modern Library for {3D} Data Processing},
+  journal = {arXiv:1801.09847},
+  year    = {2018},
+  doi     = {10.48550/arXiv.1801.09847}
+}
+
+@article{Bai,
+  author  = {Bai, Chunge and Xiao, Tao and Chen, Yajie and Wang, Haoqian and Zhang, Fang and Gao, Xiang},
+  journal = {IEEE Robotics and Automation Letters},
+  title   = {Faster-LIO: Lightweight Tightly Coupled Lidar-Inertial Odometry Using Parallel Sparse Incremental Voxels},
+  year    = {2022},
+  volume  = {7},
+  number  = {2},
+  pages   = {4861--4868},
+  doi     = {10.1109/LRA.2022.3152830}
+}
+
+@inproceedings{Koide,
+  title     = {Voxelized GICP for Fast and Accurate 3D Point Cloud Registration},
+  author    = {Kenji Koide and Masashi Yokozuka and Shuji Oishi and Atsuhiko Banno},
+  booktitle = {IEEE International Conference on Robotics and Automation (ICRA2021)},
+  pages     = {11054--11059},
+  year      = {2021},
+  month     = {May},
+  doi       = {10.1109/ICRA48506.2021.9560835}
+}
+
+@article{Zhang,
+  title     = {Iterative point matching for registration of free-form curves and surfaces},
+  author    = {Zhang, Zhengyou},
+  journal   = {International Journal of Computer Vision},
+  volume    = {13},
+  number    = {2},
+  pages     = {119--152},
+  year      = {1994},
+  publisher = {Springer},
+  doi       = {10.1007/BF01427149}
+}
+
+@inproceedings{Segal,
+  title     = {Generalized-{ICP}},
+  author    = {Segal, Aleksandr and Haehnel, Dirk and Thrun, Sebastian},
+  booktitle = {Robotics: Science and Systems},
+  volume    = {2},
+  number    = {4},
+  pages     = {435},
+  year      = {2009},
+  doi       = {10.15607/rss.2009.v.021}
+}
+
+@inproceedings{Wang,
+  author    = {Wang, Han and Wang, Chen and Xie, Lihua},
+  booktitle = {IEEE International Conference on Robotics and Automation (ICRA2020)},
+  title     = {Intensity Scan Context: Coding Intensity and Geometry Relations for Loop Closure Detection},
+  year      = {2020},
+  pages     = {2095--2101},
+  doi       = {10.1109/ICRA40945.2020.9196764}
+}
+
+@inproceedings{Izadinia,
+  title     = {Scene recomposition by learning-based {ICP}},
+  author    = {Izadinia, Hamid and Seitz, Steven M},
+  booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR2020)},
+  pages     = {930--939},
+  year      = {2020},
+  doi       = {10.1109/cvpr42600.2020.00101}
+}
+
+@article{Kim,
+  author  = {Kim, Kyuwon and Im, Junhyuck and Jee, Gyuin},
+  journal = {IEEE Transactions on Intelligent Transportation Systems},
+  title   = {Tunnel Facility Based Vehicle Localization in Highway Tunnel Using 3D LIDAR},
+  year    = {2022},
+  volume  = {23},
+  number  = {10},
+  pages   = {17575--17583},
+  doi     = {10.1109/TITS.2022.3160235}
+}
+
+@article{Pomerleau,
+  author  = {Pomerleau, Fran{\c c}ois and Colas, Francis and Siegwart, Roland and Magnenat, St{\'e}phane},
+  title   = {{Comparing ICP Variants on Real-World Data Sets}},
+  journal = {Autonomous Robots},
+  year    = {2013},
+  volume  = {34},
+  number  = {3},
+  pages   = {133--148},
+  month   = feb,
+  doi     = {10.1007/s10514-013-9327-2}
+}
+
+@inproceedings{Serafin,
+  author    = {Serafin, Jacopo and Grisetti, Giorgio},
+  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS2015)},
+  title     = {NICP: Dense normal based point cloud registration},
+  year      = {2015},
+  pages     = {742--749},
+  doi       = {10.1109/IROS.2015.7353455}
+}
+
+@inproceedings{Datar,
+  author    = {Datar, Mayur and Immorlica, Nicole and Indyk, Piotr and Mirrokni, Vahab S.},
+  title     = {Locality-sensitive hashing scheme based on p-stable distributions},
+  year      = {2004},
+  doi       = {10.1145/997817.997857},
+  booktitle = {Twentieth Annual Symposium on Computational Geometry},
+  pages     = {253--262}
+}
@@ -0,0 +1,127 @@
+---
+title: 'small_gicp: Efficient and parallel algorithms for point cloud registration'
+tags:
+  - C++
+  - Python
+  - Point cloud registration
+authors:
+  - name: Kenji Koide
+    orcid: 0000-0001-5361-1428
+    corresponding: true
+    affiliation: "1"
+affiliations:
+  - name: National Institute of Advanced Industrial Science and Technology (AIST), Japan
+    index: 1
+date: 22 June 2024
+bibliography: paper.bib
+---
+
+\def\CC{{C\nolinebreak[4]\hspace{-.05em}\raisebox{.4ex}{\tiny\bf ++}}}
+
+# Summary
+
+Point cloud registration is a task of aligning two point clouds measured by 3D ranging
+sensors, for example, LiDARs and range cameras. Iterative point cloud registration,
+also known as fine registration or local registration, iteratively refines the transformation
+between point clouds starting from an initial guess.
+Each iteration involves a proximity-based point correspondence search and the minimization
+of the distance between corresponding points, continuing until convergence.
+Iterative closest point (ICP) and its variants,
+such as Generalized ICP, are representative iterative point cloud registration algorithms.
+They are widely used in applications like autonomous vehicle localization [@Kim], place recognition [@Wang],
+and object classification [@Izadinia]. Since these applications often require real-time or near-real-time
+processing, speed is a critical factor in point cloud registration routines.
+
+**small_gicp** provides efficient and parallel algorithms to create an extremely
+fast point cloud registration pipeline. It offers parallel implementations of
+downsampling, nearest neighbor search, local feature extraction, and registration
+to accelerate the entire process.
+small_gicp is implemented as a header-only \CC library with minimal dependencies
+to offer efficiency, portability, and customizability.
+
+# Statement of need
+
+There are several point cloud processing libraries, and PCL [@Rusu], Open3D
+[@Zhou], libpointmatcher [@Pomerleau] are commonly used in real-time applications
+owing to their performant implementations.
+Although they offer numerous functionalities, including those required for point cloud
+registration, they present several challenges for practical applications and scientific
+research.
+
+**Processing speed:**
+A typical point cloud registration pipeline includes processes such as downsampling,
+nearest neighbor search (e.g., KdTree construction), local feature estimation, and
+registration error minimization.
+PCL and Open3D support multi-threading only for parts of these processes (feature
+estimation and registration error minimization), with the remaining single-threaded
+parts often limiting the overall processing speed.
+Additionally, the multi-thread implementations in these libraries can have significant
+overheads, reducing scalability to many-core CPUs.
+These issues make it difficult to meet real-time processing requirements, especially on
+low-specification CPUs. It is also difficult to fully utilize the computational power
+of modern high-end CPUs.
+
+**Customizability:**
+Customizing the internal workings (e.g., replacing the registration cost function or
+changing the correspondence search method) of existing implementations is challenging
+due to hard-coded processes. This poses a significant hurdle for research and development,
+where testing new cost functions and search algorithms is essential.
+
+**small_gicp:**
+To address these issues and accelerate the development of point cloud registration-related systems,
+we designed small_gicp with the following features:
+
+- Fully parallelized point cloud preprocessing and registration algorithms with minimal overhead,
+  offering up to 2x speed gain in single-threaded scenarios and better scalability in multi-core
+  environments.
+
+- A modular and customizable framework using \CC templates, allowing easy customization of the
+  algorithm's internal workings while maintaining efficiency.
+
+- A header-only \CC library implementation for easy integration into user projects, with Python bindings
+  provided for collaborative use with other libraries (e.g., Open3D).
+
+# Functionalities
+
+**small_gicp** implements several preprocessing algorithms related to point cloud registration, and
+ICP variant algorithms (point-to-point ICP, point-to-plane ICP, and Generalized ICP based on
+distribution-to-distribution correspondence).
+
+- Downsampling
+  - Voxelgrid sampling
+  - Random sampling
+- Nearest neighbor search and point accumulation structures
+  - KdTree
+  - Linear iVox (supports incremental points insertion and LRU-cache-based voxel deletion) [@Bai]
+  - Gaussian voxelmap (supports incremental points insertion and LRU-cache-based voxel deletion) [@Koide]
+- Registration error functions
+  - Point-to-point ICP error [@Zhang]
+  - Point-to-plane ICP error
+  - Generalized ICP error [@Segal]
+  - Robust kernels
+- Least squares optimizers
+  - GaussNewton optimizer
+  - LevenbergMarquardt optimizer
+
+# Benchmark results
+
+- Single-threaded and multi-threaded (6 threads) `small_gicp::voxelgrid_sampling` are approximately 1.3x and 3.2x faster than `pcl::VoxelGrid`, respectively.
+- Multi-threaded construction of `small_gicp::KdTree` can be up to 6x faster than that of `nanoflann`.
+- Single-threaded `small_gicp::GICP` is about 2.4x faster than `pcl::GICP`, with the multi-threaded version showing better scalability.
+
+More details can be found at \url{https://github.com/koide3/small_gicp/blob/master/BENCHMARK.md}.
+
+# Future work
+
+The efficiency of nearest neighbor search significantly impacts the overall performance of point cloud registration.
+While small_gicp currently offers efficient and parallel implementations of KdTree and voxelmap, which are general and useful in many situations,
+there are other nearest neighbor search methods that can be more efficient under mild assumptions about the point cloud measurement model
+(e.g., projective search [@Serafin]).
+We plan to implement these alternative neighbor search algorithms to further enhance the speed of the point cloud registration process.
+The design of small_gicp, where nearest neighbor search and pose optimization are decoupled, facilitates the easy integration of these new search algorithms.
+
+# Acknowledgements
+
+This work was supported in part by JSPS KAKENHI Grant Number 23K16979.
+
+# References
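As an aside, the functionality list above maps onto a very small user-facing API. The following is a minimal C++ sketch based on the convenience interface advertised in the project README (small_gicp/registration/registration_helper.hpp); the synthetic data, parameter values, and exact field names are illustrative assumptions rather than part of this changeset.

```cpp
// Minimal sketch of the registration pipeline: downsampling, feature
// estimation, and GICP alignment through the helper API. Values are illustrative.
#include <cmath>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <small_gicp/registration/registration_helper.hpp>

int main() {
  // Synthetic target surface and a slightly translated copy as the source.
  std::vector<Eigen::Vector4f> target, source;
  for (int i = 0; i < 50; i++) {
    for (int j = 0; j < 50; j++) {
      const Eigen::Vector4f pt(0.1f * i, 0.1f * j, 0.2f * std::sin(0.3f * i), 1.0f);
      target.push_back(pt);
      source.push_back(pt + Eigen::Vector4f(0.05f, 0.0f, 0.0f, 0.0f));
    }
  }

  small_gicp::RegistrationSetting setting;
  setting.num_threads = 4;                    // parallel preprocessing and registration
  setting.downsampling_resolution = 0.25;     // voxelgrid sampling resolution
  setting.max_correspondence_distance = 1.0;  // correspondence rejection distance

  const Eigen::Isometry3d init = Eigen::Isometry3d::Identity();
  const small_gicp::RegistrationResult result = small_gicp::align(target, source, init, setting);

  // result.T_target_source holds the estimated source-to-target transformation.
  return result.converged ? 0 : 1;
}
```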
@@ -1,3 +1,5 @@
+// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
+// SPDX-License-Identifier: MIT
 #pragma once
 
 #include <queue>
@@ -5,12 +7,14 @@
 #include <Eigen/Geometry>
 #include <small_gicp/ann/traits.hpp>
 #include <small_gicp/points/traits.hpp>
+#include <small_gicp/ann/knn_result.hpp>
 
 namespace small_gicp {
 
 /// @brief Point container with a flat vector.
 /// @note IncrementalVoxelMap combined with FlatContainer is mostly the same as linear iVox.
 /// Bai et al., "Faster-LIO: Lightweight Tightly Coupled Lidar-Inertial Odometry Using Parallel Sparse Incremental Voxels", IEEE RA-L, 2022
+/// @note This container stores only up to max_num_points_in_cell points and avoids inserting points that are too close to existing points (min_sq_dist_in_cell).
 /// @tparam HasNormals If true, store normals.
 /// @tparam HasCovs If true, store covariances.
 template <bool HasNormals = false, bool HasCovs = false>
@@ -29,6 +33,7 @@ public:
   size_t size() const { return points.size(); }
 
   /// @brief Add a point to the container.
+  /// If there is a point that is too close to the input point, or there are too many points in the cell, the input point will be ignored.
   /// @param setting Setting
   /// @param transformed_pt Transformed point (== T * points[i])
   /// @param points Point cloud
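The insertion rule documented in the comment above (skip the point when the cell is already full or when it lies within min_sq_dist_in_cell of an existing point) can be sketched independently of the library types. This is an illustrative standalone function, not the actual FlatContainer::add() implementation:

```cpp
#include <vector>
#include <Eigen/Core>

// Standalone sketch of the documented insertion policy; names are illustrative.
bool try_add(std::vector<Eigen::Vector4d>& cell, const Eigen::Vector4d& pt,
             size_t max_num_points_in_cell, double min_sq_dist_in_cell) {
  if (cell.size() >= max_num_points_in_cell) {
    return false;  // cell is full: the input point is ignored
  }
  for (const auto& p : cell) {
    if ((p - pt).squaredNorm() < min_sq_dist_in_cell) {
      return false;  // too close to an existing point: ignored
    }
  }
  cell.push_back(pt);  // otherwise the point is kept
  return true;
}
```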
@ -65,53 +70,45 @@ public:
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t min_index = -1;
|
KnnResult<1> result(k_index, k_sq_dist);
|
||||||
double min_sq_dist = std::numeric_limits<double>::max();
|
knn_search(pt, result);
|
||||||
|
return result.num_found();
|
||||||
for (size_t i = 0; i < points.size(); i++) {
|
|
||||||
const double sq_dist = (points[i] - pt).squaredNorm();
|
|
||||||
if (sq_dist < min_sq_dist) {
|
|
||||||
min_index = i;
|
|
||||||
min_sq_dist = sq_dist;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
*k_index = min_index;
|
|
||||||
*k_sq_dist = min_sq_dist;
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// @brief Find k nearest neighbors.
|
/// @brief Find k nearest neighbors.
|
||||||
/// @param pt Query point
|
/// @param pt Query point
|
||||||
/// @param k Number of neighbors
|
/// @param k Number of neighbors
|
||||||
/// @param k_index Indices of nearest neighbors
|
/// @param k_index Indices of nearest neighbors
|
||||||
/// @param k_sq_dist Squared distances to nearest neighbors
|
/// @param k_sq_dist Squared distances to nearest neighbors (sorted in ascending order)
|
||||||
/// @return Number of found points
|
/// @return Number of found points
|
||||||
size_t knn_search(const Eigen::Vector4d& pt, int k, size_t* k_index, double* k_sq_dist) const {
|
size_t knn_search(const Eigen::Vector4d& pt, int k, size_t* k_indices, double* k_sq_dists) const {
|
||||||
if (points.empty()) {
|
if (points.empty()) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::priority_queue<std::pair<size_t, double>> queue;
|
KnnResult<-1> result(k_indices, k_sq_dists, k);
|
||||||
|
knn_search(pt, result);
|
||||||
|
return result.num_found();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// @brief Find k nearest neighbors.
|
||||||
|
/// @param pt Query point
|
||||||
|
/// @param k Number of neighbors
|
||||||
|
/// @param k_index Indices of nearest neighbors
|
||||||
|
/// @param k_sq_dist Squared distances to nearest neighbors (sorted in ascending order)
|
||||||
|
/// @return Number of found points
|
||||||
|
template <typename Result>
|
||||||
|
void knn_search(const Eigen::Vector4d& pt, Result& result) const {
|
||||||
|
if (points.empty()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
for (size_t i = 0; i < points.size(); i++) {
|
for (size_t i = 0; i < points.size(); i++) {
|
||||||
const double sq_dist = (points[i] - pt).squaredNorm();
|
const double sq_dist = (points[i] - pt).squaredNorm();
|
||||||
queue.push({i, sq_dist});
|
result.push(i, sq_dist);
|
||||||
if (queue.size() > k) {
|
|
||||||
queue.pop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const size_t n = queue.size();
|
|
||||||
while (!queue.empty()) {
|
|
||||||
k_index[queue.size() - 1] = queue.top().first;
|
|
||||||
k_sq_dist[queue.size() - 1] = queue.top().second;
|
|
||||||
queue.pop();
|
|
||||||
}
|
|
||||||
|
|
||||||
return n;
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
struct Empty {};
|
struct Empty {};
|
||||||
|
|
||||||
|
|
@ -149,6 +146,11 @@ struct Traits<FlatContainer<HasNormals, HasCovs>> {
|
||||||
static size_t knn_search(const FlatContainer<HasNormals, HasCovs>& container, const Eigen::Vector4d& pt, size_t k, size_t* k_index, double* k_sq_dist) {
|
static size_t knn_search(const FlatContainer<HasNormals, HasCovs>& container, const Eigen::Vector4d& pt, size_t k, size_t* k_index, double* k_sq_dist) {
|
||||||
return container.knn_search(pt, k, k_index, k_sq_dist);
|
return container.knn_search(pt, k, k_index, k_sq_dist);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename Result>
|
||||||
|
static void knn_search(const FlatContainer<HasNormals, HasCovs>& container, const Eigen::Vector4d& pt, Result& result) {
|
||||||
|
container.knn_search(pt, result);
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace traits
|
} // namespace traits
|
||||||
|
|
|
||||||
|
|
@ -79,6 +79,11 @@ struct Traits<GaussianVoxel> {
|
||||||
static size_t knn_search(const GaussianVoxel& voxel, const Eigen::Vector4d& pt, size_t k, size_t* k_index, double* k_sq_dist) {
|
static size_t knn_search(const GaussianVoxel& voxel, const Eigen::Vector4d& pt, size_t k, size_t* k_index, double* k_sq_dist) {
|
||||||
return nearest_neighbor_search(voxel, pt, k_index, k_sq_dist);
|
return nearest_neighbor_search(voxel, pt, k_index, k_sq_dist);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename Result>
|
||||||
|
static void knn_search(const GaussianVoxel& voxel, const Eigen::Vector4d& pt, Result& result) {
|
||||||
|
result.push(0, (voxel.mean - pt).squaredNorm());
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace traits
|
} // namespace traits
|
||||||
|
|
|
||||||
|
|
@ -9,6 +9,7 @@
|
||||||
#include <Eigen/Geometry>
|
#include <Eigen/Geometry>
|
||||||
|
|
||||||
#include <small_gicp/ann/traits.hpp>
|
#include <small_gicp/ann/traits.hpp>
|
||||||
|
#include <small_gicp/ann/knn_result.hpp>
|
||||||
#include <small_gicp/ann/flat_container.hpp>
|
#include <small_gicp/ann/flat_container.hpp>
|
||||||
#include <small_gicp/points/traits.hpp>
|
#include <small_gicp/points/traits.hpp>
|
||||||
#include <small_gicp/util/fast_floor.hpp>
|
#include <small_gicp/util/fast_floor.hpp>
|
||||||
|
|
@ -30,8 +31,10 @@ public:
|
||||||
};
|
};
|
||||||
|
|
||||||
/// @brief Incremental voxelmap.
|
/// @brief Incremental voxelmap.
|
||||||
/// This class supports incremental point cloud insertion and LRU-based voxel deletion.
|
/// This class supports incremental point cloud insertion and LRU-based voxel deletion that removes voxels that are not recently referenced.
|
||||||
/// @note This class can be used as a point cloud as well as a neighbor search structure.
|
/// @note This class can be used as a point cloud as well as a neighbor search structure.
|
||||||
|
/// @note This class can handle arbitrary number of voxels and arbitrary range of voxel coordinates (in 32-bit int range).
|
||||||
|
/// @note To use this as a source point cloud for registration, use `SequentialVoxelMapAccessor`.
|
||||||
template <typename VoxelContents>
|
template <typename VoxelContents>
|
||||||
struct IncrementalVoxelMap {
|
struct IncrementalVoxelMap {
|
||||||
public:
|
public:
|
||||||
|
|
@ -40,7 +43,7 @@ public:
|
||||||
|
|
||||||
/// @brief Constructor.
|
/// @brief Constructor.
|
||||||
/// @param leaf_size Voxel size
|
/// @param leaf_size Voxel size
|
||||||
explicit IncrementalVoxelMap(double leaf_size) : inv_leaf_size(1.0 / leaf_size), lru_horizon(100), lru_clear_cycle(10), lru_counter(0) {}
|
explicit IncrementalVoxelMap(double leaf_size) : inv_leaf_size(1.0 / leaf_size), lru_horizon(100), lru_clear_cycle(10), lru_counter(0) { set_search_offsets(1); }
|
||||||
|
|
||||||
/// @brief Number of points in the voxelmap.
|
/// @brief Number of points in the voxelmap.
|
||||||
size_t size() const { return flat_voxels.size(); }
|
size_t size() const { return flat_voxels.size(); }
|
||||||
|
|
@ -94,49 +97,54 @@ public:
|
||||||
/// @param sq_dist Squared distance to the nearest neighbor
|
/// @param sq_dist Squared distance to the nearest neighbor
|
||||||
/// @return Number of found points (0 or 1)
|
/// @return Number of found points (0 or 1)
|
||||||
size_t nearest_neighbor_search(const Eigen::Vector4d& pt, size_t* index, double* sq_dist) const {
|
size_t nearest_neighbor_search(const Eigen::Vector4d& pt, size_t* index, double* sq_dist) const {
|
||||||
const Eigen::Vector3i coord = fast_floor(pt * inv_leaf_size).template head<3>();
|
const Eigen::Vector3i center = fast_floor(pt * inv_leaf_size).template head<3>();
|
||||||
|
size_t voxel_index = 0;
|
||||||
|
const auto index_transform = [&](size_t i) { return calc_index(voxel_index, i); };
|
||||||
|
KnnResult<1, decltype(index_transform)> result(index, sq_dist, -1, index_transform);
|
||||||
|
|
||||||
|
for (const auto& offset : search_offsets) {
|
||||||
|
const Eigen::Vector3i coord = center + offset;
|
||||||
const auto found = voxels.find(coord);
|
const auto found = voxels.find(coord);
|
||||||
if (found == voxels.end()) {
|
if (found == voxels.end()) {
|
||||||
return 0;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
const size_t voxel_index = found->second;
|
voxel_index = found->second;
|
||||||
const auto& voxel = flat_voxels[voxel_index]->second;
|
const auto& voxel = flat_voxels[voxel_index]->second;
|
||||||
|
|
||||||
size_t point_index;
|
traits::Traits<VoxelContents>::knn_search(voxel, pt, result);
|
||||||
if (traits::nearest_neighbor_search(voxel, pt, &point_index, sq_dist) == 0) {
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
*index = calc_index(voxel_index, point_index);
|
return result.num_found();
|
||||||
return 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// @brief Find k nearest neighbors
|
/// @brief Find k nearest neighbors
|
||||||
/// @param pt Query point
|
/// @param pt Query point
|
||||||
/// @param k Number of neighbors
|
/// @param k Number of neighbors
|
||||||
/// @param k_indices Indices of nearest neighbors
|
/// @param k_indices Indices of nearest neighbors
|
||||||
/// @param k_sq_dists Squared distances to nearest neighbors
|
/// @param k_sq_dists Squared distances to nearest neighbors (sorted in ascending order)
|
||||||
/// @return Number of found points
|
/// @return Number of found points
|
||||||
size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const {
|
size_t knn_search(const Eigen::Vector4d& pt, size_t k, size_t* k_indices, double* k_sq_dists) const {
|
||||||
const Eigen::Vector3i coord = fast_floor(pt * inv_leaf_size).template head<3>();
|
const Eigen::Vector3i center = fast_floor(pt * inv_leaf_size).template head<3>();
|
||||||
|
|
||||||
|
size_t voxel_index = 0;
|
||||||
|
const auto index_transform = [&](size_t i) { return calc_index(voxel_index, i); };
|
||||||
|
KnnResult<-1, decltype(index_transform)> result(k_indices, k_sq_dists, k, index_transform);
|
||||||
|
|
||||||
|
for (const auto& offset : search_offsets) {
|
||||||
|
const Eigen::Vector3i coord = center + offset;
|
||||||
const auto found = voxels.find(coord);
|
const auto found = voxels.find(coord);
|
||||||
if (found == voxels.end()) {
|
if (found == voxels.end()) {
|
||||||
return 0;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
const size_t voxel_index = found->second;
|
voxel_index = found->second;
|
||||||
const auto& voxel = flat_voxels[voxel_index]->second;
|
const auto& voxel = flat_voxels[voxel_index]->second;
|
||||||
|
|
||||||
std::vector<size_t> point_indices(k);
|
traits::Traits<VoxelContents>::knn_search(voxel, pt, result);
|
||||||
std::vector<double> sq_dists(k);
|
|
||||||
const size_t num_found = traits::knn_search(voxel, pt, k, point_indices.data(), sq_dists.data());
|
|
||||||
|
|
||||||
for (size_t i = 0; i < num_found; i++) {
|
|
||||||
k_indices[i] = calc_index(voxel_index, point_indices[i]);
|
|
||||||
k_sq_dists[i] = sq_dists[i];
|
|
||||||
}
|
}
|
||||||
return num_found;
|
|
||||||
|
return result.num_found();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// @brief Calculate the global point index from the voxel index and the point index.
|
/// @brief Calculate the global point index from the voxel index and the point index.
|
||||||
|
|
@ -144,6 +152,39 @@ public:
|
||||||
inline size_t voxel_id(const size_t i) const { return i >> point_id_bits; } ///< Extract the point ID from a global index.
|
inline size_t voxel_id(const size_t i) const { return i >> point_id_bits; } ///< Extract the point ID from a global index.
|
||||||
inline size_t point_id(const size_t i) const { return i & ((1ull << point_id_bits) - 1); } ///< Extract the voxel ID from a global index.
|
inline size_t point_id(const size_t i) const { return i & ((1ull << point_id_bits) - 1); } ///< Extract the voxel ID from a global index.
|
||||||
|
|
||||||
|
/// @brief Set the pattern of the search offsets. (Must be 1, 7, or 27)
|
||||||
|
/// @note 1: center only, 7: center + 6 adjacent neighbors (+- 1X/1Y/1Z), 27: center + 26 neighbors (3 x 3 x 3 cube)
|
||||||
|
void set_search_offsets(int num_offsets) {
|
||||||
|
switch (num_offsets) {
|
||||||
|
default:
|
||||||
|
std::cerr << "warning: unsupported search_offsets=" << num_offsets << " (supported values: 1, 7, 27)" << std::endl;
|
||||||
|
std::cerr << " : using default search_offsets=1" << std::endl;
|
||||||
|
[[fallthrough]];
|
||||||
|
case 1:
|
||||||
|
search_offsets = {Eigen::Vector3i(0, 0, 0)};
|
||||||
|
break;
|
||||||
|
case 7:
|
||||||
|
search_offsets = {
|
||||||
|
Eigen::Vector3i(0, 0, 0),
|
||||||
|
Eigen::Vector3i(1, 0, 0),
|
||||||
|
Eigen::Vector3i(0, 1, 0),
|
||||||
|
Eigen::Vector3i(0, 0, 1),
|
||||||
|
Eigen::Vector3i(-1, 0, 0),
|
||||||
|
Eigen::Vector3i(0, -1, 0),
|
||||||
|
Eigen::Vector3i(0, 0, -1)};
|
||||||
|
break;
|
||||||
|
case 27:
|
||||||
|
for (int i = -1; i <= 1; i++) {
|
||||||
|
for (int j = -1; j <= 1; j++) {
|
||||||
|
for (int k = -1; k <= 1; k++) {
|
||||||
|
search_offsets.emplace_back(i, j, k);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static_assert(sizeof(size_t) == 8, "size_t must be 64-bit");
|
static_assert(sizeof(size_t) == 8, "size_t must be 64-bit");
|
||||||
static constexpr int point_id_bits = 32; ///< Use the first 32 bits for point id
|
static constexpr int point_id_bits = 32; ///< Use the first 32 bits for point id
|
||||||
|
|
@ -154,6 +195,8 @@ public:
|
||||||
size_t lru_clear_cycle; ///< LRU clear cycle. Voxel deletion is performed every lru_clear_cycle steps.
|
size_t lru_clear_cycle; ///< LRU clear cycle. Voxel deletion is performed every lru_clear_cycle steps.
|
||||||
size_t lru_counter; ///< LRU counter. Incremented every step.
|
size_t lru_counter; ///< LRU counter. Incremented every step.
|
||||||
|
|
||||||
|
std::vector<Eigen::Vector3i> search_offsets; ///< Voxel search offsets.
|
||||||
|
|
||||||
typename VoxelContents::Setting voxel_setting; ///< Voxel setting.
|
typename VoxelContents::Setting voxel_setting; ///< Voxel setting.
|
||||||
std::vector<std::shared_ptr<std::pair<VoxelInfo, VoxelContents>>> flat_voxels; ///< Voxel contents.
|
std::vector<std::shared_ptr<std::pair<VoxelInfo, VoxelContents>>> flat_voxels; ///< Voxel contents.
|
||||||
std::unordered_map<Eigen::Vector3i, size_t, XORVector3iHash> voxels; ///< Voxel index map.
|
std::unordered_map<Eigen::Vector3i, size_t, XORVector3iHash> voxels; ///< Voxel index map.
|
||||||
|
|
|
||||||
|
|
@ -154,8 +154,8 @@ public:
|
||||||
|
|
||||||
/// @brief Find the nearest neighbor.
|
/// @brief Find the nearest neighbor.
|
||||||
/// @param query Query point
|
/// @param query Query point
|
||||||
/// @param k_indices Index of the nearest neighbor
|
/// @param k_indices Index of the nearest neighbor (uninitialized if not found)
|
||||||
/// @param k_sq_dists Squared distance to the nearest neighbor
|
/// @param k_sq_dists Squared distance to the nearest neighbor (uninitialized if not found)
|
||||||
/// @param setting KNN search setting
|
/// @param setting KNN search setting
|
||||||
/// @return Number of found neighbors (0 or 1)
|
/// @return Number of found neighbors (0 or 1)
|
||||||
size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
||||||
|
|
@ -166,7 +166,7 @@ public:
|
||||||
/// @param query Query point
|
/// @param query Query point
|
||||||
/// @param k Number of neighbors
|
/// @param k Number of neighbors
|
||||||
/// @param k_indices Indices of neighbors
|
/// @param k_indices Indices of neighbors
|
||||||
/// @param k_sq_dists Squared distances to neighbors
|
/// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
|
||||||
/// @param setting KNN search setting
|
/// @param setting KNN search setting
|
||||||
/// @return Number of found neighbors
|
/// @return Number of found neighbors
|
||||||
size_t knn_search(const Eigen::Vector4d& query, int k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
size_t knn_search(const Eigen::Vector4d& query, int k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
||||||
|
|
@ -178,7 +178,7 @@ public:
|
||||||
/// @brief Find k-nearest neighbors. This method uses fixed and static memory allocation. Might be faster for small k.
|
/// @brief Find k-nearest neighbors. This method uses fixed and static memory allocation. Might be faster for small k.
|
||||||
/// @param query Query point
|
/// @param query Query point
|
||||||
/// @param k_indices Indices of neighbors
|
/// @param k_indices Indices of neighbors
|
||||||
/// @param k_sq_dists Squared distances to neighbors
|
/// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
|
||||||
/// @param setting KNN search setting
|
/// @param setting KNN search setting
|
||||||
/// @return Number of found neighbors
|
/// @return Number of found neighbors
|
||||||
template <int N>
|
template <int N>
|
||||||
|
|
@ -255,7 +255,7 @@ public:
|
||||||
/// @param query Query point
|
/// @param query Query point
|
||||||
/// @param k Number of neighbors
|
/// @param k Number of neighbors
|
||||||
/// @param k_indices Indices of neighbors
|
/// @param k_indices Indices of neighbors
|
||||||
/// @param k_sq_dists Squared distances to neighbors
|
/// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
|
||||||
/// @param setting KNN search setting
|
/// @param setting KNN search setting
|
||||||
/// @return Number of found neighbors
|
/// @return Number of found neighbors
|
||||||
size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
||||||
|
|
@ -266,7 +266,7 @@ public:
|
||||||
/// @param query Query point
|
/// @param query Query point
|
||||||
/// @param k Number of neighbors
|
/// @param k Number of neighbors
|
||||||
/// @param k_indices Indices of neighbors
|
/// @param k_indices Indices of neighbors
|
||||||
/// @param k_sq_dists Squared distances to neighbors
|
/// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
|
||||||
/// @param setting KNN search setting
|
/// @param setting KNN search setting
|
||||||
/// @return Number of found neighbors
|
/// @return Number of found neighbors
|
||||||
size_t knn_search(const Eigen::Vector4d& query, size_t k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
size_t knn_search(const Eigen::Vector4d& query, size_t k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
|
||||||
|
|
|
||||||
|
|
@@ -1,3 +1,5 @@
+// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
+// SPDX-License-Identifier: MIT
 #pragma once

 #include <array>

@@ -20,9 +22,14 @@ public:
   double epsilon = 0.0;  ///< Early termination threshold
 };

+/// @brief Identity transform (alternative to std::identity in C++20).
+struct identity_transform {
+  size_t operator()(size_t i) const { return i; }
+};
+
 /// @brief K-nearest neighbor search result container.
 /// @tparam N Number of neighbors to search. If N == -1, the number of neighbors is dynamically determined.
-template <int N>
+template <int N, typename IndexTransform = identity_transform>
 struct KnnResult {
 public:
   static constexpr size_t INVALID = std::numeric_limits<size_t>::max();

@@ -31,7 +38,12 @@ public:
   /// @param indices Buffer to store indices (must be larger than k=max(N, num_neighbors))
   /// @param distances Buffer to store distances (must be larger than k=max(N, num_neighbors))
   /// @param num_neighbors Number of neighbors to search (must be -1 for static case N > 0)
-  explicit KnnResult(size_t* indices, double* distances, int num_neighbors = -1) : num_neighbors(num_neighbors), indices(indices), distances(distances) {
+  explicit KnnResult(size_t* indices, double* distances, int num_neighbors = -1, const IndexTransform& index_transform = identity_transform())
+  : index_transform(index_transform),
+    capacity(num_neighbors),
+    num_found_neighbors(0),
+    indices(indices),
+    distances(distances) {
     if constexpr (N > 0) {
       if (num_neighbors >= 0) {
         std::cerr << "warning: Specifying dynamic num_neighbors=" << num_neighbors << " for a static KNN result container (N=" << N << ")" << std::endl;

@@ -53,41 +65,44 @@ public:
     if constexpr (N > 0) {
       return N;
     } else {
-      return num_neighbors;
+      return capacity;
     }
   }

   /// @brief Number of found neighbors.
-  size_t num_found() const { return std::distance(indices, std::find(indices, indices + buffer_size(), INVALID)); }
+  size_t num_found() const { return num_found_neighbors; }

   /// @brief Worst distance in the result.
   double worst_distance() const { return distances[buffer_size() - 1]; }

   /// @brief Push a pair of point index and distance to the result.
+  /// @note The result is sorted by distance in ascending order.
   void push(size_t index, double distance) {
     if (distance >= worst_distance()) {
       return;
     }

     if constexpr (N == 1) {
-      indices[0] = index;
+      indices[0] = index_transform(index);
       distances[0] = distance;
     } else {
-      for (int i = buffer_size() - 1; i >= 0; i--) {
-        if (i == 0 || distance >= distances[i - 1]) {
-          indices[i] = index;
-          distances[i] = distance;
-          break;
+      int insert_loc = std::min<int>(num_found_neighbors, buffer_size() - 1);
+      for (; insert_loc > 0 && distance < distances[insert_loc - 1]; insert_loc--) {
+        indices[insert_loc] = indices[insert_loc - 1];
+        distances[insert_loc] = distances[insert_loc - 1];
       }

-        indices[i] = indices[i - 1];
-        distances[i] = distances[i - 1];
-      }
+      indices[insert_loc] = index_transform(index);
+      distances[insert_loc] = distance;
     }

+    num_found_neighbors = std::min<int>(num_found_neighbors + 1, buffer_size());
   }

 public:
-  const int num_neighbors;  ///< Maximum number of neighbors to search
+  const IndexTransform index_transform;  ///< Point index transformation (e.g., local point index to global point/voxel index)
+  const int capacity;                    ///< Maximum number of neighbors to search
+  int num_found_neighbors;               ///< Number of found neighbors
   size_t* indices;                       ///< Indices of neighbors
   double* distances;                     ///< Distances to neighbors
 };
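Editor's note: a minimal sketch of how the reworked KnnResult is driven. The brute-force scan below is illustrative only and not part of the library; the KnnResult constructor, push(), and num_found() calls and the small_gicp/ann/knn_result.hpp include path are taken from the hunks in this compare view, and the output buffers are assumed to hold at least k elements as the documentation above requires.

    // Collect the k nearest points of a query with the dynamic-size result container.
    #include <vector>
    #include <Eigen/Core>
    #include <small_gicp/ann/knn_result.hpp>

    size_t brute_force_knn(const std::vector<Eigen::Vector4d>& points, const Eigen::Vector4d& query, int k, size_t* k_indices, double* k_sq_dists) {
      small_gicp::KnnResult<-1> result(k_indices, k_sq_dists, k);  // dynamic k, default identity_transform
      for (size_t i = 0; i < points.size(); i++) {
        result.push(i, (points[i] - query).squaredNorm());  // kept only if it beats the current worst distance
      }
      return result.num_found();  // neighbors come out sorted by distance in ascending order
    }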
@@ -14,6 +14,7 @@ struct ProjectionSetting {
 };

 /// @brief Conventional axis-aligned projection (i.e., selecting any of XYZ axes with the largest variance).
+/// @note Up to max_scan_count samples are used to estimate the variance.
 struct AxisAlignedProjection {
 public:
   /// @brief Project the point to the selected axis.

@@ -53,6 +54,7 @@ public:
 };

 /// @brief Normal projection (i.e., selecting the 3D direction with the largest variance of the points).
+/// @note Up to max_scan_count samples are used to estimate the variance along the axis.
 struct NormalProjection {
 public:
   /// @brief Project the point to the normal direction.
@@ -0,0 +1,219 @@
+// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
+// SPDX-License-Identifier: MIT
+#pragma once
+
+#include <cmath>
+#include <Eigen/Core>
+#include <small_gicp/points/traits.hpp>
+#include <small_gicp/ann/knn_result.hpp>
+
+namespace small_gicp {
+
+/// @brief Equirectangular projection.
+struct EquirectangularProjection {
+public:
+  /// @brief Project the point into the normalized image coordinates. (u, v) in ([0, 1], [0, 1])
+  Eigen::Vector2d operator()(const Eigen::Vector3d& pt_3d) const {
+    if (pt_3d.squaredNorm() < 1e-3) {
+      return Eigen::Vector2d(0.5, 0.5);
+    }
+
+    const Eigen::Vector3d bearing = pt_3d.normalized();
+    const double lat = -std::asin(bearing[1]);
+    const double lon = std::atan2(bearing[0], bearing[2]);
+
+    return Eigen::Vector2d(lon / (2.0 * M_PI) + 0.5, lat / M_PI + 0.5);
+  };
+};
+
+/// @brief Border clamp mode. Points out of the border are discarded.
+struct BorderClamp {
+public:
+  int operator()(int x, int width) const { return x; }
+};
+
+/// @brief Border repeat mode. Points out of the border are wrapped around.
+struct BorderRepeat {
+public:
+  int operator()(int x, int width) const { return x < 0 ? x + width : (x >= width ? x - width : x); }
+};
+
+/// @brief "Unsafe" projective search. This class does not hold the ownership of the target point cloud.
+template <typename PointCloud, typename Projection = EquirectangularProjection, typename BorderModeH = BorderRepeat, typename BorderModeV = BorderClamp>
+struct UnsafeProjectiveSearch {
+public:
+  /// @brief Constructor.
+  /// @param width  Index map width
+  /// @param height Index map height
+  /// @param points Target point cloud
+  UnsafeProjectiveSearch(int width, int height, const PointCloud& points) : points(points), index_map(height, width), search_window_h(10), search_window_v(5) {
+    index_map.setConstant(invalid_index);
+
+    Projection project;
+    for (size_t i = 0; i < traits::size(points); ++i) {
+      const Eigen::Vector4d pt = traits::point(points, i);
+      const Eigen::Vector2d uv = project(pt.head<3>());
+      const int u = uv[0] * index_map.cols();
+      const int v = uv[1] * index_map.rows();
+
+      if (u < 0 || u >= index_map.cols() || v < 0 || v >= index_map.rows()) {
+        continue;
+      }
+      index_map(v, u) = i;
+    }
+  }
+
+  /// @brief Find the nearest neighbor.
+  /// @param query      Query point
+  /// @param k_indices  Index of the nearest neighbor (uninitialized if not found)
+  /// @param k_sq_dists Squared distance to the nearest neighbor (uninitialized if not found)
+  /// @param setting    KNN search setting
+  /// @return Number of found neighbors (0 or 1)
+  size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
+    return knn_search<1>(query, k_indices, k_sq_dists, setting);
+  }
+
+  /// @brief Find k-nearest neighbors. This method uses dynamic memory allocation.
+  /// @param query      Query point
+  /// @param k          Number of neighbors
+  /// @param k_indices  Indices of neighbors
+  /// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
+  /// @param setting    KNN search setting
+  /// @return Number of found neighbors
+  size_t knn_search(const Eigen::Vector4d& query, int k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
+    KnnResult<-1> result(k_indices, k_sq_dists, k);
+    knn_search(query, result, setting);
+    return result.num_found();
+  }
+
+  /// @brief Find k-nearest neighbors. This method uses fixed and static memory allocation. Might be faster for small k.
+  /// @param query      Query point
+  /// @param k_indices  Indices of neighbors
+  /// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
+  /// @param setting    KNN search setting
+  /// @return Number of found neighbors
+  template <int N>
+  size_t knn_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
+    KnnResult<N> result(k_indices, k_sq_dists);
+    knn_search(query, result, setting);
+    return result.num_found();
+  }
+
+private:
+  template <typename Result>
+  void knn_search(const Eigen::Vector4d& query, Result& result, const KnnSetting& setting) const {
+    BorderModeH border_h;
+    BorderModeV border_v;
+
+    Projection project;
+    const Eigen::Vector2d uv = project(query.head<3>());
+    const int u = uv[0] * index_map.cols();
+    const int v = uv[1] * index_map.rows();
+
+    for (int du = -search_window_h; du <= search_window_h; du++) {
+      const int u_clamped = border_h(u + du, index_map.cols());
+      if (u_clamped < 0 || u_clamped >= index_map.cols()) {
+        continue;
+      }
+
+      for (int dv = -search_window_v; dv <= search_window_v; dv++) {
+        const int v_clamped = border_v(v + dv, index_map.rows());
+        if (v_clamped < 0 || v_clamped >= index_map.rows()) {
+          continue;
+        }
+
+        const auto index = index_map(v_clamped, u_clamped);
+        if (index == invalid_index) {
+          continue;
+        }
+
+        const double sq_dist = (traits::point(points, index) - query).squaredNorm();
+        result.push(index, sq_dist);
+
+        if (setting.fulfilled(result)) {
+          return;
+        }
+      }
+    }
+  }
+
+public:
+  static constexpr std::uint32_t invalid_index = std::numeric_limits<std::uint32_t>::max();
+
+  const PointCloud& points;
+  Eigen::Matrix<std::uint32_t, -1, -1> index_map;
+
+  int search_window_h;
+  int search_window_v;
+};
+
+/// @brief "Safe" projective search. This class holds the ownership of the target point cloud.
+template <typename PointCloud, typename Projection = EquirectangularProjection, typename BorderModeH = BorderRepeat, typename BorderModeV = BorderClamp>
+struct ProjectiveSearch {
+public:
+  using Ptr = std::shared_ptr<ProjectiveSearch<PointCloud, Projection>>;
+  using ConstPtr = std::shared_ptr<const ProjectiveSearch<PointCloud, Projection>>;
+
+  explicit ProjectiveSearch(int width, int height, std::shared_ptr<const PointCloud> points) : points(points), search(width, height, *points) {}
+
+  /// @brief Find k-nearest neighbors. This method uses dynamic memory allocation.
+  /// @param query      Query point
+  /// @param k          Number of neighbors
+  /// @param k_indices  Indices of neighbors
+  /// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
+  /// @param setting    KNN search setting
+  /// @return Number of found neighbors
+  size_t nearest_neighbor_search(const Eigen::Vector4d& query, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
+    return search.nearest_neighbor_search(query, k_indices, k_sq_dists, setting);
+  }
+
+  /// @brief Find k-nearest neighbors. This method uses dynamic memory allocation.
+  /// @param query      Query point
+  /// @param k          Number of neighbors
+  /// @param k_indices  Indices of neighbors
+  /// @param k_sq_dists Squared distances to neighbors (sorted in ascending order)
+  /// @param setting    KNN search setting
+  /// @return Number of found neighbors
+  size_t knn_search(const Eigen::Vector4d& query, size_t k, size_t* k_indices, double* k_sq_dists, const KnnSetting& setting = KnnSetting()) const {
+    return search.knn_search(query, k, k_indices, k_sq_dists, setting);
+  }
+
+public:
+  const std::shared_ptr<const PointCloud> points;  ///< Points
+  const UnsafeProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV> search;  ///< Search
+};
+
+namespace traits {
+
+template <typename PointCloud, typename Projection, typename BorderModeH, typename BorderModeV>
+struct Traits<UnsafeProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV>> {
+  static size_t nearest_neighbor_search(
+    const UnsafeProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV>& tree,
+    const Eigen::Vector4d& point,
+    size_t* k_indices,
+    double* k_sq_dists) {
+    return tree.nearest_neighbor_search(point, k_indices, k_sq_dists);
+  }
+
+  static size_t
+  knn_search(const UnsafeProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
+    return tree.knn_search(point, k, k_indices, k_sq_dists);
+  }
+};
+
+template <typename PointCloud, typename Projection, typename BorderModeH, typename BorderModeV>
+struct Traits<ProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV>> {
+  static size_t
+  nearest_neighbor_search(const ProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV>& tree, const Eigen::Vector4d& point, size_t* k_indices, double* k_sq_dists) {
+    return tree.nearest_neighbor_search(point, k_indices, k_sq_dists);
+  }
+
+  static size_t
+  knn_search(const ProjectiveSearch<PointCloud, Projection, BorderModeH, BorderModeV>& tree, const Eigen::Vector4d& point, size_t k, size_t* k_indices, double* k_sq_dists) {
+    return tree.knn_search(point, k, k_indices, k_sq_dists);
+  }
+};
+
+} // namespace traits
+
+} // namespace small_gicp
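Editor's note: a short usage sketch of the new projective search, under stated assumptions: the header path small_gicp/ann/projective_search.hpp and the point-container header are guesses (the compare view does not show file names), and the std::vector<Eigen::Vector4d> input relies on the traits specialization added further below. Image size, search windows, and query values are arbitrary.

    #include <vector>
    #include <Eigen/Core>
    #include <small_gicp/ann/projective_search.hpp>  // assumed header name for the new file above
    #include <small_gicp/points/eigen.hpp>           // assumed header providing the std::vector<Eigen::Vector4d> traits

    int main() {
      std::vector<Eigen::Vector4d> points;
      points.push_back(Eigen::Vector4d(1.0, 0.0, 0.0, 1.0));
      points.push_back(Eigen::Vector4d(0.0, 1.0, 0.0, 1.0));
      points.push_back(Eigen::Vector4d(0.0, 0.0, 1.0, 1.0));

      // 256x64 equirectangular index map; horizontal repeat / vertical clamp borders are the defaults.
      small_gicp::UnsafeProjectiveSearch<std::vector<Eigen::Vector4d>> search(256, 64, points);

      size_t index = 0;
      double sq_dist = 0.0;
      const size_t found = search.nearest_neighbor_search(Eigen::Vector4d(0.9, 0.1, 0.0, 1.0), &index, &sq_dist);
      return found == 1 ? 0 : 1;
    }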
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
+// SPDX-License-Identifier: MIT
+#pragma once
+
+#include <small_gicp/points/traits.hpp>
+#include <small_gicp/ann/incremental_voxelmap.hpp>
+
+namespace small_gicp {
+
+/**
+ * @brief A wrapper class to sequentially access points in a voxelmap (e.g., using points in a voxelmap as source point cloud for registration).
+ * @note If the contents of the voxelmap are changed, the accessor must be recreated.
+ * @example
+ small_gicp::IncrementalVoxelMap<small_gicp::FlatContainerCov>::Ptr last_voxelmap = ...;
+ small_gicp::IncrementalVoxelMap<small_gicp::FlatContainerCov>::Ptr voxelmap = ...;
+
+ // Create a sequential accessor
+ const auto accessor = small_gicp::create_sequential_accessor(*voxelmap);
+
+ // Use the voxelmap as a source point cloud
+ small_gicp::Registration<small_gicp::GICPFactor, small_gicp::ParallelReductionOMP> registration;
+ auto result = registration.align(*last_voxelmap, accessor, *last_voxelmap, Eigen::Isometry3d::Identity());
+ */
+template <typename VoxelMap>
+class SequentialVoxelMapAccessor {
+public:
+  /// @brief Constructor.
+  /// @param voxelmap Voxelmap
+  SequentialVoxelMapAccessor(const VoxelMap& voxelmap) : voxelmap(voxelmap), indices(traits::point_indices(voxelmap)) {}
+
+  /// @brief Number of points in the voxelmap.
+  size_t size() const { return indices.size(); }
+
+public:
+  const VoxelMap& voxelmap;           ///< Voxelmap
+  const std::vector<size_t> indices;  ///< Indices of points in the voxelmap
+};
+
+/// @brief Create a sequential voxelmap accessor.
+template <typename VoxelMap>
+SequentialVoxelMapAccessor<VoxelMap> create_sequential_accessor(const VoxelMap& voxelmap) {
+  return SequentialVoxelMapAccessor<VoxelMap>(voxelmap);
+}
+
+template <typename VoxelMap>
+struct traits::Traits<SequentialVoxelMapAccessor<VoxelMap>> {
+  static size_t size(const SequentialVoxelMapAccessor<VoxelMap>& accessor) { return accessor.size(); }
+
+  static bool has_points(const SequentialVoxelMapAccessor<VoxelMap>& accessor) { return traits::has_points(accessor.voxelmap); }
+  static bool has_normals(const SequentialVoxelMapAccessor<VoxelMap>& accessor) { return traits::has_normals(accessor.voxelmap); }
+  static bool has_covs(const SequentialVoxelMapAccessor<VoxelMap>& accessor) { return traits::has_covs(accessor.voxelmap); }
+
+  static Eigen::Vector4d point(const SequentialVoxelMapAccessor<VoxelMap>& accessor, size_t i) { return traits::point(accessor.voxelmap, accessor.indices[i]); }
+  static Eigen::Vector4d normal(const SequentialVoxelMapAccessor<VoxelMap>& accessor, size_t i) { return traits::normal(accessor.voxelmap, accessor.indices[i]); }
+  static Eigen::Matrix4d cov(const SequentialVoxelMapAccessor<VoxelMap>& accessor, size_t i) { return traits::cov(accessor.voxelmap, accessor.indices[i]); }
+};
+
+} // namespace small_gicp
@@ -1,3 +1,5 @@
+// SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
+// SPDX-License-Identifier: MIT
 #pragma once

 #include <Eigen/Core>

@@ -99,8 +99,8 @@ protected:
   std::string registration_type_;  ///< Registration type ("GICP" or "VGICP").
   bool verbose_;                   ///< Verbosity flag.

-  std::shared_ptr<KdTree<pcl::PointCloud<PointSource>>> target_tree_;  ///< KdTree for target point cloud.
-  std::shared_ptr<KdTree<pcl::PointCloud<PointSource>>> source_tree_;  ///< KdTree for source point cloud.
+  std::shared_ptr<small_gicp::KdTree<pcl::PointCloud<PointSource>>> target_tree_;  ///< KdTree for target point cloud.
+  std::shared_ptr<small_gicp::KdTree<pcl::PointCloud<PointSource>>> source_tree_;  ///< KdTree for source point cloud.

   std::shared_ptr<GaussianVoxelMap> target_voxelmap_;  ///< VoxelMap for target point cloud.
   std::shared_ptr<GaussianVoxelMap> source_voxelmap_;  ///< VoxelMap for source point cloud.

@@ -44,7 +44,7 @@ void RegistrationPCL<PointSource, PointTarget>::setInputSource(const PointCloudS
   }

   pcl::Registration<PointSource, PointTarget, Scalar>::setInputSource(cloud);
-  source_tree_ = std::make_shared<KdTree<pcl::PointCloud<PointSource>>>(input_, KdTreeBuilderOMP(num_threads_));
+  source_tree_ = std::make_shared<small_gicp::KdTree<pcl::PointCloud<PointSource>>>(input_, KdTreeBuilderOMP(num_threads_));
   source_covs_.clear();
   source_voxelmap_.reset();
 }

@@ -56,7 +56,7 @@ void RegistrationPCL<PointSource, PointTarget>::setInputTarget(const PointCloudT
   }

   pcl::Registration<PointSource, PointTarget, Scalar>::setInputTarget(cloud);
-  target_tree_ = std::make_shared<KdTree<pcl::PointCloud<PointTarget>>>(target_, KdTreeBuilderOMP(num_threads_));
+  target_tree_ = std::make_shared<small_gicp::KdTree<pcl::PointCloud<PointTarget>>>(target_, KdTreeBuilderOMP(num_threads_));
   target_covs_.clear();
   target_voxelmap_.reset();
 }

@@ -214,7 +214,7 @@ void RegistrationPCL<PointSource, PointTarget>::computeTransformation(PointCloud
     estimate_covariances_omp(target_proxy, *target_tree_, k_correspondences_, num_threads_);
   }

-  Registration<GICPFactor, ParallelReductionOMP> registration;
+  small_gicp::Registration<GICPFactor, ParallelReductionOMP> registration;
   registration.criteria.rotation_eps = rotation_epsilon_;
   registration.criteria.translation_eps = transformation_epsilon_;
   registration.reduction.num_threads = num_threads_;
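Editor's note: the namespace-qualification changes above do not alter how RegistrationPCL is used. The sketch below drives it purely through the pcl::Registration base interface visible in these hunks (setInputTarget/setInputSource) plus the base class's align()/getFinalTransformation(); the include path is an assumption, and small_gicp-specific setters are intentionally not shown here.

    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>
    #include <small_gicp/pcl/pcl_registration.hpp>  // assumed header path

    Eigen::Matrix4f register_clouds(const pcl::PointCloud<pcl::PointXYZ>::Ptr& target, const pcl::PointCloud<pcl::PointXYZ>::Ptr& source) {
      small_gicp::RegistrationPCL<pcl::PointXYZ, pcl::PointXYZ> reg;
      reg.setInputTarget(target);  // builds the target KdTree with KdTreeBuilderOMP, as shown above
      reg.setInputSource(source);  // builds the source KdTree with KdTreeBuilderOMP, as shown above

      pcl::PointCloud<pcl::PointXYZ> aligned;
      reg.align(aligned);
      return reg.getFinalTransformation();  // estimated T_target_source
    }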
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: MIT
 #pragma once

+#include <vector>
 #include <Eigen/Core>
 #include <small_gicp/points/traits.hpp>

@@ -15,5 +16,30 @@ struct Traits<Eigen::MatrixXd> {
   static Eigen::Vector4d point(const Eigen::MatrixXd& points, size_t i) { return Eigen::Vector4d(points(i, 0), points(i, 1), points(i, 2), 1.0); }
 };

+template <typename Scalar, int D>
+struct Traits<std::vector<Eigen::Matrix<Scalar, D, 1>>> {
+  static_assert(D == 3 || D == 4, "Only 3D and 4D (homogeneous) points are supported");
+  static_assert(std::is_floating_point<Scalar>::value, "Only floating point types are supported");
+
+  using Point = Eigen::Matrix<Scalar, D, 1>;
+  static size_t size(const std::vector<Point>& points) { return points.size(); }
+  static bool has_points(const std::vector<Point>& points) { return points.size(); }
+  static Eigen::Vector4d point(const std::vector<Point>& points, size_t i) {
+    if constexpr (std::is_same_v<Scalar, double>) {
+      if constexpr (D == 3) {
+        return points[i].homogeneous();
+      } else {
+        return points[i];
+      }
+    } else {
+      if constexpr (D == 3) {
+        return points[i].homogeneous().template cast<double>();
+      } else {
+        return points[i].template cast<double>();
+      }
+    }
+  }
+};
+
 } // namespace traits
 } // namespace small_gicp
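Editor's note: with the specialization above, a plain std::vector of Eigen points can be consumed through the traits layer without conversion; traits::point() always yields a homogeneous Eigen::Vector4d. The include path below is an assumption; the calls themselves come from the hunk.

    #include <vector>
    #include <Eigen/Core>
    #include <small_gicp/points/eigen.hpp>  // assumed header name for the traits shown above

    double first_point_norm(const std::vector<Eigen::Vector3f>& points) {
      namespace traits = small_gicp::traits;
      if (!traits::has_points(points) || traits::size(points) == 0) {
        return 0.0;
      }
      const Eigen::Vector4d p = traits::point(points, 0);  // float 3D point promoted to homogeneous double
      return p.head<3>().norm();
    }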
@@ -108,7 +108,7 @@ struct LevenbergMarquardtOptimizer {
       // Solve with damping
       const Eigen::Matrix<double, 6, 1> delta = (H + lambda * Eigen::Matrix<double, 6, 6>::Identity()).ldlt().solve(-b);

-      // Validte new solution
+      // Validate new solution
       const Eigen::Isometry3d new_T = result.T_target_source * se3_exp(delta);
       double new_e = reduction.error(target, source, new_T, factors);
       general_factor.update_error(target, source, new_T, &e);

@@ -118,12 +118,13 @@ struct LevenbergMarquardtOptimizer {
                   << " dr=" << delta.head<3>().norm() << std::endl;
       }

-      if (new_e < e) {
+      if (new_e <= e) {
         // Error decreased, decrease lambda
         result.converged = criteria.converged(delta);
         result.T_target_source = new_T;
         lambda /= lambda_factor;
         success = true;
+        e = new_e;

         break;
       } else {
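Editor's note: the two behavioral changes here (accepting steps with new_e <= e and carrying the accepted error forward with e = new_e) follow the standard Levenberg-Marquardt damping schedule. The toy below restates that schedule on a self-contained 1-D least-squares problem; it is not the library's optimizer.

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1.0, lambda = 1e-4;
      const double lambda_factor = 10.0;
      double e = std::pow(x * x - 2.0, 2);  // squared error of the residual r(x) = x*x - 2

      for (int iter = 0; iter < 20; iter++) {
        const double r = x * x - 2.0, J = 2.0 * x;  // residual and Jacobian
        for (int inner = 0; inner < 10; inner++) {
          const double delta = -(J * r) / (J * J + lambda);  // damped Gauss-Newton step
          const double new_x = x + delta;
          const double new_e = std::pow(new_x * new_x - 2.0, 2);
          if (new_e <= e) {            // "<=" as in the hunk: also accept steps that keep the error unchanged
            x = new_x;
            e = new_e;                 // the newly added line: carry the accepted error forward
            lambda /= lambda_factor;   // step accepted: relax the damping
            break;
          }
          lambda *= lambda_factor;     // step rejected: stiffen the damping and retry
        }
      }
      std::printf("x = %.6f (expected sqrt(2) = %.6f)\n", x, std::sqrt(2.0));
      return 0;
    }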
@@ -7,7 +7,11 @@
 namespace small_gicp {

 #ifndef _OPENMP
+#ifdef _WIN32
+#pragma message(__FILE__, " OpenMP is not available. Parallel reduction will be disabled.")
+#else
 #warning "OpenMP is not available. Parallel reduction will be disabled."
+#endif
 inline int omp_get_thread_num() {
   return 0;
 }
@@ -9,7 +9,9 @@

 namespace small_gicp {

-/// @brief Preprocess point cloud (downsampling, kdtree creation, and normal and covariance estimation)
+/// @brief Preprocess point cloud (downsampling, kdtree creation, and normal and covariance estimation).
+/// @note When num_threads >= 2, this function has minor run-by-run non-determinism due to the parallel downsampling.
+/// @see small_gicp::voxelgrid_sampling_omp, small_gicp::estimate_normals_covariances_omp
 /// @param points Input points
 /// @param downsampling_resolution Downsample resolution
 /// @param num_neighbors Number of neighbors for normal/covariance estimation

@@ -19,11 +21,14 @@ preprocess_points(const PointCloud& points, double downsampling_resolution, int

 /// @brief Preprocess point cloud (downsampling, kdtree creation, and normal and covariance estimation)
 /// @note This function only accepts Eigen::Vector(3|4)(f|d) as input
+/// @note When num_threads >= 2, this function has minor run-by-run non-determinism due to the parallel downsampling.
+/// @see small_gicp::voxelgrid_sampling_omp, small_gicp::estimate_normals_covariances_omp
 template <typename T, int D>
 std::pair<PointCloud::Ptr, std::shared_ptr<KdTree<PointCloud>>>
 preprocess_points(const std::vector<Eigen::Matrix<T, D, 1>>& points, double downsampling_resolution, int num_neighbors = 10, int num_threads = 4);

-/// @brief Create Gaussian voxel map
+/// @brief Create an incremental Gaussian voxel map.
+/// @see small_gicp::IncrementalVoxelMap
 /// @param points Input points
 /// @param voxel_resolution Voxel resolution
 GaussianVoxelMap::Ptr create_gaussian_voxelmap(const PointCloud& points, double voxel_resolution);

@@ -40,10 +45,12 @@ struct RegistrationSetting {
   double translation_eps = 1e-3;  ///< Translation tolerance for convergence check
   int num_threads = 4;            ///< Number of threads
   int max_iterations = 20;        ///< Maximum number of iterations
+  bool verbose = false;           ///< Verbose mode
 };

 /// @brief Align point clouds
 /// @note This function only accepts Eigen::Vector(3|4)(f|d) as input
+/// @see small_gicp::voxelgrid_sampling_omp, small_gicp::estimate_normals_covariances_omp
 /// @param target Target points
 /// @param source Source points
 /// @param init_T Initial guess of T_target_source

@@ -71,9 +78,8 @@ RegistrationResult align(
   const RegistrationSetting& setting = RegistrationSetting());

 /// @brief Align preprocessed point clouds with VGICP
-/// @param target Target point cloud
+/// @param target Target Gaussian voxelmap
 /// @param source Source point cloud
-/// @param target_tree Nearest neighbor search for the target point cloud
 /// @param init_T Initial guess of T_target_source
 /// @param setting Registration setting
 /// @return Registration result
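Editor's note: a sketch of the convenience API documented in this header. The overload taking two std::vector<Eigen::Vector3d> clouds and an Isometry3d initial guess is assumed from the declarations and @param docs above; only the RegistrationSetting fields shown in the hunk are set.

    #include <vector>
    #include <Eigen/Core>
    #include <Eigen/Geometry>
    #include <small_gicp/registration/registration_helper.hpp>  // assumed header path

    small_gicp::RegistrationResult align_scans(const std::vector<Eigen::Vector3d>& target, const std::vector<Eigen::Vector3d>& source) {
      small_gicp::RegistrationSetting setting;
      setting.num_threads = 4;
      setting.max_iterations = 20;
      setting.verbose = false;  // the newly added flag

      return small_gicp::align(target, source, Eigen::Isometry3d::Identity(), setting);
    }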
@@ -24,7 +24,7 @@ struct DistanceRejector {
     return sq_dist > max_dist_sq;
   }

-  double max_dist_sq;
+  double max_dist_sq;  ///< Maximum squared distance between corresponding points
 };

 } // namespace small_gicp
@@ -14,7 +14,7 @@ struct TerminationCriteria {
   /// @brief Check the convergence
   /// @param delta Transformation update vector
   /// @return True if converged
-  bool converged(const Eigen::Matrix<double, 6, 1>& delta) const { return delta.template head<3>().norm() < rotation_eps && delta.template tail<3>().norm() < translation_eps; }
+  bool converged(const Eigen::Matrix<double, 6, 1>& delta) const { return delta.template head<3>().norm() <= rotation_eps && delta.template tail<3>().norm() <= translation_eps; }

   double translation_eps;  ///< Translation tolerance
   double rotation_eps;     ///< Rotation tolerance [rad]
@@ -12,8 +12,10 @@

 namespace small_gicp {

-/// @brief Voxelgrid downsampling.
+/// @brief Voxelgrid downsampling. This function computes the exact average of points in each voxel, and each voxel can contain an arbitrary number of points.
 /// @note Discretized voxel coords must be in 21bit range [-1048576, 1048575].
+///       For example, if the downsampling resolution is 0.01 m, point coordinates must be in [-10485.76, 10485.75] m.
+///       Points outside the valid range will be ignored.
 /// @param points Input points
 /// @param leaf_size Downsampling resolution
 /// @return Downsampled points

@@ -25,16 +27,17 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling(const InputPointCloud& poin

   const double inv_leaf_size = 1.0 / leaf_size;

-  const int coord_bit_size = 21;                       // Bits to represent each voxel coordinate (pack 21x3=63bits in 64bit int)
-  const size_t coord_bit_mask = (1 << 21) - 1;         // Bit mask
-  const int coord_offset = 1 << (coord_bit_size - 1);  // Coordinate offset to make values positive
+  constexpr std::uint64_t invalid_coord = std::numeric_limits<std::uint64_t>::max();
+  constexpr int coord_bit_size = 21;                       // Bits to represent each voxel coordinate (pack 21x3=63bits in 64bit int)
+  constexpr size_t coord_bit_mask = (1 << 21) - 1;         // Bit mask
+  constexpr int coord_offset = 1 << (coord_bit_size - 1);  // Coordinate offset to make values positive

   std::vector<std::pair<std::uint64_t, size_t>> coord_pt(traits::size(points));
   for (size_t i = 0; i < traits::size(points); i++) {
     const Eigen::Array4i coord = fast_floor(traits::point(points, i) * inv_leaf_size) + coord_offset;
     if ((coord < 0).any() || (coord > coord_bit_mask).any()) {
       std::cerr << "warning: voxel coord is out of range!!" << std::endl;
-      coord_pt[i] = {0, i};
+      coord_pt[i] = {invalid_coord, i};
       continue;
     }

@@ -56,6 +59,10 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling(const InputPointCloud& poin
   size_t num_points = 0;
   Eigen::Vector4d sum_pt = traits::point(points, coord_pt.front().second);
   for (size_t i = 1; i < traits::size(points); i++) {
+    if (coord_pt[i].first == invalid_coord) {
+      continue;
+    }
+
     if (coord_pt[i - 1].first != coord_pt[i].first) {
       traits::set_point(*downsampled, num_points++, sum_pt / sum_pt.w());
       sum_pt.setZero();
|
||||||
namespace small_gicp {
|
namespace small_gicp {
|
||||||
|
|
||||||
/// @brief Voxel grid downsampling with OpenMP backend.
|
/// @brief Voxel grid downsampling with OpenMP backend.
|
||||||
|
/// @note This function has minor run-by-run non-deterministic behavior due to parallel data collection that results
|
||||||
|
/// in a deviation of the number of points in the downsampling results (up to 10% increase from the single-thread version).
|
||||||
/// @note Discretized voxel coords must be in 21bit range [-1048576, 1048575].
|
/// @note Discretized voxel coords must be in 21bit range [-1048576, 1048575].
|
||||||
|
/// For example, if the downsampling resolution is 0.01 m, point coordinates must be in [-10485.76, 10485.75] m.
|
||||||
|
/// Points outside the valid range will be ignored.
|
||||||
/// @param points Input points
|
/// @param points Input points
|
||||||
/// @param leaf_size Downsampling resolution
|
/// @param leaf_size Downsampling resolution
|
||||||
/// @return Downsampled points
|
/// @return Downsampled points
|
||||||
|
|
@ -25,9 +29,11 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_omp(const InputPointCloud&
|
||||||
}
|
}
|
||||||
|
|
||||||
const double inv_leaf_size = 1.0 / leaf_size;
|
const double inv_leaf_size = 1.0 / leaf_size;
|
||||||
const int coord_bit_size = 21; // Bits to represent each voxel coordinate (pack 21x3 = 63bits in 64bit int)
|
|
||||||
const size_t coord_bit_mask = (1 << 21) - 1; // Bit mask
|
constexpr std::uint64_t invalid_coord = std::numeric_limits<std::uint64_t>::max();
|
||||||
const int coord_offset = 1 << (coord_bit_size - 1); // Coordinate offset to make values positive
|
constexpr int coord_bit_size = 21; // Bits to represent each voxel coordinate (pack 21x3 = 63bits in 64bit int)
|
||||||
|
constexpr size_t coord_bit_mask = (1 << 21) - 1; // Bit mask
|
||||||
|
constexpr int coord_offset = 1 << (coord_bit_size - 1); // Coordinate offset to make values positive
|
||||||
|
|
||||||
std::vector<std::pair<std::uint64_t, size_t>> coord_pt(traits::size(points));
|
std::vector<std::pair<std::uint64_t, size_t>> coord_pt(traits::size(points));
|
||||||
#pragma omp parallel for num_threads(num_threads) schedule(guided, 32)
|
#pragma omp parallel for num_threads(num_threads) schedule(guided, 32)
|
||||||
|
|
@ -35,7 +41,7 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_omp(const InputPointCloud&
|
||||||
const Eigen::Array4i coord = fast_floor(traits::point(points, i) * inv_leaf_size) + coord_offset;
|
const Eigen::Array4i coord = fast_floor(traits::point(points, i) * inv_leaf_size) + coord_offset;
|
||||||
if ((coord < 0).any() || (coord > coord_bit_mask).any()) {
|
if ((coord < 0).any() || (coord > coord_bit_mask).any()) {
|
||||||
std::cerr << "warning: voxel coord is out of range!!" << std::endl;
|
std::cerr << "warning: voxel coord is out of range!!" << std::endl;
|
||||||
coord_pt[i] = {0, i};
|
coord_pt[i] = {invalid_coord, i};
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// Compute voxel coord bits (0|1bit, z|21bit, y|21bit, x|21bit)
|
// Compute voxel coord bits (0|1bit, z|21bit, y|21bit, x|21bit)
|
||||||
|
|
@ -47,11 +53,7 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_omp(const InputPointCloud&
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort by voxel coords
|
// Sort by voxel coords
|
||||||
quick_sort_omp(
|
quick_sort_omp(coord_pt.begin(), coord_pt.end(), [](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; }, num_threads);
|
||||||
coord_pt.begin(),
|
|
||||||
coord_pt.end(),
|
|
||||||
[](const auto& lhs, const auto& rhs) { return lhs.first < rhs.first; },
|
|
||||||
num_threads);
|
|
||||||
|
|
||||||
auto downsampled = std::make_shared<OutputPointCloud>();
|
auto downsampled = std::make_shared<OutputPointCloud>();
|
||||||
traits::resize(*downsampled, traits::size(points));
|
traits::resize(*downsampled, traits::size(points));
|
||||||
|
|
@ -69,6 +71,10 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_omp(const InputPointCloud&
|
||||||
|
|
||||||
Eigen::Vector4d sum_pt = traits::point(points, coord_pt[block_begin].second);
|
Eigen::Vector4d sum_pt = traits::point(points, coord_pt[block_begin].second);
|
||||||
for (size_t i = block_begin + 1; i != block_end; i++) {
|
for (size_t i = block_begin + 1; i != block_end; i++) {
|
||||||
|
if (coord_pt[i].first == invalid_coord) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
if (coord_pt[i - 1].first != coord_pt[i].first) {
|
if (coord_pt[i - 1].first != coord_pt[i].first) {
|
||||||
sub_points.emplace_back(sum_pt / sum_pt.w());
|
sub_points.emplace_back(sum_pt / sum_pt.w());
|
||||||
sum_pt.setZero();
|
sum_pt.setZero();
|
||||||
|
|
|
||||||
|
|
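Editor's note: the OpenMP variant mirrors the serial call with a trailing thread count (signature assumed from the declaration above); as the new note says, its output may differ slightly from the single-threaded result from run to run.

    #include <memory>
    #include <small_gicp/points/point_cloud.hpp>     // assumed header path
    #include <small_gicp/util/downsampling_omp.hpp>  // assumed header path

    std::shared_ptr<small_gicp::PointCloud> downsample_parallel(const small_gicp::PointCloud& points) {
      return small_gicp::voxelgrid_sampling_omp(points, 0.25, 8);  // leaf_size = 0.25 m, 8 threads
    }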
@@ -14,7 +14,11 @@
 namespace small_gicp {

 /// @brief Voxel grid downsampling with TBB backend.
+/// @note This function has minor run-by-run non-deterministic behavior due to parallel data collection that results
+///       in a deviation of the number of points in the downsampling results (up to 10% increase from the single-thread version).
 /// @note Discretized voxel coords must be in 21bit range [-1048576, 1048575].
+///       For example, if the downsampling resolution is 0.01 m, point coordinates must be in [-10485.76, 10485.75] m.
+///       Points outside the valid range will be ignored.
 /// @param points Input points
 /// @param leaf_size Downsampling resolution
 /// @return Downsampled points

@@ -25,9 +29,11 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_tbb(const InputPointCloud&
   }

   const double inv_leaf_size = 1.0 / leaf_size;
-  const int coord_bit_size = 21;                       // Bits to represent each voxel coordinate (pack 21x3 = 63bits in 64bit int)
-  const size_t coord_bit_mask = (1 << 21) - 1;         // Bit mask
-  const int coord_offset = 1 << (coord_bit_size - 1);  // Coordinate offset to make values positive
+
+  constexpr std::uint64_t invalid_coord = std::numeric_limits<std::uint64_t>::max();
+  constexpr int coord_bit_size = 21;                       // Bits to represent each voxel coordinate (pack 21x3 = 63bits in 64bit int)
+  constexpr size_t coord_bit_mask = (1 << 21) - 1;         // Bit mask
+  constexpr int coord_offset = 1 << (coord_bit_size - 1);  // Coordinate offset to make values positive

   std::vector<std::pair<std::uint64_t, size_t>> coord_pt(traits::size(points));
   tbb::parallel_for(tbb::blocked_range<size_t>(0, traits::size(points), 64), [&](const tbb::blocked_range<size_t>& range) {

@@ -35,7 +41,7 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_tbb(const InputPointCloud&
     const Eigen::Array4i coord = fast_floor(traits::point(points, i) * inv_leaf_size) + coord_offset;
     if ((coord < 0).any() || (coord > coord_bit_mask).any()) {
       std::cerr << "warning: voxel coord is out of range!!" << std::endl;
-      coord_pt[i] = {0, i};
+      coord_pt[i] = {invalid_coord, i};
       continue;
     }

@@ -63,6 +69,10 @@ std::shared_ptr<OutputPointCloud> voxelgrid_sampling_tbb(const InputPointCloud&

   Eigen::Vector4d sum_pt = traits::point(points, coord_pt[range.begin()].second);
   for (size_t i = range.begin() + 1; i != range.end(); i++) {
+    if (coord_pt[i].first == invalid_coord) {
+      continue;
+    }
+
     if (coord_pt[i - 1].first != coord_pt[i].first) {
       sub_points.emplace_back(sum_pt / sum_pt.w());
       sum_pt.setZero();
|
||||||
/// @param a Twist vector [rx, ry, rz, tx, ty, tz]
|
/// @param a Twist vector [rx, ry, rz, tx, ty, tz]
|
||||||
/// @return SE3 matrix
|
/// @return SE3 matrix
|
||||||
inline Eigen::Isometry3d se3_exp(const Eigen::Matrix<double, 6, 1>& a) {
|
inline Eigen::Isometry3d se3_exp(const Eigen::Matrix<double, 6, 1>& a) {
|
||||||
using std::cos;
|
|
||||||
using std::sin;
|
|
||||||
const Eigen::Vector3d omega = a.head<3>();
|
const Eigen::Vector3d omega = a.head<3>();
|
||||||
|
|
||||||
double theta = std::sqrt(omega.dot(omega));
|
const double theta_sq = omega.dot(omega);
|
||||||
const Eigen::Quaterniond so3 = so3_exp(omega);
|
const double theta = std::sqrt(theta_sq);
|
||||||
const Eigen::Matrix3d Omega = skew(omega);
|
|
||||||
const Eigen::Matrix3d Omega_sq = Omega * Omega;
|
|
||||||
Eigen::Matrix3d V;
|
|
||||||
|
|
||||||
if (theta < 1e-10) {
|
|
||||||
V = so3.matrix();
|
|
||||||
/// Note: That is an accurate expansion!
|
|
||||||
} else {
|
|
||||||
const double theta_sq = theta * theta;
|
|
||||||
V = (Eigen::Matrix3d::Identity() + (1.0 - cos(theta)) / (theta_sq)*Omega + (theta - sin(theta)) / (theta_sq * theta) * Omega_sq);
|
|
||||||
}
|
|
||||||
|
|
||||||
Eigen::Isometry3d se3 = Eigen::Isometry3d::Identity();
|
Eigen::Isometry3d se3 = Eigen::Isometry3d::Identity();
|
||||||
se3.linear() = so3.toRotationMatrix();
|
se3.linear() = so3_exp(omega).toRotationMatrix();
|
||||||
|
|
||||||
|
if (theta < 1e-10) {
|
||||||
|
se3.translation() = se3.linear() * a.tail<3>();
|
||||||
|
/// Note: That is an accurate expansion!
|
||||||
|
} else {
|
||||||
|
const Eigen::Matrix3d Omega = skew(omega);
|
||||||
|
const Eigen::Matrix3d V = (Eigen::Matrix3d::Identity() + (1.0 - std::cos(theta)) / theta_sq * Omega + (theta - std::sin(theta)) / (theta_sq * theta) * Omega * Omega);
|
||||||
se3.translation() = V * a.tail<3>();
|
se3.translation() = V * a.tail<3>();
|
||||||
|
}
|
||||||
|
|
||||||
return se3;
|
return se3;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
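Editor's note: for reference, the closed form evaluated in the rewritten else branch is the SO(3) exponential combined with the standard V matrix applied to the translational part rho = a.tail<3>() of the twist, with theta = ||omega|| and Omega = skew(omega):

    R = \exp(\Omega), \qquad
    V = I + \frac{1 - \cos\theta}{\theta^{2}}\,\Omega + \frac{\theta - \sin\theta}{\theta^{3}}\,\Omega^{2}, \qquad
    \exp\!\left(\begin{bmatrix} \boldsymbol{\omega} \\ \boldsymbol{\rho} \end{bmatrix}^{\wedge}\right)
      = \begin{bmatrix} R & V\boldsymbol{\rho} \\ \mathbf{0}^{\top} & 1 \end{bmatrix}.

As theta approaches zero, both V and R tend to the identity, so the small-angle branch's substitution of R for V (translation = R * rho) agrees with the exact expression to first order.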
@@ -8,6 +8,7 @@
 namespace small_gicp {

 /// @brief Computes point normals from eigenvectors and sets them to the point cloud.
+/// @note If a sufficient number of neighbor points is not found, a zero vector is set to the point.
 template <typename PointCloud>
 struct NormalSetter {
   /// @brief Handle invalid case (too few points).

@@ -25,6 +26,7 @@ struct NormalSetter {
 };

 /// @brief Computes point covariances from eigenvectors and sets them to the point cloud.
+/// @note If a sufficient number of neighbor points is not found, an identity matrix is set to the point.
 template <typename PointCloud>
 struct CovarianceSetter {
   /// @brief Handle invalid case (too few points).

@@ -44,6 +46,7 @@ struct CovarianceSetter {
 };

 /// @brief Computes point normals and covariances from eigenvectors and sets them to the point cloud.
+/// @note If a sufficient number of neighbor points is not found, a zero vector and an identity matrix are set to the point.
 template <typename PointCloud>
 struct NormalCovarianceSetter {
   /// @brief Handle invalid case (too few points).

@@ -107,6 +110,8 @@ void estimate_local_features(PointCloud& cloud, int num_neighbors) {
 }

 /// @brief Estimate point normals
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 template <typename PointCloud>

@@ -115,6 +120,8 @@ void estimate_normals(PointCloud& cloud, int num_neighbors = 20) {
 }

 /// @brief Estimate point normals
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation

@@ -124,6 +131,8 @@ void estimate_normals(PointCloud& cloud, KdTree& kdtree, int num_neighbors = 20)
 }

 /// @brief Estimate point covariances
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 template <typename PointCloud>

@@ -132,6 +141,8 @@ void estimate_covariances(PointCloud& cloud, int num_neighbors = 20) {
 }

 /// @brief Estimate point covariances
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation

@@ -141,6 +152,8 @@ void estimate_covariances(PointCloud& cloud, KdTree& kdtree, int num_neighbors =
 }

 /// @brief Estimate point normals and covariances
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 template <typename PointCloud>

@@ -149,6 +162,8 @@ void estimate_normals_covariances(PointCloud& cloud, int num_neighbors = 20) {
 }

 /// @brief Estimate point normals and covariances
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation
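Editor's note: the single-threaded helper documented above is used as below; the OMP variants that follow take an extra num_threads argument and the TBB variants mirror the same call. The include paths are assumptions; the function signature is the one shown in the hunk.

    #include <small_gicp/points/point_cloud.hpp>      // assumed header path
    #include <small_gicp/util/normal_estimation.hpp>  // assumed header path

    void add_attributes(small_gicp::PointCloud& cloud) {
      // Points with too few usable neighbors receive a zero normal and an identity covariance,
      // exactly as the new @note lines state.
      small_gicp::estimate_normals_covariances(cloud, 20);  // 20 neighbors per point
    }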
@@ -26,6 +26,8 @@ void estimate_local_features_omp(PointCloud& cloud, KdTree& kdtree, int num_neig
 }

 /// @brief Estimate point normals with OpenMP
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 /// @param num_threads Number of threads

@@ -35,6 +37,8 @@ void estimate_normals_omp(PointCloud& cloud, int num_neighbors = 20, int num_thr
 }

 /// @brief Estimate point normals with OpenMP
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation

@@ -45,6 +49,8 @@ void estimate_normals_omp(PointCloud& cloud, KdTree& kdtree, int num_neighbors =
 }

 /// @brief Estimate point covariances with OpenMP
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 /// @param num_threads Number of threads

@@ -54,6 +60,8 @@ void estimate_covariances_omp(PointCloud& cloud, int num_neighbors = 20, int num
 }

 /// @brief Estimate point covariances with OpenMP
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation

@@ -64,6 +72,8 @@ void estimate_covariances_omp(PointCloud& cloud, KdTree& kdtree, int num_neighbo
 }

 /// @brief Estimate point normals and covariances with OpenMP
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 /// @param num_threads Number of threads

@@ -73,6 +83,8 @@ void estimate_normals_covariances_omp(PointCloud& cloud, int num_neighbors = 20,
 }

 /// @brief Estimate point normals and covariances with OpenMP
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+///       an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation
@@ -30,6 +30,8 @@ void estimate_local_features_tbb(PointCloud& cloud, int num_neighbors) {
 }
 
 /// @brief Estimate point normals with TBB
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+/// an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 template <typename PointCloud>
@@ -38,6 +40,8 @@ void estimate_normals_tbb(PointCloud& cloud, int num_neighbors = 20) {
 }
 
 /// @brief Estimate point normals with TBB
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+/// an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation
@@ -47,6 +51,8 @@ void estimate_normals_tbb(PointCloud& cloud, KdTree& kdtree, int num_neighbors =
 }
 
 /// @brief Estimate point covariances with TBB
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+/// an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 template <typename PointCloud>
@@ -55,6 +61,8 @@ void estimate_covariances_tbb(PointCloud& cloud, int num_neighbors = 20) {
 }
 
 /// @brief Estimate point covariances with TBB
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+/// an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation
@@ -64,6 +72,8 @@ void estimate_covariances_tbb(PointCloud& cloud, KdTree& kdtree, int num_neighbo
 }
 
 /// @brief Estimate point normals and covariances with TBB
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+/// an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param num_neighbors Number of neighbors used for attribute estimation
 template <typename PointCloud>
@@ -72,6 +82,8 @@ void estimate_normals_covariances_tbb(PointCloud& cloud, int num_neighbors = 20)
 }
 
 /// @brief Estimate point normals and covariances with TBB
+/// @note If a sufficient number of neighbor points for normal/covariance estimation (5 points) is not found,
+/// an invalid normal/covariance is set to the point (normal=zero vector, covariance=identity matrix).
 /// @param cloud [in/out] Point cloud
 /// @param kdtree Nearest neighbor search
 /// @param num_neighbors Number of neighbors used for attribute estimation
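The added @note lines describe a fallback that also surfaces through the Python bindings documented further below: when fewer than 5 neighbor points are found, the point gets a zero normal and an identity covariance. A minimal sketch of observing this from Python, assuming the `small_gicp` module built from this repository (the 5-point threshold is taken from the note above):

```python
import numpy as np
import small_gicp

# Only 3 points: fewer than the 5 neighbors required for normal/covariance
# estimation, so the estimated attributes fall back to "invalid" values.
cloud = small_gicp.PointCloud(np.random.rand(3, 3))
tree = small_gicp.KdTree(cloud)
small_gicp.estimate_normals_covariances(cloud, tree, num_neighbors=20)

for i in range(cloud.size()):
    if np.allclose(cloud.normal(i)[:3], 0.0):
        print(f"point {i}: invalid normal (zero vector)")
    if np.allclose(cloud.cov(i)[:3, :3], np.eye(3)):
        print(f"point {i}: invalid covariance (identity matrix)")
```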
@@ -22,10 +22,6 @@ markdown_extensions:
   - pymdownx.inlinehilite
   - pymdownx.snippets
   - pymdownx.superfences
-  - pymdownx.emoji:
-      emoji_index: !!python/name:materialx.emoji.twemoji
-      emoji_generator: !!python/name:materialx.emoji.to_svg
-  - fontawesome_markdown
 
 copyright: Copyright © 2024 Kenji Koide
 extra:
@@ -2,7 +2,7 @@
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
   <name>small_gicp</name>
-  <version>0.1.0</version>
+  <version>1.0.0</version>
   <description>Efficient and parallelized algorithms for point cloud registration</description>
 
   <maintainer email="k.koide@aist.go.jp">Kenji Koide</maintainer>
@@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "small_gicp"
-version = "0.1.0"
+version = "1.0.0"
 authors = [{name = "Kenji Koide", email = "k.koide@aist.go.jp"}]
 description = "Efficient and parallelized algorithms for fine point cloud registration"
 readme = "README.md"
@@ -0,0 +1,66 @@
+#include <small_gicp/benchmark/benchmark_odom.hpp>
+
+#include <small_gicp/factors/gicp_factor.hpp>
+#include <small_gicp/points/point_cloud.hpp>
+#include <small_gicp/ann/gaussian_voxelmap.hpp>
+#include <small_gicp/util/normal_estimation_omp.hpp>
+#include <small_gicp/registration/reduction_omp.hpp>
+#include <small_gicp/registration/registration.hpp>
+
+namespace small_gicp {
+
+class SmallGICPModelOnlineOdometryEstimationOMP : public OnlineOdometryEstimation {
+public:
+  explicit SmallGICPModelOnlineOdometryEstimationOMP(const OdometryEstimationParams& params) : OnlineOdometryEstimation(params), T_world_lidar(Eigen::Isometry3d::Identity()) {}
+
+  Eigen::Isometry3d estimate(const PointCloud::Ptr& points) override {
+    Stopwatch sw;
+    sw.start();
+
+    // Note that input points here is already downsampled
+    // Estimate per-point covariances
+    estimate_covariances_omp(*points, params.num_neighbors, params.num_threads);
+
+    if (voxelmap == nullptr) {
+      // This is the very first frame
+      voxelmap = std::make_shared<IncrementalVoxelMap<FlatContainerCov>>(params.voxel_resolution);
+      voxelmap->insert(*points);
+      return T_world_lidar;
+    }
+
+    // Registration using GICP + OMP-based parallel reduction
+    Registration<GICPFactor, ParallelReductionOMP> registration;
+    registration.reduction.num_threads = params.num_threads;
+    auto result = registration.align(*voxelmap, *points, *voxelmap, T_world_lidar);
+
+    // Update T_world_lidar with the estimated transformation
+    T_world_lidar = result.T_target_source;
+
+    // Insert points to the target voxel map
+    voxelmap->insert(*points, T_world_lidar);
+
+    sw.stop();
+    reg_times.push(sw.msec());
+
+    if (params.visualize) {
+      update_model_points(T_world_lidar, traits::voxel_points(*voxelmap));
+    }
+
+    return T_world_lidar;
+  }
+
+  void report() override {  //
+    std::cout << "registration_time_stats=" << reg_times.str() << " [msec/scan] total_throughput=" << total_times.str() << " [msec/scan]" << std::endl;
+  }
+
+private:
+  Summarizer reg_times;
+
+  IncrementalVoxelMap<FlatContainerCov>::Ptr voxelmap;  // Target voxel map that is an accumulation of past point clouds
+  Eigen::Isometry3d T_world_lidar;                      // Current world-to-lidar transformation
+};
+
+static auto small_gicp_model_omp_registry =
+  register_odometry("small_gicp_model_omp", [](const OdometryEstimationParams& params) { return std::make_shared<SmallGICPModelOnlineOdometryEstimationOMP>(params); });
+
+}  // namespace small_gicp
@@ -0,0 +1,66 @@
+#include <small_gicp/benchmark/benchmark_odom.hpp>
+
+#include <small_gicp/factors/gicp_factor.hpp>
+#include <small_gicp/points/point_cloud.hpp>
+#include <small_gicp/ann/gaussian_voxelmap.hpp>
+#include <small_gicp/util/normal_estimation_omp.hpp>
+#include <small_gicp/registration/reduction_omp.hpp>
+#include <small_gicp/registration/registration.hpp>
+
+namespace small_gicp {
+
+class SmallVGICPModelOnlineOdometryEstimationOMP : public OnlineOdometryEstimation {
+public:
+  explicit SmallVGICPModelOnlineOdometryEstimationOMP(const OdometryEstimationParams& params) : OnlineOdometryEstimation(params), T_world_lidar(Eigen::Isometry3d::Identity()) {}
+
+  Eigen::Isometry3d estimate(const PointCloud::Ptr& points) override {
+    Stopwatch sw;
+    sw.start();
+
+    // Note that input points here is already downsampled
+    // Estimate per-point covariances
+    estimate_covariances_omp(*points, params.num_neighbors, params.num_threads);
+
+    if (voxelmap == nullptr) {
+      // This is the very first frame
+      voxelmap = std::make_shared<GaussianVoxelMap>(params.voxel_resolution);
+      voxelmap->insert(*points);
+      return T_world_lidar;
+    }
+
+    // Registration using GICP + OMP-based parallel reduction
+    Registration<GICPFactor, ParallelReductionOMP> registration;
+    registration.reduction.num_threads = params.num_threads;
+    auto result = registration.align(*voxelmap, *points, *voxelmap, T_world_lidar);
+
+    // Update T_world_lidar with the estimated transformation
+    T_world_lidar = result.T_target_source;
+
+    // Insert points to the target voxel map
+    voxelmap->insert(*points, T_world_lidar);
+
+    sw.stop();
+    reg_times.push(sw.msec());
+
+    if (params.visualize) {
+      update_model_points(T_world_lidar, traits::voxel_points(*voxelmap));
+    }
+
+    return T_world_lidar;
+  }
+
+  void report() override {  //
+    std::cout << "registration_time_stats=" << reg_times.str() << " [msec/scan] total_throughput=" << total_times.str() << " [msec/scan]" << std::endl;
+  }
+
+private:
+  Summarizer reg_times;
+
+  GaussianVoxelMap::Ptr voxelmap;   // Target voxel map that is an accumulation of past point clouds
+  Eigen::Isometry3d T_world_lidar;  // Current world-to-lidar transformation
+};
+
+static auto small_gicp_model_omp_registry =
+  register_odometry("small_vgicp_model_omp", [](const OdometryEstimationParams& params) { return std::make_shared<SmallVGICPModelOnlineOdometryEstimationOMP>(params); });
+
+}  // namespace small_gicp
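The two new benchmark entries implement the same scan-to-model odometry loop and differ only in the voxel-map container. A rough Python sketch of that loop, under the assumption that the Python bindings expose `GaussianVoxelMap(resolution)`, `insert(points, T)`, and the voxel-map overload of `align` shown in the binding changes below (the `scans` iterable and parameter values are illustrative):

```python
import numpy as np
import small_gicp

def model_based_odometry(scans, voxel_resolution=1.0, num_threads=4):
    """scans: iterable of (N, 3) numpy arrays, assumed already downsampled."""
    T_world_lidar = np.eye(4)
    voxelmap = None

    for raw in scans:
        # GICP needs per-point covariances on the incoming scan.
        points = small_gicp.PointCloud(raw)
        tree = small_gicp.KdTree(points, num_threads=num_threads)
        small_gicp.estimate_covariances(points, tree, num_threads=num_threads)

        if voxelmap is None:
            # The very first frame initializes the target model.
            voxelmap = small_gicp.GaussianVoxelMap(voxel_resolution)
            voxelmap.insert(points)
            yield T_world_lidar
            continue

        # Align the new scan against the accumulated model.
        result = small_gicp.align(voxelmap, points, T_world_lidar, num_threads=num_threads)
        T_world_lidar = result.T_target_source

        # Merge the aligned scan into the model and report the pose.
        voxelmap.insert(points, T_world_lidar)
        yield T_world_lidar
```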
@@ -32,7 +32,10 @@ void define_align(py::module& m) {
       double downsampling_resolution,
       double max_correspondence_distance,
       int num_threads,
-      int max_iterations) {
+      int max_iterations,
+      double rotation_epsilon,
+      double translation_epsilon,
+      bool verbose) {
       if (target_points.cols() != 3 && target_points.cols() != 4) {
         std::cerr << "target_points must be Nx3 or Nx4" << std::endl;
         return RegistrationResult(Eigen::Isometry3d::Identity());
@@ -60,6 +63,10 @@ void define_align(py::module& m) {
       setting.downsampling_resolution = downsampling_resolution;
       setting.max_correspondence_distance = max_correspondence_distance;
       setting.num_threads = num_threads;
+      setting.max_iterations = max_iterations;
+      setting.rotation_eps = rotation_epsilon;
+      setting.translation_eps = translation_epsilon;
+      setting.verbose = verbose;
 
       std::vector<Eigen::Vector4d> target(target_points.rows());
       if (target_points.cols() == 3) {
@@ -94,16 +101,22 @@ void define_align(py::module& m) {
     py::arg("max_correspondence_distance") = 1.0,
     py::arg("num_threads") = 1,
     py::arg("max_iterations") = 20,
+    py::arg("rotation_epsilon") = 0.1 * M_PI / 180.0,
+    py::arg("translation_epsilon") = 1e-3,
+    py::arg("verbose") = false,
     R"pbdoc(
     Align two point clouds using various ICP-like algorithms.
+    This function first performs preprocessing (downsampling, normal estimation, KdTree construction) and then estimates the transformation.
+
+    See also: :class:`voxelgrid_sampling` :class:`estimate_normals` :class:`preprocess_points`
 
     Parameters
     ----------
-    target_points : NDArray[np.float64]
+    target_points : :class:`numpy.ndarray[np.float64]`
      Nx3 or Nx4 matrix representing the target point cloud.
-    source_points : NDArray[np.float64]
+    source_points : numpy.ndarray[np.float64]
      Nx3 or Nx4 matrix representing the source point cloud.
-    init_T_target_source : np.ndarray[np.float64]
+    init_T_target_source : numpy.ndarray[np.float64]
      4x4 matrix representing the initial transformation from target to source.
     registration_type : str = 'GICP'
      Type of registration algorithm to use ('ICP', 'PLANE_ICP', 'GICP', 'VGICP').
@@ -111,15 +124,23 @@ void define_align(py::module& m) {
      Resolution of voxels used for correspondence search (used only in VGICP).
     downsampling_resolution : float = 0.25
      Resolution for downsampling the point clouds.
+     Input points out of the 21bit range after discretization will be ignored (See also: :class:`voxelgrid_sampling`).
     max_correspondence_distance : float = 1.0
      Maximum distance for matching points between point clouds.
     num_threads : int = 1
      Number of threads to use for parallel processing.
     max_iterations : int = 20
      Maximum number of iterations for the optimization algorithm.
+    rotation_epsilon: double = 0.1 * M_PI / 180.0
+     Convergence criteria for rotation change
+    translation_epsilon: double = 1e-3
+     Convergence criteria for transformation change
+    verbose : bool = False
+     If True, print debug information during the optimization process.
 
     Returns
     -------
-    RegistrationResult
+    result : :class:`RegistrationResult`
      Object containing the final transformation matrix and convergence status.
     )pbdoc");
@@ -134,7 +155,10 @@ void define_align(py::module& m) {
       const std::string& registration_type,
       double max_correspondence_distance,
       int num_threads,
-      int max_iterations) {
+      int max_iterations,
+      double rotation_epsilon,
+      double translation_epsilon,
+      bool verbose) {
       RegistrationSetting setting;
       if (registration_type == "ICP") {
         setting.type = RegistrationSetting::ICP;
@@ -148,6 +172,10 @@ void define_align(py::module& m) {
       }
       setting.max_correspondence_distance = max_correspondence_distance;
       setting.num_threads = num_threads;
+      setting.max_iterations = max_iterations;
+      setting.rotation_eps = rotation_epsilon;
+      setting.translation_eps = translation_epsilon;
+      setting.verbose = verbose;
 
       if (target_tree == nullptr) {
         target_tree = std::make_shared<KdTree<PointCloud>>(target, KdTreeBuilderOMP(num_threads));
@@ -162,18 +190,23 @@ void define_align(py::module& m) {
     py::arg("max_correspondence_distance") = 1.0,
     py::arg("num_threads") = 1,
     py::arg("max_iterations") = 20,
+    py::arg("rotation_epsilon") = 0.1 * M_PI / 180.0,
+    py::arg("translation_epsilon") = 1e-3,
+    py::arg("verbose") = false,
     R"pbdoc(
     Align two point clouds using specified ICP-like algorithms, utilizing point cloud and KD-tree inputs.
+    Input point clouds are assumed to be preprocessed (downsampled, normals estimated, KD-tree built).
+    See also: :class:`voxelgrid_sampling` :class:`estimate_normals` :class:`preprocess_points`
 
     Parameters
     ----------
-    target : PointCloud::ConstPtr
-     Pointer to the target point cloud.
+    target : :class:`PointCloud`
+     Target point cloud.
-    source : PointCloud::ConstPtr
-     Pointer to the source point cloud.
+    source : :class:`PointCloud`
+     Source point cloud.
-    target_tree : KdTree<PointCloud>::ConstPtr, optional
-     Pointer to the KD-tree of the target for nearest neighbor search. If nullptr, a new tree is built.
+    target_tree : :class:`KdTree`, optional
+     KdTree for the target point cloud. If not given, a new KdTree is built.
-    init_T_target_source : NDArray[np.float64]
+    init_T_target_source : numpy.ndarray[np.float64]
      4x4 matrix representing the initial transformation from target to source.
     registration_type : str = 'GICP'
      Type of registration algorithm to use ('ICP', 'PLANE_ICP', 'GICP').
@@ -183,9 +216,16 @@ void define_align(py::module& m) {
      Number of threads to use for computation.
     max_iterations : int = 20
      Maximum number of iterations for the optimization algorithm.
+    rotation_epsilon: double = 0.1 * M_PI / 180.0
+     Convergence criteria for rotation change
+    translation_epsilon: double = 1e-3
+     Convergence criteria for transformation change
+    verbose : bool = False
+     If True, print debug information during the optimization process.
 
     Returns
     -------
-    RegistrationResult
+    result : :class:`RegistrationResult`
      Object containing the final transformation matrix and convergence status.
     )pbdoc");
@@ -198,11 +238,17 @@ void define_align(py::module& m) {
       const Eigen::Matrix4d& init_T_target_source,
       double max_correspondence_distance,
       int num_threads,
-      int max_iterations) {
+      int max_iterations,
+      double rotation_epsilon,
+      double translation_epsilon,
+      bool verbose) {
       Registration<GICPFactor, ParallelReductionOMP> registration;
       registration.rejector.max_dist_sq = max_correspondence_distance * max_correspondence_distance;
       registration.reduction.num_threads = num_threads;
       registration.optimizer.max_iterations = max_iterations;
+      registration.criteria.rotation_eps = rotation_epsilon;
+      registration.criteria.translation_eps = translation_epsilon;
+      registration.optimizer.verbose = verbose;
 
       return registration.align(target_voxelmap, source, target_voxelmap, Eigen::Isometry3d(init_T_target_source));
     },
@@ -212,16 +258,21 @@ void define_align(py::module& m) {
     py::arg("max_correspondence_distance") = 1.0,
     py::arg("num_threads") = 1,
     py::arg("max_iterations") = 20,
+    py::arg("rotation_epsilon") = 0.1 * M_PI / 180.0,
+    py::arg("translation_epsilon") = 1e-3,
+    py::arg("verbose") = false,
     R"pbdoc(
     Align two point clouds using voxel-based GICP algorithm, utilizing a Gaussian Voxel Map.
+    Input source point cloud is assumed to be preprocessed (downsampled, normals estimated, KD-tree built).
+    See also: :class:`voxelgrid_sampling` :class:`estimate_normals` :class:`preprocess_points`
 
     Parameters
    ----------
-    target_voxelmap : GaussianVoxelMap
+    target_voxelmap : :class:`GaussianVoxelMap`
      Voxel map constructed from the target point cloud.
-    source : PointCloud
+    source : :class:`PointCloud`
      Source point cloud to align to the target.
-    init_T_target_source : NDArray[np.float64]
+    init_T_target_source : numpy.ndarray[np.float64]
      4x4 matrix representing the initial transformation from target to source.
     max_correspondence_distance : float = 1.0
      Maximum distance for corresponding point pairs.
@@ -229,10 +280,16 @@ void define_align(py::module& m) {
      Number of threads to use for computation.
     max_iterations : int = 20
      Maximum number of iterations for the optimization algorithm.
+    rotation_epsilon: double = 0.1 * M_PI / 180.0
+     Convergence criteria for rotation change
+    translation_epsilon: double = 1e-3
+     Convergence criteria for transformation change
+    verbose : bool = False
+     If True, print debug information during the optimization process.
 
     Returns
     -------
-    RegistrationResult
+    result : :class:`RegistrationResult`
      Object containing the final transformation matrix and convergence status.
     )pbdoc");
 }
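A short usage sketch of the extended `align` binding with the newly exposed convergence parameters (the random arrays stand in for real scans, and the epsilon values simply restate the defaults registered above):

```python
import numpy as np
import small_gicp

# Placeholder clouds: the source is the target shifted by 5 cm along x.
target_points = np.random.rand(1000, 3) * 10.0
source_points = target_points + np.array([0.05, 0.0, 0.0])

result = small_gicp.align(
    target_points,
    source_points,
    registration_type="GICP",
    downsampling_resolution=0.25,
    num_threads=4,
    max_iterations=50,
    rotation_epsilon=0.1 * np.pi / 180.0,  # stop when the rotation update falls below ~0.1 deg
    translation_epsilon=1e-3,              # stop when the translation update falls below 1 mm
    verbose=False,
)

print(result.converged)
print(result.T_target_source)  # 4x4 transformation from target to source
```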
@@ -20,12 +20,34 @@ using namespace small_gicp;
 
 void define_factors(py::module& m) {
   // DistanceRejector
-  py::class_<DistanceRejector>(m, "DistanceRejector", "Correspondence rejection based on the distance between points.")
+  py::class_<DistanceRejector>(
+    m,
+    "DistanceRejector",
+    R"pbdoc(
+    Correspondence rejection based on the distance between points.
+    )pbdoc")
     .def(py::init<>())
-    .def("set_max_distance", [](DistanceRejector& rejector, double dist) { rejector.max_dist_sq = dist * dist; }, py::arg("dist"), "Set the maximum distance.");
+    .def(
+      "set_max_distance",
+      [](DistanceRejector& rejector, double dist) { rejector.max_dist_sq = dist * dist; },
+      py::arg("dist"),
+      R"pbdoc(
+      Set maximum correspondence distance.
+
+      Parameters
+      ----------
+      dist : float
+        Maximum correspondence distance.
+      )pbdoc");
 
   // ICPFactor
-  py::class_<ICPFactor>(m, "ICPFactor", "ICP per-point factor")
+  py::class_<ICPFactor>(m, "ICPFactor", R"pbdoc(
+    ICP per-point factor based on the conventional point-to-point distance.
+
+    References
+    ----------
+    Zhang, "Iterative Point Matching for Registration of Free-Form Curve", IJCV1994
+    )pbdoc")
     .def(py::init<>())
     .def(
       "linearize",
@@ -49,10 +71,44 @@ void define_factors(py::module& m) {
       py::arg("T"),
       py::arg("source_index"),
       py::arg("rejector"),
-      "Linearize the factor. Returns a tuple of success, Hessian, gradient, and error.");
+      R"pbdoc(
+      Linearize the factor for the i-th source point.
+
+      Parameters
+      ----------
+      target : :class:`PointCloud`
+        Target point cloud.
+      source : PointCloud
+        Source point cloud.
+      kdtree : KdTree
+        KdTree for the target point cloud.
+      T : numpy.ndarray
+        Transformation matrix. (4x4)
+      source_index : int
+        Index of the source point.
+      rejector : DistanceRejector
+        Correspondence rejector.
+
+      Returns
+      -------
+      success: bool
+        Success flag. If false, the correspondence is rejected.
+      H : numpy.ndarray
+        Hessian matrix (6x6).
+      b : numpy.ndarray
+        Gradient vector (6,).
+      e : float
+        Error.
+      )pbdoc");
 
   // PointToPlaneICPFactor
-  py::class_<PointToPlaneICPFactor>(m, "PointToPlaneICPFactor", "Point-to-plane ICP per-point factor")
+  py::class_<PointToPlaneICPFactor>(m, "PointToPlaneICPFactor", R"pbdoc(
+    Point-to-plane ICP per-point factor based on the point-to-plane distance.
+
+    References
+    ----------
+    Zhang, "Iterative Point Matching for Registration of Free-Form Curve", IJCV1994
+    )pbdoc")
     .def(py::init<>())
     .def(
       "linearize",
@@ -76,10 +132,44 @@ void define_factors(py::module& m) {
       py::arg("T"),
       py::arg("source_index"),
      py::arg("rejector"),
-      "Linearize the factor. Returns a tuple of success, Hessian, gradient, and error.");
+      R"pbdoc(
+      Linearize the factor for the i-th source point.
+
+      Parameters
+      ----------
+      target : PointCloud
+        Target point cloud. Must have normals.
+      source : PointCloud
+        Source point cloud.
+      kdtree : KdTree
+        KdTree for the target point cloud.
+      T : numpy.ndarray
+        Transformation matrix. (4x4)
+      source_index : int
+        Index of the source point.
+      rejector : DistanceRejector
+        Correspondence rejector.
+
+      Returns
+      -------
+      success: bool
+        Success flag. If false, the correspondence is rejected.
+      H : numpy.ndarray
+        Hessian matrix (6x6).
+      b : numpy.ndarray
+        Gradient vector (6,).
+      e : float
+        Error.
+      )pbdoc");
 
   // GICPFactor
-  py::class_<GICPFactor>(m, "GICPFactor", "Generalized ICP per-point factor")  //
+  py::class_<GICPFactor>(m, "GICPFactor", R"pbdoc(
+    Generalized ICP per-point factor based on distribution-to-distribution distance.
+
+    References
+    ----------
+    Segal et al., "Generalized-ICP", RSS2005
+    )pbdoc")  //
     .def(py::init<>())
     .def(
       "linearize",
@@ -103,5 +193,33 @@ void define_factors(py::module& m) {
       py::arg("T"),
      py::arg("source_index"),
      py::arg("rejector"),
-      "Linearize the factor. Returns a tuple of success, Hessian, gradient, and error.");
+      R"pbdoc(
+      Linearize the factor for the i-th source point.
+
+      Parameters
+      ----------
+      target : PointCloud
+        Target point cloud. Must have covariances.
+      source : PointCloud
+        Source point cloud. Must have covariances.
+      kdtree : KdTree
+        KdTree for the target point cloud.
+      T : numpy.ndarray
+        Transformation matrix. (4x4)
+      source_index : int
+        Index of the source point.
+      rejector : DistanceRejector
+        Correspondence rejector.
+
+      Returns
+      -------
+      success: bool
+        Success flag. If false, the correspondence is rejected.
+      H : numpy.ndarray
+        Hessian matrix (6x6).
+      b : numpy.ndarray
+        Gradient vector (6,).
+      e : float
+        Error.
+      )pbdoc");
 }
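The per-point factor bindings now document the full `linearize` signature. A small sketch of driving one factor by hand from Python, following the documented return tuple (the random inputs and the manual accumulation loop are illustrative; normally `align` does this internally):

```python
import numpy as np
import small_gicp

target, target_tree = small_gicp.preprocess_points(np.random.rand(1000, 3), downsampling_resolution=0.25)
source, _ = small_gicp.preprocess_points(np.random.rand(1000, 3), downsampling_resolution=0.25)

factor = small_gicp.GICPFactor()
rejector = small_gicp.DistanceRejector()
rejector.set_max_distance(1.0)

T = np.eye(4)           # current transformation estimate
H_sum = np.zeros((6, 6))
b_sum = np.zeros(6)

for i in range(source.size()):
    # Returns (success, H, b, e) as documented in the new docstring.
    success, H, b, e = factor.linearize(target, source, target_tree, T, i, rejector)
    if success:
        H_sum += H
        b_sum += b
```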
@@ -17,16 +17,48 @@ using namespace small_gicp;
 void define_kdtree(py::module& m) {
   // KdTree
   py::class_<KdTree<PointCloud>, std::shared_ptr<KdTree<PointCloud>>>(m, "KdTree")  //
+    .def(
+      py::init([](const Eigen::MatrixXd& points_numpy, int num_threads) {
+        if (points_numpy.cols() != 3 && points_numpy.cols() != 4) {
+          throw std::invalid_argument("points must have shape (n, 3) or (n, 4)");
+        }
+
+        auto points = std::make_shared<PointCloud>();
+        points->resize(points_numpy.rows());
+
+        for (int i = 0; i < points_numpy.rows(); i++) {
+          points->point(i) << points_numpy(i, 0), points_numpy(i, 1), points_numpy(i, 2), 1.0;
+        }
+
+        return std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(num_threads));
+      }),
+      py::arg("points"),
+      py::arg("num_threads") = 1,
+      R"""(
+      KdTree(points: numpy.ndarray, num_threads: int = 1)
+
+      Construct a KdTree from a numpy array.
+
+      Parameters
+      ----------
+      points : :class:`numpy.ndarray`, shape (n, 3) or (n, 4)
+        The input point cloud.
+      num_threads : int, optional
+        The number of threads to use for KdTree construction. Default is 1.
+      )""")
     .def(
       py::init([](const PointCloud::ConstPtr& points, int num_threads) { return std::make_shared<KdTree<PointCloud>>(points, KdTreeBuilderOMP(num_threads)); }),
       py::arg("points"),
       py::arg("num_threads") = 1,
       R"""(
-      Construct a KdTree from a point cloud.
+      KdTree(points: PointCloud, num_threads: int = 1)
+
+      Construct a KdTree from a small_gicp.PointCloud.
+
       Parameters
       ----------
-      points : PointCloud
+      points : :class:`PointCloud`
        The input point cloud.
       num_threads : int, optional
        The number of threads to use for KdTree construction. Default is 1.
@@ -46,7 +78,7 @@ void define_kdtree(py::module& m) {
 
       Parameters
       ----------
-      pt : NDArray, shape (3,)
+      pt : numpy.ndarray, shape (3,)
        The input point.
 
       Returns
@@ -54,10 +86,11 @@ void define_kdtree(py::module& m) {
      found : int
       Whether a neighbor was found (1 if found, 0 if not).
      k_index : int
-      The index of the nearest neighbor in the point cloud.
+      The index of the nearest neighbor in the point cloud. If a neighbor was not found, the index is -1.
      k_sq_dist : float
       The squared distance to the nearest neighbor.
      )""")
 
     .def(
       "knn_search",
       [](const KdTree<PointCloud>& kdtree, const Eigen::Vector3d& pt, int k) {
@@ -73,23 +106,30 @@ void define_kdtree(py::module& m) {
 
      Parameters
      ----------
-     pt : NDArray, shape (3,)
+     pt : numpy.ndarray, shape (3,)
       The input point.
      k : int
       The number of nearest neighbors to search for.
 
      Returns
      -------
-     k_indices : NDArray, shape (k,)
-      The indices of the k nearest neighbors in the point cloud.
-     k_sq_dists : NDArray, shape (k,)
-      The squared distances to the k nearest neighbors.
+     k_indices : numpy.ndarray, shape (k,)
+      The indices of the k nearest neighbors in the point cloud. If a neighbor was not found, the index is -1.
+     k_sq_dists : numpy.ndarray, shape (k,)
+      The squared distances to the k nearest neighbors (Sorted in ascending order).
      )""")
 
     .def(
       "batch_nearest_neighbor_search",
-      [](const KdTree<PointCloud>& kdtree, const Eigen::MatrixXd& pts) {
+      [](const KdTree<PointCloud>& kdtree, const Eigen::MatrixXd& pts, int num_threads) {
+        if (pts.cols() != 3 && pts.cols() != 4) {
+          throw std::invalid_argument("pts must have shape (n, 3) or (n, 4)");
+        }
+
        std::vector<size_t> k_indices(pts.rows(), -1);
        std::vector<double> k_sq_dists(pts.rows(), std::numeric_limits<double>::max());
+
+#pragma omp parallel for num_threads(num_threads) schedule(guided, 4)
        for (int i = 0; i < pts.rows(); ++i) {
          const size_t found = traits::nearest_neighbor_search(kdtree, Eigen::Vector4d(pts(i, 0), pts(i, 1), pts(i, 2), 1.0), &k_indices[i], &k_sq_dists[i]);
          if (!found) {
@@ -97,57 +137,77 @@ void define_kdtree(py::module& m) {
            k_sq_dists[i] = std::numeric_limits<double>::max();
          }
        }
 
        return std::make_pair(k_indices, k_sq_dists);
      },
      py::arg("pts"),
+     py::arg("num_threads") = 1,
      R"""(
      Find the nearest neighbors for a batch of points.
 
      Parameters
      ----------
-     pts : NDArray, shape (n, 3)
+     pts : numpy.ndarray, shape (n, 3) or (n, 4)
       The input points.
+     num_threads : int, optional
+      The number of threads to use for the search. Default is 1.
 
      Returns
      -------
-     k_indices : NDArray, shape (n,)
-      The indices of the nearest neighbors for each input point.
-     k_sq_dists : NDArray, shape (n,)
-      The squared distances to the nearest neighbors for each input point.
+     k_indices : numpy.ndarray, shape (n, k)
+      The indices of the nearest neighbors for each input point. If a neighbor was not found, the index is -1.
+     k_sq_dists : numpy.ndarray, shape (n, k)
+      The squared distances to the nearest neighbors for each input point.
      )""")
 
     .def(
       "batch_knn_search",
-      [](const KdTree<PointCloud>& kdtree, const Eigen::MatrixXd& pts, int k) {
-        std::vector<std::vector<size_t>> k_indices(pts.rows(), std::vector<size_t>(k, -1));
-        std::vector<std::vector<double>> k_sq_dists(pts.rows(), std::vector<double>(k, std::numeric_limits<double>::max()));
+      [](const KdTree<PointCloud>& kdtree, const Eigen::MatrixXd& pts, int k, int num_threads) {
+        if (pts.cols() != 3 && pts.cols() != 4) {
+          throw std::invalid_argument("pts must have shape (n, 3) or (n, 4)");
+        }
+
+        Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> k_indices(pts.rows(), k);
+        Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> k_sq_dists(pts.rows(), k);
+        k_indices.setConstant(-1);
+        k_sq_dists.setConstant(std::numeric_limits<double>::max());
+
+#pragma omp parallel for num_threads(num_threads) schedule(guided, 4)
        for (int i = 0; i < pts.rows(); ++i) {
-          const size_t found = traits::knn_search(kdtree, Eigen::Vector4d(pts(i, 0), pts(i, 1), pts(i, 2), 1.0), k, k_indices[i].data(), k_sq_dists[i].data());
+          size_t* k_indices_begin = k_indices.data() + i * k;
+          double* k_sq_dists_begin = k_sq_dists.data() + i * k;
+
+          const size_t found = traits::knn_search(kdtree, Eigen::Vector4d(pts(i, 0), pts(i, 1), pts(i, 2), 1.0), k, k_indices_begin, k_sq_dists_begin);
          if (found < k) {
            for (size_t j = found; j < k; ++j) {
-              k_indices[i][j] = -1;
-              k_sq_dists[i][j] = std::numeric_limits<double>::max();
+              k_indices_begin[j] = -1;
+              k_sq_dists_begin[j] = std::numeric_limits<double>::max();
            }
          }
        }
 
        return std::make_pair(k_indices, k_sq_dists);
      },
      py::arg("pts"),
      py::arg("k"),
+     py::arg("num_threads") = 1,
      R"""(
      Find the k nearest neighbors for a batch of points.
 
      Parameters
      ----------
-     pts : NDArray, shape (n, 3)
+     pts : numpy.ndarray, shape (n, 3) or (n, 4)
       The input points.
      k : int
       The number of nearest neighbors to search for.
+     num_threads : int, optional
+      The number of threads to use for the search. Default is 1.
 
      Returns
      -------
-     k_indices : list of NDArray, shape (n,)
-      The list of indices of the k nearest neighbors for each input point.
-     k_sq_dists : list of NDArray, shape (n,)
-      The list of squared distances to the k nearest neighbors for each input point.
+     k_indices : list of numpy.ndarray, shape (n,)
+      The list of indices of the k nearest neighbors for each input point. If a neighbor was not found, the index is -1.
+     k_sq_dists : list of numpy.ndarray, shape (n,)
+      The list of squared distances to the k nearest neighbors for each input point (sorted in ascending order).
      )""");
 }
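With the new `num_threads` argument, the batch queries can run in parallel directly from Python. A quick sketch using the updated bindings (random clouds and query points are placeholders; shapes follow the revised docstrings):

```python
import numpy as np
import small_gicp

points = small_gicp.PointCloud(np.random.rand(10000, 3))
tree = small_gicp.KdTree(points, num_threads=4)

queries = np.random.rand(256, 3)

# Single nearest neighbor per query point, searched with 4 threads.
indices, sq_dists = tree.batch_nearest_neighbor_search(queries, num_threads=4)

# k nearest neighbors per query point; each row is sorted by ascending squared distance.
k_indices, k_sq_dists = tree.batch_knn_search(queries, k=8, num_threads=4)
print(np.asarray(k_indices).shape)  # (256, 8)
```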
@@ -22,6 +22,6 @@ void define_misc(py::module& m) {
       const auto points = read_ply(filename);
       return std::make_shared<PointCloud>(points);
     },
-    "Read PLY file",
+    "Read PLY file. This function can only read simple point clouds with XYZ properties for testing purposes. Do not use this for general PLY IO.",
     py::arg("filename"));
 }
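As the updated help string stresses, `read_ply` is only meant for simple XYZ test data. A minimal call (the file path is a placeholder):

```python
import small_gicp

# Reads only plain XYZ point clouds; use a dedicated PLY library for anything else.
points = small_gicp.read_ply("data/target.ply")  # placeholder path
print(points)  # e.g. "small_gicp.PointCloud (N points)"
```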
@@ -16,7 +16,8 @@ using namespace small_gicp;
 void define_pointcloud(py::module& m) {
   // PointCloud
   py::class_<PointCloud, std::shared_ptr<PointCloud>>(m, "PointCloud")  //
-    .def(py::init([](const Eigen::MatrixXd& points) {
+    .def(
+      py::init([](const Eigen::MatrixXd& points) {
       if (points.cols() != 3 && points.cols() != 4) {
         std::cerr << "points must be Nx3 or Nx4" << std::endl;
         return std::make_shared<PointCloud>();
@@ -35,32 +36,124 @@ void define_pointcloud(py::module& m) {
       }
 
       return pc;
-    }))  //
+    }),
+    py::arg("points"),
+    R"""(
+    PointCloud(points: numpy.ndarray)
+
+    Construct a PointCloud from a numpy array.
+
+    Parameters
+    ----------
+    points : numpy.ndarray, shape (n, 3) or (n, 4)
+      The input point cloud.
+    )""")
    .def("__repr__", [](const PointCloud& points) { return "small_gicp.PointCloud (" + std::to_string(points.size()) + " points)"; })
    .def("__len__", [](const PointCloud& points) { return points.size(); })
-   .def("empty", &PointCloud::empty, "Check if the point cloud is empty.")
-   .def("size", &PointCloud::size, "Get the number of points.")
+   .def(
+     "empty",
+     &PointCloud::empty,
+     R"pbdoc(
+     Check if the point cloud is empty
+
+     Returns
+     -------
+     empty : bool
+       True if the point cloud is empty.
+     )pbdoc")
+   .def(
+     "size",
+     &PointCloud::size,
+     R"pbdoc(
+     Get the number of points.
+
+     Returns
+     -------
+     num_points : int
+       Number of points.
+     )pbdoc")
    .def(
      "points",
      [](const PointCloud& points) -> Eigen::MatrixXd { return Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(points.points[0].data(), points.size(), 4); },
-     "Get the points as a Nx4 matrix.")
+     R"pbdoc(
+     Get the points as a Nx4 matrix.
+
+     Returns
+     -------
+     points : numpy.ndarray
+       Points.
+     )pbdoc")
    .def(
      "normals",
      [](const PointCloud& points) -> Eigen::MatrixXd { return Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(points.normals[0].data(), points.size(), 4); },
-     "Get the normals as a Nx4 matrix.")
+     R"pbdoc(
+     Get the normals as a Nx4 matrix.
+
+     Returns
+     -------
+     normals : numpy.ndarray
+       Normals.
+     )pbdoc")
    .def(
      "covs",
      [](const PointCloud& points) { return points.covs; },
-     "Get the covariance matrices.")
+     R"pbdoc(
+     Get the covariance matrices as a list of 4x4 matrices.
+
+     Returns
+     -------
+     covs : list of numpy.ndarray
+       Covariance matrices.
+     )pbdoc")
    .def(
      "point",
      [](const PointCloud& points, size_t i) -> Eigen::Vector4d { return points.point(i); },
      py::arg("i"),
-     "Get the i-th point.")
+     R"pbdoc(
+     Get the i-th point.
+
+     Parameters
+     ----------
+     i : int
+       Index of the point.
+
+     Returns
+     -------
+     point : numpy.ndarray, shape (4,)
+       Point.
+     )pbdoc")
    .def(
      "normal",
      [](const PointCloud& points, size_t i) -> Eigen::Vector4d { return points.normal(i); },
      py::arg("i"),
-     "Get the i-th normal.")
-   .def("cov", [](const PointCloud& points, size_t i) -> Eigen::Matrix4d { return points.cov(i); }, py::arg("i"), "Get the i-th covariance matrix.");
+     R"pbdoc(
+     Get the i-th normal.
+
+     Parameters
+     ----------
+     i : int
+       Index of the point.
+
+     Returns
+     -------
+     normal : numpy.ndarray, shape (4,)
+       Normal.
+     )pbdoc")
+   .def(
+     "cov",
+     [](const PointCloud& points, size_t i) -> Eigen::Matrix4d { return points.cov(i); },
+     py::arg("i"),
+     R"pbdoc(
+     Get the i-th covariance matrix.
+
+     Parameters
+     ----------
+     i : int
+       Index of the point.
+
+     Returns
+     -------
+     cov : numpy.ndarray, shape (4, 4)
+       Covariance matrix.
+     )pbdoc");
 }
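A brief interactive sketch of the accessors documented above (the random array is a placeholder for real sensor data):

```python
import numpy as np
import small_gicp

cloud = small_gicp.PointCloud(np.random.rand(100, 3))
print(len(cloud), cloud.size(), cloud.empty())  # 100 100 False

print(cloud.points().shape)  # (100, 4): homogeneous coordinates with w = 1
print(cloud.point(0))        # first point as a length-4 vector

# Normals and covariances are only meaningful after estimation.
tree = small_gicp.KdTree(cloud)
small_gicp.estimate_normals_covariances(cloud, tree)
print(cloud.normal(0), cloud.cov(0).shape)  # (4,) vector and (4, 4) matrix
```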
@ -35,9 +35,18 @@ void define_preprocess(py::module& m) {
|
||||||
R"pbdoc(
|
R"pbdoc(
|
||||||
Voxelgrid downsampling.
|
Voxelgrid downsampling.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
When multi-threading is enabled, this function has minor run-by-run non-deterministic behavior due to parallel data collection that results
|
||||||
|
in a deviation of the number of points in the downsampling results (up to 10% increase from the single-thread version).
|
||||||
|
|
||||||
|
Discretized voxel coords must be in 21bit range [-1048576, 1048575].
|
||||||
|
For example, if the downsampling resolution is 0.01 m, point coordinates must be in [-10485.76, 10485.75] m.
|
||||||
|
Points outside the valid range will be ignored.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
points : PointCloud
|
points : :class:`PointCloud`
|
||||||
Input point cloud.
|
Input point cloud.
|
||||||
resolution : float
|
resolution : float
|
||||||
Voxel size.
|
Voxel size.
|
||||||
|
|
@ -46,7 +55,7 @@ void define_preprocess(py::module& m) {
|
||||||
|
|
||||||
Returns
|
Returns
|
||||||
-------
|
-------
|
||||||
PointCloud
|
:class:`PointCloud`
|
||||||
Downsampled point cloud.
|
Downsampled point cloud.
|
||||||
)pbdoc");
|
)pbdoc");
|
||||||
|
|
||||||
|
|
@ -71,6 +80,15 @@ void define_preprocess(py::module& m) {
|
||||||
R"pbdoc(
|
R"pbdoc(
|
||||||
Voxelgrid downsampling.
|
Voxelgrid downsampling.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
When multi-threading is enabled, this function has minor run-by-run non-deterministic behavior due to parallel data collection that results
|
||||||
|
in a deviation of the number of points in the downsampling results (up to 10% increase from the single-thread version).
|
||||||
|
|
||||||
|
Discretized voxel coords must be in 21bit range [-1048576, 1048575].
|
||||||
|
For example, if the downsampling resolution is 0.01 m, point coordinates must be in [-10485.76, 10485.75] m.
|
||||||
|
Points outside the valid range will be ignored.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
points : [np.float64]
|
points : [np.float64]
|
||||||
|
|
@ -82,7 +100,7 @@ void define_preprocess(py::module& m) {
|
||||||
|
|
||||||
Returns
|
Returns
|
||||||
-------
|
-------
|
||||||
PointCloud
|
:class:`PointCloud`
|
||||||
Downsampled point cloud.
|
Downsampled point cloud.
|
||||||
)pbdoc");
|
)pbdoc");
|
||||||
|
|
||||||
|
|
@ -106,12 +124,13 @@ void define_preprocess(py::module& m) {
|
||||||
py::arg("num_threads") = 1,
|
py::arg("num_threads") = 1,
|
||||||
R"pbdoc(
|
R"pbdoc(
|
||||||
Estimate point normals.
|
Estimate point normals.
|
||||||
|
If a sufficient number of neighbor points (5 points) is not found, a zero vector is set to the point.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
points : PointCloud
|
points : :class:`PointCloud`
|
||||||
Input point cloud. Normals will be estimated in-place. (in/out)
|
Input point cloud. Normals will be estimated in-place. (in/out)
|
||||||
tree : KdTree, optional
|
tree : :class:`KdTree`, optional
|
||||||
Nearest neighbor search. If None, create a new KdTree (default: None)
|
Nearest neighbor search. If None, create a new KdTree (default: None)
|
||||||
num_neighbors : int, optional
|
num_neighbors : int, optional
|
||||||
Number of neighbors. (default: 20)
|
Number of neighbors. (default: 20)
|
||||||
|
|
@ -139,12 +158,13 @@ void define_preprocess(py::module& m) {
|
||||||
py::arg("num_threads") = 1,
|
py::arg("num_threads") = 1,
|
||||||
R"pbdoc(
|
R"pbdoc(
|
||||||
Estimate point covariances.
|
Estimate point covariances.
|
||||||
|
If a sufficient number of neighbor points (5 points) is not found, an identity matrix is set to the point.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
points : PointCloud
|
points : :class:`PointCloud`
|
||||||
Input point cloud. Covariances will be estimated in-place. (in/out)
|
Input point cloud. Covariances will be estimated in-place. (in/out)
|
||||||
tree : KdTree, optional
|
tree : :class:`KdTree`, optional
|
||||||
Nearest neighbor search. If None, create a new KdTree (default: None)
|
Nearest neighbor search. If None, create a new KdTree (default: None)
|
||||||
num_neighbors : int, optional
|
num_neighbors : int, optional
|
||||||
Number of neighbors. (default: 20)
|
Number of neighbors. (default: 20)
|
||||||
|
|
@ -172,12 +192,13 @@ void define_preprocess(py::module& m) {
|
||||||
py::arg("num_threads") = 1,
|
py::arg("num_threads") = 1,
|
||||||
R"pbdoc(
|
R"pbdoc(
|
||||||
Estimate point normals and covariances.
|
Estimate point normals and covariances.
|
||||||
|
If a sufficient number of neighbor points (5 points) is not found, a zero vector and an identity matrix is set to the point.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
points : PointCloud
|
points : :class:`PointCloud`
|
||||||
Input point cloud. Normals and covariances will be estimated in-place. (in/out)
|
Input point cloud. Normals and covariances will be estimated in-place. (in/out)
|
||||||
tree : KdTree, optional
|
tree : :class:`KdTree`, optional
|
||||||
Nearest neighbor search. If None, create a new KdTree (default: None)
|
Nearest neighbor search. If None, create a new KdTree (default: None)
|
||||||
num_neighbors : int, optional
|
num_neighbors : int, optional
|
||||||
Number of neighbors. (default: 20)
|
Number of neighbors. (default: 20)
|
||||||
|
|
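A short sketch of the in-place attribute estimation documented above (the random input, thread count, and neighbor count are placeholders):

    import numpy
    import small_gicp

    raw = numpy.random.uniform(-5.0, 5.0, (2000, 3))
    pcd = small_gicp.PointCloud(raw)
    tree = small_gicp.KdTree(pcd, num_threads=4)

    # Attributes are written into pcd; points with fewer than 5 neighbors get
    # a zero normal and an identity covariance.
    small_gicp.estimate_normals_covariances(pcd, tree, num_neighbors=20, num_threads=4)
    print(pcd.normals().shape, len(pcd.covs()))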
@@ -215,13 +236,14 @@ void define_preprocess(py::module& m) {
  py::arg("num_threads") = 1,
  R"pbdoc(
  Preprocess point cloud (Downsampling and normal/covariance estimation).
+ See also: :func:`voxelgrid_sampling`, :func:`estimate_normals_covariances`.

  Parameters
  ----------
  points : [np.float64]
    Input point cloud. Nx3 or Nx4.
  downsampling_resolution : float, optional
-   Resolution for downsampling the point clouds. (default: 0.25)
+   Resolution for downsampling the point clouds. Input points out of 21bit range after discretization are ignored (See also: :func:`voxelgrid_sampling`). (default: 0.25)
  num_neighbors : int, optional
    Number of neighbors used for attribute estimation. (default: 10)
  num_threads : int, optional

@@ -229,9 +251,9 @@ void define_preprocess(py::module& m) {
  Returns
  -------
- PointCloud
+ :class:`PointCloud`
    Downsampled point cloud.
- KdTree
+ :class:`KdTree`
    KdTree for the downsampled point cloud.
  )pbdoc");

@@ -255,13 +277,14 @@ void define_preprocess(py::module& m) {
  py::arg("num_threads") = 1,
  R"pbdoc(
  Preprocess point cloud (Downsampling and normal/covariance estimation).
+ See also: :func:`voxelgrid_sampling`, :func:`estimate_normals_covariances`.

  Parameters
  ----------
- points : PointCloud
+ points : :class:`PointCloud`
    Input point cloud.
  downsampling_resolution : float, optional
-   Resolution for downsampling the point clouds. (default: 0.25)
+   Resolution for downsampling the point clouds. Input points out of 21bit range after discretization are ignored (See also: :func:`voxelgrid_sampling`). (default: 0.25)
  num_neighbors : int, optional
    Number of neighbors used for attribute estimation. (default: 10)
  num_threads : int, optional

@@ -269,9 +292,9 @@ void define_preprocess(py::module& m) {
  Returns
  -------
- PointCloud
+ :class:`PointCloud`
    Downsampled point cloud.
- KdTree
+ :class:`KdTree`
    KdTree for the downsampled point cloud.
  )pbdoc");
}
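A compact sketch of the preprocess_points wrapper documented above, using the documented defaults (the input array is a placeholder):

    import numpy
    import small_gicp

    raw = numpy.random.uniform(-50.0, 50.0, (5000, 4))
    raw[:, 3] = 1.0

    # Downsampling plus normal/covariance estimation in one call; returns the
    # downsampled PointCloud and a KdTree built on it.
    downsampled, tree = small_gicp.preprocess_points(raw, downsampling_resolution=0.25, num_neighbors=10, num_threads=4)
    print(downsampled.points().shape)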
@@ -33,11 +33,11 @@ void define_result(py::module& m) {
  .def_property_readonly(
    "T_target_source",
    [](const RegistrationResult& result) -> Eigen::Matrix4d { return result.T_target_source.matrix(); },
-   "Final transformation matrix")
+   "NDArray[np.float64] : Final transformation matrix (4x4). This transformation brings a point in the source cloud frame to the target cloud frame.")
- .def_readonly("converged", &RegistrationResult::converged, "Convergence flag")
+ .def_readonly("converged", &RegistrationResult::converged, "bool : Convergence flag")
- .def_readonly("iterations", &RegistrationResult::iterations, "Number of iterations")
+ .def_readonly("iterations", &RegistrationResult::iterations, "int : Number of iterations")
- .def_readonly("num_inliers", &RegistrationResult::num_inliers, "Number of inliers")
+ .def_readonly("num_inliers", &RegistrationResult::num_inliers, "int : Number of inliers")
- .def_readonly("H", &RegistrationResult::H, "Final Hessian matrix")
+ .def_readonly("H", &RegistrationResult::H, "NDArray[np.float64] : Final Hessian matrix (6x6)")
- .def_readonly("b", &RegistrationResult::b, "Final information vector")
+ .def_readonly("b", &RegistrationResult::b, "NDArray[np.float64] : Final information vector (6,)")
- .def_readonly("error", &RegistrationResult::error, "Final error");
+ .def_readonly("error", &RegistrationResult::error, "float : Final error");
}
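The attribute docstrings above describe what a registration call returns. A hedged sketch of reading them (align() is called with preprocessed clouds here; the exact overload set may differ on this branch):

    import numpy
    import small_gicp

    target_raw = numpy.random.uniform(-10.0, 10.0, (3000, 3))
    source_raw = target_raw + numpy.array([0.05, 0.0, 0.0])  # known small offset

    target, target_tree = small_gicp.preprocess_points(target_raw, downsampling_resolution=0.25)
    source, source_tree = small_gicp.preprocess_points(source_raw, downsampling_resolution=0.25)

    result = small_gicp.align(target, source, target_tree)
    print(result.T_target_source)                          # 4x4, source frame -> target frame
    print(result.converged, result.iterations, result.num_inliers)
    print(result.H.shape, result.b.shape, result.error)    # 6x6 Hessian, 6-vector, final error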
@@ -18,7 +18,23 @@ using namespace small_gicp;
template <typename VoxelMap, bool has_normals, bool has_covs>
auto define_class(py::module& m, const std::string& name) {
  py::class_<VoxelMap> vox(m, name.c_str());
- vox.def(py::init<double>())
+ vox
+   .def(
+     py::init<double>(),
+     py::arg("leaf_size"),
+     R"pbdoc(
+     Construct an incremental voxelmap.
+
+     Notes
+     -----
+     This class supports incremental point cloud insertion and LRU-based voxel deletion that removes voxels that are not recently referenced.
+     It can handle an arbitrary number of voxels and an arbitrary range of voxel coordinates (in 32-bit int range).
+
+     Parameters
+     ----------
+     leaf_size : float
+       Voxel size.
+     )pbdoc")
    .def(
      "__repr__",
      [=](const VoxelMap& voxelmap) {
@@ -27,13 +43,38 @@ auto define_class(py::module& m, const std::string& name) {
        return sst.str();
      })
    .def("__len__", [](const VoxelMap& voxelmap) { return voxelmap.size(); })
-   .def("size", &VoxelMap::size, "Get the number of voxels.")
+   .def(
+     "size",
+     &VoxelMap::size,
+     R"pbdoc(
+     Get the number of voxels.
+
+     Returns
+     -------
+     num_voxels : int
+       Number of voxels.
+     )pbdoc")
    .def(
      "insert",
      [](VoxelMap& voxelmap, const PointCloud& points, const Eigen::Matrix4d& T) { voxelmap.insert(points, Eigen::Isometry3d(T)); },
      py::arg("points"),
      py::arg("T") = Eigen::Matrix4d::Identity(),
-     "Insert a point cloud.")
+     R"pbdoc(
+     Insert a point cloud into the voxel map and delete voxels that are not recently accessed.
+
+     Note
+     ----
+     If this class is based on FlatContainer (i.e., IncrementalVoxelMap*), input points are ignored if
+     1) there are too many points in the cell or
+     2) the input point is too close to existing points in the cell.
+
+     Parameters
+     ----------
+     points : :class:`PointCloud`
+       Input source point cloud.
+     T : numpy.ndarray, optional
+       Transformation matrix to be applied to the input point cloud (i.e., T_voxelmap_source). (default: identity)
+     )pbdoc")
    .def(
      "set_lru",
      [](VoxelMap& voxelmap, size_t horizon, size_t clear_cycle) {
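To make the incremental insertion described above concrete, here is a small sketch (voxel size, pose, and point counts are arbitrary):

    import numpy
    import small_gicp

    voxelmap = small_gicp.IncrementalVoxelMap(0.5)  # leaf_size = 0.5

    scan = small_gicp.PointCloud(numpy.random.uniform(-5.0, 5.0, (1000, 3)))

    # Insert the scan at the identity pose, then again at a shifted pose
    # (T maps points from the scan frame into the voxelmap frame).
    voxelmap.insert(scan)
    T = numpy.eye(4)
    T[0, 3] = 1.0
    voxelmap.insert(scan, T)

    print(len(voxelmap), voxelmap.size())  # number of voxels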
@@ -42,14 +83,30 @@ auto define_class(py::module& m, const std::string& name) {
      },
      py::arg("horizon") = 100,
      py::arg("clear_cycle") = 10,
-     "Set the LRU cache parameters.")
+     R"pbdoc(
+     Set the LRU cache parameters.
+
+     Parameters
+     ----------
+     horizon : int, optional
+       LRU horizon size. Voxels that have not been accessed for lru_horizon steps are deleted. (default: 100)
+     clear_cycle : int, optional
+       LRU clear cycle. Voxel deletion is performed every lru_clear_cycle steps. (default: 10)
+     )pbdoc")
    .def(
      "voxel_points",
      [](const VoxelMap& voxelmap) -> Eigen::MatrixXd {
        auto points = traits::voxel_points(voxelmap);
        return Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(points[0].data(), points.size(), 4);
      },
-     "Get the voxel points.");
+     R"pbdoc(
+     Get the voxel points.
+
+     Returns
+     -------
+     voxel_points : numpy.ndarray
+       Voxel points. (Nx4)
+     )pbdoc");

  if constexpr (has_normals) {
    vox.def(
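A hedged sketch of tuning the LRU behavior and reading back the stored voxels (parameter values are illustrative):

    import numpy
    import small_gicp

    raw = numpy.random.uniform(-20.0, 20.0, (2000, 3))
    pcd, tree = small_gicp.preprocess_points(raw, downsampling_resolution=0.25)

    voxelmap = small_gicp.GaussianVoxelMap(1.0)

    # Drop voxels that have not been touched for 200 insertions,
    # running the cleanup every 50 insertions.
    voxelmap.set_lru(horizon=200, clear_cycle=50)
    voxelmap.insert(pcd)

    print(voxelmap.voxel_points().shape)  # Nx4 array of voxel points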
@@ -58,7 +115,14 @@ auto define_class(py::module& m, const std::string& name) {
        auto normals = traits::voxel_normals(voxelmap);
        return Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(normals[0].data(), normals.size(), 4);
      },
-     "Get the voxel normals.");
+     R"pbdoc(
+     Get the voxel normals.
+
+     Returns
+     -------
+     voxel_normals : numpy.ndarray
+       Voxel normals. (Nx4)
+     )pbdoc");
  }

  if constexpr (has_covs) {

@@ -68,7 +132,14 @@ auto define_class(py::module& m, const std::string& name) {
        auto covs = traits::voxel_covs(voxelmap);
        return Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(covs[0].data(), covs.size(), 16);
      },
-     "Get the voxel covariance matrices.");
+     R"pbdoc(
+     Get the voxel covariance matrices.
+
+     Returns
+     -------
+     voxel_covs : list of numpy.ndarray
+       Voxel covariance matrices. (Nx4x4)
+     )pbdoc");
  }
};
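The normal/covariance getters above are only bound for voxelmap variants that store those attributes. A sketch, assuming a cloud preprocessed so that normals and covariances are available:

    import numpy
    import small_gicp

    raw = numpy.random.uniform(-5.0, 5.0, (2000, 3))
    pcd, tree = small_gicp.preprocess_points(raw, downsampling_resolution=0.25)

    voxelmap = small_gicp.IncrementalVoxelMapNormalCov(0.5)
    voxelmap.insert(pcd)

    print(voxelmap.voxel_points().shape)     # (N, 4)
    print(voxelmap.voxel_normals().shape)    # (N, 4)
    print(len(voxelmap.voxel_covs()))        # one covariance per voxel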
@@ -76,6 +147,6 @@ void define_voxelmap(py::module& m) {
  define_class<IncrementalVoxelMap<FlatContainerPoints>, false, false>(m, "IncrementalVoxelMap");
  define_class<IncrementalVoxelMap<FlatContainerNormal>, true, false>(m, "IncrementalVoxelMapNormal");
  define_class<IncrementalVoxelMap<FlatContainerCov>, false, true>(m, "IncrementalVoxelMapCov");
- define_class<IncrementalVoxelMap<FlatContainerNormalCov>, true, true>(m, "FlatContainerNormalCov");
+ define_class<IncrementalVoxelMap<FlatContainerNormalCov>, true, true>(m, "IncrementalVoxelMapNormalCov");
  define_class<GaussianVoxelMap, false, true>(m, "GaussianVoxelMap");
}
@@ -91,6 +91,7 @@ align(const PointCloud& target, const PointCloud& source, const KdTree<PointClou
    registration.criteria.rotation_eps = setting.rotation_eps;
    registration.criteria.translation_eps = setting.translation_eps;
    registration.optimizer.max_iterations = setting.max_iterations;
+   registration.optimizer.verbose = setting.verbose;
    return registration.align(target, source, target_tree, init_T);
  }
  case RegistrationSetting::PLANE_ICP: {

@@ -100,6 +101,7 @@ align(const PointCloud& target, const PointCloud& source, const KdTree<PointClou
    registration.criteria.rotation_eps = setting.rotation_eps;
    registration.criteria.translation_eps = setting.translation_eps;
    registration.optimizer.max_iterations = setting.max_iterations;
+   registration.optimizer.verbose = setting.verbose;
    return registration.align(target, source, target_tree, init_T);
  }
  case RegistrationSetting::GICP: {

@@ -109,6 +111,7 @@ align(const PointCloud& target, const PointCloud& source, const KdTree<PointClou
    registration.criteria.rotation_eps = setting.rotation_eps;
    registration.criteria.translation_eps = setting.translation_eps;
    registration.optimizer.max_iterations = setting.max_iterations;
+   registration.optimizer.verbose = setting.verbose;
    return registration.align(target, source, target_tree, init_T);
  }
  case RegistrationSetting::VGICP: {

@@ -129,6 +132,7 @@ RegistrationResult align(const GaussianVoxelMap& target, const PointCloud& sourc
    registration.criteria.rotation_eps = setting.rotation_eps;
    registration.criteria.translation_eps = setting.translation_eps;
    registration.optimizer.max_iterations = setting.max_iterations;
+   registration.optimizer.verbose = setting.verbose;
    return registration.align(target, source, target, init_T);
  }
@@ -2,6 +2,7 @@
  # SPDX-FileCopyrightText: Copyright 2024 Kenji Koide
  # SPDX-License-Identifier: MIT
  import numpy
+ from scipy.spatial import KDTree
  from scipy.spatial.transform import Rotation

  import small_gicp
@@ -188,3 +189,69 @@ def test_registration(load_points):

      result = small_gicp.align(target_voxelmap, source)
      verify_result(result.T_target_source, gt_T_target_source)
+
+
+ # KdTree test
+ def test_kdtree(load_points):
+     _, target_raw_numpy, source_raw_numpy = load_points
+
+     target, target_tree = small_gicp.preprocess_points(target_raw_numpy, downsampling_resolution=0.5)
+     source, source_tree = small_gicp.preprocess_points(source_raw_numpy, downsampling_resolution=0.5)
+
+     target_tree_ref = KDTree(target.points())
+     source_tree_ref = KDTree(source.points())
+
+     def batch_test(points, queries, tree, tree_ref, num_threads):
+         # test for batch interface
+         k_dists_ref, k_indices_ref = tree_ref.query(queries, k=1)
+         k_indices, k_sq_dists = tree.batch_nearest_neighbor_search(queries)
+         assert numpy.all(numpy.abs(numpy.square(k_dists_ref) - k_sq_dists) < 1e-6)
+         assert numpy.all(numpy.abs(numpy.linalg.norm(points[k_indices] - queries, axis=1) ** 2 - k_sq_dists) < 1e-6)
+
+         for k in [2, 10]:
+             k_dists_ref, k_indices_ref = tree_ref.query(queries, k=k)
+             k_sq_dists_ref, k_indices_ref = numpy.array(k_dists_ref) ** 2, numpy.array(k_indices_ref)
+
+             k_indices, k_sq_dists = tree.batch_knn_search(queries, k, num_threads=num_threads)
+             k_indices, k_sq_dists = numpy.array(k_indices), numpy.array(k_sq_dists)
+
+             assert(numpy.all(numpy.abs(k_sq_dists_ref - k_sq_dists) < 1e-6))
+             for i in range(k):
+                 diff = numpy.linalg.norm(points[k_indices[:, i]] - queries, axis=1) ** 2 - k_sq_dists[:, i]
+                 assert(numpy.all(numpy.abs(diff) < 1e-6))
+
+         # test for single query interface
+         if num_threads != 1:
+             return
+
+         k_dists_ref, k_indices_ref = tree_ref.query(queries, k=1)
+         k_indices2, k_sq_dists2 = [], []
+         for query in queries:
+             found, index, sq_dist = tree.nearest_neighbor_search(query[:3])
+             assert found
+             k_indices2.append(index)
+             k_sq_dists2.append(sq_dist)
+
+         assert numpy.all(numpy.abs(numpy.square(k_dists_ref) - k_sq_dists2) < 1e-6)
+         assert numpy.all(numpy.abs(numpy.linalg.norm(points[k_indices2] - queries, axis=1) ** 2 - k_sq_dists2) < 1e-6)
+
+         for k in [2, 10]:
+             k_dists_ref, k_indices_ref = tree_ref.query(queries, k=k)
+             k_sq_dists_ref, k_indices_ref = numpy.array(k_dists_ref) ** 2, numpy.array(k_indices_ref)
+
+             k_indices2, k_sq_dists2 = [], []
+             for query in queries:
+                 indices, sq_dists = tree.knn_search(query[:3], k)
+                 k_indices2.append(indices)
+                 k_sq_dists2.append(sq_dists)
+             k_indices2, k_sq_dists2 = numpy.array(k_indices2), numpy.array(k_sq_dists2)
+
+             assert(numpy.all(numpy.abs(k_sq_dists_ref - k_sq_dists2) < 1e-6))
+             for i in range(k):
+                 diff = numpy.linalg.norm(points[k_indices2[:, i]] - queries, axis=1) ** 2 - k_sq_dists2[:, i]
+                 assert(numpy.all(numpy.abs(diff) < 1e-6))
+
+     for num_threads in [1, 2]:
+         batch_test(target.points(), target.points(), target_tree, target_tree_ref, num_threads=num_threads)
+         batch_test(target.points(), source.points(), target_tree, target_tree_ref, num_threads=num_threads)
+         batch_test(source.points(), target.points(), source_tree, source_tree_ref, num_threads=num_threads)
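Outside the test, the same KdTree queries can be used directly; a hedged sketch that relies only on the calls exercised above:

    import numpy
    import small_gicp

    points = numpy.random.uniform(-10.0, 10.0, (1000, 3))
    pcd = small_gicp.PointCloud(points)
    tree = small_gicp.KdTree(pcd)

    query = numpy.array([0.0, 0.0, 0.0])
    found, index, sq_dist = tree.nearest_neighbor_search(query)   # single nearest neighbor
    indices, sq_dists = tree.knn_search(query, 5)                 # k nearest neighbors

    # Batch variants take an array of queries and can run multi-threaded.
    k_indices, k_sq_dists = tree.batch_knn_search(pcd.points(), 5, num_threads=2)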