@inproceedings{7405262be87247b08fec60810679dd81,
  title     = {Visual Odometry for Indoor Mobile Robot by Recognizing Local {Manhattan} Structures},
  abstract  = {In this paper, we propose a novel 3-DOF visual odometry method to estimate the location and pose (yaw) of a mobile robot when the robot is navigating indoors. Particularly, we mainly aim at dealing with the corridor-like scenarios where the RGB-D camera mounted on the robot can capture apparent planar structures such as floor or walls. The novelties of our method lie in two-folds. First, to fully exploit the planar structures for odometry estimation, we propose a fast plane segmentation scheme based on efficiently extracted inverse-depth induced histograms. This training-free scheme can extract dominant planar structures by only exploiting the depth image of the RGB-D camera. Second, we regard the global indoor scene as a composition of some local Manhattan-like structures. At any specific location, we recognize at least one local Manhattan coordinate frame based on the detected planar structures. Pose estimation is realized based on the alignment of the camera coordinate frame to one dominant local Manhattan coordinate frame. Knowing pose information, the location estimation is carried out by a combination of a one-point RANSAC method and the ICP algorithm depending on the number of point matches available. We evaluate our work extensively on real-world data, the experimental result shows the promising performance in term of accuracy and robustness.},
  author    = {Hou, Zhixing and Ding, Yaqing and Wang, Ying and Yang, Hang and Kong, Hui},
  year      = {2019},
  month     = may,
  day       = {26},
  doi       = {10.1007/978-3-030-20873-8_11},
  language  = {English},
  isbn      = {978-3-030-20872-1},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  booktitle = {Computer Vision -- {ACCV} 2018},
  address   = {Cham},
  note      = {14th Asian Conference on Computer Vision (ACCV 2018), ACCV 2018 ; Conference date: 02-12-2018 Through 06-12-2018},
}