@inproceedings{Gandhi-ICRA-2012,
  is           = { zkontrolovano 23 Jan 2014 },
  update       = { 2014-01-06 },
  author       = {Gandhi, Vineet and Cech, Jan and Horaud, Radu P.},
  title        = {High-Resolution Depth Maps Based on {TOF}-Stereo Fusion},
  booktitle    = {Proceedings of the IEEE International Conference on Robotics and Automation},
  pages        = {4742--4749},
  day          = {14--18},
  month        = may,
  year         = {2012},
  doi          = {10.1109/ICRA.2012.6224771},
  publisher    = {IEEE Robotics and Automation Society},
  address      = {Piscataway, USA},
  book_pages   = {5436},
  editor       = {Parker, Lynne},
  isbn         = {978-1-4673-1403-9},
  venue        = {St Paul, Minnesota, USA},
  url          = {http://perception.inrialpes.fr/Publications/2012/GCH12},
  annote       = {The combination of range sensors with color cameras
                  can be very useful for robot navigation, semantic
                  perception, manipulation, and telepresence. Several
                  methods of combining range- and color-data have been
                  investigated and successfully used in various
                  robotic applications. Most of these systems suffer
                  from the problems of noise in the range-data and
                  resolution mismatch between the range sensor and the
                  color cameras, since the resolution of current range
                  sensors is much less than the resolution of color
                  cameras. High-resolution depth maps can be obtained
                  using stereo matching, but this often fails to
                  construct accurate depth maps of weakly/repetitively
                  textured scenes, or if the scene exhibits complex
                  self-occlusions. Range sensors provide coarse depth
                  information regardless of presence/absence of
                  texture. The use of a calibrated system, composed of
                  a time-of-flight (TOF) camera and of a stereoscopic
                  camera pair, allows data fusion thus overcoming the
                  weaknesses of both individual sensors. We propose a
                  novel TOF-stereo fusion method based on an efficient
                  seed-growing algorithm which uses the TOF data
                  projected onto the stereo image pair as an initial
                  set of correspondences. These initial ``seeds'' are
                  then propagated based on a Bayesian model which
                  combines an image similarity score with rough depth
                  priors computed from the low-resolution range
                  data. The overall result is a dense and accurate
                  depth map at the resolution of the color cameras at
                  hand. We show that the proposed algorithm
                  outperforms 2D image-based stereo algorithms and
                  that the results are of higher resolution than
                  off-the-shelf color-range sensors, e.g.,
                  Kinect. Moreover, the algorithm potentially exhibits
                  real-time performance on a single CPU.},
  keywords     = {time-of-flight camera, TOF, stereo matching, seed growing},
}