IS = { zkontrolovano 02 Feb 2010 },
  UPDATE  = { 2009-09-29 },
  author =      {Torii, Akihiko and Havlena, Michal and Pajdla, Tomas},
  title =       {From Google Street View to 3D City Models},
  year =        {2009},
  month =       {October},
  day =         {4},
  venue =       {Kyoto, Japan},
  book_pages =  {94},
  pages =       {8},
  booktitle =   {OMNIVIS '09: 9th IEEE Workshop on Omnidirectional Vision, 
                 Camera Networks and Non-classical Cameras},
  publisher =   {IEEE Computer Society Press},
  address =     {Los Alamitos, USA},
  authorship =  {40-30-30},
  project =     {FP6-IST-027787, MSM6840770038},
  annote = {We present a structure-from-motion (SfM) pipeline for
    visual 3D modeling of a large city area using 360 deg. field of
    view Google Street View images.  The core of the pipeline combines
    the state of the art techniques such as SURF feature detection,
    tentative matching by an approximate nearest neighbour search,
    relative camera motion estimation by solving 5-pt minimal camera
    pose problem, and sparse bundle adjustment.  The robust and stable
    camera poses estimated by PROSAC with soft voting and by scale
    selection using a visual cone test bring high quality initial
    structure for bundle adjustment.  Furthermore, searching for
    trajectory loops based on co-occurring visual words and closing
    them by adding new constraints for the bundle adjustment enforce
    the global consistency of camera poses and 3D structure in the
    sequence.  We present a large-scale reconstruction computed from
    4,799 images of the Google Street View Pittsburgh Research Data
    Set.},
  keywords = {Structure from Motion, Omnidirectional Vision},
  isbn = {978-1-4244-4441-0},
  note = {CD-ROM},