IS = { zkontrolovano 28 Mar 2015 },
  UPDATE  = { 2015-03-28 },
  author     = {Torii, Akihiko and Dong, Yafei and Okutomi, Masatoshi
                  and Sivic, Josef and Pajdla, Tomas},
  title      = {Efficient Localization of Panoramic Images Using Tiled
                  Image Descriptors},
  year       = {2014},
  pages      = {58--62},
  journal    = {IPSJ Transactions on Computer Vision and Applications},
  editor     = {Yagi, Yasushi},
  publisher  = {Information Processing Society of Japan},
  address    = {Tokyo, Japan},
  issn = {1882-6695},
  volume     = {6},
  month      = jul,
  keywords   = {visual place recognition, bag of visual words and VLAD
                  image representation, panorama image localization},
  annote   = {We seek to localize a query panorama with a wide field
                  of view given a large database of street-level geo-
                  tagged imagery. This is a challenging task because
                  of significant changes in appearance due to
                  viewpoint, season, occluding people or newly
                  constructed buildings. An additional key challenge
                  is the computational and memory efficiency due to
                  the planet-scale size of the available geotagged
                  image databases. The contributions of this paper are
                  two-fold. First, we develop a compact image
                  representation for scalable retrieval of panoramic
                  images that represents each panorama as an ordered
                  set of vertical image tiles. Two panoramas are
                  matched by efficiently searching for their optimal
                  horizontal alignment, while respecting the tile
                  ordering constraint. Second, we collect a new
                  challenging query test dataset from Shibuya, Tokyo
                  containing more than a thousand panoramic and
                  perspective query images with manually verified
                  ground truth geolocation. We demonstrate significant
                  improvements of the proposed method compared to the
                  standard bag-of-visual-words and VLAD baselines.},
  project    = {FP7-SPACE-312377 PRoViDE},
  authorship = {20-20-20-20-20},