@Article{mikulik_ijcv13,
  IS = { checked 23 Jun 2013 },
  UPDATE  = { 2013-06-21 },
  author     = {Mikulik, Andrej and Perdoch, Michal and 
                Chum, Ond{\v r}ej and Matas, Ji{\v r}{\' i}},
  title      = {Learning Vocabularies over a Fine Quantization},
  journal    = {International Journal of Computer Vision},
  year       = {2013},
  month      = {May},
  volume     = {103},
  number     = {1},
  pages      = {163--175},
  doi        = {10.1007/s11263-012-0600-1},
  issn       = {0920-5691},
  keywords   = {Image retrieval, Vocabulary, Feature track},
  publisher  = {Springer},
  address    = {New York, USA},
  authorship = {25-25-25-25},
  annote     = {A novel similarity measure for bag-of-words-type large-scale
    image retrieval is presented. The similarity function is learned in an
    unsupervised manner, requires no extra space over the standard
    bag-of-words method, and is more discriminative than both L2-based soft
    assignment and Hamming embedding. The novel similarity function achieves
    mean average precision superior to any result published in the
    literature on the standard Oxford 5k, Oxford 105k and Paris
    datasets/protocols. We study the effect of fine quantization and very
    large vocabularies (up to 64 million words) and show that the
    performance of specific object retrieval increases with the size of the
    vocabulary. This observation contradicts conclusions drawn in previously
    published work. We further demonstrate that large vocabularies increase
    the speed of the tf-idf scoring step (an illustrative scoring sketch
    follows this entry).},
  psurl      = {http://cmp.felk.cvut.cz/~qqmikula/publications/mikulik_ijcv12.pdf},
  url        = {http://dx.doi.org/10.1007/s11263-012-0600-1},
  project    = {GACR P103/12/2310},
  ut_isi     = {000318413500007},
}
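
The annotation's point about scoring speed is easiest to see against the
standard inverted-file tf-idf pipeline for bag-of-words retrieval. The sketch
below is illustrative only, not the authors' code; the data layout (one
{word_id: term_frequency} dict per image) and all function names are
assumptions. With a finer quantization (a larger vocabulary), each visual
word's posting list gets shorter, so the query-time accumulation loop visits
fewer (image, tf) pairs, which is the speed-up the annotation refers to.
BibTeX ignores text outside entries, so this note does not affect processing.

from collections import defaultdict
from math import log, sqrt

def build_index(db_bows):
    """db_bows: list of dicts {word_id: term_frequency}, one per database image."""
    postings = defaultdict(list)          # word_id -> [(image_id, tf), ...]
    for image_id, bow in enumerate(db_bows):
        for word_id, tf in bow.items():
            postings[word_id].append((image_id, tf))
    n_images = len(db_bows)
    idf = {w: log(n_images / len(plist)) for w, plist in postings.items()}
    # L2 norms of the database tf-idf vectors, for normalized scoring.
    norms = defaultdict(float)
    for w, plist in postings.items():
        for image_id, tf in plist:
            norms[image_id] += (tf * idf[w]) ** 2
    norms = {i: sqrt(v) for i, v in norms.items()}
    return postings, idf, norms

def score_query(query_bow, postings, idf, norms):
    """Accumulate tf-idf similarity only over posting lists of query words."""
    scores = defaultdict(float)
    for word_id, q_tf in query_bow.items():
        if word_id not in postings:
            continue
        w_idf = idf[word_id]
        # A finer vocabulary means a shorter posting list here per query word.
        for image_id, d_tf in postings[word_id]:
            scores[image_id] += (q_tf * w_idf) * (d_tf * w_idf)
    # Normalize by database-image norm; query norm is constant per query.
    return sorted(((s / (norms[i] + 1e-12), i) for i, s in scores.items()),
                  reverse=True)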