@inproceedings{Aldana-IAR14,
  IS = { checked 19 Jan 2015 },
  UPDATE  = { 2015-01-19 },
year={2014},
isbn={978-3-319-11757-7},
booktitle={Image Analysis and Recognition: 11th International Conference (ICIAR 2014)},
book_pages = {518},
venue = {Vilamoura, Algarve, Portugal},
month = {October},
day = {22-24},
series={Lecture Notes in Computer Science},
volume = {8814},
editor={Campilho, Aur{\'e}lio and Kamel, Mohamed},
doi={10.1007/978-3-319-11758-4_46},
title={Relevance Assessment for Visual Video Re-ranking},
url={http://dx.doi.org/10.1007/978-3-319-11758-4_46},
publisher={Springer International Publishing},
address = {Cham, Switzerland},
keywords={video re-ranking, object detection, wide-baseline stereo matching, video retrieval, visual verification},
author={Aldana-Iuit, Javier and Chum, Ond{\v r}ej and Matas, Ji{\v r}{\'\i}},
pages={421--430},
language={English},
annote={The following problem is considered: given a name or phrase specifying
an object, images and videos possibly depicting the object are collected from
the internet using a textual query on their name or annotation. A visual model
is built from the images and used to rank the videos by relevance to the object
of interest. Shot relevance is defined as the duration of the visibility of the
object of interest. The model is based on local image features; the relevant
shot detection builds on wide-baseline stereo matching. The method is tested on
10 text phrases corresponding to 10 landmarks. The pool of 100 videos collected
by querying YouTube includes seven relevant videos for each landmark. The
implementation runs faster than real time, at 208 frames per second. Averaged
over the set of landmarks, the method achieves a mean precision of 0.65 at
recall 0.95 and a mean Average Precision (mAP) of 0.92. (A simplified sketch of
the ranking step follows this entry.)},
authorship={34,33,33},
affiliation = {13133-13133-13133},
Project = {GACR P103/12/2310,SGS13/142/OHK3/2T/13,GACR P103/12/G084},
}
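
The annotation above ranks videos by a relevance score equal to the duration for
which the queried object is visible. Below is a minimal sketch of that ranking
idea only, not the authors' implementation; the frame-level test object_visible
is a hypothetical stand-in for the paper's wide-baseline matching of local
features against the image-based model, and the video/frame representation is
assumed for illustration.

from typing import Callable, Iterable, List, Tuple

def visibility_duration(frames: Iterable, fps: float,
                        object_visible: Callable[[object], bool]) -> float:
    # Seconds during which the object is judged visible in the video.
    visible = sum(1 for frame in frames if object_visible(frame))
    return visible / fps

def rank_videos(videos: List[Tuple[str, List, float]],
                object_visible: Callable[[object], bool]) -> List[Tuple[str, float]]:
    # Score each (video_id, frames, fps) triple by estimated visibility
    # duration and return the list sorted by decreasing relevance.
    scored = [(video_id, visibility_duration(frames, fps, object_visible))
              for video_id, frames, fps in videos]
    return sorted(scored, key=lambda item: item[1], reverse=True)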