@InProceedings{Vojir-ICCV2013,
  IS = { checked 12 Jan 2014 },
  UPDATE  = { 2014-01-06 },
  author =      {Kristan, Matej and Pflugfelder, Roman and Leonardis, Ale{\v s} and
                 Matas, Ji{\v r}{\'\i} and Porikli, Fatih and {\v C}ehovin, Luka and
                 Nebehay, Georg and Fernandez, Gustavo and
                 Voj{\'\i}{\v r}, Tom{\'a}{\v s} and others},
  title =       {The Visual Object Tracking VOT2013 Challenge Results},
  year =        {2013},
  pages =       {98--111},
  booktitle =   {2013 IEEE International Conference on Computer Vision (ICCV 2013) Workshops},
  publisher =   {IEEE},
  address =     {Piscataway, USA},
  isbn =        {978-0-7695-5161-6},
  issn =        {1550-5499},
  book_pages =  {915},
  month =      {December},
  day =        {2},
  venue =      {Sydney, Australia},
  annote =      {Visual tracking has attracted significant attention in the last few
    decades. The recent surge in the number of publications on tracking-related
    problems has made it almost impossible to follow the developments in the
    field. One of the reasons is the lack of commonly accepted annotated
    datasets and standardized evaluation protocols that would allow objective
    comparison of different tracking methods. To address this issue, the Visual
    Object Tracking (VOT) workshop was organized in conjunction with ICCV2013.
    Researchers from academia as well as industry were invited to participate
    in the first VOT2013 challenge, which aimed at single-object visual
    trackers that do not apply pre-learned models of object appearance
    (model-free). Presented here is the VOT2013 benchmark dataset for the
    evaluation of single-object visual trackers, as well as the results
    obtained by the trackers competing in the challenge. In contrast to related
    attempts at tracker benchmarking, the dataset is labeled per frame with
    visual attributes that indicate occlusion, illumination change, motion
    change, size change and camera motion, offering a more systematic
    comparison of the trackers. Furthermore, we have designed an automated
    system for performing and evaluating the experiments. We present the
    evaluation protocol of the VOT2013 challenge and the results of a
    comparison of 27 trackers on the benchmark dataset. The dataset, the
    evaluation tools and the tracker rankings are publicly available from the
    challenge website.},
 keywords =    {tracking, short-term},
 prestige =    {international},
 authorship =  {10-10-10-10-10-10-10-10-10},
 project =     {SGS13/142/OHK3/2T/13, TACR TE01020415 V3C},
 doi =         {10.1109/ICCVW.2013.26},
}
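% A minimal LaTeX usage sketch for citing this entry. The bibliography file
% name (refs.bib) and the plain bibliography style are assumptions for
% illustration, not part of the original record.
%
%   \documentclass{article}
%   \begin{document}
%   The VOT2013 challenge results are reported in~\cite{Vojir-ICCV2013}.
%   \bibliographystyle{plain}
%   \bibliography{refs}   % refs.bib is assumed to contain the entry above
%   \end{document}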