IS = { zkontrolovano 29 Sep 2009 },
  UPDATE  = { 2009-09-21 },
 author = {Zimmermann, K. and Svoboda, T. and Matas, J. },
 title = {Anytime learning for the NoSLLiP tracker},
 journal = {Image and Vision Computing, Special Issue: Perception Action Learning},
 year = {2009},
 volume = {27},
 number = {11},
 publisher = {Elsevier B.V.},
 address = {New York, USA},
 issn = {0262-8856},
 pages = {1695--1701},
 authorship = {40-30-30},
 month = oct,
 day = {2},
 doi = {10.1016/j.physletb.2003.10.07},
 internal-note = {NOTE(review): DOI above looks wrong -- the j.physletb prefix is Physics Letters B, not Image and Vision Computing, and it appears truncated; verify and replace with the correct IVC DOI},
 keywords = {computer vision, tracking, learning, real-time},
 project = {1ET101210407, 1M0567, ICT-215078 DIPLECS, FP6-IST-004176},
 annote = {We propose an anytime learning for the Sequence of Learned Linear
  Predictors (SLLiP) tracker. Since the learning might be time consuming
  for large problems, we present an anytime learning algorithm which,
  after a very short initialization period, provides a solution with
  defined precision. As SLLiP tracking requires only a fraction of the
  processing power of an ordinary PC, the learning can continue in a
  parallel background thread continuously delivering improved
  SLLiPs, i.e., faster, with lower computational complexity, with the same
  pre-defined precision.
  The proposed approach is verified on publicly-available sequences
  with approximately 12000 ground-truthed frames. The learning time is
  shown to be twenty times smaller than learning based on linear
  programming proposed in the paper that introduced the SLLiP tracker
  [TR]. Its robustness and accuracy is similar. Superiority in
  frame-rate and robustness with respect to the SIFT detector,
  Lucas-Kanade tracker and Jurie's tracker is also demonstrated.},