IS = { zkontrolovano 15 Jan 2013 },
  UPDATE  = { 2011-08-01 },
 author =	 {{\v S}ochman, Jan},
 supervisor =	 {Matas, Ji{\v r}{\'\i}},
 title =	 {Learning for Sequential Classification},
 school =	 {Center for Machine Perception, 
                  K13133 FEE Czech Technical University},
 address =	 {Prague, Czech Republic},
 year =	 	 {2009},
 month =	 dec,
 day =		 {2},
 type =	 	 {{PhD} Thesis {CTU--CMP--2009--24}},
 issn =	 	 {1213-2365},
 pages =	 {85},
 figures =	 {31},
 authorship =	 {100},
 psurl =	 {[Sochman-TR-2009-24.pdf]},
 project =	 {CTU 0307313, FP6-IST-027113, Dur IG2003-2 062},
 annote =	 {In many computer vision classification problems,
    both the error rates and the evaluation time characterise the
   quality of a decision. Yet most of the learning algorithms do not
   optimise the evaluation time explicitly. We show that such learning
   problems can be formalised in the framework of sequential
    decision-making. The proposed formalisation has already been
    studied (without learning) by Wald in the 1940s, who proved that
   the optimal sequential strategy in terms of the shortest average
   time to decision (number of measurements used) is the sequential
    probability ratio test (SPRT). We build on the SPRT and
    enlarge its capabilities to problems with dependent
    measurements. We show how the limitations of SPRT to a priori
   ordered measurements and known joint probability density functions
   can be overcome. We propose a learning algorithm, called WaldBoost,
   which handles the evaluation time vs. error rate trade-off during
   the learning, which integrates the AdaBoost learning algorithm for
   measurement selection and ordering and the joint probability
   density estimation, with the optimal SPRT decision strategy. The
   WaldBoost algorithm is tested on the face detection problem. The
    achieved detection results are superior to the state of the art
   methods in the average evaluation time and comparable in the
   detection rates. Next, we show how existing (slow) binary decision
   algorithms can be approximated by a (fast) trained WaldBoost
   classifier. The WaldBoost learning is used to minimise the decision
   time of the emulated algorithm while guaranteeing predefined
   approximation precision. Moreover, we show that the WaldBoost
   algorithm together with bootstrapping is able to efficiently handle
   an effectively unlimited number of training examples provided by
   the implementation of the approximated algorithm. Two interest
   point detectors, the Hessian-Laplace and the Kadir-Brady saliency
   detectors, are emulated to demonstrate the approach. Experiments
   show that while the repeatability and matching scores are similar
   for the original and emulated algorithms, a 9-fold speed-up for the
   Hessian-Laplace detector and a 142-fold speed-up for the
   Kadir-Brady detector is achieved. For the Hessian-Laplace detector,
   the achieved speed is similar to SURF, a popular and very fast
   handcrafted modification of Hessian-Laplace; the WaldBoost emulator
   approximates the output of the Hessian-Laplace detector more
   precisely. Finally, an on-line learning version of the WaldBoost
    algorithm is proposed. On-line boosting makes it possible to adapt
    a trained classifier to changing environmental conditions or to use
   sequentially available training data. Yet, two important problems
   in the on-line boosting training remain unsolved: (i) classifier
   evaluation speed optimisation and, (ii) automatic classifier
   complexity estimation. We show how the on-line boosting can be
   combined with Wald's sequential decision theory to solve both of
   the problems. The properties of the proposed on-line WaldBoost
   algorithm are demonstrated on a visual tracking problem. The
   complexity of the classifier is changing dynamically depending on
   the difficulty of the problem. On average, a speedup of a factor of
   5-10 is achieved compared to the non-sequential on-line boosting.},
 keywords =	 {sequential decision making, AdaBoost, 
    sequential probability ratio test, WaldBoost, machine learning, 
   face detection, interest point detection, tracking},