IS = { zkontrolovano 31 Dec 2009 },
  UPDATE  = { 2009-12-31 },
  author =    {Franc, Vojt{\v{e}}ch and Savchynskyy, Bogdan},
  title =     {Discriminative Learning of Max-Sum Classifiers},
  journal =   {Journal of Machine Learning Research},
  year =      {2008},
  number =    {1},
  publisher = {Microtome Publishing},
  address =   {Brookline},
  issn =      {1532-4435},
  volume =    {9},
  month =     jan,
  pages =     {67--104},
  psurl = {[Franc-MaxSum-JMLR08.pdf]},
  annote = {The max-sum classifier predicts $n$-tuple of labels from
    $n$-tuple of observable variables by maximizing a sum of quality
    functions defined over neighbouring pairs of labels and observable
    variables.  Predicting labels as MAP assignments of a Markov
    Random Field is a particular example of the max-sum classifier.
    Learning parameters of the max-sum classifier is a challenging
    problem because even computing the response of such classifier is
    NP-complete in general. Estimating parameters using the Maximum
    Likelihood approach is feasible only for a subclass of max-sum
    classifiers with an acyclic structure of neighbouring
    pairs. Recently, the discriminative methods represented by the
    perceptron and the Support Vector Machines, originally designed
    for binary linear classifiers, have been extended for learning
    some subclasses of the max-sum classifier. Besides the max-sum
    classifiers with the acyclic neighbouring structure, it has been
    shown that the discriminative learning is possible even with
    arbitrary neighbouring structure provided the quality functions
    fulfill some additional constraints. In this article, we extend
    the discriminative approach to other three classes of max-sum
    classifiers with an arbitrary neighbourhood structure. We derive
    learning algorithms for two subclasses of max-sum classifiers
    whose response can be computed in polynomial time: (i) the max-sum
    classifiers with supermodular quality functions and (ii) the
    max-sum classifiers whose response can be computed exactly by a
    linear programming relaxation. Moreover, we show that the learning
    problem can be approximately solved even for a general max-sum
    classifier.},
  keywords = {Max-Sum classifier, hidden Markov networks,
              support vector machines},