@TechReport{Alameda-RAVEL-TR,
  IS           = { checked 23 Jan 2014 },
  UPDATE       = { 2014-01-06 },
  author       = {Alameda-Pineda, Xavier and Sanchez-Riera, Jordi and Wienke, Johannes and Franc, Vojtech and Cech, Jan and Kulkarni, Kaustubh and Deleforge, Antoine and Horaud, Radu P.},
  title        = {RAVEL: An Annotated Corpus for Training Robots with Audiovisual Abilities},
  institution  = {INRIA Rh\^{o}ne-Alpes},
  number       = {7709},
  month        = {July},
  year         = {2012},
  pages        = {20},
  address      = {Montbonnot Saint-Martin, Grenoble, France},
  url          = {http://perception.inrialpes.fr/Publications/2012/ASWFCKDH12a},
  ANNOTE       = {We introduce Ravel (Robots with Audiovisual
    Abilities), a publicly available data set that covers examples of
    Human-Robot Interaction (HRI) scenarios. These scenarios were
    recorded using the audio-visual robot head POPEYE, equipped with
    two cameras and four microphones, two of which are plugged into
    the ears of a dummy head. All the recordings were performed in a
    standard room with no special equipment, thus providing a
    challenging indoor scenario. This data set provides a basis to
    test and benchmark methods and algorithms for audio-visual scene
    analysis, with the ultimate goal of enabling robots to interact
    with people in the most natural way. The data acquisition setup,
    sensor calibration, data annotation, and data content are fully
    detailed. Moreover, three examples of using the recorded data are
    provided, illustrating its suitability for carrying out a wide
    variety of HRI experiments. The Ravel data are publicly available
    at: http://ravel.humavips.eu/.},
  keywords     = {audio-visual database, human-robot interaction},
  project      = {FP7-ICT-247525 HUMAVIPS},
}