@PhDThesis{Jancosek-TR-2014-13,
  IS = { zkontrolovano 11 Jan 2016 },
  UPDATE  = { 2014-10-27 },
  author =       {Jan{\v c}o{\v s}ek, Michal and Pajdla, Tom{\'a}{\v s}},
  supervisor =   {Tom{\'a}{\v s} Pajdla},
  c_title =      {Rekonstrukce povrchu velkych scen na zaklade viditelnosti},
  title =        {Large Scale Surface Reconstruction based on Point Visibility},
  school =       {Center for Machine Perception, K13133 FEE Czech Technical
                  University},
  address =      {Prague, Czech Republic},
  year =         {2015},
  month =        jun,
  day =          {1},
  type =         {{PhD Thesis CTU--CMP--2014--13}},
  issn =         {1213-2365},
  pages =        {137},
  figures =      {69},
  authorship =   {100},
  psurl =        {[Jancosek-TR-2014-13.pdf]},
  project =      {FP7-SPACE-312377 ProViDe, FP7-SPACE-218814 PRoVisG,
                  FP7-SPACE-241523 proVIScout, FP6-IST-027787 DIRAC,
                  MSM6840770038, CTU0908713, TACR TA02011275 ATOM,
                  SGS10/186/OHK3/2T/13},
  annote =       {The problem of 3D surface reconstruction from
                  calibrated images or laser-scans is well studied in
                  computer vision. There are two main sub-problems
                  that are studied. It is the problem of depth-map
                  computation from images and the problem of
                  depth-maps fusion.  In this work, we present several
                  contributions to both sub-problems.  First, we
                  present methods that contribute to the sub-problem
                  of depth-map computation from images. In
                  particular we describe an effective seed
                  construction method for 3D reconstruction which
                  starts with initial estimates of seed position,
                  improves them and computes good estimates of
                  normals. Next, to avoid searching for optimal
                  surface position and orientation based on
                  nondiscriminative texture, we (over)segment images
                  into segments of low variation of color and
                  intensity and use each segment to generate a
                  candidate 3D planar patch explaining the underlying 3D
                  surface. We use the effective seed construction
                  method to improve the candidate 3D planar patch. The
                  method further improves, filters and combines the 3D
                  planar patches to produce the resulting 3D surface
                  reconstruction. Finally, we present a scalable
                  multi-view stereo reconstruction method which can
                  deal with a large number of large unorganized images
                  in affordable time and effort. The computational
                  effort of our technique is a linear function of the
                  surface area of the observed scene which is
                  conveniently discretized to represent sufficient but
                  not excessive detail. Our technique works as a
                  filter on a limited number of images at a time and
                  can thus process arbitrarily large data sets using
                  limited memory.  Second, we present methods that
                  contribute to the sub-problem of depth-maps fusion.
                  We compute input points augmented with visibility
                  information from the input depth-maps. We observe
                  that it is even possible to reconstruct a surface
                  that does not contain input points. Instead of
                  modeling the surface from input points, we model
                  free space from visibility information of the input
                  points. The complement of the modeled free space is
                  considered as full space. The surface occurs at
                  interface between the free and the full space. We
                  show that under certain conditions a part of the
                  full space surrounded by the free space must contain
                  a real object also when the real object does not
                  contain any input points, i.e., an occluder reveals
                  itself through occlusion. Our key contribution is
                  the proposal of a new interface classifier that can
                  also detect the presence of an occluder in the scene
                  just from the visibility of input points. To be
                  practical, we assume that the occluder surface
                  contains a reasonable number of input points in
                  order to be able to approximately reconstruct the
                  occluder's surface i.e., weakly-supported surface. We
                  use the interface classifier to modify a
                  state-of-the-art surface reconstruction method so
                  that it gains the ability to reconstruct
                  weakly-supported surfaces.  Finally, we present
                  methods that contribute to the problem of
                  hallucinations removal from 3D surface
                  reconstruction and to the problem of globally
                  optimal large-scale 3D surface reconstruction by
                  parts. },
  keywords =     {large scale, surface reconstruction, 3D
                  reconstruction, multi-view stereo, weakly-supported
                  surfaces, hallucinations removal, depth-map fusion,
                  plane-sweeping},
}