% Stray heading artifacts from an HTML/export conversion (h1-h6); preserved as
% comments so they cannot be mistaken for bibliographic data. Safe to delete.
% h1
% h2
% h3
% h4
% h5
% h6
% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

% PhD thesis record exported from the RWTH Aachen publication server
% (record 980084). Non-standard fields (othercontributors, reportid, cin,
% ddc, cid, pnm, pid, typ) are repository metadata; BibTeX ignores unknown
% field names, so they are kept verbatim.
@PHDTHESIS{Ibing:980084,
      author       = {Ibing, Moritz},
      othercontributors = {Kobbelt, Leif and Nießner, Matthias},
      title        = {Localized control over the latent space of neural
                      networks},
      school       = {RWTH Aachen University},
      type         = {Dissertation},
      address      = {Aachen},
      publisher    = {RWTH Aachen University},
      reportid     = {RWTH-2024-02081},
      pages        = {1 Online-Ressource : Illustrationen},
      year         = {2023},
      note         = {Veröffentlicht auf dem Publikationsserver der RWTH Aachen
                      University 2024; Dissertation, RWTH Aachen University, 2023},
      abstract     = {Neural networks (NNs) are prevalent today when it comes to
                      analyzing (classifying, segmenting, detecting, etc.) or
                      generating data in all kinds of modalities (text, images, 3D
                      shapes, etc.). They are so useful in these areas, because
                      they have great representation power, while being easy to
                      optimize and generalizing well to unseen data. However,
                      their complexity makes them hard to interpret and modify.
                      Neural networks are usually used to compute a mapping
                      between the data space and a so-called latent space. Often
                      we are interested in local properties of such a mapping. For
                      example, we might want to slightly change the embedding of a
                      data point to achieve a different classification. Such local
                      modifications however are difficult, as NNs usually have
                      globally entangled properties. In this work we will propose
                      ideas how to deal with this problem. Local control is
                      especially of importance for shape representations. It has
                      been shown that NNs are well suited to represent these e.g.
                      as parametric or implicit functions. However, when a global
                      function is used, local supervision is hard to model. We
                      therefore impose additional structure on the latent space of
                      functional representations, making them easier to work with
                      and more expressive. Such a structured representation makes
                      downstream tasks easier, as we are more versatile regarding
                      the shapes we can represent, we can make use of its
                      regularity for the network design, and it allows a
                      compressed encoding that can help to reduce memory
                      consumption. Our focus will be on general shape generation,
                      but we will also present more specific applications like
                      shape completion or super-resolution among others. Our
                      approaches set the state-of-the-art among generative models
                      both in previously used metrics and a newly introduced
                      measure we adapt for this purpose.},
      cin          = {122310 / 120000},
      ddc          = {004},
      cid          = {$I:(DE-82)122310_20140620$ / $I:(DE-82)120000_20140620$},
      pnm          = {ACROSS - 3D Reconstruction and Modeling across Different
                      Levels of Abstraction (340884)},
      pid          = {G:(EU-Grant)340884},
      typ          = {PUB:(DE-HGF)11},
      doi          = {10.18154/RWTH-2024-02081},
      url          = {https://publications.rwth-aachen.de/record/980084},
}