% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
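%
% For reference, a minimal biblatex/biber preamble that processes this UTF-8
% entry might look as follows (a sketch; the .bib file name is illustrative):
%
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   ...
%   \cite{Mllers:229326}
%   ...
%   \printbibliography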

@PHDTHESIS{Mllers:229326,
      author       = {Möllers, Maximilian Heinrich Gerhard},
      othercontributors = {Borchers, Jan Oliver},
      title        = {{C}apturing the expressiveness of touch : detect, improve,
                      and extend},
      address      = {Aachen},
      reportid     = {RWTH-CONV-144296},
      pages        = {XXIII leaves, 115 pages : illustrations, diagrams},
      year         = {2013},
      note         = {Examination year: 2013. - Publication year: 2014; Aachen,
                      Techn. Hochsch., Diss., 2013},
      abstract     = {Human-Computer Interaction always tries to reduce user
                      input to a very small amount of information (here, touch
                      to a 2D point). This allows for easy input processing and easy
                      systems development because the state machine has fewer
                      transitions. It can also help the user, because a simpler UI
                      can be easier to learn. However, we are missing out on a lot
                      of expressiveness in the input from the user, and we will
                      show how we could capture this expressiveness and how we
                      could use it to enable more precise input and more natural
                      interfaces. Touch is a means of communicating user intent,
                      and we will show throughout this thesis that a
                      simplification to a 2D point is a significant bottleneck for
                      the interaction between a human and a computer, both on a
                      micro and a macro level, and what we should do instead to
                      grasp the user’s intent and support more natural
                      interactions. As an introduction, we take a close look at
                      what happens before a touch is performed and how this touch
                      is then typically interpreted. Our first project presents
                      a technical solution for obtaining very precise touch
                      information even on large tabletops, which is necessary
                      for any deeper analysis of touches. We then show how, in
                      touch sequences, one touch is affected by its
                      predecessor, and how we can exploit this systematic error
                      to improve touch accuracy. We also show
                      extensions of current touch models to take into account body
                      posture, relative location, etc. when interacting with
                      larger tabletops. User location also plays an important role
                      if we extend our direct manipulation surface to a 3D
                      display. We show that direct manipulation conflicts with
                      perspective-correct 3D rendering and how we can resolve
                      this conflict to achieve error-free direct manipulation. So
                      far, we have only covered flat, horizontal surfaces, but
                      touches can be performed on any surface, and actually,
                      people have more non-flat and/or mobile input devices at
                      their disposal than tabletops: keyboards, mice, smartphones,
                      remote controls, etc. As in our tabletop section, we
                      start out with a technical solution to detect touches on
                      arbitrary objects in 3D space. This raw touch data, however,
                      contains a lot of misleading information, because contact
                      with the user’s palm or with fingers that are merely
                      holding a mobile device creates the same input as an
                      intentional touch. To
                      reduce this problem, we propose an algorithm that infers the
                      hand posture from touch data on an arbitrary object, making
                      it easier to understand the user’s intent. To conclude
                      this thesis, we extend the existing touch-based GUI
                      metaphor to support ad hoc interactions with arbitrary
                      objects. We show how we can repurpose everyday objects as
                      input controllers and remove the need for dedicated
                      input devices to control our computers.},
      keywords     = {Mensch-Maschine-Schnittstelle (SWD) / Touchscreen (SWD) /
                      Benutzeroberfläche (SWD)},
      cin          = {120000 / 122710},
      ddc          = {004},
      cid          = {$I:(DE-82)120000_20140620$ / $I:(DE-82)122710_20140620$},
      shelfmark    = {H.5.2},
      typ          = {PUB:(DE-HGF)11},
      urn          = {urn:nbn:de:hbz:82-opus-49650},
      url          = {https://publications.rwth-aachen.de/record/229326},
}