% (Removed stray "h1"–"h6" heading artifacts left over from the HTML export
% of the repository record page; they carried no bibliographic content.)
% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

% PhD thesis record exported from the RWTH Aachen publication server.
% Fixes vs. export: added the required `school` field (mandatory for
% @phdthesis; recovered from the publisher/note fields), and replaced
% single-letter case protection ({T}he, {N}ash, ...) with whole-word braces
% on proper nouns only ({Nash}, {Pareto}) so styles can apply sentence
% casing without kerning/hyphenation damage.
% Non-standard fields (othercontributors, reportid, cin, cid, typ) are
% repository metadata; BibTeX ignores unknown fields, so they are kept.
@phdthesis{Rglin:50046,
      author       = {Röglin, Heiko},
      othercontributors = {Vöcking, Berthold},
      title        = {The Complexity of {Nash} Equilibria, Local Optima, and
                      {Pareto}-Optimal Solutions},
      school       = {RWTH Aachen University},
      address      = {Aachen},
      publisher    = {Publikationsserver der RWTH Aachen University},
      reportid     = {RWTH-CONV-112610},
      pages        = {160 S. : graph. Darst.},
      year         = {2008},
      note         = {Zusammenfassung in engl. und dt. Sprache; Aachen, Techn.
                      Hochsch., Diss., 2008},
      abstract     = {An instance of a combinatorial optimization problem is
                      usually described by an objective function that is to be
                      optimized over a set of feasible solutions. The decisions
                      that economic entities face every day are more complex for
                      various reasons: In many situations, there is more than one
                      objective and one is rather interested in finding the most
                      appropriate trade-off than in optimizing a single criterion.
                      Further complications arise when decisions are made by
                      selfish agents instead of being centrally coordinated, and
                      even decisions that can be modeled as combinatorial
                      optimization problems are often intractable under standard
                      complexity-theoretic assumptions. These difficulties gave
                      rise to a variety of solution concepts, including
                      Pareto-optimal solutions, Nash equilibria, and local optima,
                      which are the topic of this thesis. When it comes to the
                      allocation of scarce resources, decisions are often made by
                      selfish agents. The predominant solution concept in such
                      situations is that of a Nash equilibrium, which is a state
                      in which no agent can benefit from unilaterally changing her
                      strategy. We consider congestion games and two-sided
                      markets, two extensively studied models for resource
                      allocation among selfish agents. For congestion games, we
                      analyze the complexity of computing a pure Nash equilibrium,
                      and we present a new approach that enables us to prove
                      PLS-hardness results for different classes of congestion
                      games. Our approach also yields a short proof for the
                      PLS-completeness of network congestion games for directed
                      and undirected networks. In two-sided markets, pure Nash
                      equilibria can be computed efficiently, but many real-life
                      markets lack a central authority to match agents. This
                      motivates the study of the processes that take place when
                      players consecutively improve their strategies. We
                      demonstrate that coordination is necessary by constructing
                      examples on which uncoordinated agents need in expectation
                      exponentially many steps to reach an equilibrium if they
                      improve their strategies in a random order. We conclude the
                      part about resource allocation among selfish agents by
                      introducing a natural class of resource sharing games that
                      both extends congestion games and two-sided markets. In the
                      second part of this dissertation, we consider optimization
                      problems with two criteria. Solutions that are optimal in
                      the sense that no criterion can be improved without
                      deteriorating the other one are called Pareto-optimal, and
                      the set of Pareto-optimal solutions is an important solution
                      concept for bicriteria optimization problems as it helps to
                      filter out unreasonable trade-offs. Even though in practice
                      for most bicriteria problems the number of Pareto-optimal
                      solutions is typically small, for almost all bicriteria
                      problems, instances exist with an exponentially large Pareto
                      set. The discrepancy between theory and practice arises
                      because worst-case results are overly pessimistic as inputs
                      occurring in practice are often very different from
                      worst-case instances. We study the number of Pareto-optimal
                      solutions in the framework of smoothed analysis, which is a
                      hybrid of worst-case and average-case analysis, in which an
                      adversary specifies an instance that is subsequently
                      slightly perturbed at random. We prove an almost tight
                      polynomial bound on the expected number of Pareto-optimal
                      solutions for general bicriteria integer optimization
                      problems. For certain problems such as the bicriteria
                      spanning tree problem, there are no algorithms known for
                      enumerating the set of Pareto-optimal solutions efficiently
                      in its size. We present a method that allows us to enumerate
                      the set of Pareto-optimal solutions for semi-random inputs
                      in expected polynomial time with a small failure probability
                      for all problems for which this set can be enumerated in
                      pseudopolynomial time. Local search is not only important
                      because computing pure Nash equilibria can be phrased as a
                      local search problem, but it also plays a crucial role in
                      the design of heuristics for various NP-hard optimization
                      problems. In particular, for the famous traveling
                      salesperson problem, local search heuristics like 2-Opt
                      achieve amazingly good results on real-world instances both
                      with respect to running time and approximation ratio. We
                      present, for every p, a family of two-dimensional Lp
                      instances on which 2-Opt can take an exponential number of
                      steps. In order to explain the discrepancy between this
                      worst-case result and the observations in practice, we
                      analyze 2-Opt in the framework of smoothed analysis. We show
                      that the expected number of local improvements on
                      semi-random Manhattan and Euclidean instances is polynomial,
                      improving previous average-case results significantly. In
                      addition, we prove an upper bound on the expected
                      approximation factor with respect to all Lp metrics that
                      depends polynomially on the magnitude of the random
                      perturbation.},
      keywords     = {Nash-Gleichgewicht (SWD) / Pareto-Optimum (SWD) /
                      Spieltheorie (SWD) / Ganzzahlige Optimierung (SWD)},
      cin          = {121110 / 120000},
      ddc          = {004},
      cid          = {$I:(DE-82)121110_20140620$ / $I:(DE-82)120000_20140620$},
      typ          = {PUB:(DE-HGF)11},
      urn          = {urn:nbn:de:hbz:82-opus-23130},
      url          = {https://publications.rwth-aachen.de/record/50046},
}