% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
% Review notes: added the required `school` field for @phdthesis (was missing;
% derived from the note "Aachen, Techn. Hochsch., Diss., 2014"); removed the
% single-letter brace {A} in the title (anti-pattern, and the first word keeps
% its capital under sentence-casing anyway); fixed doubled "as as" in abstract.
% Repository-specific fields (reportid, cin, cid, typ, shelfmark, urn) are
% ignored by standard styles and kept verbatim.
@phdthesis{Neider:444982,
  author            = {Neider, Daniel},
  othercontributors = {Thomas, Wolfgang and Löding, Christof and Leucker, Martin},
  title             = {Applications of automata learning in verification and
                       synthesis},
  school            = {RWTH Aachen University},
  address           = {Aachen},
  publisher         = {Publikationsserver der RWTH Aachen University},
  reportid          = {RWTH-CONV-145293},
  pages             = {XV, 267 S. : graph. Darst.},
  year              = {2014},
  note              = {Aachen, Techn. Hochsch., Diss., 2014},
  abstract          = {The objective of this thesis is to explore automata
                       learning, which is an umbrella term for techniques that
                       derive finite automata from external information sources, in
                       the areas of verification and synthesis. We consider four
                       application scenarios that turn out to be particularly
                       well-suited: Regular Model Checking, quantified invariants
                       of linear data structures, automatic reachability games, and
                       labeled safety games. The former two scenarios stem from the
                       area of verification whereas the latter two stem from the
                       area of synthesis (more precisely, from the area of
                       infinite-duration two-player games over graphs, as
                       popularized by McNaughton). Regular Model Checking is a
                       special kind of Model Checking in which the program to
                       verify is modeled in terms of finite automata. We develop
                       various (semi-)algorithms for computing invariants in
                       Regular Model Checking: a white-box algorithm, which takes
                       the whole program as input; two semi-black-box algorithms,
                       which have access to a part of the program and learn missing
                       information from a teacher; finally, two black-box
                       algorithms, which obtain all information about the program
                       from a teacher. For the black-box algorithms, we employ a
                       novel paradigm, called ICE-learning, which is a generic
                       learning setting for learning invariants. Quantified
                       invariants of linear data structures occur in
                       Floyd-Hoare-style verification of programs manipulating
                       arrays and lists. To learn such invariants, we introduce the
                       notion of quantified data automata and develop an active
                       learning algorithm for these automata. Based on a finite
                       sample of configurations that manifest on executions of the
                       program in question, we learn a quantified data automaton
                       and translate it into a logic formula expressing the
                       invariant. The latter potentially requires an additional
                       abstraction step to ensure that the resulting formula falls
                       into a decidable logic. Automatic reachability games are
                       classical reachability games played over automatic graphs;
                       automatic graphs are defined by means of asynchronous
                       transducers and subsume various types of graphs, such as
                       finite graphs, pushdown graphs, and configuration graphs of
                       Turing machines. We first consider automatic reachability
                       games over finite graphs and present a symbolic fixed-point
                       algorithm for computing attractors that uses deterministic
                       finite automata to represent sets of vertices. Since such a
                       fixed-point algorithm is not guaranteed to terminate on
                       games over infinite graphs, we subsequently develop a
                       learning-based (semi-)algorithm that learns the attractor
                       (if possible) from a teacher rather than computing it
                       iteratively. Finally, we consider labeled safety games,
                       which are safety games played over finite graphs whose edges
                       are deterministically labeled with actions. The problem we
                       are interested in is to compute a winning strategy that,
                       when implemented as a circuit or a piece of code, results
                       in a small implementation. To this end, we model strategies
                       as so-called strategy automata and exploit a common property
                       of active learning algorithms, namely that such algorithms
                       produce conjectures of increasing size. The idea is to start
                       learning the set of all winning plays and stop the learning
                       prematurely as soon as the learner conjectures a winning
                       strategy automaton. This procedure guarantees that the
                       resulting strategy automaton is at most as large as the
                       underlying game graph. To improve the performance of our
                       algorithm further, we develop domain-specific optimizations
                       of Angluin's as well as Kearns and Vazirani's active
                       learning algorithms.},
  keywords          = {Theoretische Informatik (SWD) / Synthese (SWD) /
                       Verifikation (SWD) / Maschinelles Lernen (SWD) /
                       Automatentheorie (SWD) / Invariante (SWD)},
  cin               = {120000 / 122110},
  ddc               = {004},
  cid               = {$I:(DE-82)120000_20140620$ / $I:(DE-82)122110_20140620$},
  shelfmark         = {68Q45 * 68Q32 * 05C57 * 68Q60},
  typ               = {PUB:(DE-HGF)11},
  urn               = {urn:nbn:de:hbz:82-opus-51699},
  url               = {https://publications.rwth-aachen.de/record/444982},
}