% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@PHDTHESIS{Keup:849327,
author = {Keup, Christian},
othercontributors = {Helias, Moritz and Krämer, Michael},
title = {{F}ield theoretic approaches to computation in neuronal
networks},
school = {RWTH Aachen University},
type = {Dissertation},
address = {Aachen},
publisher = {RWTH Aachen University},
reportid = {RWTH-2022-06733},
pages = {1 online resource : illustrations, diagrams},
year = {2022},
note = {Published on the publication server of RWTH Aachen
University; Dissertation, RWTH Aachen University, 2022},
abstract = {This thesis is centered around the application of
statistical field theory to the question of how computation
is performed by neuronal networks. The spiking activity in
dense networks of neurons, such as in the brain, tends to be
strongly chaotic. How, then, can these circuits reliably
process information? Past investigations have mainly studied
weakly chaotic firing-rate models. Here we demonstrate a
universal mechanism explaining how even strongly chaotic
activity can support powerful computations. The calculations
use a novel unified theoretical framework that makes it
possible to compare neural-network models across scales of
model complexity. Here, the framework is applied to two common
model classes: binary neurons, which switch between a
pulse-emitting and non-emitting state, and rate neurons,
which describe only the number of pulses per second. These
implement two different assumptions about the substrate of
computation, commonly referred to as spike-coding vs.
rate-coding. We calculate the transition to chaos in random
binary networks and show that each chaotic binary network
corresponds to an equivalent rate network with the same
activity statistics, but with nonchaotic dynamics. Therefore,
results on the well-studied edge of chaos in firing-rate
models cannot be directly transferred to spiking-type
networks. Next, considering strongly chaotic regimes, we
show that the activity transiently promotes the separability
of different input stimuli. This effect arises because state
trajectories for different inputs diverge from one another
in a stereotypical, distance-dependent manner. Binary
networks and pulse-coupled networks separate inputs
particularly quickly, which can be exploited for fast,
event-based computation but requires control of the initial
conditions. These results provide predictions for
experimental recordings in brain circuits and invite
research on the use of chaotic dynamics in artificial neural
networks. We further generalize the theoretical framework,
which can serve as a bridge between many types of existing
neural-network models and provides a systematic method to
derive self-consistent, time-dependent Gaussian
approximations and perturbation corrections for such
systems. Furthermore, a parallel line of work is presented
using the same techniques to study how the data
representation is transformed in the process of computation
by recurrent reservoir networks and trained artificial
feed-forward networks. Because deep networks can exploit
interactions between all scales in the data, these networks
are difficult to understand based on their microscopic
structure. We find that, for close-to-Gaussian data classes,
the computation can be captured by a Gaussian theory for the
high-dimensional activity in each layer. Nonetheless, it
remains a fundamental challenge to extend such a theory to
strongly non-Gaussian distributions, and a graphical
intuition to describe transformations of high-dimensional
structured probability distributions is largely lacking.
Therefore, inspired by our field-theoretic work, we develop a
graphical explanation for the transformations learned in
classification tasks. We demonstrate how the transformations
of the data manifold can be linked to folding operations
that have a low-dimensional intuition which stays valid in
the high-dimensional case, thereby opening an exciting link
between the mathematics of folding algorithms and neuronal
networks.},
cin = {136930 / 130000},
ddc = {530},
cid = {$I:(DE-82)136930_20160614$ / $I:(DE-82)130000_20140620$},
typ = {PUB:(DE-HGF)11},
doi = {10.18154/RWTH-2022-06733},
url = {https://publications.rwth-aachen.de/record/849327},
}
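% For orientation: the abstract contrasts the well-studied edge of chaos in
% firing-rate models with spiking-type networks. The classical reference point
% is the random rate network of Sompolinsky, Crisanti and Sommers (1988); the
% thesis's exact formulation may differ. A minimal sketch in LaTeX:
%
%   \dot{x}_i(t) = -x_i(t) + \sum_{j=1}^{N} J_{ij}\,\phi\bigl(x_j(t)\bigr),
%   \qquad J_{ij} \sim \mathcal{N}\!\left(0,\, g^2/N\right),
%
% with a sigmoidal gain function such as \phi = \tanh. Dynamic mean-field
% theory predicts a transition from a stable fixed point to chaos as the
% coupling gain g crosses 1; this is the "edge of chaos" against which the
% binary-network results of the thesis are compared.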