% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@PHDTHESIS{Yegenoglu:969331,
      author       = {Yegenoglu, Alper},
      othercontributors = {Morrison, Abigail Joanna Rhodes and Herty, Michael},
      title        = {{G}radient-free optimization of artificial and biological
                      networks using learning to learn},
      volume       = {55},
      school       = {RWTH Aachen University},
      type         = {Dissertation},
      address      = {Jülich},
      publisher    = {Forschungszentrum Jülich GmbH, Zentralbibliothek, Verlag},
      reportid     = {RWTH-2023-09115},
      isbn         = {978-3-95806-719-6},
      series       = {Schriften des Forschungszentrums Jülich. IAS series},
      pages        = {1 online resource (136 pages) : illustrations,
                      diagrams},
      year         = {2023},
      note         = {Print edition: 2023. - Online edition: 2023. - Also
                      published on the publication server of RWTH Aachen
                      University; Dissertation, RWTH Aachen University, 2023},
      abstract     = {Understanding intelligence and how it allows humans to
                      learn, to make decisions, and to form memories is a
                      long-standing quest in neuroscience. Our brain is formed
                      by networks of neurons and other cells; however, it is
                      not clear how those networks are trained to learn to
                      solve specific tasks. In machine learning and artificial
                      intelligence it is common to train and optimize neural
                      networks with gradient descent and backpropagation. How
                      to transfer this optimization strategy to biological
                      spiking neural networks (SNNs) is still a matter of
                      research. Due to the binary communication scheme between
                      the neurons of an SNN via spikes, a direct application
                      of gradient descent and backpropagation is not possible
                      without further approximations. In my work, I present
                      gradient-free optimization techniques that are directly
                      applicable to artificial and biological neural networks.
                      I utilize metaheuristics, such as genetic algorithms and
                      the ensemble Kalman filter (EnKF), to optimize network
                      parameters and train networks to learn to solve specific
                      tasks. The optimization is embedded into the concept of
                      meta-learning, also known as learning to learn. The
                      learning to learn concept consists of a two-loop
                      optimization procedure: in the first (inner) loop, the
                      algorithm or network is trained on a family of tasks,
                      and in the second (outer) loop, the hyper-parameters and
                      parameters of the network are optimized. First, I apply
                      the EnKF to a convolutional neural network, resulting in
                      high accuracy when classifying digits. Then, I employ
                      the same optimization procedure on a spiking reservoir
                      network within the L2L framework. The L2L framework, an
                      implementation of the learning to learn concept, allows
                      me to easily deploy and execute multiple instances of
                      the network in parallel on high-performance computing
                      systems. In order to understand how learning evolves in
                      the network, I analyze the connection weights over
                      multiple generations and investigate the covariance
                      matrix of the EnKF in principal component space. The
                      analysis not only shows the convergence behaviour of the
                      optimization process, but also how sampling techniques
                      influence the optimization procedure. Next, I embed the
                      EnKF into the L2L inner loop and adapt the
                      hyper-parameters of the optimizer using a genetic
                      algorithm (GA). In contrast to the manual parameter
                      setting, the GA suggests an alternative configuration.
                      Finally, I present a simulation of an ant colony
                      foraging for food while being steered by SNNs. As the
                      networks are trained, self-coordination and
                      self-organization emerge in the colony. I employ various
                      analysis methods to better understand the ants’
                      behaviour. With my work, I leverage optimization for
                      different scientific domains utilizing meta-learning,
                      and I illustrate how gradient-free optimization can be
                      applied to biological and artificial networks.},
      cin          = {124920 / 120000 / 080031},
      ddc          = {004},
      cid          = {$I:(DE-82)124920_20200227$ / $I:(DE-82)120000_20140620$ /
                      $I:(DE-82)080031_20200305$},
      pnm          = {HBP SGA3 - Human Brain Project Specific Grant Agreement 3
                      (945539) / SLNS - SimLab Neuroscience (Helmholtz-SLNS) / JL
                      SMHB - Joint Lab Supercomputing and Modeling for the Human
                      Brain (JL SMHB-2021-2027) / HDS LEE - Helmholtz School for
                      Data Science in Life, Earth and Energy (HDS LEE)
                      (HDS-LEE-20190612) / CSD-SSD - Center for Simulation and
                      Data Science (CSD) - School for Simulation and Data Science
                      (SSD) (CSD-SSD-20190612) / Doktorandenprogramm
                      (PHD-PROGRAM-20170404)},
      pid          = {G:(EU-Grant)945539 / G:(DE-Juel1)Helmholtz-SLNS /
                      G:(DE-Juel1)JL SMHB-2021-2027 / G:(DE-Juel1)HDS-LEE-20190612
                      / G:(DE-Juel1)CSD-SSD-20190612 /
                      G:(DE-HGF)PHD-PROGRAM-20170404},
      typ          = {PUB:(DE-HGF)11 / PUB:(DE-HGF)3},
      urn          = {urn:nbn:de:hbz:5:2-1257933},
      doi          = {10.18154/RWTH-2023-09115},
      url          = {https://publications.rwth-aachen.de/record/969331},
}
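
% Note on the ensemble Kalman filter (EnKF) named in the abstract: as a worked
% reference, the gradient-free parameter update commonly used for EnKF-based
% network training can be written in the ensemble Kalman inversion form. This
% is a generic sketch in standard notation from the inversion literature; the
% thesis's own notation and variant may differ.
%
%   u_{n+1}^{(j)} = u_n^{(j)}
%                   + C_n^{up} \left( C_n^{pp} + \Gamma \right)^{-1}
%                     \left( y - G\big(u_n^{(j)}\big) \right)
%
% Here u_n^{(j)} is ensemble member j (a vector of network weights) at
% iteration n, G is the forward map from weights to network outputs, y the
% target observations, \Gamma the observation-noise covariance, and C_n^{up},
% C_n^{pp} the empirical weight-output and output-output covariances of the
% ensemble. No gradient of G appears in the update, which is why the method
% applies to non-differentiable spiking networks.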