% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@PHDTHESIS{Koeppe:819355,
author = {Koeppe, Arnd},
othercontributors = {Markert, Bernd and Herty, Michael},
title = {{D}eep learning in the finite element method},
volume = {IAM-11},
school = {Rheinisch-Westfälische Technische Hochschule Aachen},
type = {Dissertation},
address = {Aachen},
publisher = {RWTH Aachen University},
reportid = {RWTH-2021-04990},
series = {Report. IAM, Institute of General Mechanics},
  pages = {1 online resource : illustrations, diagrams},
year = {2021},
  note = {Published on the publication server of RWTH Aachen
          University; Dissertation, Rheinisch-Westfälische Technische
          Hochschule Aachen, 2021},
abstract = {In mechanics and engineering, the Finite Element Method
(FEM) represents the predominant numerical simulation
method. It is extraordinarily modular and flexible since it
can simulate complex structures assembled from generic
elements that utilize various constitutive models. However,
nonlinear problems, such as elastoplasticity, demand many
Degrees of Freedom (DOF) and numerous iterations, which make
the FEM numerically expensive. To increase numerical
efficiency, data-driven algorithms and Artificial
Intelligence (AI) offer an attractive approach to infer
accurate nonlinear solutions from reduced-order inputs,
thereby accelerating simulations. Inspired by the human
brain, deep learning algorithms, i.e., (artificial) neural
networks, organize and connect numerous neurons in layers
and cells to train universal function approximators. Neural
networks have demonstrated excellent performance and
efficiency through parallelization in various applications.
Because of the myriads of neurons and possible ways to
connect them, neural networks often elude human
understanding. Therefore, simpler models have been favored,
even if they exhibit inferior performance. This thesis aims
to integrate deep learning algorithms into the FEM,
accelerate computations, and interpret neural networks in
mechanics. Towards those objectives, a data-driven
methodology is developed that deduces strategies for designing
neural networks for mechanics. Moreover, inductive
approaches search for optimal neural network configurations and
explain neural network learning. Leveraging the fundamental
data structure in mechanical balance equations, the
data-driven methodology yields strategies and methods to
interface neural networks with the FEM at three integration
levels. At the highest level, intelligent surrogate models
substitute entire finite element models and achieve
efficient computations. At the lowest level, intelligent
constitutive models offer flexibility, modularity, and
straightforward integration. Combining the advantages of
both approaches, intelligent meta elements yield
considerable speed-ups and flexibility using substructuring.
Additionally, strategies for data generation, preprocessing,
and postprocessing translate and augment mechanical data
to train new neural network architectures with convolutions
and recursions. Finally, a novel explainable AI approach
interprets the black box of Recurrent Neural Networks
(RNNs). Focusing on elastoplasticity, numerical
demonstrators establish the performance of the deduced
methods and strategies. Mechanical field quantities are
inferred accurately while achieving considerable speed-ups of
several orders of magnitude. Lastly, the new explainable AI approach
investigates RNNs trained for constitutive behavior.},
cin = {411110},
ddc = {620},
cid = {$I:(DE-82)411110_20140620$},
typ = {PUB:(DE-HGF)11 / PUB:(DE-HGF)3},
doi = {10.18154/RWTH-2021-04990},
url = {https://publications.rwth-aachen.de/record/819355},
}