% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
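%
% For reference, a minimal biblatex/biber setup that reads this UTF-8 file
% could look like the sketch below (the file name “references.bib” is only
% illustrative; adjust it to wherever you store this entry):
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}
%   \begin{document}
%   AI transparency requirements are examined in~\cite{Werz:1020690}.
%   \printbibliography
%   \end{document}
%
% Compile with pdflatex, run biber, then pdflatex twice more so the citation
% and the bibliography resolve.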
@PHDTHESIS{Werz:1020690,
author = {Werz, Johanna Miriam},
othercontributors = {Isenhardt, Ingrid and Ziefle, Martina},
title = {{K}ünstliche {I}ntelligenz erklären, verstehen, nutzen :
{A}nforderungen an {T}ransparenz und ihr {E}influss auf die
{N}utzung von {KI}-{E}ntscheidungsunterstützungssystemen},
school = {Rheinisch-Westfälische Technische Hochschule Aachen},
type = {Dissertation},
address = {Aachen},
publisher = {RWTH Aachen University},
reportid = {RWTH-2025-09177},
pages = {1 online resource : illustrations},
year = {2025},
note = {Published on the publication server of RWTH Aachen
University; Dissertation, Rheinisch-Westfälische Technische
Hochschule Aachen, 2025},
abstract = {Despite the increasing number of artificial intelligence
(AI) systems for private usage, AI transparency has long
been researched primarily from a technical perspective.
However, study results with end users show that system
transparency does not automatically lead to system
acceptance. Therefore, the question arises of how
transparency of AI decision support systems affects the use
of these systems by end users. In this dissertation, this
research question was investigated using three studies with
a mixed-methods approach. The first study, a quantitative
online experiment with n = 169 participants, analyzed how
accuracy information about an algorithm influences the use
of this algorithm after an error. The second study,
qualitative focus group discussions with n = 26
participants, identified requirements for AI transparency
from the perspective of end users. The third study, a
quantitative online experiment with n = 151 participants,
compared four different types of transparency regarding
their effect on trust and use of the respective algorithms.
The results show that technical explanations alone are not
sufficient to strengthen trust in AI systems or increase
their usage. Background information about the developers,
the motives of the institutions behind the AI, or external
audits build trust more effectively than explanations of
how an AI works. Accuracy information has a limited
positive effect on usage, while explanations of why a
particular result was produced are desirable when errors
occur. The requirements for AI
transparency depend on the characteristics of the system, in
particular how severe errors would be, and users' previous
experience. More important than detailed transparency is
ensuring that users understand the transparency measures and
conveying the reliability of the AI system. The work
emphasizes the importance of a user-centered development of
AI transparency due to the individuality of systems and user
groups. In addition to further implications, a transparency
matrix for developers was derived, which can be used to
identify the necessary transparency measures based on
given system characteristics. Implications also arise for
political decision-makers to promote transparency in AI
systems. In addition, limitations of the individual studies
and the overall work are discussed and follow-up questions
for further research are derived.},
cin = {735410},
ddc = {300},
cid = {$I:(DE-82)735410_20230123$},
pnm = {FAIRWork - Flexibilization of complex Ecosystems using
Democratic AI based Decision Support and Recommendation
Systems at Work (101069499) / OPSF654 - Transparency in
Artificial Intelligence: Considering Explainability, User
and System Factors (TAIGERS) (EXS-SF-OPSF654) / Exploratory
Research Space: Seed Fund (2) als Anschubfinanzierung zur
Erforschung neuer interdisziplinärer Ideen (EXS-SF) /
Excellence Strategy (EXS)},
pid = {G:(EU-Grant)101069499 / G:(DE-82)EXS-SF-OPSF654 /
G:(DE-82)EXS-SF / G:(DE-82)EXS},
typ = {PUB:(DE-HGF)11},
doi = {10.18154/RWTH-2025-09177},
url = {https://publications.rwth-aachen.de/record/1020690},
}