% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@PHDTHESIS{Kempt:959574,
      author       = {Kempt, Hendrik},
      othercontributors = {Nagel, Saskia K. and Nyholm, Sven},
      title        = {{E}thical investigations of {AI} in medical diagnostics},
      school       = {Rheinisch-Westfälische Technische Hochschule Aachen},
      type         = {Dissertation},
      address      = {Aachen},
      publisher    = {RWTH Aachen University},
      reportid     = {RWTH-2023-05683},
      pages        = {1 online resource},
      year         = {2023},
      note         = {Published on the publication server of RWTH Aachen
                      University; Dissertation, Rheinisch-Westfälische Technische
                      Hochschule Aachen, 2023},
      abstract     = {Since the advent of machine learning as an artificial
                      intelligence paradigm, opportunities for automating clinical
                      processes and decision-making have emerged that were
                      previously considered unattainable. This dissertation aims
                      to develop ethical analyses of the issues surrounding the
                      use of "medical AI" for various diagnostic purposes. To
                      motivate and contextualize the articles, a brief overview of
                      the developments of artificial intelligence in medical
                      diagnostics and the related ethical debate is first
                      presented. For this endeavor, three different levels are
                      presented against which diagnostic AI can be analyzed in
                      terms of its intent and function. The first level concerns
                      the role of AI as a decision support system. In this role,
                      AI is primarily understood as assisting physicians and
                      patients in the decision-making process. The requirement
                      for AI in this role is thus to compensate for human
                      limitations. At the same time, such assistance can not
                      only support decisions, but also make new decisions possible
                      in the first place. The second level assumes that AI can
                      replace human decisions, i.e. those of both physicians and
                      patients. In this view, AI applications for diagnostic
                      purposes become successful when they exceed the benchmarks
                      of human decision-making. As a third level, the influence of
                      AI on medical practice is examined. Here, the role of AI as
                      an assistant or replacement is less important than the
                      consequences that the use of such AI may have for other
                      medical processes. This concerns, for example, norms of the
                      doctor-patient relationship. This level allows for an
                      analysis of the impact of AI on medical norms that goes
                      beyond the analysis of the technology. The first paper
                      discusses several of these conceptual and normative issues
                      and proposes a solution to a specific use case of AI for the
                      provision of second medical opinions. To this end, a
                      previously unavailable taxonomy of types of clinical second
                      opinions is first developed. With an overview of the
                      concepts of responsibility, explainability, and peer
                      disagreements, this paper points to a pragmatic yet
                      normative solution that allows AI-based decision support
                      systems to take on as many tasks in diagnostic processes as
                      possible: if an AI's second opinion confirms the initial
                      diagnosis, this can be considered sufficient evidence for
                      a diagnosis; should such a second opinion deviate from the
                      treating physician's initial diagnosis, this must be taken
                      as an indicator that a further, human assessment of the
                      case is necessary. This norm is referred to here as the
                      "rule of disagreement". The
                      second paper builds on these findings and addresses the
                      question of how decision support systems can be disagreed
                      with in clinical contexts. Physicians who come to different
                      diagnostic conclusions than an AI they use as a
                      decision support system face a double challenge: on the one
                      hand, the AI's diagnostic proposal is evidence that needs to
                      be taken into account; on the other hand, the decision for a
                      diagnosis is in the hands of physicians, and a rejection of
                      the evidence produced by the AI requires further,
                      alternative justifications that are not readily available.
                      To this end, we first distinguish the forms of conflict
                      between physicians and technology: malfunctions, mistakes,
                      and disagreements. Starting from the notion of Meaningful
                      Human Control (MHC) as a normative standard for assessing
                      the ethical permissibility of cooperative systems in
                      clinical processes, the notion of meaningful disagreement is
                      introduced and elaborated. In the third part, the notion of
                      explainability of AI-generated diagnoses is normatively
                      examined. To this end, the various goods that explainability
                      of AI diagnoses may entail are first described. Through the
                      conceptual distinction between "absolute" and "relative"
                      explainability, a normative distinction is then also
                      introduced to examine the standards against which the
                      explanations of an AI diagnosis are measured. Insofar as
                      such explanations are measured against human standards of
                      diagnosis ("relative") rather than absolute ones, AI
                      diagnoses cannot be held to higher standards. Last, it is
                      shown that the goods of explainability presented at the
                      beginning can be replaced without loss by recourse to
                      appropriate risk assessment through certification
                      ("certifiability") and reference to the physician's
                      expertise in translating the AI-based diagnosis to the
                      patient’s understanding ("interpretability"). The fourth
                      paper continues the distinctions made in the third regarding
                      the relative and absolute explainability of AI diagnoses,
                      transfers them to the context of global standardization of
                      AI systems, and additionally distinguishes between local
                      and global standards of explainability. High standards
                      of explainable diagnoses in countries with high medical
                      standards create a risk that AIs developed in these
                      countries may not be approved globally where such
                      standards of explainability cannot be met; to avoid
                      regions that lack experts in explaining diagnoses being
                      unable to use these AIs, it is suggested that the decision
                      to set standards should be left to the communities
                      themselves. The fifth part
                      examines the extent to which AI diagnostic tools should be
                      used as placebos. For this purpose, the debate about
                      placebos in clinical contexts is first examined and a
                      distinction is made between seven different forms of
                      placebo-effect-eliciting tools. An analysis of the
                      doctor-patient relationship as an intimate relationship of
                      trust and the resulting "normative entanglement"
                      establishes the
                      permissibility of placebos. The next step is to examine the
                      suitability of AI applications as placebos. Since an AI cannot
                      replace the normative entanglement of doctors and patients,
                      its admissibility is examined solely in the context of
                      trusting doctor-patient treatment relationships. In the
                      sixth part, the question is answered whether an AI should
                      judge medical interventions to be "medically necessary".
                      To this end,
                      we first introduce the distinction between medical necessity
                      as a judgment about the instrumental necessity of an
                      intervention and as a judgment about the socially
                      justifiable minimum of medical services ("social medical
                      necessity"). By focusing on the first distinction, questions
                      of judgment are raised by AIs. Since these judgments always
                      include judgments about the goals of an intervention, which
                      are socially and morally agreed upon, a skeptical position
                      of AI as a judge of interventions as medically necessary is
                      taken. This is followed by a discussion of both the findings
                      of these papers and the prospect of further research
                      questions in this context. The approach of assessing the
                      uses and influences of AI according to the three levels
                      allows for a problem-oriented, pragmatic view of the use
                      of AI while also permitting clear boundaries and the
                      introduction of sharp ethical distinctions. Through
                      these distinctions and the theses connected to them, further
                      issues also emerge, such as the question of democratizing
                      standards of explainability, global AI regulations and
                      growing inequalities in healthcare, new attributions of
                      responsibility in physician-AI collaborations, and changes
                      in clinical work and care environments.},
      cin          = {711120},
      ddc          = {100},
      cid          = {$I:(DE-82)711120_20180704$},
      pnm          = {BMBF 01GP1910A - Verbundprojekt - Teilprojekt Umfragen,
                      Interviews (BMBF-01GP1910A)},
      pid          = {G:(BMBF)BMBF-01GP1910A},
      typ          = {PUB:(DE-HGF)11},
      doi          = {10.18154/RWTH-2023-05683},
      url          = {https://publications.rwth-aachen.de/record/959574},
}