% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@phdthesis{Heinen:1024771,
  author            = {Heinen, Katharina Svenja},
  othercontributors = {Wolfart, Stefan and Wolf, Michael},
  title             = {{Mehrwert} eines digitalen {Analysesystems} für die
                       {Bewertung} von {Zahnpräparationen} für eine
                       vollverblendete {Krone} in der vorklinischen
                       {Ausbildung}},
  school            = {Rheinisch-Westfälische Technische Hochschule Aachen},
  type              = {Dissertation},
  address           = {Aachen},
  publisher         = {RWTH Aachen University},
  reportid          = {RWTH-2026-00326},
  pages             = {1 Online-Ressource : Illustrationen},
  year              = {2025},
  langid            = {german},
  note              = {Veröffentlicht auf dem Publikationsserver der RWTH
                       Aachen University 2026; Dissertation,
                       Rheinisch-Westfälische Technische Hochschule Aachen,
                       2025},
  abstract          = {Objective: This study examines the added value of a
                       digital analysis software for evaluating student crown
                       preparations compared to conventional visual assessment
                       using a checklist. Materials and Methods: Twenty
                       typodont teeth prepared by students were assessed by
                       twelve dentists. In the first session, assessments were
                       conducted conventionally using a checklist with twelve
                       criteria. After calibrating the examiners and training
                       them to use the analysis software, in the second
                       session, part of the preparations was re-evaluated
                       visually (n=10), while another part (n=5) was evaluated
                       using the software “prepCheck” (Sirona). Statistical
                       analyses focused on time required as well as inter- and
                       intra-rater reliability. Results: The results show that
                       the conventional method with a checklist demonstrated
                       high inter-rater reliability (correlation coefficient
                       0.88), yet revealed fluctuations in intra-rater
                       reliability. Assessment of individual parameters, such
                       as proximal distance, was more consistent, while more
                       complex criteria, such as occlusal and circumferential
                       reduction, showed significant variability. Using
                       “prepCheck,” five of the twelve criteria were rated
                       significantly higher, with a generally lower standard
                       deviation. However, the time required for prepCheck was
                       approximately seven times higher. Conclusions: Despite
                       the checklist, the conventional method often lacks
                       consistent intra-rater reliability, which can lead to
                       subjective evaluations in preclinical courses. However,
                       the high inter-rater reliability suggests that
                       calibrations and checklists help foster objectivity.
                       The prepCheck analysis software resulted in more
                       objective and consistent evaluations, with less
                       stringent judgments and reduced variability, suggesting
                       greater accuracy in assessment. Despite the high time
                       investment, using such software in dental education is
                       promising, particularly for promoting digital
                       competence and consistent performance evaluation.},
  cin               = {542000-2 ; 937710},
  ddc               = {610},
  cid               = {$I:(DE-82)542000-2_20140620$},
  typ               = {PUB:(DE-HGF)11},
  doi               = {10.18154/RWTH-2026-00326},
  url               = {https://publications.rwth-aachen.de/record/1024771},
}