@inproceedings{SeligBauerFrikeletal., author = {Selig, Tim and Bauer, Patrick and Frikel, J{\"u}rgen and M{\"a}rz, Thomas and Storath, Martin and Weinmann, Andreas}, title = {Two-stage Approach for Low-dose and Sparse-angle CT Reconstruction using Backprojection}, series = {Bildverarbeitung f{\"u}r die Medizin 2025 (BVM 2025): Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025 (BVM 2025): Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas M.}, publisher = {Springer VS}, address = {Wiesbaden}, isbn = {978-3-658-47421-8}, doi = {10.1007/978-3-658-47422-5_67}, pages = {286 -- 291}, abstract = {This paper presents a novel two-stage approach for computed tomography (CT) reconstruction, focusing on sparse-angle and low-dose setups to minimize radiation exposure while maintaining high image quality. Two-stage approaches consist of an initial reconstruction followed by a neural network for image refinement. In the initial reconstruction, we apply the backprojection (BP) instead of the traditional filtered backprojection (FBP). This enhances computational speed and offers potential advantages for more complex geometries, such as fan-beam and cone-beam CT. Additionally, BP addresses noise and artifacts in sparse-angle CT by leveraging its inherent noise-smoothing effect, which reduces streaking artifacts common in FBP reconstructions. For the second stage, we fine-tune the DRUNet proposed by Zhang et al. to further improve reconstruction quality. We call our method BP-DRUNet and evaluate its performance on a synthetically generated ellipsoid dataset alongside the well-established LoDoPaB-CT dataset. Our results show that BP-DRUNet produces competitive results in terms of PSNR and SSIM metrics compared to the FBP-based counterpart, FBP-DRUNet, and delivers visually competitive results across all tested angular setups.}, language = {en} } @article{BeyerWeigertQuicketal., author = {Beyer, Thomas and Weigert, Markus and Quick, Harald H. and Pietrzyk, Uwe and Vogt, Florian and Palm, Christoph and Antoch, Gerald and M{\"u}ller, Stefan P. and Bockisch, Andreas}, title = {MR-based attenuation correction for torso-PET/MR imaging}, series = {European Journal of Nuclear Medicine and Molecular Imaging}, volume = {35}, journal = {European Journal of Nuclear Medicine and Molecular Imaging}, number = {6}, doi = {10.1007/s00259-008-0734-0}, pages = {1142 -- 1146}, abstract = {Purpose MR-based attenuation correction (AC) will become an integral part of combined PET/MR systems. Here, we propose a toolbox to validate MR-AC of clinical PET/MRI data sets. Methods Torso scans of ten patients were acquired on a combined PET/CT and on a 1.5-T MRI system. MR-based attenuation data were derived from the CT following MR-CT image co-registration and subsequent histogram matching. PET images were reconstructed after CT- (PET/CT) and MR-based AC (PET/MRI). Lesion-to-background (L/B) ratios were estimated on PET/CT and PET/MRI. Results MR-CT histogram matching leads to a mean voxel intensity difference in the CT- and MR-based attenuation images of 12\% (max). Mean differences between PET/MRI and PET/CT were 19\% (max). L/B ratios were similar except for the lung where local misregistration and intensity transformation leads to a biased PET/MRI.
Conclusion Our toolbox can be used to study pitfalls in MR-AC. We found that co-registration accuracy and pixel value transformation determine the accuracy of PET/MRI.}, subject = {Kernspintomografie}, language = {en} } @article{HartmannNieberlePalmetal., author = {Hartmann, Robin and Nieberle, Felix and Palm, Christoph and Br{\'e}bant, Vanessa and Prantl, Lukas and Kuehle, Reinald and Reichert, Torsten E. and Taxis, Juergen and Ettl, Tobias}, title = {Utility of Smartphone-based Three-dimensional Surface Imaging for Digital Facial Anthropometry}, series = {JPRAS Open}, volume = {39}, journal = {JPRAS Open}, publisher = {Elsevier}, doi = {10.1016/j.jpra.2024.01.014}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70348}, pages = {330 -- 343}, abstract = {Background The utilization of three-dimensional (3D) surface imaging for facial anthropometry is a significant asset for patients undergoing maxillofacial surgery. Notably, there have been recent advancements in smartphone technology that enable 3D surface imaging. In this study, anthropometric assessments of the face were performed using a smartphone and a sophisticated 3D surface imaging system. Methods 30 healthy volunteers (15 females and 15 males) were included in the study. An iPhone 14 Pro (Apple Inc., USA) using the application 3D Scanner App (Laan Consulting Corp., USA) and the Vectra M5 (Canfield Scientific, USA) were employed to create 3D surface models. For each participant, 19 anthropometric measurements were conducted on the 3D surface models. Subsequently, the anthropometric measurements generated by the two approaches were compared. The statistical techniques employed included the paired t-test, paired Wilcoxon signed-rank test, Bland-Altman analysis, and calculation of the intraclass correlation coefficient (ICC). Results All measurements showed excellent agreement between smartphone-based and Vectra M5-based measurements (ICC between 0.85 and 0.97). Statistical analysis revealed no statistically significant differences in the central tendencies for 17 of the 19 linear measurements. Despite the excellent agreement found, Bland-Altman analysis revealed that the 95\% limits of agreement between the two methods exceeded ±3 mm for the majority of measurements. Conclusion Digital facial anthropometry using smartphones can serve as a valuable supplementary tool for surgeons, enhancing their communication with patients. However, the proposed data suggest that digital facial anthropometry using smartphones may not yet be suitable for certain diagnostic purposes that require high accuracy.}, language = {en} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Byrne, Michael F.
and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy, for example, bleeding and perforation. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, time and also training.}, language = {en} } @article{KnoedlerBaecherKaukeNavarroetal., author = {Kn{\"o}dler, Leonard and Baecher, Helena and Kauke-Navarro, Martin and Prantl, Lukas and Machens, Hans-G{\"u}nther and Scheuermann, Philipp and Palm, Christoph and Baumann, Raphael and Kehrer, Andreas and Panayi, Adriana C. and Knoedler, Samuel}, title = {Towards a Reliable and Rapid Automated Grading System in Facial Palsy Patients: Facial Palsy Surgery Meets Computer Science}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {17}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/jcm11174998}, abstract = {Background: Reliable, time- and cost-effective, and clinician-friendly diagnostic tools are cornerstones in facial palsy (FP) patient management. Different automated FP grading systems have been developed but revealed persisting downsides such as insufficient accuracy and cost-intensive hardware. We aimed to overcome these barriers and programmed an automated grading system for FP patients utilizing the House and Brackmann scale (HBS). Methods: Image datasets of 86 patients seen at the Department of Plastic, Hand, and Reconstructive Surgery at the University Hospital Regensburg, Germany, between June 2017 and May 2021, were used to train the neural network and evaluate its accuracy. Nine facial poses per patient were analyzed by the algorithm. Results: The algorithm showed an accuracy of 100\%. Oversampling did not result in altered outcomes, while the direct form displayed superior accuracy levels when compared to the modular classification form (n = 86; 100\% vs. 99\%). The Early Fusion technique was linked to improved accuracy outcomes in comparison to the Late Fusion and sequential method (n = 86; 100\% vs. 96\% vs. 97\%). Conclusions: Our automated FP grading system combines high-level accuracy with cost- and time-effectiveness. Our algorithm may accelerate the grading process in FP patients and facilitate the FP surgeon's workflow.}, language = {en} } @article{SouzaJrPalmMendeletal., author = {Souza Jr., Luis Antonio de and Palm, Christoph and Mendel, Robert and Hook, Christian and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Weber, Silke A. T. 
and Papa, Jo{\~a}o Paulo}, title = {A survey on Barrett's esophagus analysis using machine learning}, series = {Computers in Biology and Medicine}, volume = {96}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2018.03.014}, pages = {203 -- 213}, abstract = {This work presents a systematic review concerning recent studies and technologies of machine learning for Barrett's esophagus (BE) diagnosis and treatment. The use of artificial intelligence is a brand new and promising way to evaluate such disease. We compile some works published at some well-established databases, such as Science Direct, IEEEXplore, PubMed, Plos One, Multidisciplinary Digital Publishing Institute (MDPI), Association for Computing Machinery (ACM), Springer, and Hindawi Publishing Corporation. Each selected work has been analyzed to present its objective, methodology, and results. The BE progression to dysplasia or adenocarcinoma shows a complex pattern to be detected during endoscopic surveillance. Therefore, it is valuable to assist its diagnosis and automatic identification using computer analysis. The evaluation of the BE dysplasia can be performed through manual or automated segmentation through machine learning techniques. Finally, in this survey, we reviewed recent studies focused on the automatic detection of the neoplastic region for classification purposes using machine learning methods.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task. Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. 
Our pipeline offers greater flexibility and a lower entrance barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @article{EbigboPalmProbstetal., author = {Ebigbo, Alanna and Palm, Christoph and Probst, Andreas and Mendel, Robert and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Siersema, Peter and Messmann, Helmut}, title = {A technical review of artificial intelligence as applied to gastrointestinal endoscopy: clarifying the terminology}, series = {Endoscopy International Open}, volume = {07}, journal = {Endoscopy International Open}, number = {12}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1010-5705}, pages = {1616 -- 1623}, abstract = {The growing number of publications on the application of artificial intelligence (AI) in medicine underlines the enormous importance and potential of this emerging field of research. In gastrointestinal endoscopy, AI has been applied to all segments of the gastrointestinal tract, most importantly in the detection and characterization of colorectal polyps. However, AI research has also been published in the stomach and esophagus for both neoplastic and non-neoplastic disorders. The various technical as well as medical aspects of AI, however, remain confusing, especially for non-expert physicians. This physician-engineer co-authored review explains the basic technical aspects of AI and provides a comprehensive overview of recent publications on AI in gastrointestinal endoscopy. Finally, a basic insight is offered into understanding publications on AI in gastrointestinal endoscopy.}, subject = {Diagnose}, language = {en} } @inproceedings{WoehlHuberLoibletal., author = {W{\"o}hl, Rebecca and Huber, Michaela and Loibl, Markus and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Palm, Christoph}, title = {The Impact of Semi-Automated Segmentation and 3D Analysis on Testing New Osteosynthesis Material}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_30}, pages = {122 -- 127}, abstract = {A new protocol for testing osteosynthesis material postoperatively combining semi-automated segmentation and 3D analysis of surface meshes is proposed. By various steps of transformation and measuring, objective data can be collected. In this study, the specifications of a locking plate used for mediocarpal arthrodesis of the wrist were examined. The results show that union of the lunate, triquetrum, hamate and capitate was achieved and that the plate is comparable to coexisting arthrodesis systems. Additionally, it was shown that the complications detected correlate with the clinical outcome.
In synopsis, this protocol is considered beneficial and should be taken into account in further studies.}, subject = {Osteosynthese}, language = {en} } @inproceedings{Palm, author = {Palm, Christoph}, title = {Fusion of Serial 2D Section Images and MRI Reference}, series = {Workshop Innovative Verarbeitung bioelektrischer und biomagnetischer Signale (bbs2014), Berlin, 10.04.2014}, booktitle = {Workshop Innovative Verarbeitung bioelektrischer und biomagnetischer Signale (bbs2014), Berlin, 10.04.2014}, doi = {10.13140/RG.2.1.1358.3449}, abstract = {Serial 2D section images with high resolution, resulting from innovative imaging methods, become even more valuable if they are fused with in vivo volumes. By achieving this goal, the 3D context of the sections would be restored, the deformations would be corrected, and the artefacts would be eliminated. However, registration in this field faces major challenges and is not solved in general. On the other hand, several approaches have been introduced that deal with at least some of these difficulties. Here, a brief overview of the topic is given and some of the solutions are presented. This overview does not claim to be a complete review, but it could be a starting point for those who are interested in this field.}, subject = {Kernspintomografie}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\'a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {641 -- 649}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]).
In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @misc{EbigboRauberAyoubetal., author = {Ebigbo, Alanna and Rauber, David and Ayoub, Mousa and Birzle, Lisa and Matsumura, Tomoaki and Probst, Andreas and Steinbr{\"u}ck, Ingo and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Scheppach, Markus W. and Palm, Christoph and Messmann, Helmut}, title = {Early Esophageal Cancer and the Generalizability of Artificial Intelligence}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783775}, pages = {S428}, abstract = {Aims Artificial Intelligence (AI) systems in gastrointestinal endoscopy are narrow because they are trained to solve only one specific task. Unlike Narrow-AI, general AI systems may be able to solve multiple and unrelated tasks. We aimed to understand whether an AI system trained to detect, characterize, and segment early Barrett's neoplasia (Barrett's AI) is only capable of detecting this pathology or can also detect and segment other diseases like early squamous cell cancer (SCC). Methods 120 white light (WL) and narrow-band endoscopic images (NBI) from 60 patients (1 WL and 1 NBI image per patient) were extracted from the endoscopic database of the University Hospital Augsburg. Images were annotated by three expert endoscopists with extensive experience in the diagnosis and endoscopic resection of early esophageal neoplasias. An AI system based on the DeepLabV3+ architecture dedicated to early Barrett's neoplasia was tested on these images. The AI system was neither trained with SCC images nor had it seen the test images prior to evaluation. The overlap between the three expert annotations ("expert-agreement") was the ground truth for evaluating AI performance. Results Barrett's AI detected early SCC with a mean intersection over reference (IoR) of 92\% when at least 1 pixel of the AI prediction overlapped with the expert-agreement. When the threshold was increased to 5\%, 10\%, and 20\% overlap with the expert-agreement, the IoR was 88\%, 85\% and 82\%, respectively. The mean Intersection Over Union (IoU) - a metric of segmentation quality between the AI prediction and the expert-agreement - was 0.45. The mean expert IoU as a measure of agreement between the three experts was 0.60. Conclusions In the context of this pilot study, the predictions of SCC by a Barrett's dedicated AI showed some overlap with the expert-agreement. Therefore, features learned from Barrett's cancer-related training might also be helpful for SCC prediction. Our results allow different possible explanations. On the one hand, some Barrett's cancer features generalize toward the related task of assessing early SCC. On the other hand, the Barrett's AI is less specific to Barrett's cancer than a general predictor of pathological tissue. However, we expect to enhance the detection quality significantly by extending the training to SCC-specific data. The insight of this study opens the way towards a transfer learning approach for more efficient training of AI to solve tasks in other domains.}, language = {en} } @misc{ScheppachMendelRauberetal., author = {Scheppach, Markus W.
and Mendel, Robert and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) improves endoscopists' vessel detection during endoscopic submucosal dissection (ESD)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1782891}, pages = {S93}, abstract = {Aims While AI has been successfully implemented in detecting and characterizing colonic polyps, its role in therapeutic endoscopy remains to be elucidated. Especially third space endoscopy procedures like ESD and peroral endoscopic myotomy (POEM) pose a technical challenge and the risk of operator-dependent complications like intraprocedural bleeding and perforation. Therefore, we aimed at developing an AI-algorithm for intraprocedural real time vessel detection during ESD and POEM. Methods A training dataset consisting of 5470 annotated still images from 59 full-length videos (47 ESD, 12 POEM) and 179681 unlabeled images was used to train a DeepLabV3+ neural network with the ECMT semi-supervised learning method. Evaluation for vessel detection rate (VDR) and time (VDT) of 19 endoscopists with and without AI-support was performed using a testing dataset of 101 standardized video clips with 200 predefined blood vessels. Endoscopists were stratified into trainees and experts in third space endoscopy. Results The AI algorithm had a mean VDR of 93.5\% and a median VDT of 0.32 seconds. AI support was associated with a statistically significant increase in VDR from 54.9\% to 73.0\% and from 59.0\% to 74.1\% for trainees and experts, respectively. VDT significantly decreased from 7.21 sec to 5.09 sec for trainees and from 6.10 sec to 5.38 sec for experts in the AI-support group. False positive (FP) readings occurred in 4.5\% of frames. FP structures were detected for a significantly shorter time than true positives (0.71 sec vs. 5.99 sec). Conclusions AI improved VDR and VDT of trainees and experts in third space endoscopy and may reduce performance variability during training. Further research is needed to evaluate the clinical impact of this new technology.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and R{\"o}mmele, Christoph and Nagl, Sandra and Schnoy, Elisabeth and Messmann, Helmut and Ebigbo, Alanna and Palm, Christoph}, title = {Artificial intelligence as a tool in the detection of the papillary ostium during ERCP}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783138}, pages = {S198}, abstract = {Aims Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard in the diagnosis as well as treatment of diseases of the pancreatobiliary tract. However, it is technically complex and has a relatively high complication rate. In particular, cannulation of the papillary ostium remains challenging. The aim of this study is to examine whether a deep-learning algorithm can be used to detect the major duodenal papilla and in particular the papillary ostium reliably and could therefore be a valuable tool for inexperienced endoscopists, particularly in the training situation. Methods We analyzed a total of 654 retrospectively collected images of 85 patients.
Both the major duodenal papilla and the ostium were then segmented. Afterwards, a neural network was trained using a deep-learning algorithm. A 5-fold cross-validation was performed. Subsequently, we ran the algorithm on 5 prospectively collected videos of ERCPs. Results 5-fold cross-validation on the 654 labeled data resulted in an F1 value of 0.8007, a sensitivity of 0.8409 and a specificity of 0.9757 for the class papilla, and an F1 value of 0.5724, a sensitivity of 0.5456 and a specificity of 0.9966 for the class ostium. Regardless of the class, the average F1 value (class papilla and class ostium) was 0.6866, the sensitivity 0.6933 and the specificity 0.9861. In 100\% of cases the AI-detected localization of the papillary ostium in the prospectively collected videos corresponded to the localization of the cannulation performed by the endoscopist. Conclusions In the present study, the neural network was able to identify the major duodenal papilla with a high sensitivity and high specificity. In detecting the papillary ostium, the sensitivity was notably lower. However, when used on videos, the AI was able to identify the location of the subsequent cannulation with 100\% accuracy. In the future, the neural network will be trained with more data. Thus, a suitable tool for ERCP could be established, especially in the training situation.}, language = {en} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Nunes, Danilo Weber and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests the possibility of intraprocedural phase recognition in surgical operations as well as endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD) by AI-algorithms. The intricate measurement of intraprocedural phase distribution may deepen the understanding of the procedure. Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI-algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase. Phases included scope manipulation, marking, injection, application of electrical current and bleeding. Allocation of each frame was only possible to one category. This training dataset was used to train a Video Swin transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29801 single frames. Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\% and 87.55\% for scope manipulation, marking, injection, electric application and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\% and 84.76\% were measured for the same parameters. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. 
Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} } @misc{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD), which can easily be missed if pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). A separate dataset was used for external validation, as well as determination of detection performance of experts, trainees and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\% and 84\% for sensitivity, specificity and accuracy. External validation showed 90\%, 76\% and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support and experts in endoscopy. AI support in trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit. Stable performance of the algorithm in "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Single frame workflow recognition during endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {57}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806324}, pages = {S511}, abstract = {Aims Precise surgical phase recognition and evaluation may improve our understanding of complex endoscopic procedures. Furthermore, quality control measurements and endoscopy training could benefit from objective descriptions of surgical phase distributions. Therefore, we aimed to develop an artificial intelligence algorithm for frame-by-frame operational phase recognition during endoscopic submucosal dissection (ESD). Methods Full-length ESD videos from 31 patients comprising 6,297,782 single images were collected retrospectively.
Videos were annotated on a frame-by-frame basis for the operational macro-phases diagnostics, marking, injection, dissection and bleeding. Further subphases were the application of electrical current, visible injection of fluid into the submucosal space and scope manipulation, leading to 11 phases in total. 4,975,699 frames (21 patients) were used for training of a Video Swin transformer using uniform frame sampling for temporal information. Hyperparameter tuning was performed with 897,325 further frames (6 patients), while 424,758 frames (4 patients) were used for validation. Results The overall F1 scores on the test dataset for the macro-phases and all 11 phases were 0.96 and 0.90, respectively. The recall values for diagnostics, marking, injection, dissection and bleeding were 1.00, 1.00, 0.95, 0.96 and 0.93, respectively. Conclusions The algorithm classified operational phases during ESD with high accuracy. A precise evaluation of phase distribution may allow for the development of objective quality metrics for quality control and training.}, language = {en} } @article{WeigertPietrzykMuelleretal., author = {Weigert, Markus and Pietrzyk, Uwe and M{\"u}ller, Stefan P. and Palm, Christoph and Beyer, Thomas}, title = {Whole-body PET/CT imaging}, series = {Zeitschrift f{\"u}r Medizinische Physik}, volume = {18}, journal = {Zeitschrift f{\"u}r Medizinische Physik}, number = {1}, doi = {10.1016/j.zemedi.2007.07.004}, pages = {59 -- 66}, abstract = {Aim Combined whole-body (WB) PET/CT imaging provides better overall co-registration compared to separate CT and PET. However, in clinical routine local PET-CT mis-registration cannot be avoided. Thus, the reconstructed PET tracer distribution may be biased when using the misaligned CT transmission data for CT-based attenuation correction (CT-AC). We investigate the feasibility of retrospective co-registration techniques to align CT and PET images prior to CT-AC, thus potentially improving the quality of combined PET/CT imaging in clinical routine. Methods First, using a commercial software registration package, CT images were aligned to the uncorrected PET data by rigid and non-rigid registration methods. Co-registration accuracy of both alignment approaches was assessed by reviewing the PET tracer uptake patterns (visual, linked cursor display) following attenuation correction based on the original and co-registered CT. Second, we investigated non-rigid registration based on a prototype ITK implementation of the B-spline algorithm on a similarly targeted MR-CT registration task, where it showed promising results. Results Manual rigid, landmark-based co-registration introduced unacceptable misalignment, in particular in peripheral areas of the whole-body images. Manual, non-rigid landmark-based co-registration prior to CT-AC was successful with minor loco-regional distortions. Nevertheless, neither rigid nor non-rigid automatic co-registration based on the Mutual Information image-to-image metric succeeded in co-registering the CT and noAC-PET images. In contrast to widely available commercial registration software, our implementation of an alternative automated, non-rigid B-spline co-registration technique yielded promising results in this setting with MR-CT data. Conclusion In clinical PET/CT imaging, retrospective registration of CT and uncorrected PET images may improve the quality of the AC-PET images. As of today, no validated and clinically viable commercial registration software is in routine use.
This has triggered our efforts in pursuing new approaches to a validated, non-rigid co-registration algorithm applicable to whole-body PET/CT imaging of which first results are presented here. This approach appears suitable for applications in retrospective WB-PET/CT alignment. Ziel Kombinierte PET/CT-Bildgebung erm{\"o}glicht verbesserte Koregistrierung von PET- und CT-Daten gegen{\"u}ber separat akquirierten Bildern. Trotzdem entstehen in der klinischen Anwendung lokale Fehlregistrierungen, die zu Fehlern in der rekonstruierten PET- Tracerverteilung f{\"u}hren k{\"o}nnen, falls die unregistrierten CT-Daten zur Schw{\"a}chungskorrektur (AC) der Emissionsdaten verwendet werden. Wir untersuchen daher die Anwendung von Bildregistrierungsalgorithmen vor der CT-basierten AC zur Verbesserung der PET-Aufnahmen. Methoden Mittels einer kommerziellen Registrierungssoftware wurden die CT-Daten eines PET/CT- Tomographen durch landmarken- und intensit{\"a}tsbasierte rigide (starre) und nicht-rigide Registrierungsverfahren r{\"a}umlich an die unkorrigierten PET-Emissionsdaten angepasst und zur AC verwendet. Zur Bewertung wurden die Tracerverteilungen in den PET-Bildern (vor AC, CT-AC, CT-AC nach Koregistrierung) visuell und mit Hilfe korrelierter Fadenkreuze verglichen. Zus{\"a}tzlich untersuchten wir die ITK-Implementierung der bekannten B-spline basierten, nicht-rigiden Registrierungsans{\"a}tze im Hinblick auf ihre Verwendbarkeit f{\"u}r die multimodale PET/CT-Ganzk{\"o}rperregistrierung. Ergebnisse Mittels landmarkenbasierter, nicht-rigider Registrierung konnte die Tracerverteilung in den PET-Daten lokal verbessert werden. Landmarkenbasierte rigide Registrierung f{\"u}hrte zu starker Fehlregistrierung in entfernten K{\"o}rperregionen. Automatische rigide und nicht-rigide Registrierung unter Verwendung der Mutual-Information-{\"A}hnlichkeitsmetrik versagte auf allen verwendeten Datens{\"a}tzen. Die automatische Registrierung mit B-spline-Funktionen zeigte vielversprechende Resultate in der Anwendung auf einem {\"a}hnlich gelagerten CT-MR-Registrierungsproblem. Fazit Retrospektive, nicht-rigide Registrierung unkorrigierter PET- und CT-Aufnahmen aus kombinierten Aufnahmensystemen vor der AC kann die Qualit{\"a}t von PET-Aufnahmen im klinischen Einsatz verbessern. Trotzdem steht bis heute im klinischen Alltag keine validierte, automatische Registrierungssoftware zur Verf{\"u}gung. Wir verfolgen dazu Ans{\"a}tze f{\"u}r validierte, nicht-rigide Bildregistrierung f{\"u}r den klinischen Einsatz und pr{\"a}sentieren erste Ergebnisse.}, subject = {Positronen-Emissions-Tomografie}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fernandez-Esparrach, G. and Parsa, Nasim and Byrne, Michael F. 
and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not achieve the level of the stand-alone performance of AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed ninety-six standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized in two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, irrespective of comparable outcomes. 
These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Birzle, Lisa and Aehling, Niklas and Schulz, Dominik Andreas Helmut Otto and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {K{\"u}nstliche Intelligenz als Hilfsmittel zur Detektion der Papilla duodeni major und des papill{\"a}ren Ostiums w{\"a}hrend der ERCP}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {63}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {5}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806882}, pages = {e295}, abstract = {Einleitung Die Endoskopische Retrograde Cholangiopankreatikographie (ERCP) ist der Goldstandard in der endoskopischen Therapie von Erkrankungen des pankreatobili{\"a}ren Trakts. Allerdings ist sie technisch anspruchsvoll, schwer zu erlernen und mit einer relativ hohen Komplikationsrate assoziiert. Daher soll in der vorliegenden Machbarkeitsstudie gepr{\"u}ft werden, ob mithilfe eines Deeplearning- Algorithmus die Papille und das Ostium zuverl{\"a}ssig detektiert werden k{\"o}nnen und dieser f{\"u}r Endoskopiker, insbesondere in der Ausbildungssituation, ein geeignetes Hilfsmittel darstellen k{\"o}nnte. Material und Methodik Insgesamt wurden 1534 ERCP-Bilder von 134 Patienten analysiert, wobei sowohl die Papilla duodeni major als auch das Ostium segmentiert wurden. Anschließend erfolgte das Training eines neuronalen Netzes unter Verwendung eines Deep-Learning-Algorithmus. F{\"u}r den Test des Algorithmus erfolgte eine f{\"u}nffache Kreuzvalidierung. Ergebnisse Auf den 1534 gelabelten Bildern wurden f{\"u}r die Klasse Papille ein F1-Wert von 0,7996, eine Sensitivit{\"a}t von 0,8488 und eine Spezifit{\"a}t von 0,9822 erzielt. F{\"u}r die Klasse Ostium ergaben sich ein F1-Wert von 0,5198, eine Sensitivit{\"a}t von 0,5945 und eine Spezifit{\"a}t von 0,9974. Klassen{\"u}bergreifend (Klasse Papille und Klasse Ostium) betrug der F1-Wert 0,6593, die Sensitivit{\"a}t 0,7216 und f{\"u}r die Spezifit{\"a}t 0,9898. Zusammenfassung In der vorliegenden Machbarkeitsstudie zeigte das neuronale Netz eine hohe Sensitivit{\"a}t und eine sehr hohe Spezifit{\"a}t bei der Identifikation der Papilla duodeni major. Die Detektion des Ostiums erfolgte hingegen mit einer deutlich geringeren Sensitivit{\"a}t. Zuk{\"u}nftig ist eine Erweiterung des Trainingsdatensatzes um Videos und klinische Daten vorgesehen, um die Leistungsf{\"a}higkeit des Netzwerks zu verbessern. Hierdurch k{\"o}nnte langfristig ein geeignetes Assistenzsystem f{\"u}r die ERCP, insbesondere in der Ausbildungssituation etabliert werden.}, language = {de} }