@article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task. Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. Our pipeline offers greater flexibility and a lower entrance barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @article{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Stallmach, Andreas and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection of duodenal villous atrophy on endoscopic images using a deep learning algorithm}, series = {Gastrointestinal Endoscopy}, journal = {Gastrointestinal Endoscopy}, publisher = {Elsevier}, doi = {10.1016/j.gie.2023.01.006}, abstract = {Background and aims Celiac disease with its endoscopic manifestation of villous atrophy is underdiagnosed worldwide. The application of artificial intelligence (AI) for the macroscopic detection of villous atrophy at routine esophagogastroduodenoscopy may improve diagnostic performance. Methods A dataset of 858 endoscopic images from 182 patients with villous atrophy and 846 images from 323 patients with normal duodenal mucosa was collected and used to train a ResNet 18 deep learning model to detect villous atrophy. An external data set was used to test the algorithm, in addition to six fellows and four board-certified gastroenterologists. Fellows could consult the AI algorithm's result during the test. Based on the distribution of these consultations, the test images were stratified into "easy" and "difficult", and performance was measured separately for each stratum. Results External validation of the AI algorithm yielded values of 90\%, 76\%, and 84\% for sensitivity, specificity, and accuracy, respectively. Fellows scored 63\%, 72\%, and 67\%, while the corresponding values in experts were 72\%, 69\%, and 71\%, respectively.
AI consultation significantly improved all performance statistics of the fellows. While fellows and experts showed significantly lower performance for "difficult" images, the performance of the AI algorithm remained stable. Conclusion In this study, an AI algorithm outperformed endoscopy fellows and experts in the detection of villous atrophy on endoscopic still images. AI decision support significantly improved the performance of non-expert endoscopists. The stable performance on "difficult" images suggests a further positive add-on effect in challenging cases.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Verwendung k{\"u}nstlicher Intelligenz bei der Detektion der Papilla duodeni major}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1772000}, pages = {e593-e540}, abstract = {Introduction Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard for the diagnosis and treatment of diseases of the pancreaticobiliary tract. However, it is technically highly demanding and carries a comparatively high complication rate. Aims This feasibility study examined whether a deep learning algorithm can reliably detect the papilla and the ostium, and could thus provide a suitable aid for endoscopists with limited experience, particularly in training settings. Methods We reviewed a total of 606 image datasets from 65 patients, in which both the papilla duodeni major and the ostium were segmented. A neural network was then trained using a deep learning algorithm, and a 5-fold cross-validation was performed. Results In the 5-fold cross-validation on the 606 labeled datasets, the papilla class reached an F1 score of 0.7908, a sensitivity of 0.7943, and a specificity of 0.9785, while the ostium class reached an F1 score of 0.5538, a sensitivity of 0.5094, and a specificity of 0.9970 (cf. [Tab. 1]). Averaged over both classes (papilla and ostium), the F1 score was 0.6673, the sensitivity 0.6519, and the specificity 0.9877 (cf. [Tab. 2]). Conclusion In this feasibility study, the neural network identified the papilla duodeni major with high sensitivity and very high specificity, whereas the sensitivity for the detection of the ostium was considerably lower. In the future, the neural network will be trained with more data, and we plan to apply the algorithm to videos as well. In the long term, a suitable assistance tool for ERCP could thus be established.}, language = {de} } @misc{ScheppachRauberStallhoferetalVA, author = {Scheppach, Markus and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD), which can easily be missed if the pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). A separate dataset was used for external validation, as well as for determining the detection performance of experts, trainees, and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\%, and 84\% for sensitivity, specificity, and accuracy, respectively; external validation showed 90\%, 76\%, and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support, and experts in endoscopy. AI support in trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection performance for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit. The stable performance of the algorithm on "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, D. A. and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, A. and Scheppach, Markus W. and Nagl, S. and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, D. and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, T. and Fernandez-Esparrach, G. and Parsa, N. and Byrne, M. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not reach the stand-alone performance of the AI.
One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust in and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed 96 standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomized into two arms: Arm A assessed the videos first without AI and then with AI, while Arm B assessed them in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The use of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, despite comparable outcomes. These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @article{MeinikheimMendelPalmetal.2024, author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus Wolfgang and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\'a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Effect of AI on performance of endoscopists to detect Barrett neoplasia: A Randomized Tandem Trial}, series = {Endoscopy}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, issn = {0013-726X}, doi = {10.1055/a-2296-5696}, year = {2024}, abstract = {Background and study aims To evaluate the effect of an AI-based clinical decision support system (AI) on the performance and diagnostic confidence of endoscopists during the assessment of Barrett's esophagus (BE). Patients and Methods Ninety-six standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 different centers. The assessment was randomized into two video sets: Group A (review first without AI and second with AI) and Group B (review first with AI and second without AI).
Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a standalone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.6\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.7\% (95\% CI, 65.2\%-74.2\%) to 78.0\% (95\% CI, 74.0\%-82.0\%); specificity 67.3\% (95\% CI, 62.5\%-72.2\%) to 72.7\% (95\% CI, 68.2\%-77.3\%)). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from the additional AI support. BE experts and nonexperts remained below the standalone performance of the AI, suggesting that there may be other factors influencing endoscopists to follow or discard AI advice.}, language = {en} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS). An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg. In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the performance of the AI algorithm (AI-EoE-EREFS) improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores.
The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with low or medium experience levels.}, language = {en} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and De Souza Jr., Luis Antonio and Papa, Jo{\~a}o Paulo and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations, such as bleeding and perforation, during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy. A DeepLabv3-based model was trained to delineate vessels, tissue structures, and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, procedure time, and training.}, language = {en} } @misc{RoemmeleMendelRauberetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Rauber, David and R{\"u}ckert, Tobias and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Endoscopic Diagnosis of Eosinophilic Esophagitis Using a Deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724274}, abstract = {Aims Eosinophilic esophagitis (EoE) is easily missed during endoscopy, either because physicians are not familiar with its endoscopic features or because the morphologic changes are too subtle. In this preliminary paper, we present the first attempt to detect EoE in endoscopic white light (WL) images using a deep learning network (EoE-AI). Methods 401 WL images of eosinophilic esophagitis and 871 WL images of normal esophageal mucosa were evaluated. All images were assessed for the Endoscopic Reference Score (EREFS) (edema, rings, exudates, furrows, strictures); images with strictures were excluded. EoE was defined as the presence of at least 15 eosinophils per high power field on biopsy. A convolutional neural network based on the ResNet architecture was used, with several five-fold cross-validation runs. Adding auxiliary EREFS-classification branches to the neural network allowed the inclusion of the scores as optimization criteria during training. EoE-AI was evaluated for sensitivity, specificity, and F1-score. In addition, two human endoscopists evaluated the images. Results EoE-AI showed a mean sensitivity, specificity, and F1-score of 0.759, 0.976, and 0.834, respectively, averaged over the five distinct cross-validation runs.
With the EREFS-augmented architecture, a mean sensitivity, specificity, and F1-score of 0.848, 0.945, and 0.861, respectively, could be demonstrated. In comparison, the two human endoscopists had an average sensitivity, specificity, and F1-score of 0.718, 0.958, and 0.793. Conclusions To the best of our knowledge, this is the first application of deep learning to endoscopic images of EoE, which were also assessed after augmentation with the EREFS score. The next step is the evaluation of EoE-AI using an external dataset. We then plan to assess the EoE-AI tool on endoscopic videos, and also in real time. This preliminary work is encouraging regarding the ability of AI to enhance physician detection of EoE, and potentially to perform a true "optical biopsy", but more work is needed.}, language = {en} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Prinz, Friederike and Schlottmann, Jakob and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Einfluss von K{\"u}nstlicher Intelligenz auf die Performance von niedergelassenen Gastroenterolog:innen bei der Beurteilung von Barrett-{\"O}sophagus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {8}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1771711}, abstract = {Introduction Differentiating between non-dysplastic Barrett's esophagus (NDBE) and Barrett's esophagus-related neoplasia (BERN) during endoscopic inspection requires considerable expertise. Early diagnosis is important for the prognosis of Barrett's carcinoma. In Germany, patients with Barrett's esophagus (BE) are usually surveilled in the office-based sector. Aims To investigate the influence of an artificial intelligence (AI)-based clinical decision support system (CDSS) on the performance of office-based gastroenterologists in the evaluation of Barrett's esophagus (BE). Methods 96 unaltered high-resolution videos of patients with histologically confirmed NDBE and BERN were collected prospectively. All included cases contained at least two of the following imaging modalities: HD white-light endoscopy, Narrow Band Imaging, or Texture and Color Enhancement Imaging. Six office-based gastroenterologists from six different practices were included as participants. The video cases were assigned to either group A or group B by permuted block randomization. In group A, participants evaluated each case first without AI and then with AI as CDSS; in group B, the evaluation was performed in the reverse order. The resulting subgroups were then presented in random order during the test. Results In this test, the AI system we developed (Barrett-Ampel) achieved a sensitivity of 92.2\%, a specificity of 68.9\%, and an accuracy of 81.3\%. With the aid of AI, the sensitivity of the office-based gastroenterologists improved significantly from 64.1\% to 71.2\% (p<0.001), and their accuracy from 66.3\% to 70.8\% (p=0.006). A significant improvement in these parameters was also seen when participants evaluated the cases first without AI (group A). When a case was evaluated first with the aid of AI (group B), however, performance remained nearly constant. Conclusion A well-performing AI system for the evaluation of BE was developed. Office-based gastroenterologists improve in their evaluation of BE when supported by AI.}, language = {de} }