@article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task. Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. Our pipeline offers greater flexibility and a lower entrance barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @article{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Stallmach, Andreas and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection of duodenal villous atrophy on endoscopic images using a deep learning algorithm}, series = {Gastrointestinal Endoscopy}, journal = {Gastrointestinal Endoscopy}, publisher = {Elsevier}, doi = {10.1016/j.gie.2023.01.006}, abstract = {Background and aims Celiac disease with its endoscopic manifestation of villous atrophy is underdiagnosed worldwide. The application of artificial intelligence (AI) for the macroscopic detection of villous atrophy at routine esophagogastroduodenoscopy may improve diagnostic performance. Methods A dataset of 858 endoscopic images from 182 patients with villous atrophy and 846 images from 323 patients with normal duodenal mucosa was collected and used to train a ResNet18 deep learning model to detect villous atrophy. An external data set was used to test the algorithm, in addition to six fellows and four board-certified gastroenterologists. Fellows could consult the AI algorithm's result during the test. Based on this consultation distribution, test images were stratified into "easy" and "difficult" and performance was measured separately for each stratum. Results External validation of the AI algorithm yielded values of 90\%, 76\%, and 84\% for sensitivity, specificity, and accuracy, respectively. Fellows scored values of 63\%, 72\%, and 67\%, while the corresponding values for experts were 72\%, 69\%, and 71\%, respectively.
AI consultation significantly improved all trainee performance statistics. While fellows and experts showed significantly lower performance for "difficult" images, the performance of the AI algorithm was stable. Conclusion In this study, an AI algorithm outperformed endoscopy fellows and experts in the detection of villous atrophy on endoscopic still images. AI decision support significantly improved the performance of non-expert endoscopists. The stable performance on "difficult" images suggests a further positive add-on effect in challenging cases.}, language = {en} } @misc{ScheppachRauberStallhoferetalVA., author = {Scheppach, Markus and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD), which can easily be missed if pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). A separate dataset was used for external validation, as well as for determining the detection performance of experts, trainees, and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\%, and 84\% for sensitivity, specificity, and accuracy, respectively. External validation showed 90\%, 76\%, and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support, and experts in endoscopy. AI support for trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection performance for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit. Stable performance of the algorithm in "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S.
and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS). An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg. In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the performance of the AI algorithm (AI-EoE-EREFS) improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores. The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with a lower or medium experience level.}, language = {en} } @misc{RoemmeleMendelRauberetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Rauber, David and R{\"u}ckert, Tobias and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Endoscopic Diagnosis of Eosinophilic Esophagitis Using a Deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724274}, abstract = {Aims Eosinophilic esophagitis (EoE) is easily missed during endoscopy, either because physicians are not familiar with its endoscopic features or because the morphologic changes are too subtle. In this preliminary paper, we present the first attempt to detect EoE in endoscopic white light (WL) images using a deep learning network (EoE-AI). Methods 401 WL images of eosinophilic esophagitis and 871 WL images of normal esophageal mucosa were evaluated. All images were assessed for the Endoscopic Reference Score (EREFS) (edema, rings, exudates, furrows, strictures). Images with strictures were excluded. EoE was defined as the presence of at least 15 eosinophils per high-power field on biopsy. A convolutional neural network based on the ResNet architecture with several five-fold cross-validation runs was used. Adding auxiliary EREFS-classification branches to the neural network allowed the inclusion of the scores as optimization criteria during training.
EoE-AI was evaluated for sensitivity, specificity, and F1-score. In addition, two human endoscopists evaluated the images. Results EoE-AI showed a mean sensitivity, specificity, and F1-score of 0.759, 0.976, and 0.834, respectively, averaged over the five distinct cross-validation runs. With the EREFS-augmented architecture, a mean sensitivity, specificity, and F1-score of 0.848, 0.945, and 0.861, respectively, could be demonstrated. In comparison, the two human endoscopists had an average sensitivity, specificity, and F1-score of 0.718, 0.958, and 0.793. Conclusions To the best of our knowledge, this is the first application of deep learning to endoscopic images of EoE, which were also assessed after augmentation with the EREFS score. The next step is the evaluation of EoE-AI using an external dataset. We then plan to assess the EoE-AI tool on endoscopic videos, and also in real time. This preliminary work is encouraging regarding the ability of AI to enhance physician detection of EoE, and potentially to perform a true "optical biopsy", but more work is needed.}, language = {en} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Schnoy, Elisabeth and Nagl, Sandra and R{\"o}mmele, Christoph and Prinz, Friederike and Schlottmann, Jakob and Golger, Daniela and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {AI-assisted detection and characterization of early Barrett's neoplasia: Results of an interim analysis}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765437}, pages = {S169}, abstract = {Aims Evaluation of the add-on effect of an artificial intelligence (AI)-based clinical decision support system on the performance of endoscopists with different degrees of expertise in the field of Barrett's esophagus (BE) and Barrett's esophagus-related neoplasia (BERN). Methods The support system is based on a multi-task deep learning model trained to solve a segmentation and several classification tasks. The training approach represents an extension of the ECMT semi-supervised learning algorithm. The complete system evaluates a decision tree over estimated motion, classification, segmentation, and temporal constraints to decide when and how the prediction is highlighted to the observer. In our current study, ninety-six video cases of patients with BE and BERN were prospectively collected and assessed by Barrett's specialists and non-specialists. All video cases were evaluated twice - with and without AI assistance. The order of appearance, either with or without AI support, was assigned randomly. Participants were asked to detect and characterize regions of dysplasia or early neoplasia within the video sequences. Results Standalone sensitivity, specificity, and accuracy of the AI system were 92.16\%, 68.89\%, and 81.25\%, respectively. Mean sensitivity, specificity, and accuracy of expert endoscopists without AI support were 83.33\%, 58.20\%, and 71.48\%, respectively. Gastroenterologists without Barrett's expertise but with AI support had a comparable performance with a mean sensitivity, specificity, and accuracy of 76.63\%, 65.35\%, and 71.36\%, respectively.
Conclusions Non-Barrett's experts with AI support achieved a performance similar to that of experts in this video-based study.}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik A. H. and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and Rueckert, Tobias and Matsumura, Tomoaki and Fern{\'a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {9}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests that AI algorithms can recognize intraprocedural phases in surgical operations as well as in endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD).
Precise measurement of the intraprocedural phase distribution may deepen the understanding of the procedure. Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364,385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase. Phases included scope manipulation, marking, injection, application of electrical current, and bleeding. Each frame could be allocated to only one category. This training dataset was used to train a Video Swin transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29,801 single frames. Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\%, and 87.55\% for scope manipulation, marking, injection, application of electrical current, and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\%, and 84.76\% were measured for the same phases. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} } @misc{EbigboRauberAyoubetal., author = {Ebigbo, Alanna and Rauber, David and Ayoub, Mousa and Birzle, Lisa and Matsumura, Tomoaki and Probst, Andreas and Steinbr{\"u}ck, Ingo and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Scheppach, Markus W. and Palm, Christoph and Messmann, Helmut}, title = {Early Esophageal Cancer and the Generalizability of Artificial Intelligence}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783775}, pages = {S428}, abstract = {Aims Artificial intelligence (AI) systems in gastrointestinal endoscopy are narrow because they are trained to solve only one specific task. Unlike narrow AI, general AI systems may be able to solve multiple and unrelated tasks. We aimed to understand whether an AI system trained to detect, characterize, and segment early Barrett's neoplasia (Barrett's AI) is only capable of detecting this pathology or can also detect and segment other diseases such as early squamous cell cancer (SCC). Methods 120 white light (WL) and narrow-band imaging (NBI) endoscopic images from 60 patients (1 WL and 1 NBI image per patient) were extracted from the endoscopic database of the University Hospital Augsburg. Images were annotated by three expert endoscopists with extensive experience in the diagnosis and endoscopic resection of early esophageal neoplasias. An AI system based on the DeepLabV3+ architecture dedicated to early Barrett's neoplasia was tested on these images. The AI system was neither trained with SCC images nor had it seen the test images prior to evaluation. The overlap between the three expert annotations ("expert-agreement") was the ground truth for evaluating AI performance. Results Barrett's AI detected early SCC with a mean intersection over reference (IoR) of 92\% when at least 1 pixel of the AI prediction overlapped with the expert-agreement. When the threshold was increased to 5\%, 10\%, and 20\% overlap with the expert-agreement, the IoR was 88\%, 85\%, and 82\%, respectively.
The mean intersection over union (IoU) - a metric of the segmentation agreement between the AI prediction and the expert-agreement - was 0.45. The mean expert IoU, as a measure of agreement between the three experts, was 0.60. Conclusions In the context of this pilot study, the predictions of SCC by a Barrett's-dedicated AI showed some overlap with the expert-agreement. Therefore, features learned from Barrett's cancer-related training might also be helpful for SCC prediction. Our results allow for different possible explanations. On the one hand, some Barrett's cancer features may generalize toward the related task of assessing early SCC. On the other hand, the Barrett's AI may be less a detector specific to Barrett's cancer than a general predictor of pathological tissue. However, we expect to enhance the detection quality significantly by extending the training to SCC-specific data. The insights of this study open the way towards a transfer learning approach for more efficient training of AI to solve tasks in other domains.}, language = {en} } @misc{ScheppachMendelRauberetal., author = {Scheppach, Markus W. and Mendel, Robert and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) improves endoscopists' vessel detection during endoscopic submucosal dissection (ESD)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1782891}, pages = {S93}, abstract = {Aims While AI has been successfully implemented in detecting and characterizing colonic polyps, its role in therapeutic endoscopy remains to be elucidated. Third-space endoscopy procedures in particular, such as ESD and peroral endoscopic myotomy (POEM), pose a technical challenge and carry the risk of operator-dependent complications such as intraprocedural bleeding and perforation. Therefore, we aimed to develop an AI algorithm for intraprocedural real-time vessel detection during ESD and POEM. Methods A training dataset consisting of 5,470 annotated still images from 59 full-length videos (47 ESD, 12 POEM) and 179,681 unlabeled images was used to train a DeepLabV3+ neural network with the ECMT semi-supervised learning method. Evaluation of vessel detection rate (VDR) and vessel detection time (VDT) for 19 endoscopists with and without AI support was performed using a testing dataset of 101 standardized video clips with 200 predefined blood vessels. Endoscopists were stratified into trainees and experts in third-space endoscopy. Results The AI algorithm had a mean VDR of 93.5\% and a median VDT of 0.32 seconds. AI support was associated with a statistically significant increase in VDR from 54.9\% to 73.0\% and from 59.0\% to 74.1\% for trainees and experts, respectively. VDT significantly decreased from 7.21 sec to 5.09 sec for trainees and from 6.10 sec to 5.38 sec for experts with AI support. False positive (FP) readings occurred in 4.5\% of frames. FP structures were detected for significantly shorter durations than true positives (0.71 sec vs. 5.99 sec). Conclusions AI improved VDR and VDT of trainees and experts in third-space endoscopy and may reduce performance variability during training. Further research is needed to evaluate the clinical impact of this new technology.}, language = {en} }
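The sensitivity, specificity, accuracy, and F1-score values quoted in the abstracts above, as well as the intersection over union (IoU) used for segmentation overlap, follow the standard definitions. As a minimal reference sketch in LaTeX, assuming the usual confusion-matrix counts TP, TN, FP, and FN and pixel sets A (prediction) and B (reference annotation), none of which are spelled out in the entries themselves:

\begin{align}
  \text{Sensitivity} &= \frac{TP}{TP + FN}, &
  \text{Specificity} &= \frac{TN}{TN + FP}, \\
  \text{Accuracy} &= \frac{TP + TN}{TP + TN + FP + FN}, &
  F_1 &= \frac{2\,TP}{2\,TP + FP + FN}, \\
  \text{IoU}(A, B) &= \frac{|A \cap B|}{|A \cup B|}.
\end{align}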