@misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Verwendung k{\"u}nstlicher Intelligenz bei der Detektion der Papilla duodeni major}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1772000}, pages = {e593-e540}, abstract = {Einleitung Die Endoskopische Retrograde Cholangiopankreatikographie (ERCP) ist der Goldstandard in der Diagnostik und Therapie von Erkrankungen des pankreatobili{\"a}ren Trakts. Jedoch ist sie technisch sehr anspruchsvoll und weist eine vergleichsweise hohe Komplikationsrate auf. Ziele In der vorliegenden Machbarkeitsstudie soll gepr{\"u}ft werden, ob mithilfe eines Deep-learning-Algorithmus die Papille und das Ostium zuverl{\"a}ssig detektiert werden k{\"o}nnen und somit f{\"u}r Endoskopiker mit geringer Erfahrung ein geeignetes Hilfsmittel, insbesondere f{\"u}r die Ausbildungssituation, darstellen k{\"o}nnten. Methodik Wir betrachteten insgesamt 606 Bilddatens{\"a}tze von 65 Patienten. In diesen wurde sowohl die Papilla duodeni major als auch das Ostium segmentiert. Anschließend wurde ein neuronales Netz mittels eines Deep-learning-Algorithmus trainiert. Außerdem erfolgte eine 5-fache Kreuzvalidierung. Ergebnisse Bei einer 5-fachen Kreuzvalidierung auf den 606 gelabelten Daten konnte f{\"u}r die Klasse Papille ein F1-Wert von 0,7908, eine Sensitivit{\"a}t von 0,7943 und eine Spezifit{\"a}t von 0,9785 erreicht werden, f{\"u}r die Klasse Ostium ein F1-Wert von 0,5538, eine Sensitivit{\"a}t von 0,5094 und eine Spezifit{\"a}t von 0,9970 (vgl. [Tab. 1]). Unabh{\"a}ngig von der Klasse zeigte sich gemittelt (Klasse Papille und Klasse Ostium) ein F1-Wert von 0,6673, eine Sensitivit{\"a}t von 0,6519 und eine Spezifit{\"a}t von 0,9877 (vgl. [Tab. 2]). Schlussfolgerung In der vorliegenden Machbarkeitsstudie konnte das neuronale Netz die Papilla duodeni major mit einer hohen Sensitivit{\"a}t und sehr hohen Spezifit{\"a}t identifizieren. Bei der Detektion des Ostiums war die Sensitivit{\"a}t deutlich geringer. Zuk{\"u}nftig soll das neuronale Netz mit mehr Daten trainiert werden. Außerdem ist geplant, den Algorithmus auch auf Videos anzuwenden. Somit k{\"o}nnte langfristig ein geeignetes Hilfsmittel f{\"u}r die ERCP etabliert werden.}, language = {de} }
@misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and R{\"o}mmele, Christoph and Nagl, Sandra and Schnoy, Elisabeth and Messmann, Helmut and Ebigbo, Alanna and Palm, Christoph}, title = {Artificial intelligence as a tool in the detection of the papillary ostium during ERCP}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783138}, pages = {S198}, abstract = {Aims Endoscopic retrograde cholangiopancreaticography (ERCP) is the gold standard in the diagnosis as well as treatment of diseases of the pancreatobiliary tract. However, it is technically complex and has a relatively high complication rate. In particular, cannulation of the papillary ostium remains challenging.
The aim of this study is to examine whether a deep-learning algorithm can be used to reliably detect the major duodenal papilla and, in particular, the papillary ostium, and could therefore be a valuable tool for inexperienced endoscopists, particularly in the training situation. Methods We analyzed a total of 654 retrospectively collected images of 85 patients. Both the major duodenal papilla and the ostium were then segmented. Afterwards, a neural network was trained using a deep-learning algorithm. A 5-fold cross-validation was performed. Subsequently, we ran the algorithm on 5 prospectively collected videos of ERCPs. Results 5-fold cross-validation on the 654 labeled images resulted in an F1 value of 0.8007, a sensitivity of 0.8409 and a specificity of 0.9757 for the class papilla, and an F1 value of 0.5724, a sensitivity of 0.5456 and a specificity of 0.9966 for the class ostium. Regardless of the class, the average F1 value (class papilla and class ostium) was 0.6866, the sensitivity 0.6933 and the specificity 0.9861. In 100\% of cases, the AI-detected localization of the papillary ostium in the prospectively collected videos corresponded to the localization of the cannulation performed by the endoscopist. Conclusions In the present study, the neural network was able to identify the major duodenal papilla with a high sensitivity and high specificity. In detecting the papillary ostium, the sensitivity was notably lower. However, when used on videos, the AI was able to identify the location of the subsequent cannulation with 100\% accuracy. In the future, the neural network will be trained with more data. Thus, a suitable tool for ERCP could be established, especially in the training situation.}, language = {en} }
@misc{WeigertPalmQuicketal., author = {Weigert, Markus and Palm, Christoph and Quick, Harald H. and M{\"u}ller, Stefan P. and Pietrzyk, Uwe and Beyer, Thomas}, title = {Template for MR-based attenuation correction for whole-body PET/MR imaging}, series = {Nuklearmedizin}, volume = {46}, journal = {Nuklearmedizin}, number = {2}, pages = {A115}, subject = {Kernspintomografie}, language = {en} }
@misc{WeigertBeyerQuicketal., author = {Weigert, Markus and Beyer, Thomas and Quick, Harald H. and Pietrzyk, Uwe and Palm, Christoph and M{\"u}ller, Stefan P.}, title = {Generation of a MRI reference data set for the validation of automatic, non-rigid image co-registration algorithms}, series = {Nuklearmedizin}, volume = {46}, journal = {Nuklearmedizin}, number = {2}, pages = {A116}, subject = {Kernspintomografie}, language = {en} }
@misc{SchroederSemmelmannSiegmundetal., author = {Schroeder, Josef A. and Semmelmann, Matthias and Siegmund, Heiko and Grafe, Claudia and Evert, Matthias and Palm, Christoph}, title = {Improved interactive computer-assisted approach for evaluation of ultrastructural cilia abnormalities}, series = {Ultrastructural Pathology}, volume = {41}, journal = {Ultrastructural Pathology}, number = {1}, doi = {10.1080/01913123.2016.1270978}, pages = {112 -- 113}, subject = {Zilie}, language = {en} }
@misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X.
and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests the possibility of intraprocedural phase recognition in surgical operations as well as endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD) by AI algorithms. A detailed measurement of the intraprocedural phase distribution may deepen the understanding of the procedure. Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase. Phases included scope manipulation, marking, injection, application of electrical current and bleeding. Each frame could be allocated to only one category. This training dataset was used to train a Video Swin Transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29801 single frames. Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\% and 87.55\% for scope manipulation, marking, injection, electric application and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\% and 84.76\% were measured for the same parameters. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} }
@misc{ScheppachRauberMendeletal., author = {Scheppach, Markus W. and Rauber, David and Mendel, Robert and Palm, Christoph and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection Of Celiac Disease Using A Deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724970}, abstract = {Aims Celiac disease (CD) is a complex condition caused by an autoimmune reaction to ingested gluten. Due to its polymorphic manifestation and subtle endoscopic presentation, the diagnosis is difficult and thus the disorder is underreported. We aimed to use deep learning to identify celiac disease on endoscopic images of the small bowel. Methods Patients with small intestinal histology compatible with CD (MARSH classification I-III) were extracted retrospectively from the database of Augsburg University Hospital. They were compared to patients with no clinical signs of CD and histologically normal small intestinal mucosa. In a first step, MARSH III and normal small intestinal mucosa were differentiated with the help of a deep learning algorithm. For this, the endoscopic white light images were divided into five equal-sized subsets. We avoided splitting the images of one patient into several subsets.
A ResNet-50 model was trained with the images from four subsets and then validated with the remaining subset. This process was repeated for each subset, such that each subset was validated once. Sensitivity, specificity, and the F1 score (harmonic mean of precision and recall) of the algorithm were determined. Results The algorithm showed values of 0.83, 0.88, and 0.84 for sensitivity, specificity, and F1, respectively. Further data showing a comparison between the detection rate of the AI model and that of experienced endoscopists will be available at the time of the upcoming conference. Conclusions We present the first clinical report on the use of a deep learning algorithm for the detection of celiac disease using endoscopic images. Further evaluation on an external data set, as well as in the detection of CD in real time, will follow. However, this work at least suggests that AI can assist endoscopists in the endoscopic diagnosis of CD, and ultimately may be able to do a true optical biopsy in real time.}, language = {en} }
@misc{ScheppachMendelRauberetal., author = {Scheppach, Markus W. and Mendel, Robert and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) improves endoscopists' vessel detection during endoscopic submucosal dissection (ESD)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1782891}, pages = {S93}, abstract = {Aims While AI has been successfully implemented in detecting and characterizing colonic polyps, its role in therapeutic endoscopy remains to be elucidated. Third space endoscopy procedures like ESD and peroral endoscopic myotomy (POEM) in particular pose a technical challenge and carry the risk of operator-dependent complications like intraprocedural bleeding and perforation. Therefore, we aimed at developing an AI algorithm for intraprocedural real-time vessel detection during ESD and POEM. Methods A training dataset consisting of 5470 annotated still images from 59 full-length videos (47 ESD, 12 POEM) and 179681 unlabeled images was used to train a DeepLabv3+ neural network with the ECMT semi-supervised learning method. Evaluation for vessel detection rate (VDR) and time (VDT) of 19 endoscopists with and without AI support was performed using a testing dataset of 101 standardized video clips with 200 predefined blood vessels. Endoscopists were stratified into trainees and experts in third space endoscopy. Results The AI algorithm had a mean VDR of 93.5\% and a median VDT of 0.32 seconds. AI support was associated with a statistically significant increase in VDR from 54.9\% to 73.0\% and from 59.0\% to 74.1\% for trainees and experts, respectively. VDT significantly decreased from 7.21 sec to 5.09 sec for trainees and from 6.10 sec to 5.38 sec for experts in the AI-support group. False positive (FP) readings occurred in 4.5\% of frames. FP structures were detected for a significantly shorter time than true positives (0.71 sec vs. 5.99 sec). Conclusions AI improved VDR and VDT of trainees and experts in third space endoscopy and may reduce performance variability during training. Further research is needed to evaluate the clinical impact of this new technology.}, language = {en} }
@misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W.
and Mendel, Robert and Probst, Andreas and Rauber, David and Rueckert, Tobias and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Real-time detection and delineation of tissue during third-space endoscopy using artificial intelligence (AI)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765128}, pages = {S53 -- S54}, abstract = {Aims AI has shown great potential in assisting endoscopists in diagnostics; however, its role in therapeutic endoscopy remains unclear. Endoscopic submucosal dissection (ESD) is a technically demanding intervention with a slow learning curve and relevant risks like bleeding and perforation. Therefore, we aimed to develop an algorithm for the real-time detection and delineation of relevant structures during third-space endoscopy. Methods 5470 still images from 59 full-length videos (47 ESD, 12 POEM) were annotated. 179681 additional unlabeled images were added to the training dataset. Subsequently, a DeepLabv3+ neural network architecture was trained with the ECMT semi-supervised algorithm (under review elsewhere). Evaluation of vessel detection was performed on a dataset of 101 standardized video clips from 15 separate third-space endoscopy videos with 200 predefined blood vessels. Results Internal validation yielded an overall mean Dice score of 85\% (68\% for blood vessels, 86\% for submucosal layer, 88\% for muscle layer). On the video test data, the overall vessel detection rate (VDR) was 94\% (96\% for ESD, 74\% for POEM). The median overall vessel detection time (VDT) was 0.32 sec (0.3 sec for ESD, 0.62 sec for POEM). Conclusions Evaluation of the developed algorithm on a video test dataset showed high VDR and quick VDT, especially for ESD. Further research will focus on a possible clinical benefit of the AI application for VDR and VDT during third-space endoscopy.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} }
@misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Nagl, Sandra and Meinikheim, Michael and Yip, Hon Chi and Lau, Louis Ho Shing and Chiu, Philip Wai Yan and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Effekt eines K{\"u}nstliche Intelligenz (KI) - Algorithmus auf die Gef{\"a}ßdetektion bei third space Endoskopien}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1771980}, pages = {e528-e529}, abstract = {Einleitung Third space Endoskopieprozeduren wie die endoskopische Submukosadissektion (ESD) und die perorale endoskopische Myotomie (POEM) sind technisch anspruchsvoll und gehen mit untersucherabh{\"a}ngigen Komplikationen wie Blutungen und Perforationen einher. Grund hierf{\"u}r ist die unabsichtliche Durchschneidung von submukosalen Blutgef{\"a}ßen ohne pr{\"a}emptive Koagulation. Ziele Die Forschungsfrage, ob ein KI-Algorithmus die intraprozedurale Gef{\"a}ßerkennung bei ESD und POEM unterst{\"u}tzen und damit Komplikationen wie Blutungen verhindern k{\"o}nnte, erscheint in Anbetracht des erfolgreichen Einsatzes von KI bei der Erkennung von Kolonpolypen interessant. Methoden Auf 5470 Einzelbildern von 59 third space Endoskopievideos wurden submukosale Blutgef{\"a}ße annotiert.
Zusammen mit weiteren 179.681 nicht-annotierten Bildern wurde ein DeepLabv3+ neuronales Netzwerk mit dem ECMT-Verfahren f{\"u}r semi-supervised learning trainiert, um Blutgef{\"a}ße in Echtzeit erkennen zu k{\"o}nnen. F{\"u}r die Evaluation wurde ein Videotest mit 101 Videoclips aus 15 vom Trainingsdatensatz separaten Prozeduren mit 200 vordefinierten Gef{\"a}ßen erstellt. Erhoben wurden die Gef{\"a}ßdetektionsrate, die Gef{\"a}ßdetektionszeit sowie die Gef{\"a}ßdetektionsdauer, definiert als der Prozentsatz der Einzelbilder eines Videos (bezogen auf den Goldstandard), auf denen ein definiertes Gef{\"a}ß erkannt wurde. Acht erfahrene Endoskopiker wurden mithilfe dieses Videotests im Hinblick auf Gef{\"a}ßdetektion getestet, wobei eine H{\"a}lfte der Videos nativ, die andere H{\"a}lfte nach Markierung durch den KI-Algorithmus angesehen wurde. Ergebnisse Der mittlere Dice Score des Algorithmus f{\"u}r Blutgef{\"a}ße war 68\%. Die mittlere Gef{\"a}ßdetektionsrate im Videotest lag bei 94\% (96\% f{\"u}r ESD; 74\% f{\"u}r POEM). Die mediane Gef{\"a}ßdetektionszeit des Algorithmus lag bei 0,32 Sekunden (0,3 Sekunden f{\"u}r ESD; 0,62 Sekunden f{\"u}r POEM). Die mittlere Gef{\"a}ßdetektionsdauer lag bei 59,1\% (60,6\% f{\"u}r ESD; 44,8\% f{\"u}r POEM) des Goldstandards. Alle Endoskopiker hatten mit KI-Unterst{\"u}tzung eine h{\"o}here Gef{\"a}ßdetektionsrate als ohne KI. Die mittlere Gef{\"a}ßdetektionsrate ohne KI lag bei 56,4\%, mit KI bei 71,2\% (p<0,001). Schlussfolgerung KI-Unterst{\"u}tzung war mit einer statistisch signifikant h{\"o}heren Gef{\"a}ßdetektionsrate vergesellschaftet. Die mediane Gef{\"a}ßdetektionszeit von deutlich unter einer Sekunde sowie eine Gef{\"a}ßdetektionsdauer von mehr als 50\% des Goldstandards wurden f{\"u}r den klinischen Einsatz als ausreichend erachtet. In prospektiven Anwendungsstudien sollte der KI-Algorithmus auf klinische Relevanz getestet werden.}, language = {de} }
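
Note: Several of the abstracts above describe the same evaluation protocol: k-fold cross-validation in which all images of one patient stay in a single subset, with sensitivity, specificity and the F1 score reported per class. The Python sketch below merely illustrates that protocol under stated assumptions; it is not code from any of the cited works, and train_fn/predict_fn are hypothetical stand-ins for the actual networks (e.g. ResNet-50, DeepLabv3+, Video Swin Transformer).

# Illustrative sketch only (not code from the cited works): patient-level
# k-fold cross-validation reporting sensitivity, specificity and F1.
import numpy as np
from sklearn.model_selection import GroupKFold


def binary_metrics(y_true, y_pred):
    """Sensitivity, specificity and F1 for binary labels (1 = positive class)."""
    tp = int(np.sum((y_pred == 1) & (y_true == 1)))
    tn = int(np.sum((y_pred == 0) & (y_true == 0)))
    fp = int(np.sum((y_pred == 1) & (y_true == 0)))
    fn = int(np.sum((y_pred == 0) & (y_true == 1)))
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
    specificity = tn / (tn + fp) if (tn + fp) else 0.0
    f1 = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn) else 0.0
    return sensitivity, specificity, f1


def grouped_cross_validation(features, labels, patient_ids, train_fn, predict_fn, n_splits=5):
    """Cross-validation where all images of a patient fall into the same fold,
    mirroring the 'no patient split across subsets' rule described above.
    features, labels and patient_ids are NumPy arrays; train_fn and predict_fn
    are caller-supplied (hypothetical) model training and inference functions."""
    fold_scores = []
    splitter = GroupKFold(n_splits=n_splits)
    for train_idx, test_idx in splitter.split(features, labels, groups=patient_ids):
        model = train_fn(features[train_idx], labels[train_idx])
        y_pred = predict_fn(model, features[test_idx])
        fold_scores.append(binary_metrics(labels[test_idx], np.asarray(y_pred)))
    # Mean over folds: (sensitivity, specificity, F1)
    return tuple(np.mean(fold_scores, axis=0))

Per-class figures such as those reported separately for "papilla" and "ostium" would be obtained by running grouped_cross_validation once per class with one-vs-rest labels; averaging the per-class results then corresponds to the class-independent means quoted in the abstracts.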