@misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Prinz, Friederike and Schlottmann, Jakob and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Einfluss von K{\"u}nstlicher Intelligenz auf die Performance von niedergelassenen Gastroenterolog:innen bei der Beurteilung von Barrett-{\"O}sophagus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {8}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1771711}, abstract = {Einleitung Die Differenzierung zwischen nicht dysplastischem Barrett-{\"O}sophagus (NDBE) und mit Barrett-{\"O}sophagus assoziierten Neoplasien (BERN) w{\"a}hrend der endoskopischen Inspektion erfordert viel Expertise. Die fr{\"u}he Diagnosestellung ist wichtig f{\"u}r die weitere Prognose des Barrett-Karzinoms. In Deutschland werden Patient:innen mit einem Barrett-{\"O}sophagus (BE) in der Regel im niedergelassenen Sektor {\"u}berwacht. Ziele Ziel ist es, den Einfluss eines auf K{\"u}nstlicher Intelligenz (KI) basierenden klinischen Entscheidungsunterst{\"u}tzungssystems (CDSS) auf die Performance von niedergelassenen Gastroenterolog:innen (NG) bei der Evaluation von Barrett-{\"O}sophagus (BE) zu untersuchen. Methodik Es erfolgte die prospektive Sammlung von 96 unver{\"a}nderten hochaufl{\"o}senden Videos mit F{\"a}llen von Patient:innen mit histologisch best{\"a}tigtem NDBE und BERN. Alle eingeschlossenen F{\"a}lle enthielten mindestens zwei der folgenden Darstellungsmethoden: HD-Weißlichtendoskopie, Narrow Band Imaging oder Texture and Color Enhancement Imaging. Sechs NG von sechs unterschiedlichen Praxen wurden als Proband:innen eingeschlossen. Es erfolgte eine permutierte Block-Randomisierung der Videof{\"a}lle in entweder Gruppe A oder Gruppe B. Gruppe A beinhaltete eine Evaluation des Falls durch Proband:innen zun{\"a}chst ohne KI und anschließend mit KI als CDSS. In Gruppe B erfolgte die Evaluation in umgekehrter Reihenfolge. Anschließend erfolgte eine zuf{\"a}llige Wiedergabe der so entstandenen Subgruppen im Rahmen des Tests. Ergebnis In diesem Test konnte ein von uns entwickeltes KI-System (Barrett-Ampel) eine Sensitivit{\"a}t von 92,2\%, eine Spezifit{\"a}t von 68,9\% und eine Accuracy von 81,3\% erreichen. Mit der Hilfe von KI verbesserte sich die Sensitivit{\"a}t der NG von 64,1\% auf 71,2\% (p<0,001) und die Accuracy von 66,3\% auf 70,8\% (p=0,006) signifikant. Eine signifikante Verbesserung dieser Parameter zeigte sich ebenfalls, wenn die Proband:innen die F{\"a}lle zun{\"a}chst ohne KI evaluierten (Gruppe A). Wurde der Fall jedoch als Erstes mit der Hilfe von KI evaluiert (Gruppe B), blieb die Performance nahezu konstant. Schlussfolgerung Es konnte ein performantes KI-System zur Evaluation von BE entwickelt werden. NG verbessern sich bei der Evaluation von BE durch den Einsatz von KI.}, language = {de} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W.
and Mendel, Robert and Probst, Andreas and Nagl, Sandra and Meinikheim, Michael and Yip, Hon Chi and Lau, Louis Ho Shing and Chiu, Philip Wai Yan and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Effekt eines K{\"u}nstliche Intelligenz (KI) - Algorithmus auf die Gef{\"a}ßdetektion bei third space Endoskopien}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1771980}, pages = {e528 -- e529}, abstract = {Einleitung Third space Endoskopieprozeduren wie die endoskopische Submukosadissektion (ESD) und die perorale endoskopische Myotomie (POEM) sind technisch anspruchsvoll und gehen mit untersucherabh{\"a}ngigen Komplikationen wie Blutungen und Perforationen einher. Grund hierf{\"u}r ist die unabsichtliche Durchschneidung von submukosalen Blutgef{\"a}ßen ohne pr{\"a}emptive Koagulation. Ziele Die Forschungsfrage, ob ein KI-Algorithmus die intraprozedurale Gef{\"a}ßerkennung bei ESD und POEM unterst{\"u}tzen und damit Komplikationen wie Blutungen verhindern k{\"o}nnte, erscheint in Anbetracht des erfolgreichen Einsatzes von KI bei der Erkennung von Kolonpolypen interessant. Methoden Auf 5470 Einzelbildern von 59 third space Endoskopievideos wurden submukosale Blutgef{\"a}ße annotiert. Zusammen mit weiteren 179.681 nicht-annotierten Bildern wurde ein DeepLabv3+ neuronales Netzwerk mit dem ECMT-Verfahren f{\"u}r semi-supervised learning trainiert, um Blutgef{\"a}ße in Echtzeit erkennen zu k{\"o}nnen. F{\"u}r die Evaluation wurde ein Videotest mit 101 Videoclips aus 15 vom Trainingsdatensatz separaten Prozeduren mit 200 vordefinierten Gef{\"a}ßen erstellt. Die Gef{\"a}ßdetektionsrate und -zeit sowie die Gef{\"a}ßdetektionsdauer, definiert als der Prozentsatz an Einzelbildern eines Videos bezogen auf den Goldstandard, auf denen ein definiertes Gef{\"a}ß erkannt wurde, wurden erhoben. Acht erfahrene Endoskopiker wurden mithilfe dieses Videotests im Hinblick auf Gef{\"a}ßdetektion getestet, wobei eine H{\"a}lfte der Videos nativ, die andere H{\"a}lfte nach Markierung durch den KI-Algorithmus angesehen wurde. Ergebnisse Der mittlere Dice Score des Algorithmus f{\"u}r Blutgef{\"a}ße war 68\%. Die mittlere Gef{\"a}ßdetektionsrate im Videotest lag bei 94\% (96\% f{\"u}r ESD; 74\% f{\"u}r POEM). Die mediane Gef{\"a}ßdetektionszeit des Algorithmus lag bei 0,32 Sekunden (0,3 Sekunden f{\"u}r ESD; 0,62 Sekunden f{\"u}r POEM). Die mittlere Gef{\"a}ßdetektionsdauer lag bei 59,1\% (60,6\% f{\"u}r ESD; 44,8\% f{\"u}r POEM) des Goldstandards. Alle Endoskopiker hatten mit KI-Unterst{\"u}tzung eine h{\"o}here Gef{\"a}ßdetektionsrate als ohne KI. Die mittlere Gef{\"a}ßdetektionsrate ohne KI lag bei 56,4\%, mit KI bei 71,2\% (p<0,001). Schlussfolgerung KI-Unterst{\"u}tzung war mit einer statistisch signifikant h{\"o}heren Gef{\"a}ßdetektionsrate vergesellschaftet. Die mediane Gef{\"a}ßdetektionszeit von deutlich unter einer Sekunde sowie eine Gef{\"a}ßdetektionsdauer von gr{\"o}ßer 50\% des Goldstandards wurden f{\"u}r den klinischen Einsatz als ausreichend erachtet.
In prospektiven Anwendungsstudien sollte der KI-Algorithmus auf klinische Relevanz getestet werden.}, language = {de} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Verwendung k{\"u}nstlicher Intelligenz bei der Detektion der Papilla duodeni major}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1772000}, pages = {e539 -- e540}, abstract = {Einleitung Die Endoskopische Retrograde Cholangiopankreatikographie (ERCP) ist der Goldstandard in der Diagnostik und Therapie von Erkrankungen des pankreatobili{\"a}ren Trakts. Jedoch ist sie technisch sehr anspruchsvoll und weist eine vergleichsweise hohe Komplikationsrate auf. Ziele In der vorliegenden Machbarkeitsstudie soll gepr{\"u}ft werden, ob mithilfe eines Deep-Learning-Algorithmus die Papille und das Ostium zuverl{\"a}ssig detektiert werden k{\"o}nnen und somit f{\"u}r Endoskopiker mit geringer Erfahrung ein geeignetes Hilfsmittel, insbesondere f{\"u}r die Ausbildungssituation, darstellen k{\"o}nnten. Methodik Wir betrachteten insgesamt 606 Bilddatens{\"a}tze von 65 Patienten. In diesen wurde sowohl die Papilla duodeni major als auch das Ostium segmentiert. Anschließend wurde ein neuronales Netz mittels eines Deep-Learning-Algorithmus trainiert. Außerdem erfolgte eine 5-fache Kreuzvalidierung. Ergebnisse Bei einer 5-fachen Kreuzvalidierung auf den 606 gelabelten Daten konnte f{\"u}r die Klasse Papille ein F1-Wert von 0,7908, eine Sensitivit{\"a}t von 0,7943 und eine Spezifit{\"a}t von 0,9785 erreicht werden, f{\"u}r die Klasse Ostium ein F1-Wert von 0,5538, eine Sensitivit{\"a}t von 0,5094 und eine Spezifit{\"a}t von 0,9970 (vgl. [Tab. 1]). Unabh{\"a}ngig von der Klasse zeigte sich gemittelt (Klasse Papille und Klasse Ostium) ein F1-Wert von 0,6673, eine Sensitivit{\"a}t von 0,6519 und eine Spezifit{\"a}t von 0,9877 (vgl. [Tab. 2]). Schlussfolgerung In der vorliegenden Machbarkeitsstudie konnte das neuronale Netz die Papilla duodeni major mit einer hohen Sensitivit{\"a}t und sehr hohen Spezifit{\"a}t identifizieren. Bei der Detektion des Ostiums war die Sensitivit{\"a}t deutlich geringer. Zuk{\"u}nftig soll das neuronale Netz mit mehr Daten trainiert werden. Außerdem ist geplant, den Algorithmus auch auf Videos anzuwenden. Somit k{\"o}nnte langfristig ein geeignetes Hilfsmittel f{\"u}r die ERCP etabliert werden.}, language = {de} } @article{HartmannNieberlePalmetal., author = {Hartmann, Robin and Nieberle, Felix and Palm, Christoph and Br{\´e}bant, Vanessa and Prantl, Lukas and Kuehle, Reinald and Reichert, Torsten E. and Taxis, Juergen and Ettl, Tobias}, title = {Utility of Smartphone-based Three-dimensional Surface Imaging for Digital Facial Anthropometry}, series = {JPRAS Open}, volume = {39}, journal = {JPRAS Open}, publisher = {Elsevier}, doi = {10.1016/j.jpra.2024.01.014}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70348}, pages = {330 -- 343}, abstract = {Background The utilization of three-dimensional (3D) surface imaging for facial anthropometry is a significant asset for patients undergoing maxillofacial surgery. Notably, there have been recent advancements in smartphone technology that enable 3D surface imaging.
In this study, anthropometric assessments of the face were performed using a smartphone and a sophisticated 3D surface imaging system. Methods 30 healthy volunteers (15 females and 15 males) were included in the study. An iPhone 14 Pro (Apple Inc., USA) using the application 3D Scanner App (Laan Consulting Corp., USA) and the Vectra M5 (Canfield Scientific, USA) were employed to create 3D surface models. For each participant, 19 anthropometric measurements were conducted on the 3D surface models. Subsequently, the anthropometric measurements generated by the two approaches were compared. The statistical techniques employed included the paired t-test, paired Wilcoxon signed-rank test, Bland-Altman analysis, and calculation of the intraclass correlation coefficient (ICC). Results All measurements showed excellent agreement between smartphone-based and Vectra M5-based measurements (ICC between 0.85 and 0.97). Statistical analysis revealed no statistically significant differences in the central tendencies for 17 of the 19 linear measurements. Despite the excellent agreement found, Bland-Altman analysis revealed that the 95\% limits of agreement between the two methods exceeded ±3 mm for the majority of measurements. Conclusion Digital facial anthropometry using smartphones can serve as a valuable supplementary tool for surgeons, enhancing their communication with patients. However, the proposed data suggest that digital facial anthropometry using smartphones may not yet be suitable for certain diagnostic purposes that require high accuracy.}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {641 -- 649}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. 
With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @misc{EbigboRauberAyoubetal., author = {Ebigbo, Alanna and Rauber, David and Ayoub, Mousa and Birzle, Lisa and Matsumura, Tomoaki and Probst, Andreas and Steinbr{\"u}ck, Ingo and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Scheppach, Markus W. and Palm, Christoph and Messmann, Helmut}, title = {Early Esophageal Cancer and the Generalizability of Artificial Intelligence}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783775}, pages = {S428}, abstract = {Aims Artificial Intelligence (AI) systems in gastrointestinal endoscopy are narrow because they are trained to solve only one specific task. Unlike Narrow-AI, general AI systems may be able to solve multiple and unrelated tasks. We aimed to understand whether an AI system trained to detect, characterize, and segment early Barrett's neoplasia (Barrett's AI) is only capable of detecting this pathology or can also detect and segment other diseases like early squamous cell cancer (SCC). Methods 120 white light (WL) and narrow-band endoscopic images (NBI) from 60 patients (1 WL and 1 NBI image per patient) were extracted from the endoscopic database of the University Hospital Augsburg. Images were annotated by three expert endoscopists with extensive experience in the diagnosis and endoscopic resection of early esophageal neoplasias. An AI system based on the DeepLabV3+ architecture dedicated to early Barrett's neoplasia was tested on these images. The AI system was neither trained with SCC images nor had it seen the test images prior to evaluation. The overlap between the three expert annotations („expert-agreement") was the ground truth for evaluating AI performance. Results Barrett's AI detected early SCC with a mean intersection over reference (IoR) of 92\% when at least 1 pixel of the AI prediction overlapped with the expert-agreement. When the threshold was increased to 5\%, 10\%, and 20\% overlap with the expert-agreement, the IoR was 88\%, 85\% and 82\%, respectively. The mean Intersection Over Union (IoU) - a metric of segmentation quality between the AI prediction and the expert-agreement - was 0.45. The mean expert IoU as a measure of agreement between the three experts was 0.60. Conclusions In the context of this pilot study, the predictions of SCC by a Barrett's dedicated AI showed some overlap to the expert-agreement. Therefore, features learned from Barrett's cancer-related training might be helpful also for SCC prediction. Our results allow different possible explanations. On the one hand, some Barrett's cancer features generalize toward the related task of assessing early SCC. On the other hand, the Barrett's AI is less specific to Barrett's cancer than a general predictor of pathological tissue.
However, we expect to enhance the detection quality significantly by extending the training to SCC-specific data. The insight of this study opens the way towards a transfer learning approach for more efficient training of AI to solve tasks in other domains.}, language = {en} } @misc{ScheppachMendelRauberetal., author = {Scheppach, Markus W. and Mendel, Robert and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) improves endoscopists' vessel detection during endoscopic submucosal dissection (ESD)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1782891}, pages = {S93}, abstract = {Aims While AI has been successfully implemented in detecting and characterizing colonic polyps, its role in therapeutic endoscopy remains to be elucidated. Especially third space endoscopy procedures like ESD and peroral endoscopic myotomy (POEM) pose a technical challenge and the risk of operator-dependent complications like intraprocedural bleeding and perforation. Therefore, we aimed at developing an AI-algorithm for intraprocedural real-time vessel detection during ESD and POEM. Methods A training dataset consisting of 5470 annotated still images from 59 full-length videos (47 ESD, 12 POEM) and 179681 unlabeled images was used to train a DeepLabV3+ neural network with the ECMT semi-supervised learning method. Evaluation for vessel detection rate (VDR) and time (VDT) of 19 endoscopists with and without AI-support was performed using a testing dataset of 101 standardized video clips with 200 predefined blood vessels. Endoscopists were stratified into trainees and experts in third space endoscopy. Results The AI algorithm had a mean VDR of 93.5\% and a median VDT of 0.32 seconds. AI support was associated with a statistically significant increase in VDR from 54.9\% to 73.0\% and from 59.0\% to 74.1\% for trainees and experts, respectively. VDT significantly decreased from 7.21 sec to 5.09 sec for trainees and from 6.10 sec to 5.38 sec for experts in the AI-support group. False positive (FP) readings occurred in 4.5\% of frames. FP structures were detected significantly shorter than true positives (0.71 sec vs. 5.99 sec). Conclusions AI improved VDR and VDT of trainees and experts in third space endoscopy and may reduce performance variability during training. Further research is needed to evaluate the clinical impact of this new technology.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and R{\"o}mmele, Christoph and Nagl, Sandra and Schnoy, Elisabeth and Messmann, Helmut and Ebigbo, Alanna and Palm, Christoph}, title = {Artificial intelligence as a tool in the detection of the papillary ostium during ERCP}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783138}, pages = {S198}, abstract = {Aims Endoscopic retrograde cholangiopancreaticography (ERCP) is the gold standard in the diagnosis as well as treatment of diseases of the pancreatobiliary tract. However, it is technically complex and has a relatively high complication rate. In particular, cannulation of the papillary ostium remains challenging.
The aim of this study is to examine whether a deep-learning algorithm can be used to detect the major duodenal papilla and in particular the papillary ostium reliably and could therefore be a valuable tool for inexperienced endoscopists, particularly in training situations. Methods We analyzed a total of 654 retrospectively collected images of 85 patients. Both the major duodenal papilla and the ostium were then segmented. Afterwards, a neural network was trained using a deep-learning algorithm. A 5-fold cross-validation was performed. Subsequently, we ran the algorithm on 5 prospectively collected videos of ERCPs. Results 5-fold cross-validation on the 654 labeled images resulted in an F1 value of 0.8007, a sensitivity of 0.8409 and a specificity of 0.9757 for the class papilla, and an F1 value of 0.5724, a sensitivity of 0.5456 and a specificity of 0.9966 for the class ostium. Regardless of the class, the average F1 value (class papilla and class ostium) was 0.6866, the sensitivity 0.6933 and the specificity 0.9861. In 100\% of cases the AI-detected localization of the papillary ostium in the prospectively collected videos corresponded to the localization of the cannulation performed by the endoscopist. Conclusions In the present study, the neural network was able to identify the major duodenal papilla with a high sensitivity and high specificity. In detecting the papillary ostium, the sensitivity was notably lower. However, when used on videos, the AI was able to identify the location of the subsequent cannulation with 100\% accuracy. In the future, the neural network will be trained with more data. Thus, a suitable tool for ERCP could be established, especially in training situations.}, language = {en} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Nunes, Danilo Weber and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests the possibility of intraprocedural phase recognition in surgical operations as well as endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD) by AI-algorithms. The intricate measurement of intraprocedural phase distribution may deepen the understanding of the procedure. Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI-algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase. Phases included scope manipulation, marking, injection, application of electrical current and bleeding. Allocation of each frame was only possible to one category. This training dataset was used to train a Video Swin transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29801 single frames.
Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\% and 87.55\% for scope manipulation, marking, injection, electric application and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\% and 84.76\% were measured for the same parameters. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not achieve the level of the stand-alone performance of AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed ninety-six standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized in two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists.
Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, irrespective of comparable outcomes. These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @inproceedings{SouzaPachecodeAngeloetal., author = {Souza, Luis A. and Pacheco, Andr{\´e} G.C. and de Angelo, Gabriel G. and Oliveira-Santos, Thiago and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {LiwTERM: A Lightweight Transformer-Based Model for Dermatological Multimodal Lesion Detection}, series = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, booktitle = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, publisher = {IEEE}, isbn = {979-8-3503-7603-6}, doi = {10.1109/SIBGRAPI62404.2024.10716324}, pages = {1 -- 6}, abstract = {Skin cancer is the most common type of cancer in the world, accounting for approximately 30\% of all diagnosed tumors. Early diagnosis reduces mortality rates and prevents disfiguring effects in different body regions. In recent years, machine learning techniques, particularly deep learning, have shown promising results in this task, with studies demonstrating that combining a patient's clinical information with images of the lesion is crucial for improving the classification of skin lesions. Despite that, meaningful use of clinical information with multiple images is mandatory, requiring further investigation. Thus, this project aims to contribute to developing multimodal machine learning-based models to cope with the skin lesion classification task employing a lightweight transformer model. As a main hypothesis, models can take multiple images from different sources as input, along with clinical information from the patient's history, leading to a more reliable diagnosis. Our model deals with the non-trivial task of combining images and clinical information (from anamneses) concerning the skin lesions in a lightweight transformer architecture that does not demand high computation resources but still presents competitive classification results.}, language = {en} } @article{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, series = {Computers in Biology and Medicine}, volume = {169}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.compbiomed.2024.107929}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-69830}, pages = {24}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images and videos. In particular, the determination of the position and type of instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning.
In this review, we identify and characterize datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images and videos. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame semantic and instance segmentation approaches, as well as those that incorporate temporal information. The publications analyzed were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in a total of 741 articles published between 01/2015 and 07/2023, of which 123 were included using systematic selection criteria. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments.}, subject = {Deep Learning}, language = {en} } @article{SouzaJrPassosSantanaetal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Rauber, David and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Layer-selective deep representation to improve esophageal cancer classification}, series = {Medical \& Biological Engineering \& Computing}, volume = {62}, journal = {Medical \& Biological Engineering \& Computing}, publisher = {Springer Nature}, address = {Heidelberg}, doi = {10.1007/s11517-024-03142-8}, pages = {3355 -- 3372}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis. For this task, the deep learning techniques' black-box nature must somehow be lightened up to clarify its promising results. Hence, we aim to investigate the impact of the ResNet-50 deep convolutional design for Barrett's esophagus and adenocarcinoma classification. For such a task, and aiming at proposing a two-step learning technique, the output of each convolutional layer that composes the ResNet-50 architecture was trained and classified for further definition of layers that would provide more impact in the architecture. We showed that local information and high-dimensional features are essential to improve the classification for our task.
Besides, we observed a significant improvement when the most discriminative layers expressed more impact in the training and classification of ResNet-50 for Barrett's esophagus and adenocarcinoma classification, demonstrating that both human knowledge and computational processing may influence the correct learning of such a problem.}, language = {en} } @inproceedings{GutbrodGeislerRauberetal., author = {Gutbrod, Max and Geisler, Benedikt and Rauber, David and Palm, Christoph}, title = {Data Augmentation for Images of Chronic Foot Wounds}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_71}, pages = {261 -- 266}, abstract = {Training data for Neural Networks is often scarce in the medical domain, which often results in models that struggle to generalize and consequently show poor performance on unseen datasets. Generally, adding augmentation methods to the training pipeline considerably enhances a model's performance. Using the dataset of the Foot Ulcer Segmentation Challenge, we analyze two additional augmentation methods in the domain of chronic foot wounds - local warping of wound edges along with projection and blurring of shapes inside wounds. Our experiments show that improvements in the Dice similarity coefficient and Normalized Surface Distance metrics depend on a sensible selection of those augmentation methods.}, language = {en} } @article{SouzaJrPachecoPassosetal., author = {Souza Jr., Luis Antonio de and Pacheco, Andr{\´e} G.C. and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {DeepCraftFuse: visual and deeply-learnable features work better together for esophageal cancer detection in patients with Barrett's esophagus}, series = {Neural Computing and Applications}, volume = {36}, journal = {Neural Computing and Applications}, publisher = {Springer}, address = {London}, doi = {10.1007/s00521-024-09615-z}, pages = {10445 -- 10459}, abstract = {Limitations in computer-assisted diagnosis include lack of labeled data and inability to model the relation between what experts see and what computers learn. Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis. While deep learning techniques are broad so that unseen information might help learn patterns of interest, human insights to describe objects of interest help in decision-making. This paper proposes a novel approach, DeepCraftFuse, to address the challenge of combining information provided by deep networks with visual-based features to significantly enhance the correct identification of cancerous tissues in patients affected with Barrett's esophagus (BE).
We demonstrate that DeepCraftFuse outperforms state-of-the-art techniques on private and public datasets, reaching results of around 95\% when distinguishing patients affected by BE that is either positive or negative to esophageal cancer.}, subject = {Deep Learning}, language = {en} } @misc{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Corrigendum to "Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art" [Comput. Biol. Med. 169 (2024) 107929]}, series = {Computers in Biology and Medicine}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2024.108027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70337}, pages = {1}, abstract = {The authors regret that the SAR-RARP50 dataset is missing from the description of publicly available datasets presented in Chapter 4.}, language = {en} } @article{HammerNunesHammeretal., author = {Hammer, Simone and Nunes, Danilo Weber and Hammer, Michael and Zeman, Florian and Akers, Michael and G{\"o}tz, Andrea and Balla, Annika and Doppler, Michael Christian and Fellner, Claudia and Platz Batista da Silva, Natascha and Thurn, Sylvia and Verloh, Niklas and Stroszczynski, Christian and Wohlgemuth, Walter Alexander and Palm, Christoph and Uller, Wibke}, title = {Deep learning-based differentiation of peripheral high-flow and low-flow vascular malformations in T2-weighted short tau inversion recovery MRI}, series = {Clinical Hemorheology and Microcirculation}, journal = {Clinical Hemorheology and Microcirculation}, edition = {Pre-press}, publisher = {IOS Press}, doi = {10.3233/CH-232071}, pages = {1 -- 15}, abstract = {BACKGROUND Differentiation of high-flow from low-flow vascular malformations (VMs) is crucial for therapeutic management of this orphan disease. OBJECTIVE A convolutional neural network (CNN) was evaluated for differentiation of peripheral vascular malformations (VMs) on T2-weighted short tau inversion recovery (STIR) MRI. METHODS 527 MRIs (386 low-flow and 141 high-flow VMs) were randomly divided into training, validation and test set for this single-center study. 1) Results of the CNN's diagnostic performance were compared with that of two expert and four junior radiologists. 2) The influence of CNN's prediction on the radiologists' performance and diagnostic certainty was evaluated. 3) Junior radiologists' performance after self-training was compared with that of the CNN. RESULTS Compared with the expert radiologists, the CNN achieved similar accuracy (92\% vs. 97\%, p = 0.11), sensitivity (80\% vs. 93\%, p = 0.16) and specificity (97\% vs. 100\%, p = 0.50). In comparison to the junior radiologists, the CNN had a higher specificity and accuracy (97\% vs. 80\%, p < 0.001; 92\% vs. 77\%, p < 0.001). CNN assistance had no significant influence on their diagnostic performance and certainty. After self-training, the junior radiologists' specificity and accuracy improved and were comparable to that of the CNN. CONCLUSIONS Diagnostic performance of the CNN for differentiating high-flow from low-flow VM was comparable to that of expert radiologists. CNN did not significantly improve the simulated daily practice of junior radiologists; self-training was more effective.}, language = {en} } @misc{ScheppachMendelMuzalyovaetal., author = {Scheppach, Markus W.
and Mendel, Robert and Muzalyova, Anna and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Yip, Hon Chi and Lau, Louis Ho Shing and G{\"o}lder, Stefan Karl and Schmidt, Arthur and Kouladouros, Konstantinos and Abdelhafez, Mohamed and Walter, B. and Meinikheim, Michael and Chiu, Philip Wai Yan and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {K{\"u}nstliche Intelligenz erh{\"o}ht die Gef{\"a}ßerkennung von Endoskopikern bei third space Endoskopie}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {62}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {09}, publisher = {Georg Thieme Verlag KG}, doi = {10.1055/s-0044-1790087}, pages = {e830}, abstract = {Einleitung: K{\"u}nstliche Intelligenz (KI)-Algorithmen unterst{\"u}tzen Endoskopiker bei der Erkennung und Charakterisierung von Kolonpolypen in der klinischen Praxis und f{\"u}hren zu einer Erh{\"o}hung der Adenomdetektionsrate. Auch bei therapeutischen Maßnahmen wie der endoskopischen Submukosadissektion (ESD) k{\"o}nnen relevante anatomische Strukturen durch KI mit hoher Genauigkeit erkannt und im endoskopischen Bild in Echtzeit markiert werden. Der Effekt einer solchen Applikation auf die Gef{\"a}ßdetektion von Endoskopikern ist bislang nicht erforscht. Ziele: In dieser Studie wurde der Effekt eines KI-Algorithmus zur Echtzeit-Gef{\"a}ßmarkierung bei ESD auf die Gef{\"a}ßdetektionsrate von Endoskopikern untersucht. Methodik: 59 third space Endoskopievideos wurden aus der Datenbank des Universit{\"a}tsklinikums Augsburg extrahiert. Auf 5470 Einzelbildern dieser Untersuchungen wurden submukosale Blutgef{\"a}ße annotiert. Zusammen mit weiteren 179681 unmarkierten Bildern wurde ein DeepLabV3+ neuronales Netzwerk mit einer semi-supervised learning Methode darin trainiert, submukosale Blutgef{\"a}ße auf dem endoskopischen Bild zu erkennen und in Echtzeit einzuzeichnen. Anhand eines Videotests mit 101 Videoclips und 200 vordefinierten Blutgef{\"a}ßen wurden 19 Endoskopiker mit und ohne KI-Unterst{\"u}tzung getestet. Ergebnis: Der Algorithmus erkannte in dem Videotest 93,5\% der Gef{\"a}ße in einer Detektionszeit von im Median 0,3 Sekunden. Die Gef{\"a}ßdetektionsrate von Endoskopikern erh{\"o}hte sich durch KI-Unterst{\"u}tzung von 56,4\% auf 72,4\% (p<0,001). Die Gef{\"a}ßdetektionszeit reduzierte sich durch KI-Unterst{\"u}tzung von 6,7 auf 5,2 Sekunden (p<0,001). Der Algorithmus zeigte eine Rate an falsch positiven Detektionen in 4,5\% der Einzelbilder. Falsch positiv erkannte Strukturen wurden k{\"u}rzer detektiert als richtig positive (0,7 und 6,0 Sekunden; p<0,001). Schlussfolgerung: KI-Unterst{\"u}tzung f{\"u}hrte zu einer erh{\"o}hten Gef{\"a}ßdetektionsrate und schnelleren Gef{\"a}ßdetektionszeit von Endoskopikern. Ein m{\"o}glicher klinischer Effekt auf die intraprozedurale Komplikationsrate oder Operationszeit k{\"o}nnte in prospektiven Studien ermittelt werden.}, language = {de} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Muzalyova, Anna and Rauber, David and R{\"u}ckert, Tobias and Parsa, Nasim and Byrne, Michael F.
and Messmann, Helmut and Ebigbo, Alanna}, title = {Mensch-Maschine-Interaktion: Einfluss k{\"u}nstlicher Intelligenz auf das diagnostische Vertrauen von Endoskopikern bei der Beurteilung des Barrett-{\"O}sophagus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {62}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {09}, publisher = {Georg Thieme Verlag KG}, doi = {10.1055/s-0044-1789656}, pages = {e575 -- e576}, abstract = {Ziele: Das Ziel der Studie war es, den Einfluss von KI auf die diagnostische Sicherheit (Konfidenzniveau) von Endoskopikern anhand von B{\"O}-Videos zu untersuchen und m{\"o}gliche Korrelationen mit der Untersuchungsqualit{\"a}t zu erforschen. Methodik: 22 Endoskopiker aus zw{\"o}lf Zentren mit unterschiedlicher Barrett-Erfahrung untersuchten 96 standardisierte Endoskopievideos. Die Untersucher wurden in Experten und Nicht-Experten eingeteilt und nach dem Zufallsprinzip der Bewertung der Videos mit oder ohne KI zugeteilt. Die Teilnehmer wurden in zwei Gruppen aufgeteilt: Arm A bewertete zun{\"a}chst Videos ohne KI und dann mit KI, w{\"a}hrend Arm B die umgekehrte Reihenfolge einhielt. Die Untersucher hatten die Aufgabe, B{\"O}-assoziierte Neoplasien zu erkennen und ihr Konfidenzniveau sowohl mit als auch ohne KI auf einer Skala von 0 bis 9 anzugeben. Ergebnis: In Arm A erh{\"o}hte der Einsatz von KI das Konfidenzniveau bei beiden Gruppen signifikant (p<0,001). Bemerkenswert ist jedoch, dass nur Nicht-Experten durch die KI eine signifikante Verbesserung der Sensitivit{\"a}t und Spezifit{\"a}t (p<0,001 bzw. p<0,05) erfuhren. W{\"a}hrend Experten ohne KI im Vergleich zu Nicht-Experten mit KI ein h{\"o}heres Konfidenzniveau aufwiesen, gab es keinen signifikanten Unterschied in der Genauigkeit. In Arm B zeigten beide Gruppen eine signifikante Abnahme des Konfidenzniveaus (p<0,001) bei gleichbleibender Genauigkeit. Dar{\"u}ber hinaus wurde in 9\% der Entscheidungen trotz korrekter KI eine falsche Wahl getroffen. Schlussfolgerung: Der Einsatz k{\"u}nstlicher Intelligenz steigerte das Konfidenzniveau sowohl bei Experten als auch bei Nicht-Experten signifikant - ein Effekt, der im Studienmodell reversibel war. Dar{\"u}ber hinaus wiesen Experten mit oder ohne KI durchweg h{\"o}here Konfidenzniveaus auf als Nicht-Experten mit KI, trotz vergleichbarer Ergebnisse. Zudem konnte beobachtet werden, dass die Untersucher in 9\% der F{\"a}lle die KI zuungunsten des Patienten ignorierten.}, language = {de} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Nunes, Danilo Weber and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Intraoperative Phasenerkennung bei endoskopischer Submukosadissektion mit Hilfe von k{\"u}nstlicher Intelligenz}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {62}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {09}, publisher = {Georg Thieme Verlag KG}, doi = {10.1055/s-0044-1790084}, pages = {e828}, abstract = {Einleitung: K{\"u}nstliche Intelligenz (KI) wird in der Endoskopie des Gastrointestinaltraktes zur Erkennung und Charakterisierung von Kolonpolypen eingesetzt. Die Rolle von KI bei therapeutischen Maßnahmen wurde noch nicht eingehend untersucht. Eine intraprozedurale Phasenerkennung bei endoskopischer Submukosadissektion (ESD) k{\"o}nnte die Erhebung von Qualit{\"a}tsindikatoren erm{\"o}glichen.
Weiterhin k{\"o}nnte diese Technologie zu einem tieferen Verst{\"a}ndnis {\"u}ber die Eigenschaften der Prozedur f{\"u}hren und weiterf{\"u}hrende Applikationen zur automatischen Dokumentation oder standardisiertem Training vorbereiten. Ziele: Ziel dieser Studie war die Entwicklung eines KI Algorithmus zur intraprozeduralen Phasenerkennung bei endoskopischer Submukosadissektion. Methodik: 2071546 Einzelbilder aus 27 ESD Videos in voller L{\"a}nge wurden f{\"u}r die {\"u}bergeordneten Klassen Diagnostik, Markierung, Nadelinjektion, Dissektion und Blutung, sowie die untergeordneten Klassen Endoskop-Manipulation, Injektion und Applikation von elektrischem Strom annotiert. Mit einem Trainingsdatensatz (898440 Einzelbilder, 17 ESDs) wurde ein Video Swin Transformer mit uniformer Stichprobenentnahme trainiert und intern validiert (769523 Einzelbilder, 6 ESDs). Neben der internen Validierung wurde der Algorithmus anhand von einem separaten Testdatensatz (403583 Einzelbilder, 4 ESDs) evaluiert. Ergebnis: Der F1 Score des Algorithmus f{\"u}r alle Klassen lag in der internen Validierung bei 83\%, in dem separaten Test bei 90\%. Anhand des separaten Tests wurden true positive (TP)-Raten f{\"u}r Diagnostik, Markierung, Nadelinjektion, Dissektion und Blutung von 100\%, 100\%, 96\%, 97\% und 93\% ermittelt. F{\"u}r Endoskopmanipulation, Injektion und Applikation von Elektrizit{\"a}t lagen die TP-Raten bei 92\%, 98\% und 91\%. Schlussfolgerung: Der entwickelte Algorithmus klassifizierte ESD Videos in voller L{\"a}nge und anhand jedes einzelnen Bildes mit hoher Genauigkeit. Zuk{\"u}nftige Forschungsvorhaben k{\"o}nnten intraoperative Qualit{\"a}tsindikatioren auf Basis dieser Informationen entwickeln und eine automatisierte Dokumentation erm{\"o}glichen.}, language = {de} } @misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Single frame workflow recognition during endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {57}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806324}, pages = {S511}, abstract = {Aims Precise surgical phase recognition and evaluation may improve our understanding of complex endoscopic procedures. Furthermore, quality control measurements and endoscopy training could benefit from objective descriptions of surgical phase distributions. Therefore, we aimed to develop an artificial intelligence algorithm for frame-by-frame operational phase recognition during endoscopic submucosal dissection (ESD). Methods Full length ESD-videos from 31 patients comprising 6.297.782 single images were collected retrospectively. Videos were annotated on a frame-by-frame basis for the operational macro-phases diagnostics, marking, injection, dissection and bleeding. Further subphases were the application of electrical current, visible injection of fluid into the submucosal space and scope manipulation, leading to 11 phases in total. 4.975.699 frames (21 patients) were used for training of a video swin transformer using uniform frame sampling for temporal information. Hyperparameter tuning was performed with 897.325 further frames (6 patients), while 424.758 frames (4 patients) were used for validation. 
Results The overall F1 scores on the test dataset for the macro-phases and all 11 phases were 0.96 and 0.90, respectively. The recall values for diagnostics, marking, injection, dissection and bleeding were 1.00, 1.00, 0.95, 0.96 and 0.93, respectively. Conclusions The algorithm classified operational phases during ESD with high accuracy. A precise evaluation of phase distribution may allow for the development of objective quality metrics for quality control and training.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Birzle, Lisa and Aehling, Niklas and Schulz, Dominik Andreas Helmut Otto and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {K{\"u}nstliche Intelligenz als Hilfsmittel zur Detektion der Papilla duodeni major und des papill{\"a}ren Ostiums w{\"a}hrend der ERCP}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {63}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {5}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806882}, pages = {e295}, abstract = {Einleitung Die Endoskopische Retrograde Cholangiopankreatikographie (ERCP) ist der Goldstandard in der endoskopischen Therapie von Erkrankungen des pankreatobili{\"a}ren Trakts. Allerdings ist sie technisch anspruchsvoll, schwer zu erlernen und mit einer relativ hohen Komplikationsrate assoziiert. Daher soll in der vorliegenden Machbarkeitsstudie gepr{\"u}ft werden, ob mithilfe eines Deep-Learning-Algorithmus die Papille und das Ostium zuverl{\"a}ssig detektiert werden k{\"o}nnen und dieser f{\"u}r Endoskopiker, insbesondere in der Ausbildungssituation, ein geeignetes Hilfsmittel darstellen k{\"o}nnte. Material und Methodik Insgesamt wurden 1534 ERCP-Bilder von 134 Patienten analysiert, wobei sowohl die Papilla duodeni major als auch das Ostium segmentiert wurden. Anschließend erfolgte das Training eines neuronalen Netzes unter Verwendung eines Deep-Learning-Algorithmus. F{\"u}r den Test des Algorithmus erfolgte eine f{\"u}nffache Kreuzvalidierung. Ergebnisse Auf den 1534 gelabelten Bildern wurden f{\"u}r die Klasse Papille ein F1-Wert von 0,7996, eine Sensitivit{\"a}t von 0,8488 und eine Spezifit{\"a}t von 0,9822 erzielt. F{\"u}r die Klasse Ostium ergaben sich ein F1-Wert von 0,5198, eine Sensitivit{\"a}t von 0,5945 und eine Spezifit{\"a}t von 0,9974. Klassen{\"u}bergreifend (Klasse Papille und Klasse Ostium) betrug der F1-Wert 0,6593, die Sensitivit{\"a}t 0,7216 und die Spezifit{\"a}t 0,9898. Zusammenfassung In der vorliegenden Machbarkeitsstudie zeigte das neuronale Netz eine hohe Sensitivit{\"a}t und eine sehr hohe Spezifit{\"a}t bei der Identifikation der Papilla duodeni major. Die Detektion des Ostiums erfolgte hingegen mit einer deutlich geringeren Sensitivit{\"a}t. Zuk{\"u}nftig ist eine Erweiterung des Trainingsdatensatzes um Videos und klinische Daten vorgesehen, um die Leistungsf{\"a}higkeit des Netzwerks zu verbessern. Hierdurch k{\"o}nnte langfristig ein geeignetes Assistenzsystem f{\"u}r die ERCP, insbesondere in der Ausbildungssituation, etabliert werden.}, language = {de} } @article{HartmannWeihererNieberleetal., author = {Hartmann, Robin and Weiherer, Maximilian and Nieberle, Felix and Palm, Christoph and Br{\´e}bant, Vanessa and Prantl, Lukas and Lamby, Philipp and Reichert, Torsten E.
and Taxis, J{\"u}rgen and Ettl, Tobias}, title = {Evaluating smartphone-based 3D imaging techniques for clinical application in oral and maxillofacial surgery: A comparative study with the vectra M5}, series = {Oral and Maxillofacial Surgery}, volume = {29}, journal = {Oral and Maxillofacial Surgery}, publisher = {Springer Nature}, doi = {10.1007/s10006-024-01322-2}, pages = {17}, abstract = {PURPOSE This study aimed to clarify the applicability of smartphone-based three-dimensional (3D) surface imaging for clinical use in oral and maxillofacial surgery, comparing two smartphone-based approaches to the gold standard. METHODS Facial surface models (SMs) were generated for 30 volunteers (15 men, 15 women) using the Vectra M5 (Canfield Scientific, USA), the TrueDepth camera of the iPhone 14 Pro (Apple Inc., USA), and the iPhone 14 Pro with photogrammetry. Smartphone-based SMs were superimposed onto Vectra-based SMs. Linear measurements and volumetric evaluations were performed to evaluate surface-to-surface deviation. To assess inter-observer reliability, all measurements were performed independently by a second observer. Statistical analyses included Bland-Altman analyses, the Wilcoxon signed-rank test for paired samples, and Intraclass correlation coefficients. RESULTS Photogrammetry-based SMs exhibited an overall landmark-to-landmark deviation of M = 0.8 mm (SD =  ± 0.58 mm, n = 450), while TrueDepth-based SMs displayed a deviation of M = 1.1 mm (SD =  ± 0.72 mm, n = 450). The mean volumetric difference for photogrammetry-based SMs was M = 1.8 cc (SD =  ± 2.12 cc, n = 90), and M = 3.1 cc (SD =  ± 2.64 cc, n = 90) for TrueDepth-based SMs. When comparing the two approaches, most landmark-to-landmark measurements demonstrated 95\% Bland-Altman limits of agreement (LoA) of ≤ 2 mm. Volumetric measurements revealed LoA > 2 cc. Photogrammetry-based measurements demonstrated higher inter-observer reliability for overall landmark-to-landmark deviation. CONCLUSION Both approaches for smartphone-based 3D surface imaging exhibit potential in capturing the face. Photogrammetry-based SMs demonstrated superior alignment and volumetric accuracy with Vectra-based SMs than TrueDepth-based SMs.}, language = {en} } @article{RoserMeinikheimMuzalyovaetal., author = {Roser, David and Meinikheim, Michael and Muzalyova, Anna and Mendel, Robert and Palm, Christoph and Probst, Andreas and Nagl, Sandra and Scheppach, Markus W. and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial intelligence-assisted endoscopy and examiner confidence : a study on human-artificial intelligence interaction in Barrett's Esophagus (With Video)}, series = {DEN Open}, volume = {6}, journal = {DEN Open}, number = {1}, publisher = {Wiley}, doi = {10.1002/deo2.70150}, pages = {8}, abstract = {Objective Despite high stand-alone performance, studies demonstrate that artificial intelligence (AI)-supported endoscopic diagnostics often fall short in clinical applications due to human-AI interaction factors. This video-based trial on Barrett's esophagus aimed to investigate how examiner behavior, their levels of confidence, and system usability influence the diagnostic outcomes of AI-assisted endoscopy. Methods The present analysis employed data from a multicenter randomized controlled tandem video trial involving 22 endoscopists with varying degrees of expertise. 
Participants were tasked with evaluating a set of 96 endoscopic videos of Barrett's esophagus in two distinct rounds, with and without AI assistance. Diagnostic confidence levels were recorded, and decision changes were categorized according to the AI prediction. Additional surveys assessed user experience and system usability ratings. Results AI assistance significantly increased examiner confidence levels (p < 0.001) and accuracy. Withdrawing AI assistance decreased confidence (p < 0.001), but not accuracy. Experts consistently reported higher confidence than non-experts (p < 0.001), regardless of performance. Despite improved confidence, correct AI guidance was disregarded in 16\% of all cases, and 9\% of initially correct diagnoses were changed to incorrect ones. Overreliance on AI, algorithm aversion, and uncertainty in AI predictions were identified as key factors influencing outcomes. The System Usability Scale questionnaire scores indicated good to excellent usability, with non-experts scoring 73.5 and experts 85.6. Conclusions Our findings highlight the pivotal role of examiner behavior in AI-assisted endoscopy. To fully realize the benefits of AI, implementing explainable AI, improving user interfaces, and providing targeted training are essential. Addressing these factors could enhance diagnostic accuracy and confidence in clinical practice.}, language = {en} } @article{ScheppachMendelMuzalyovaetal., author = {Scheppach, Markus W. and Mendel, Robert and Muzalyova, Anna and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Yip, Hon Chi and Lau, Louis Ho Shing and G{\"o}lder, Stefan Karl and Schmidt, Arthur and Kouladouros, Konstantinos and Abdelhafez, Mohamed and Walter, Benjamin M. and Meinikheim, Michael and Chiu, Philip Wai Yan and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial intelligence improves submucosal vessel detection during third space endoscopy}, series = {Endoscopy}, journal = {Endoscopy}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-2534-1164}, abstract = {Background and study aims: While artificial intelligence (AI) shows high potential in decision support for diagnostic gastrointestinal endoscopy, its role in therapeutic endoscopy remains unclear. Third space endoscopic procedures pose the risk of intraprocedural bleeding. Therefore, we aimed to develop an AI algorithm for intraprocedural blood vessel detection. Patients and Methods: Using a test dataset with 101 standardized video clips containing 200 predefined submucosal blood vessels, 19 endoscopists were evaluated for the vessel detection rate (VDR) and time (VDT) with and without support of an AI algorithm. Test subjects were grouped according to experience in ESD. Results: With AI support, endoscopists' VDR increased from 56.4\% [CI 54.1-58.6] to 72.4\% [CI 70.3-74.4]. Endoscopists' VDT dropped from 6.7sec [CI 6.2-7.1] to 5.2sec [CI 4.8-5.7]. False positive (FP) readings appeared in 4.5\% of frames and were marked for significantly shorter durations than true positives (0.7sec [CI 0.55-0.87] vs. 6.0sec [CI 5.28-6.70]). Conclusions: AI improved the vessel detection rate and time of endoscopists during third space endoscopy. 
While these data need to be corroborated by clinical trials, AI may prove to be an invaluable tool for the improvement of endoscopic interventions.}, language = {en} } @article{MaerklRueckertRauberetal., author = {Maerkl, Raphaela and Rueckert, Tobias and Rauber, David and Gutbrod, Max and Weber Nunes, Danilo and Palm, Christoph}, title = {Enhancing generalization in zero-shot multi-label endoscopic instrument classification}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {20}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer Nature}, doi = {10.1007/s11548-025-03439-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-85674}, pages = {1577 -- 1587}, abstract = {Purpose Recognizing previously unseen classes with neural networks is a significant challenge due to their limited generalization capabilities. This issue is particularly critical in safety-critical domains such as medical applications, where accurate classification is essential for reliability and patient safety. Zero-shot learning methods address this challenge by utilizing additional semantic data, with their performance relying heavily on the quality of the generated embeddings. Methods This work investigates the use of full descriptive sentences, generated by a Sentence-BERT model, as class representations, compared to simpler category-based word embeddings derived from a BERT model. Additionally, the impact of z-score normalization as a post-processing step on these embeddings is explored. The proposed approach is evaluated on a multi-label generalized zero-shot learning task, focusing on the recognition of surgical instruments in endoscopic images from minimally invasive cholecystectomies. Results The results demonstrate that combining sentence embeddings and z-score normalization significantly improves model performance. For unseen classes, the AUROC improves from 43.9\% to 64.9\%, and the multi-label accuracy from 26.1\% to 79.5\%. Overall performance measured across both seen and unseen classes improves from 49.3\% to 64.9\% in AUROC and from 37.3\% to 65.1\% in multi-label accuracy, highlighting the effectiveness of our approach. Conclusion These findings demonstrate that sentence embeddings and z-score normalization can substantially enhance the generalization performance of zero-shot learning models. However, as the study is based on a single dataset, future work should validate the method across diverse datasets and application domains to establish its robustness and broader applicability.}, language = {en} } @misc{ScheppachWeberNunesRauberetal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Rauber, David and Arizi, X. and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Ebigbo, Alanna and Palm, Christoph and Messmann, Helmut}, title = {K{\"u}nstliche Intelligenz-basierte Erkennung von interventionellen Phasen bei der endoskopischen Submukosadissektion}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {63}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {8}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1811093}, pages = {e612 -- e613}, abstract = {Introduction: Endoscopic submucosal dissection (ESD) is a complex endoscopic procedure that requires technical expertise. 
Objective methods for analyzing interventional workflows in ESD could be useful for quality assurance and training as well as for automated reporting. Aims: In this study, an AI algorithm for the detection and classification of the interventional phases of ESD was developed to provide the technical basis for standardized performance assessment and automated reporting. Methods: Complete ESD video recordings of 49 patients were retrospectively compiled. The dataset comprised 6,390,151 frames, all of which were annotated for the following interventional phases: diagnostics, marking, injection, dissection, and hemostasis. 3,973,712 frames (28 patients) were used to train a Video Swin Transformer; temporal information was incorporated through standardized extraction of frames at fixed time offsets from the frame under analysis. 2,416,439 separate frames (21 patients) were used for internal validation. Results: In the internal evaluation, the system achieved an overall F1 score of 0.88. F1 scores of 0.99, 0.89, 0.89, 0.91, and 0.52 were measured for diagnostics, marking, injection, dissection, and bleeding management, respectively. The sensitivities for the same phases were 1.00, 0.80, 0.94, 0.89, and 0.67; the specificities were 1.00, 1.00, 0.98, 0.88, and 0.93. Positive predictive values were 0.98, 1.00, 0.85, 0.94, and 0.43. Conclusion: In this preliminary study, an AI algorithm showed high performance for frame-level recognition of procedural phases during ESD. The comparatively low performance for the bleeding phase was attributed to the rarity of bleeding episodes in the training dataset, which at this stage comprised only full-length videos. Future development of the algorithm will focus on reducing class imbalance through selective annotation protocols.}, language = {de} } @misc{ScheppachRauberZingleretal., author = {Scheppach, Markus W. and Rauber, David and Zingler, C. and Weber Nunes, Danilo and Probst, Andreas and R{\"o}mmele, Christoph and Nagl, Sandra and Ebigbo, Alanna and Palm, Christoph and Messmann, Helmut}, title = {Instrumentenerkennung w{\"a}hrend der endoskopischen Submukosadissektion mittels k{\"u}nstlicher Intelligenz}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {63}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {8}, publisher = {Thieme}, doi = {10.1055/s-0045-1811092}, abstract = {Introduction: Endoscopic submucosal dissection (ESD) is a complex technique for the resection of early gastrointestinal neoplasia, in which specific endoscopic instruments are used for the individual steps of the intervention. Precise, automatic detection and delineation of the instruments in use (injection needles, electrosurgical knives in different configurations, hemostatic forceps) could provide valuable information on the progress and procedural characteristics of an ESD and enable automated, standardized reporting. Aims: The aim of this study was to develop an AI algorithm for the detection and delineation of endoscopic instruments during ESD. 
Methods: 17 ESD videos (9 rectal, 5 esophageal, 3 gastric) were retrospectively compiled. On 8530 frames from these videos, two study staff members annotated the following classes: hook knife tip, hook knife catheter, needle knife tip and catheter, injection needle tip and catheter, and hemostatic forceps tip and catheter. The annotated dataset was used to train a DeepLabV3+ deep learning algorithm with a ConvNeXt backbone for the detection and delineation of these classes. Evaluation was performed by five-fold internal cross-validation. Results: Pixel-level validation yielded an overall F1 score of 0.80, a sensitivity of 0.81, and a specificity of 1.00. F1 scores of 1.00, 0.97, 0.80, 0.98, 0.85, 0.97, 0.80, 0.51, and 0.85 were measured for the classes hook knife catheter and tip, needle knife catheter and tip, injection needle catheter and tip, and hemostatic forceps catheter and tip, respectively. Conclusion: In this study, the most important endoscopic instruments used during ESD were recognized with high accuracy. The lower performance for the hemostatic forceps catheter can be attributed to the underrepresentation of this class in the training data. Future studies will focus on extending the instrument classes and on balancing the training data.}, language = {de} } @inproceedings{KlausmannRueckertRauberetal., author = {Klausmann, Leonard and Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Yildiran, Suemeyye R. and Gutbrod, Max and Palm, Christoph}, title = {DIY challenge blueprint: from organization to technical realization in biomedical image analysis}, series = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2025; Proceedings Part XI}, booktitle = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2025; Proceedings Part XI}, publisher = {Springer}, address = {Cham}, isbn = {978-3-032-05141-7}, doi = {10.1007/978-3-032-05141-7_9}, pages = {85 -- 95}, abstract = {Biomedical image analysis challenges have become the de facto standard for publishing new datasets and benchmarking different state-of-the-art algorithms. Most challenges use commercial cloud-based platforms, which can limit custom options and involve disadvantages such as reduced data control and increased costs for extended functionalities. In contrast, Do-It-Yourself (DIY) approaches have the capability to emphasize reliability, compliance, and custom features, providing a solid basis for low-cost, custom designs in self-hosted systems. Our approach emphasizes cost efficiency, improved data sovereignty, and strong compliance with regulatory frameworks, such as the GDPR. This paper presents a blueprint for DIY biomedical imaging challenges, designed to provide institutions with greater autonomy over their challenge infrastructure. Our approach comprehensively addresses both organizational and technical dimensions, including key user roles, data management strategies, and secure, efficient workflows. Key technical contributions include a modular, containerized infrastructure based on Docker, integration of open-source identity management, and automated solution evaluation workflows. Practical deployment guidelines are provided to facilitate implementation and operational stability. 
The feasibility and adaptability of the proposed framework are demonstrated through the MICCAI 2024 PhaKIR challenge, with multiple international teams submitting and validating their solutions through our self-hosted platform. This work can be used as a baseline for future self-hosted DIY implementations, and our results encourage further studies in the area of biomedical image analysis challenges.}, language = {en} } @inproceedings{WeberNunesRauberPalm, author = {Weber Nunes, Danilo and Rauber, David and Palm, Christoph}, title = {Self-supervised 3D Vision Transformer Pre-training for Robust Brain Tumor Classification}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_69}, pages = {298 -- 303}, abstract = {Brain tumors pose significant challenges in neurology, making precise classification crucial for prognosis and treatment planning. This work investigates the effectiveness of a self-supervised learning approach, masked autoencoding (MAE), to pre-train a vision transformer (ViT) model for brain tumor classification. Our method uses non-domain-specific data, leveraging the ADNI and OASIS-3 MRI datasets, which primarily focus on degenerative diseases, for pre-training. The model is subsequently fine-tuned and evaluated on the BraTS glioma and meningioma datasets, representing a novel use of these datasets for tumor classification. The pre-trained MAE ViT model achieves an average F1 score of 0.91 in a 5-fold cross-validation setting, outperforming the nnU-Net encoder trained from scratch, particularly under limited data conditions. These findings highlight the potential of self-supervised MAE in enhancing brain tumor classification accuracy, even with restricted labeled data.}, language = {en} } @inproceedings{WeiherervonRiedheimBrebantetal., author = {Weiherer, Maximilian and von Riedheim, Antonia and Br{\´e}bant, Vanessa and Egger, Bernhard and Palm, Christoph}, title = {iRBSM: A Deep Implicit 3D Breast Shape Model}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_11}, pages = {38 -- 43}, abstract = {We present the first deep implicit 3D shape model of the female breast, building upon and improving the recently proposed Regensburg Breast Shape Model (RBSM). Compared to its PCA-based predecessor, our model employs implicit neural representations; hence, it can be trained on raw 3D breast scans and eliminates the need for computationally demanding non-rigid registration, a task that is particularly difficult for feature-less breast shapes. 
The resulting model, dubbed iRBSM, captures detailed surface geometry including fine structures such as nipples and belly buttons, is highly expressive, and outperforms the RBSM on different surface reconstruction tasks. Finally, leveraging the iRBSM, we present a prototype application to reconstruct 3D breast shapes from just a single image. Model and code are publicly available at https://rbsm.re-mic.de/implicit.}, language = {en} } @article{SouzaPachecodeSouzaetal., author = {Souza, Luis A. and Pacheco, Andr{\´e} G.C. and de Souza, Alberto F. and Oliveira-Santos, Thiago and Badue, Claudine and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {TransConv: a lightweight architecture based on transformers and convolutional neural networks for adenocarcinoma and Barrett's esophagus identification}, series = {Neural Computing and Applications}, journal = {Neural Computing and Applications}, number = {37}, publisher = {Springer}, doi = {10.1007/s00521-025-11299-y}, pages = {15535 -- 15546}, abstract = {Barrett's esophagus, also known as BE, is commonly associated with repeated exposure to stomach acid. If not treated properly, it may evolve into esophageal adenocarcinoma, a form of esophageal cancer. This paper proposes TransConv, a hybrid architecture that benefits from features learned by pre-trained vision transformers (ViTs) and convolutional neural networks (CNNs), followed by a shallow neural network composed of three normalizations, ReLU activations, and fully connected layers, and a SoftMax head to distinguish between BE and esophageal cancer. TransConv is designed to be training-lightweight: the ViT and CNN backbone weights are kept frozen during training, so only the weights of the fully connected layers operating on the two backbones' final descriptions are learned, avoiding the burden of updating the backbones themselves. We report promising results with low computational training costs on two datasets, one public and one private. TransConv delivered balanced accuracies of around 85\% and 86\% on the two evaluated datasets, respectively, with a design that required only 50 epochs of model training, far fewer than state-of-the-art studies in the same domain.}, language = {en} } @inproceedings{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD: Open Medical Imaging Benchmarks for Out-Of-Distribution Detection}, series = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 10.-17. June 2025, Nashville}, booktitle = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 10.-17. June 2025, Nashville}, publisher = {IEEE}, isbn = {979-8-3315-4364-8}, doi = {10.1109/CVPR52734.2025.02410}, pages = {25874 -- 25886}, abstract = {The growing reliance on Artificial Intelligence (AI) in critical domains such as healthcare demands robust mechanisms to ensure the trustworthiness of these systems, especially when faced with unexpected or anomalous inputs. This paper introduces the Open Medical Imaging Benchmarks for Out-Of-Distribution Detection (OpenMIBOOD), a comprehensive framework for evaluating out-of-distribution (OOD) detection methods specifically in medical imaging contexts. 
OpenMIBOOD includes three benchmarks from diverse medical domains, encompassing 14 datasets divided into covariate-shifted in-distribution, near-OOD, and far-OOD categories. We evaluate 24 post-hoc methods across these benchmarks, providing a standardized reference to advance the development and fair comparison of OOD detection methods. Results reveal that findings from broad-scale OOD benchmarks in natural image domains do not translate to medical applications, underscoring the critical need for such benchmarks in the medical field. By mitigating the risk of exposing AI models to inputs outside their training distribution, OpenMIBOOD aims to support the advancement of reliable and trustworthy AI systems in healthcare. The repository is available at https://github.com/remic-othr/OpenMIBOOD.}, language = {en} } @article{RueckertRauberMaerkletal., author = {Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R. and Gutbrod, Max and Weber Nunes, Danilo and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and Wang, An and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\´i}guez, Santiago and Aparicio, Nicol{\´a}s and Manrique, Leonardo and Palm, Christoph and Wilhelm, Dirk and Feussner, Hubertus and Rueckert, Daniel and Speidel, Stefanie and Nasirihaghighi, Sahar and Al Khalil, Yasmina and Li, Yiping and Arbel{\´a}ez, Pablo and Ayobi, Nicol{\´a}s and Hosie, Olivia and Lyons, Juan Camilo}, title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge}, series = {Medical Image Analysis}, volume = {109}, journal = {Medical Image Analysis}, publisher = {Elsevier}, issn = {1361-8415}, doi = {10.1016/j.media.2026.103945}, pages = {31}, abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context, such as the current procedural phase, has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024. We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. 
Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges. The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.}, language = {en} }
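The two ESD phase-recognition entries above report per-phase F1, sensitivity (recall), and specificity computed over per-frame predictions. As a worked illustration of how such per-class metrics are derived, here is a minimal sketch in Python; the five phase names follow the abstracts, while the labels are random stand-ins rather than study data:

import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

phases = ["diagnostics", "marking", "injection", "dissection", "hemostasis"]
rng = np.random.default_rng(0)
y_true = rng.integers(0, 5, 1000)              # per-frame ground truth (stand-in)
y_pred = np.where(rng.random(1000) < 0.9,      # ~90% agreement, rest random
                  y_true, rng.integers(0, 5, 1000))

# Per-class precision, recall (= sensitivity), and F1
print(classification_report(y_true, y_pred, labels=list(range(5)),
                            target_names=phases, digits=2))

# Specificity per class from the confusion matrix: TN / (TN + FP)
cm = confusion_matrix(y_true, y_pred, labels=list(range(5)))
fp = cm.sum(axis=0) - np.diag(cm)
tn = cm.sum() - cm.sum(axis=0) - cm.sum(axis=1) + np.diag(cm)
for name, spec in zip(phases, tn / (tn + fp)):
    print(f"{name}: specificity {spec:.2f}")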
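Several entries above (the ERCP papilla study, the vessel-detection work, and the instrument-delineation study) evaluate segmentations at pixel level, where the F1 score coincides with the Dice coefficient. A self-contained sketch of that metric on two toy binary masks:

import numpy as np

def dice(pred: np.ndarray, target: np.ndarray, eps: float = 1e-8) -> float:
    """Dice coefficient (= pixel-level F1) between two binary masks."""
    intersection = np.logical_and(pred, target).sum()
    return 2.0 * intersection / (pred.sum() + target.sum() + eps)

pred = np.zeros((64, 64), dtype=bool); pred[10:30, 10:30] = True
target = np.zeros((64, 64), dtype=bool); target[12:32, 12:32] = True
print(f"Dice = {dice(pred, target):.2f}")   # two overlapping squares, 0.81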
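The Hartmann et al. entry compares smartphone-based surface models against the Vectra M5 using, among other statistics, Bland-Altman 95% limits of agreement (LoA). A minimal sketch of that computation; the paired measurements below are made-up stand-ins, not study data:

import numpy as np

def bland_altman(a, b):
    """Return the bias (mean difference) and 95% limits of agreement."""
    diff = np.asarray(a, float) - np.asarray(b, float)
    bias = diff.mean()
    sd = diff.std(ddof=1)                     # sample standard deviation
    return bias, (bias - 1.96 * sd, bias + 1.96 * sd)

vectra = [42.1, 40.3, 55.0, 61.2, 47.8]       # hypothetical landmark distances (mm)
phone = [42.9, 39.8, 54.1, 62.0, 48.5]
bias, (lo, hi) = bland_altman(vectra, phone)
print(f"bias {bias:.2f} mm, 95% LoA [{lo:.2f}, {hi:.2f}] mm")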
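The Scheppach et al. vessel-detection entries train a DeepLabv3+ network (with ECMT semi-supervised learning) to segment submucosal vessels in real time. The sketch below shows only a plain supervised training step with torchvision's DeepLabV3, since torchvision ships no V3+ variant and ECMT does not fit in a few lines; it approximates the setup rather than reproducing it, and all tensors are random stand-ins:

import torch
from torchvision.models.segmentation import deeplabv3_resnet50

# Two classes: background and vessel. No pretrained weights are downloaded.
model = deeplabv3_resnet50(weights=None, weights_backbone=None, num_classes=2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
loss_fn = torch.nn.CrossEntropyLoss()

frames = torch.randn(2, 3, 512, 512)          # stand-in endoscopy frames
masks = torch.randint(0, 2, (2, 512, 512))    # 0 = background, 1 = vessel

model.train()
optimizer.zero_grad()
logits = model(frames)["out"]                 # (N, 2, H, W) per-pixel scores
loss = loss_fn(logits, masks)
loss.backward()
optimizer.step()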
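The Maerkl et al. entry represents each instrument class by a Sentence-BERT embedding of a full descriptive sentence and reports that z-score normalizing these embeddings markedly improves zero-shot performance. A minimal sketch of that idea; the model name, the example sentences, and the cosine-similarity scoring are assumptions on my part, not the paper's code:

import numpy as np
from sentence_transformers import SentenceTransformer

encoder = SentenceTransformer("all-MiniLM-L6-v2")     # assumed model choice

class_sentences = {
    "grasper": "A grasper is a long, thin instrument with two jaws used to hold tissue.",
    "hook": "An electrosurgical hook is used to dissect and coagulate tissue.",
    "clipper": "A clipper applies metal clips to close vessels or ducts.",
}
emb = encoder.encode(list(class_sentences.values()))  # (n_classes, dim)

# z-score normalization, here per embedding dimension across classes
emb = (emb - emb.mean(axis=0)) / (emb.std(axis=0) + 1e-8)

def class_scores(image_feature: np.ndarray) -> np.ndarray:
    """Cosine similarity of a (projected) image feature to each class embedding."""
    e = emb / np.linalg.norm(emb, axis=1, keepdims=True)
    f = image_feature / np.linalg.norm(image_feature)
    return e @ f                                      # one score per class

print(class_scores(np.random.default_rng(0).normal(size=emb.shape[1])))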
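The Weber Nunes et al. entry pre-trains a 3D ViT with masked autoencoding (MAE). The core of MAE is discarding a large random fraction of patch tokens and training a decoder to reconstruct them; below is a sketch of just the masking step, with a 2D token grid and a 75% ratio as illustrative choices, not the paper's configuration:

import torch

def random_masking(tokens: torch.Tensor, mask_ratio: float = 0.75):
    """Keep a random subset of patch tokens, as in masked autoencoding.

    tokens: (batch, n_patches, dim). Returns the kept tokens and the
    permutation needed to restore patch order for reconstruction.
    """
    b, n, d = tokens.shape
    n_keep = int(n * (1 - mask_ratio))
    noise = torch.rand(b, n)                 # one random score per patch
    shuffle = noise.argsort(dim=1)           # patches with lowest scores are kept
    restore = shuffle.argsort(dim=1)         # inverse permutation
    keep = shuffle[:, :n_keep]
    kept = torch.gather(tokens, 1, keep.unsqueeze(-1).expand(b, n_keep, d))
    return kept, restore

tokens = torch.randn(2, 196, 768)            # stand-in ViT patch tokens
kept, restore = random_masking(tokens)
print(kept.shape)                            # torch.Size([2, 49, 768])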
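The iRBSM entry replaces a PCA mesh model with an implicit neural representation: a network that maps a 3D point, conditioned on a latent shape code, to a signed distance, with the breast surface given by the zero level set. A generic sketch of such a network; layer sizes and the concatenation-based conditioning are illustrative and unrelated to the released model at https://rbsm.re-mic.de/implicit:

import torch
import torch.nn as nn

class ImplicitShape(nn.Module):
    """Maps (latent shape code, 3D point) to a signed distance to the surface."""

    def __init__(self, latent_dim: int = 64, hidden: int = 128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(latent_dim + 3, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, z: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
        return self.net(torch.cat([z, xyz], dim=-1)).squeeze(-1)

model = ImplicitShape()
z = torch.randn(1, 64).expand(1024, -1)   # one shape code, shared by all queries
xyz = torch.rand(1024, 3) * 2 - 1         # query points in [-1, 1]^3
sdf = model(z, xyz)                       # the surface is {x : sdf(x) = 0}
print(sdf.shape)                          # torch.Size([1024])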
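The Souza et al. TransConv entry keeps pre-trained ViT and CNN backbones frozen and trains only a small head on their concatenated features. A sketch of that training regime; the backbone choices, head sizes, and optimizer here are illustrative, not the published architecture:

import torch
import torch.nn as nn
from torchvision.models import resnet18, vit_b_16

cnn = resnet18(weights=None)
cnn.fc = nn.Identity()                     # expose 512-d CNN features
vit = vit_b_16(weights=None)
vit.heads = nn.Identity()                  # expose 768-d ViT features
cnn.eval()
vit.eval()
for p in list(cnn.parameters()) + list(vit.parameters()):
    p.requires_grad = False                # backbones stay frozen

head = nn.Sequential(                      # only this part is trained
    nn.LayerNorm(512 + 768),
    nn.Linear(512 + 768, 128), nn.ReLU(),
    nn.Linear(128, 2),                     # BE vs. adenocarcinoma
)
optimizer = torch.optim.Adam(head.parameters(), lr=1e-3)

images = torch.randn(4, 3, 224, 224)       # stand-in endoscopy images
labels = torch.randint(0, 2, (4,))
with torch.no_grad():                      # frozen backbones need no gradients
    features = torch.cat([cnn(images), vit(images)], dim=1)
loss = nn.functional.cross_entropy(head(features), labels)
loss.backward()
optimizer.step()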
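The OpenMIBOOD entry benchmarks 24 post-hoc OOD detectors, methods that score a trained classifier's outputs without retraining it. The simplest member of that family is maximum softmax probability (MSP), the usual baseline in such benchmarks (whether it is among the exact 24 is my assumption); a sketch:

import torch

def msp_score(logits: torch.Tensor) -> torch.Tensor:
    """Maximum softmax probability; higher means more in-distribution."""
    return torch.softmax(logits, dim=-1).max(dim=-1).values

logits = torch.randn(4, 10)            # stand-in classifier outputs
scores = msp_score(logits)
threshold = 0.5                        # in practice tuned on ID validation data
print(scores)
print(scores < threshold)              # True flags suspected OOD inputs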