@article{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, series = {Computers in Biology and Medicine}, volume = {169}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.compbiomed.2024.107929}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-69830}, pages = {24}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images and videos. In particular, the determination of the position and type of instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify and characterize datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images and videos. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame semantic and instance segmentation approaches, as well as those that incorporate temporal information. The publications analyzed were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in a total of 741 articles published between 01/2015 and 07/2023, of which 123 were included using systematic selection criteria. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments.}, subject = {Deep Learning}, language = {en} } @inproceedings{RueckertRiederFeussneretal., author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Feussner, Hubertus and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Smoke Classification in Laparoscopic Cholecystectomy Videos Incorporating Spatio-temporal Information}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_78}, pages = {298 -- 303}, abstract = {Heavy smoke development represents an important challenge for operating physicians during laparoscopic procedures and can potentially affect the success of an intervention due to reduced visibility and orientation. Reliable and accurate recognition of smoke is therefore a prerequisite for the use of downstream systems such as automated smoke evacuation systems.
Current approaches distinguish between frames with and without smoke but often ignore the temporal context inherent in endoscopic video data. In this work, we therefore present a method that utilizes the pixel-wise displacement between randomly sampled images and their preceding frames, determined using an optical flow algorithm, by providing the transformed magnitude of the displacement as an additional input to the network. Further, we incorporate the temporal context at evaluation time by applying an exponential moving average to the estimated class probabilities of the model output to obtain more stable and robust results over time. We evaluate our method on two convolution-based architectures and one state-of-the-art transformer architecture and show improvements in the classification results over a baseline approach, regardless of the network used.}, language = {en} } @unpublished{MendelRueckertWilhelmetal., author = {Mendel, Robert and R{\"u}ckert, Tobias and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Motion-Corrected Moving Average: Including Post-Hoc Temporal Information for Improved Video Segmentation}, doi = {10.48550/arXiv.2403.03120}, pages = {9}, abstract = {Real-time computational speed and a high degree of precision are requirements for computer-assisted interventions. Applying a segmentation network to a medical video processing task can introduce significant inter-frame prediction noise. Existing approaches can reduce inconsistencies by including temporal information but often impose requirements on the architecture or dataset. This paper proposes a method to include temporal information in any segmentation model and, thus, a technique to improve video segmentation performance without alterations during training or additional labeling. With Motion-Corrected Moving Average, we refine the exponential moving average between the current and previous predictions. Using optical flow to estimate the movement between consecutive frames, we can shift the prior term in the moving-average calculation to align with the geometry of the current frame. The optical flow calculation does not require the output of the model and can therefore be performed in parallel, leading to no significant runtime penalty for our approach. We evaluate our approach on two publicly available segmentation datasets and two proprietary endoscopic datasets and show improvements over a baseline approach.}, subject = {Deep Learning}, language = {en} } @unpublished{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, doi = {10.48550/arXiv.2304.13014}, pages = {25}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images. In particular, the determination of the position and type of the instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning.
In this review, we identify datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame segmentation approaches and those involving temporal information. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments. The publications considered were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in 408 articles published between 2015 and 2022, of which 109 were included using systematic selection criteria.}, language = {en} } @unpublished{RueckertRauberMaerkletal., author = {R{\"u}ckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R. and Gutbrod, Max and Nunes, Danilo Weber and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and Wang, An and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\´i}guez, Santiago and Aparicio, Nicol{\´a}s and Manrique, Leonardo and Lyons, Juan Camilo and Hosie, Olivia and Ayobi, Nicol{\´a}s and Arbel{\´a}ez, Pablo and Li, Yiping and Khalil, Yasmina Al and Nasirihaghighi, Sahar and Speidel, Stefanie and R{\"u}ckert, Daniel and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge}, pages = {36}, abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context - such as the current procedural phase - has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024.
We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges. The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.}, language = {en} } @misc{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Corrigendum to "Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art" [Comput. Biol. Med. 169 (2024) 107929]}, series = {Computers in Biology and Medicine}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2024.108027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70337}, pages = {1}, abstract = {The authors regret that the SAR-RARP50 dataset is missing from the description of publicly available datasets presented in Chapter 4.}, language = {en} } @article{HartwigBerletCzempieletal., author = {Hartwig, Regine and Berlet, Maximilian and Czempiel, Tobias and Fuchtmann, Jonas and R{\"u}ckert, Tobias and Feussner, Hubertus and Wilhelm, Dirk}, title = {Bildbasierte Unterst{\"u}tzungsmethoden f{\"u}r die zuk{\"u}nftige Anwendung in der Chirurgie}, series = {Die Chirurgie}, volume = {93}, journal = {Die Chirurgie}, publisher = {Springer}, doi = {10.1007/s00104-022-01668-x}, pages = {956 -- 965}, abstract = {Hintergrund: Die Entwicklung assistiver Technologien wird in den kommenden Jahren nicht nur in der Chirurgie von zunehmender Bedeutung sein. Die Wahrnehmung der Istsituation stellt hierbei die Grundlage jeder autonomen Handlung dar. Hierf{\"u}r k{\"o}nnen unterschiedliche Sensorsysteme genutzt werden, wobei videobasierte Systeme ein besonderes Potenzial aufweisen. Methode: Anhand von Literaturangaben und auf Basis eigener Forschungsarbeiten werden zentrale Aspekte bildbasierter Unterst{\"u}tzungssysteme f{\"u}r die Chirurgie dargestellt. Hierbei wird deren Potenzial, aber auch die Limitationen der Methoden erl{\"a}utert. Ergebnisse: Eine etablierte Anwendung stellt die Phasendetektion chirurgischer Eingriffe dar, f{\"u}r die Operationsvideos mittels neuronaler Netzwerke analysiert werden. Durch eine zeitlich gest{\"u}tzte und transformative Analyse konnten die Ergebnisse der Pr{\"a}diktion j{\"u}ngst deutlich verbessert werden. Aber auch robotische Kameraf{\"u}hrungssysteme nutzen Bilddaten, um das Laparoskop zuk{\"u}nftig autonom zu navigieren. Um die Zuverl{\"a}ssigkeit an die hohen Anforderungen in der Chirurgie anzugleichen, m{\"u}ssen diese jedoch durch zus{\"a}tzliche Informationen erg{\"a}nzt werden. Ein vergleichbarer multimodaler Ansatz wurde bereits f{\"u}r die Navigation und Lokalisation bei laparoskopischen Eingriffen umgesetzt. 
Hierzu werden Videodaten mittels verschiedener Methoden analysiert und diese Ergebnisse mit anderen Sensormodalit{\"a}ten fusioniert. Diskussion: Bildbasierte Unterst{\"u}tzungsmethoden sind bereits f{\"u}r diverse Aufgaben verf{\"u}gbar und stellen einen wichtigen Aspekt f{\"u}r die Chirurgie der Zukunft dar. Um hier jedoch zuverl{\"a}ssig und f{\"u}r autonome Funktionen eingesetzt werden zu k{\"o}nnen, m{\"u}ssen sie zuk{\"u}nftig in multimodale Ans{\"a}tze eingebettet werden, um die erforderliche Sicherheit bieten zu k{\"o}nnen.}, language = {de} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy, for example, bleeding and perforation. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, time and also training.}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {641 -- 649}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. 
Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fernandez-Esparrach, G. and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not achieve the level of the stand-alone performance of AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed ninety-six standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized in two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. 
Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, irrespective of comparable outcomes. These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @article{EbigboMendelRueckertetal., author = {Ebigbo, Alanna and Mendel, Robert and R{\"u}ckert, Tobias and Schuster, Laurin and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Mende, Matthias and Steinbr{\"u}ck, Ingo and Faiss, Siegbert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Deprez, Pierre and Oyama, Tsuneo and Takahashi, Akiko and Seewald, Stefan and Sharma, Prateek and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Endoscopic prediction of submucosal invasion in Barrett's cancer with the use of Artificial Intelligence: A pilot Study}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {09}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-1311-8570}, pages = {878 -- 883}, abstract = {Background and aims: The accurate differentiation between T1a and T1b Barrett's cancer has both therapeutic and prognostic implications but is challenging even for experienced physicians. We trained an Artificial Intelligence (AI) system on the basis of deep artificial neural networks (deep learning) to differentiate between T1a and T1b Barrett's cancer white-light images. Methods: Endoscopic images from three tertiary care centres in Germany were collected retrospectively. A deep learning system was trained and tested using the principles of cross-validation. A total of 230 white-light endoscopic images (108 T1a and 122 T1b) was evaluated with the AI-system. For comparison, the images were also classified by experts specialized in endoscopic diagnosis and treatment of Barrett's cancer. Results: The sensitivity, specificity, F1 and accuracy of the AI-system in the differentiation between T1a and T1b cancer lesions was 0.77, 0.64, 0.73 and 0.71, respectively. There was no statistically significant difference between the performance of the AI-system and that of human experts with sensitivity, specificity, F1 and accuracy of 0.63, 0.78, 0.67 and 0.70 respectively. Conclusion: This pilot study demonstrates the first multicenter application of an AI-based system in the prediction of submucosal invasion in endoscopic images of Barrett's cancer. AI scored equal to international experts in the field, but more work is necessary to improve the system and apply it to video sequences and in a real-life setting. 
Nevertheless, the correct prediction of submucosal invasion in Barrett's cancer remains challenging for both experts and AI.}, subject = {Maschinelles Lernen}, language = {en} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS). An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg. In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the performance of the AI algorithm (AI-EoE-EREFS) improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores. The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with a lower or medium experience level.}, language = {en} } @misc{RoemmeleMendelRauberetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Rauber, David and R{\"u}ckert, Tobias and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Endoscopic Diagnosis of Eosinophilic Esophagitis Using a Deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724274}, abstract = {Aims Eosinophilic esophagitis (EoE) is easily missed during endoscopy, either because physicians are not familiar with its endoscopic features or the morphologic changes are too subtle. In this preliminary paper, we present the first attempt to detect EoE in endoscopic white light (WL) images using a deep learning network (EoE-AI).
Methods 401 WL images of eosinophilic esophagitis and 871 WL images of normal esophageal mucosa were evaluated. All images were assessed for the Endoscopic Reference Score (EREFS) (edema, rings, exudates, furrows, strictures). Images with strictures were excluded. EoE was defined as the presence of at least 15 eosinophils per high power field on biopsy. A convolutional neural network based on the ResNet architecture with several five-fold cross-validation runs was used. Adding auxiliary EREFS-classification branches to the neural network allowed the inclusion of the scores as optimization criteria during training. EoE-AI was evaluated for sensitivity, specificity, and F1-score. In addition, two human endoscopists evaluated the images. Results EoE-AI showed a mean sensitivity, specificity, and F1 of 0.759, 0.976, and 0.834, respectively, averaged over the five distinct cross-validation runs. With the EREFS-augmented architecture, a mean sensitivity, specificity, and F1-score of 0.848, 0.945, and 0.861 could be demonstrated, respectively. In comparison, the two human endoscopists had an average sensitivity, specificity, and F1-score of 0.718, 0.958, and 0.793. Conclusions To the best of our knowledge, this is the first application of deep learning to endoscopic images of EoE, which were also assessed after augmentation with the EREFS score. The next step is the evaluation of EoE-AI using an external dataset. We then plan to assess the EoE-AI tool on endoscopic videos, and also in real time. This preliminary work is encouraging regarding the ability of AI to enhance physician detection of EoE, and potentially to perform a true "optical biopsy", but more work is needed.}, language = {en} } @misc{RueckertRiederRauberetal., author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Rauber, David and Xiao, Michel and Humolli, Eg and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {Augmenting instrument segmentation in video sequences of minimally invasive surgery by synthetic smoky frames}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {18}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {Suppl 1}, publisher = {Springer Nature}, doi = {10.1007/s11548-023-02878-2}, pages = {S54 -- S56}, language = {en} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Rauber, David and R{\"u}ckert, Tobias and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Real-time detection and delineation of tissue during third-space endoscopy using artificial intelligence (AI)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765128}, pages = {S53 -- S54}, abstract = {Aims AI has shown great potential in assisting endoscopists in diagnostics; however, its role in therapeutic endoscopy remains unclear. Endoscopic submucosal dissection (ESD) is a technically demanding intervention with a slow learning curve and relevant risks like bleeding and perforation. Therefore, we aimed to develop an algorithm for the real-time detection and delineation of relevant structures during third-space endoscopy. Methods 5470 still images from 59 full-length videos (47 ESD, 12 POEM) were annotated. 179681 additional unlabeled images were added to the training dataset.
Consequently, a DeepLabv3+ neural network architecture was trained with the ECMT semi-supervised algorithm (under review elsewhere). Evaluation of vessel detection was performed on a dataset of 101 standardized video clips from 15 separate third-space endoscopy videos with 200 predefined blood vessels. Results Internal validation yielded an overall mean Dice score of 85\% (68\% for blood vessels, 86\% for submucosal layer, 88\% for muscle layer). On the video test data, the overall vessel detection rate (VDR) was 94\% (96\% for ESD, 74\% for POEM). The median overall vessel detection time (VDT) was 0.32 sec (0.3 sec for ESD, 0.62 sec for POEM). Conclusions Evaluation of the developed algorithm on a video test dataset showed high VDR and quick VDT, especially for ESD. Further research will focus on a possible clinical benefit of the AI application for VDR and VDT during third-space endoscopy.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Muzalyova, Anna and Rauber, David and R{\"u}ckert, Tobias and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Mensch-Maschine-Interaktion: Einfluss k{\"u}nstlicher Intelligenz auf das diagnostische Vertrauen von Endoskopikern bei der Beurteilung des Barrett-{\"O}sophagus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {62}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {09}, publisher = {Georg Thieme Verlag KG}, doi = {10.1055/s-0044-1789656}, pages = {e575 -- e576}, abstract = {Ziele: Das Ziel der Studie war es, den Einfluss von KI auf die diagnostische Sicherheit (Konfidenzniveau) von Endoskopikern anhand von B{\"O}-Videos zu untersuchen und m{\"o}gliche Korrelationen mit der Untersuchungsqualit{\"a}t zu erforschen. Methodik: 22 Endoskopiker aus zw{\"o}lf Zentren mit unterschiedlicher Barrett-Erfahrung untersuchten 96 standardisierte Endoskopievideos. Die Untersucher wurden in Experten und Nicht-Experten eingeteilt und nach dem Zufallsprinzip f{\"u}r die Bewertung der Videos mit oder ohne KI eingeteilt. Die Teilnehmer wurden in zwei Gruppen aufgeteilt: Arm A bewertete zun{\"a}chst Videos ohne KI und dann mit KI, w{\"a}hrend Arm B die umgekehrte Reihenfolge einhielt. Die Untersucher hatten die Aufgabe, B{\"O}-assoziierte Neoplasien zu erkennen und ihr Konfidenzniveau sowohl mit als auch ohne KI auf einer Skala von 0 bis 9 anzugeben. Ergebnis: In Arm A erh{\"o}hte der Einsatz von KI das Konfidenzniveau bei beiden signifikant (p<0.001). Bemerkenswert ist, dass jedoch nur Nicht-Experten durch die KI eine signifikante Verbesserung der Sensitivit{\"a}t und Spezifit{\"a}t (p<0.001 bzw. p<0.05) erfuhren. W{\"a}hrend Experten ohne KI im Vergleich zu Nicht-Experten mit KI ein h{\"o}heres Konfidenzniveau aufwiesen, gab es keinen signifikanten Unterschied in der Genauigkeit. In Arm B zeigten beide Gruppen eine signifikante Abnahme des Konfidenzniveaus (p<0.001) bei gleichbleibender Genauigkeit. Dar{\"u}ber hinaus wurden in 9\% der Entscheidungen trotz korrekter KI eine falsche Wahl getroffen. Schlussfolgerung: Der Einsatz k{\"u}nstlicher Intelligenz steigerte das Konfidenzniveau sowohl bei Experten als auch bei Nicht-Experten signifikant - ein Effekt, der im Studienmodell reversibel war. Dar{\"u}ber hinaus wiesen Experten mit oder ohne KI durchweg h{\"o}here Konfidenzniveaus auf als Nicht-Experten mit KI, trotz vergleichbarer Ergebnisse. 
Zudem konnte beobachtet werden, dass die Untersucher in 9\% der F{\"a}lle die KI zuungunsten des Patienten ignorierten.}, language = {de} }