@article{SouzaJrPassosSantanaetal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Rauber, David and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Layer-selective deep representation to improve esophageal cancer classification}, series = {Medical \& Biological Engineering \& Computing}, volume = {62}, journal = {Medical \& Biological Engineering \& Computing}, publisher = {Springer Nature}, address = {Heidelberg}, doi = {10.1007/s11517-024-03142-8}, pages = {3355 -- 3372}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performance in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting medical diagnosis. For this task, the deep learning techniques' black-box nature must somehow be lightened up to clarify their promising results. Hence, we aim to investigate the impact of the ResNet-50 deep convolutional design for Barrett's esophagus and adenocarcinoma classification. For such a task, and aiming at proposing a two-step learning technique, the output of each convolutional layer that composes the ResNet-50 architecture was trained and classified to identify which layers have the most impact within the architecture. We showed that local information and high-dimensional features are essential to improve the classification for our task. Moreover, we observed a significant improvement when the most discriminative layers expressed more impact in the training and classification of ResNet-50 for Barrett's esophagus and adenocarcinoma classification, demonstrating that both human knowledge and computational processing may influence the correct learning of such a problem.}, language = {en} } @inproceedings{WeberNunesRauberPalm, author = {Weber Nunes, Danilo and Rauber, David and Palm, Christoph}, title = {Self-supervised 3D Vision Transformer Pre-training for Robust Brain Tumor Classification}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_69}, pages = {298 -- 303}, abstract = {Brain tumors pose significant challenges in neurology, making precise classification crucial for prognosis and treatment planning. This work investigates the effectiveness of a self-supervised learning approach, masked autoencoding (MAE), to pre-train a vision transformer (ViT) model for brain tumor classification. Our method uses non-domain-specific data, leveraging the ADNI and OASIS-3 MRI datasets, which primarily focus on degenerative diseases, for pre-training. The model is subsequently fine-tuned and evaluated on the BraTS glioma and meningioma datasets, representing a novel use of these datasets for tumor classification.
The pre-trained MAE ViT model achieves an average F1 score of 0.91 in a 5-fold cross-validation setting, outperforming the nnU-Net encoder trained from scratch, particularly under limited data conditions. These findings highlight the potential of self-supervised MAE in enhancing brain tumor classification accuracy, even with restricted labeled data.}, language = {en} } @inproceedings{GutbrodGeislerRauberetal., author = {Gutbrod, Max and Geisler, Benedikt and Rauber, David and Palm, Christoph}, title = {Data Augmentation for Images of Chronic Foot Wounds}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_71}, pages = {261 -- 266}, abstract = {Training data for neural networks is often scarce in the medical domain, which results in models that struggle to generalize and consequently show poor performance on unseen datasets. Generally, adding augmentation methods to the training pipeline considerably enhances a model's performance. Using the dataset of the Foot Ulcer Segmentation Challenge, we analyze two additional augmentation methods in the domain of chronic foot wounds: local warping of wound edges along with projection and blurring of shapes inside wounds. Our experiments show that improvements in the Dice similarity coefficient and Normalized Surface Distance metrics depend on a sensible selection of those augmentation methods.}, language = {en} } @inproceedings{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD: Open Medical Imaging Benchmarks for Out-Of-Distribution Detection}, series = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 10.-17. June 2025, Nashville}, booktitle = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 10.-17. June 2025, Nashville}, publisher = {IEEE}, isbn = {979-8-3315-4364-8}, doi = {10.1109/CVPR52734.2025.02410}, pages = {25874 -- 25886}, abstract = {The growing reliance on Artificial Intelligence (AI) in critical domains such as healthcare demands robust mechanisms to ensure the trustworthiness of these systems, especially when faced with unexpected or anomalous inputs. This paper introduces the Open Medical Imaging Benchmarks for Out-Of-Distribution Detection (OpenMIBOOD), a comprehensive framework for evaluating out-of-distribution (OOD) detection methods specifically in medical imaging contexts. OpenMIBOOD includes three benchmarks from diverse medical domains, encompassing 14 datasets divided into covariate-shifted in-distribution, near-OOD, and far-OOD categories. We evaluate 24 post-hoc methods across these benchmarks, providing a standardized reference to advance the development and fair comparison of OOD detection methods. Results reveal that findings from broad-scale OOD benchmarks in natural image domains do not translate to medical applications, underscoring the critical need for such benchmarks in the medical field.
By mitigating the risk of exposing AI models to inputs outside their training distribution, OpenMIBOOD aims to support the advancement of reliable and trustworthy AI systems in healthcare. The repository is available at https://github.com/remic-othr/OpenMIBOOD.}, language = {en} } @unpublished{RueckertRauberMaerkletal., author = {R{\"u}ckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R. and Gutbrod, Max and Nunes, Danilo Weber and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and Wang, An and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\'i}guez, Santiago and Aparicio, Nicol{\'a}s and Manrique, Leonardo and Lyons, Juan Camilo and Hosie, Olivia and Ayobi, Nicol{\'a}s and Arbel{\'a}ez, Pablo and Li, Yiping and Khalil, Yasmina Al and Nasirihaghighi, Sahar and Speidel, Stefanie and R{\"u}ckert, Daniel and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge}, pages = {36}, abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context - such as the current procedural phase - has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024. We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges. The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.}, language = {en} } @misc{ScheppachMendelMuzalyovaetal., author = {Scheppach, Markus W.
and Mendel, Robert and Muzalyova, Anna and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Yip, Hon Chi and Lau, Louis Ho Shing and G{\"o}lder, Stefan Karl and Schmidt, Arthur and Kouladouros, Konstantinos and Abdelhafez, Mohamed and Walter, B. and Meinikheim, Michael and Chiu, Philip Wai Yan and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {K{\"u}nstliche Intelligenz erh{\"o}ht die Gef{\"a}ßerkennung von Endoskopikern bei third space Endoskopie}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {62}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {09}, publisher = {Georg Thieme Verlag KG}, doi = {10.1055/s-0044-1790087}, pages = {e830}, abstract = {Introduction: Artificial intelligence (AI) algorithms support endoscopists in the detection and characterization of colon polyps in clinical practice and increase the adenoma detection rate. In therapeutic procedures such as endoscopic submucosal dissection (ESD), AI can also recognize relevant anatomical structures with high accuracy and mark them in the endoscopic image in real time. The effect of such an application on the vessel detection performance of endoscopists has not yet been investigated. Objectives: This study examined the effect of an AI algorithm for real-time vessel marking during ESD on the vessel detection rate of endoscopists. Methods: 59 third space endoscopy videos were extracted from the database of the University Hospital Augsburg. Submucosal blood vessels were annotated on 5470 individual frames from these examinations. Together with a further 179681 unlabeled frames, a DeepLabV3+ neural network was trained with a semi-supervised learning method to detect submucosal blood vessels in the endoscopic image and delineate them in real time. In a video test with 101 video clips and 200 predefined blood vessels, 19 endoscopists were evaluated with and without AI support. Results: In the video test, the algorithm detected 93.5\% of the vessels with a median detection time of 0.3 seconds. With AI support, the vessel detection rate of the endoscopists increased from 56.4\% to 72.4\% (p<0.001), and their vessel detection time decreased from 6.7 to 5.2 seconds (p<0.001). The algorithm produced false positive detections in 4.5\% of the frames. False positive detections persisted for a shorter time than true positive ones (0.7 versus 6.0 seconds, p<0.001). Conclusion: AI support led to a higher vessel detection rate and a faster vessel detection time for endoscopists. A possible clinical effect on the intraprocedural complication rate or procedure time could be assessed in prospective studies.}, language = {de} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Nunes, Danilo Weber and Arizi, X.
and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Intraoperative Phasenerkennung bei endoskopischer Submukosadissektion mit Hilfe von k{\"u}nstlicher Intelligenz}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {62}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {09}, publisher = {Georg Thieme Verlag KG}, doi = {10.1055/s-0044-1790084}, pages = {e828}, abstract = {Introduction: Artificial intelligence (AI) is used in gastrointestinal endoscopy for the detection and characterization of colon polyps. The role of AI in therapeutic procedures has not yet been studied in depth. Intraprocedural phase recognition during endoscopic submucosal dissection (ESD) could enable the collection of quality indicators. Furthermore, this technology could lead to a deeper understanding of the characteristics of the procedure and pave the way for further applications such as automated documentation or standardized training. Objectives: The aim of this study was to develop an AI algorithm for intraprocedural phase recognition during endoscopic submucosal dissection. Methods: 2071546 frames from 27 full-length ESD videos were annotated with the superordinate classes diagnostics, marking, needle injection, dissection, and bleeding, as well as the subordinate classes endoscope manipulation, injection, and application of electrical current. A Video Swin Transformer was trained with uniform sampling on a training dataset (898440 frames, 17 ESDs) and internally validated (769523 frames, 6 ESDs). In addition to the internal validation, the algorithm was evaluated on a separate test dataset (403583 frames, 4 ESDs). Results: The F1 score of the algorithm across all classes was 83\% in the internal validation and 90\% in the separate test. In the separate test, true positive (TP) rates of 100\%, 100\%, 96\%, 97\%, and 93\% were obtained for diagnostics, marking, needle injection, dissection, and bleeding; for endoscope manipulation, injection, and application of electrical current, the TP rates were 92\%, 98\%, and 91\%. Conclusion: The developed algorithm classified full-length ESD videos frame by frame with high accuracy. Future research could derive intraoperative quality indicators from this information and enable automated documentation.}, language = {de} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Verwendung k{\"u}nstlicher Intelligenz bei der Detektion der Papilla duodeni major}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1772000}, pages = {e539 -- e540}, abstract = {Introduction: Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard in the diagnosis and treatment of diseases of the pancreatobiliary tract. However, it is technically very demanding and has a comparatively high complication rate.
Objectives: This feasibility study examines whether a deep learning algorithm can reliably detect the papilla and the ostium and could thus serve as a suitable aid for endoscopists with little experience, particularly in training settings. Methods: We considered a total of 606 image datasets from 65 patients, in which both the papilla duodeni major and the ostium were segmented. A neural network was then trained using a deep learning algorithm, and a 5-fold cross-validation was performed. Results: In the 5-fold cross-validation on the 606 labeled datasets, an F1 score of 0.7908, a sensitivity of 0.7943, and a specificity of 0.9785 were achieved for the papilla class, and an F1 score of 0.5538, a sensitivity of 0.5094, and a specificity of 0.9970 for the ostium class (cf. [Tab. 1]). Averaged over both classes (papilla and ostium), the F1 score was 0.6673, the sensitivity 0.6519, and the specificity 0.9877 (cf. [Tab. 2]). Conclusion: In this feasibility study, the neural network identified the papilla duodeni major with high sensitivity and very high specificity. For the detection of the ostium, the sensitivity was considerably lower. In the future, the neural network will be trained with more data, and it is planned to apply the algorithm to videos as well. In the long term, a suitable aid for ERCP could thus be established.}, language = {de} } @article{RueckertRauberMaerkletal., author = {Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R.
and Gutbrod, Max and Nunes, Danilo Weber and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and Wang, An and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\'i}guez, Santiago and Aparicio, Nicol{\'a}s and Manrique, Leonardo and Palm, Christoph and Wilhelm, Dirk and Feussner, Hubertus and Rueckert, Daniel and Speidel, Stefanie and Nasirihaghighi, Sahar and Al Khalil, Yasmina and Li, Yiping and Arbel{\'a}ez, Pablo and Ayobi, Nicol{\'a}s and Hosie, Olivia and Lyons, Juan Camilo}, title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge}, series = {Medical Image Analysis}, volume = {109}, journal = {Medical Image Analysis}, publisher = {Elsevier}, issn = {1361-8415}, doi = {10.1016/j.media.2026.103945}, pages = {31}, abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context - such as the current procedural phase - has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024. We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges.
The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.}, language = {en} } @misc{RueckertRauberKlausmannetal., author = {Rueckert, Tobias and Rauber, David and Klausmann, Leonard and Gutbrod, Max and Rueckert, Daniel and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {PhaKIR Dataset - Surgical Procedure Phase, Keypoint, and Instrument Recognition [Data set]}, doi = {10.5281/zenodo.15740620}, abstract = {Note: A script for extracting the individual frames from the video files while preserving the challenge-compliant directory structure and frame-to-mask naming conventions is available on GitHub and can be accessed here: https://github.com/remic-othr/PhaKIR_Dataset. The dataset is described in the following publications: Rueckert, Tobias et al.: Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge. arXiv preprint, https://arxiv.org/abs/2507.16559. 2025. Rueckert, Tobias et al.: Video Dataset for Surgical Phase, Keypoint, and Instrument Recognition in Laparoscopic Surgery (PhaKIR). arXiv preprint, https://arxiv.org/abs/2511.06549. 2025. The proposed dataset was used as the training dataset in the PhaKIR challenge (https://phakir.re-mic.de/) as part of EndoVis-2024 at MICCAI 2024 and consists of eight real-world videos of human cholecystectomies ranging from 23 to 60 minutes in duration. The procedures were performed by experienced physicians, and the videos were recorded in three hospitals. Going beyond existing datasets, our annotations provide pixel-wise instance segmentation masks of surgical instruments for a total of 19 categories and coordinates of relevant instrument keypoints (instrument tip(s), shaft-tip transition, shaft), both at an interval of one frame per second, as well as the intervention phase for every individual frame, drawn from eight different phase categories. The dataset thus comprehensively covers instrument localization and the context of the operation. Furthermore, the complete video sequences make it possible to incorporate temporal information for the respective tasks and thus further improve the resulting methods and outcomes.}, language = {en} } @misc{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {A cleaned subset of the first five CATARACTS test videos [Data set]}, doi = {10.5281/zenodo.14924735}, abstract = {This dataset is a subset of the original CATARACTS test dataset and is used by the OpenMIBOOD framework to evaluate a specific out-of-distribution setting. When using this dataset, it is mandatory to cite the corresponding publication (OpenMIBOOD, DOI 10.1109/CVPR52734.2025.02410) and follow the acknowledgement and citation requirements of the original dataset (CATARACTS). The original CATARACTS dataset (associated publication, homepage) consists of 50 videos of cataract surgeries, split into 25 train and 25 test videos. This subset contains the frames of the first 5 test videos.
Further, black frames at the beginning of each video were removed.}, language = {en} } @misc{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {Cropped single instrument frames subset from Cholec80 [Data set]}, doi = {10.5281/zenodo.14921670}, abstract = {This dataset is a subset of the original Cholec80 dataset and is used by the OpenMIBOOD framework to evaluate a specific out-of-distribution setting. When using this dataset, it is mandatory to cite the corresponding publication (OpenMIBOOD) and to follow the acknowledgement and citation requirements of the original dataset (Cholec80). The original Cholec80 dataset (associated paper, homepage) consists of 80 cholecystectomy surgery videos recorded at 25 fps, performed by 13 surgeons. It includes phase annotations (25 fps) and tool presence labels (1 fps), with phase definitions provided by a senior surgeon. A tool is considered present if at least half of its tip is visible. The dataset categorizes tools into seven types: Grasper, Bipolar, Hook, Scissors, Clipper, Irrigator, and Specimen bag. Multiple tools may be present in each frame. Additionally, 76 of the 80 videos exhibit a strong black vignette. For this dataset subset, frames were extracted based on tool presence labels, selecting only those containing Grasper, Bipolar, Hook, or Clipper, while ensuring that only a single tool appears per frame. To enhance visual consistency, the black vignette was removed by extracting an inner rectangular region, where applicable.}, language = {en} } @misc{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD's classification models for the MIDOG, PhaKIR, and OASIS-3 benchmarks [Data set]}, doi = {10.5281/zenodo.14982267}, abstract = {These models are provided for evaluating post-hoc out-of-distribution (OOD) detection methods on the three OpenMIBOOD benchmarks: MIDOG, PhaKIR, and OASIS-3. When using these models, make sure to give appropriate credit and cite the OpenMIBOOD publication.}, language = {en} }