@article{WeiherervonRiedheimBrebantetal., author = {Weiherer, Maximilian and von Riedheim, Antonia and Br{\'e}bant, Vanessa and Egger, Bernhard and Palm, Christoph}, title = {Learning Neural Parametric 3D Breast Shape Models for Metrical Surface Reconstruction From Monocular RGB Videos}, series = {Machine Learning for Biomedical Imaging (MELBA)}, journal = {Machine Learning for Biomedical Imaging (MELBA)}, number = {MELBA-BVM 2025 Special Issue}, publisher = {Melba}, doi = {10.59275/j.melba.2026-8b23}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-89791}, pages = {95 -- 114}, abstract = {We present a neural parametric 3D breast shape model and, based on this model, introduce a low-cost and accessible 3D surface reconstruction pipeline capable of recovering accurate breast geometry from a monocular RGB video. In contrast to widely used, commercially available yet expensive 3D breast scanning solutions and existing low-cost alternatives, our method requires neither specialized hardware nor proprietary software and can be used with any device that is able to record RGB videos. The key building blocks of our pipeline are a state-of-the-art, off-the-shelf Structure-from-Motion pipeline, paired with a parametric breast model for robust surface reconstruction. Our model, similarly to the recently proposed implicit Regensburg Breast Shape Model (iRBSM), leverages implicit neural representations to model breast shapes. However, unlike the iRBSM, which employs a single global neural Signed Distance Function (SDF), our approach—inspired by recent state-of-the-art face models—decomposes the implicit breast domain into multiple smaller regions, each represented by a local neural SDF anchored at anatomical landmark positions. When incorporated into our surface reconstruction pipeline, the proposed model, dubbed liRBSM (short for localized iRBSM), significantly outperforms the iRBSM in terms of reconstruction quality, yielding more detailed surface reconstruction than its global counterpart. Overall, we find that the introduced pipeline is able to recover high-quality and metrically correct 3D breast geometry within an error margin of less than 2 mm. Our method is fast (requires less than six minutes), fully transparent and open-source, and together with the model publicly available at https://rbsm.re-mic.de/local-implicit.}, language = {en} } @inproceedings{GutbrodRauberPalm, author = {Gutbrod, Max and Rauber, David and Palm, Christoph}, title = {Improving Generalization in Mitotic Cell Detection via Domain Transformations}, series = {Bildverarbeitung f{\"u}r die Medizin 2026: Proceedings, German Conference on Medical Image Computing, L{\"u}beck March 15-17, 2026}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2026: Proceedings, German Conference on Medical Image Computing, L{\"u}beck March 15-17, 2026}, editor = {Handels, Heinz and Breininger, Katharina and Deserno, Thomas M. and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-51100-5_71}, pages = {362 -- 367}, abstract = {We address domain generalization (DG) in mitotic-cell (MC) detection by combining a β-variational autoencoder (VAE) for domain transformations with feature-space alignment together with an object detector. The β-VAE synthesizes domain-transformed images, and the detector is trained to map originals and their transformed counterparts to equal representations.
On the MIDOG++ dataset, this approach improves out-of-domain detection F1 scores by 7 and 3 percentage points compared to the color-variation augmentation and stain-normalization baselines. Results further suggest that morphology shifts hinder generalization more than stain shifts.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @misc{KlausmannRueckertRauberetal., author = {Klausmann, Leonard and Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Yildiran, Suemeyye R. and Gutbrod, Max and Palm, Christoph}, title = {Abstract: DIY Challenge Blueprint}, series = {Bildverarbeitung f{\"u}r die Medizin 2026: Proceedings, German Conference on Medical Image Computing, L{\"u}beck March 15-17, 2026}, journal = {Bildverarbeitung f{\"u}r die Medizin 2026: Proceedings, German Conference on Medical Image Computing, L{\"u}beck March 15-17, 2026}, editor = {Handels, Heinz and Breininger, Katharina and Deserno, Thomas M. and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-51100-5_27}, pages = {131 -- 131}, abstract = {The high cost of challenge platforms prevents many people from organizing their own competitions. The do-it-yourself (DIY) challenge blueprint [1] allows you to host your own biomedical AI benchmark challenge. Our DIY approach circumvents the current constraints of commercial challenge platforms. A sovereign, extensible and cost-efficient deployment is provided via containerised, identity-managed and reproducible pipelines. Focus lies on GDPR-compliant hosting via infrastructure-as-code, automated evaluation, modular orchestration, and role-based identity and access management. The framework integrates Docker-based execution and standardised interfaces for task definitions, dataset curation and evaluation. All in all it is designed to be flexible and modular, as demonstrated in the MICCAI 2024 PhaKIR challenge [2, 3]. In this case study, different medical tasks on a multicentre laparoscopic dataset with framewise labels for phases and spatial annotations for instruments across full-length videos were supported. This case study empirically validates the DIY challenge blueprint as a reproducible and customizable challenge-hosting infrastructure. The full code can be found at https://github.com/remic-othr/PhaKIR_DIY.}, subject = {Bildverarbeitung}, language = {en} } @article{deSouzaJuniorPachecoOliveiradosSantosetal., author = {de Souza J{\'u}nior, Luis Antonio and Pacheco, Andr{\'e} Georghton Cardoso and Oliveira dos Santos, Thiago and Fogos da Rocha, Wyctor and Bouzon, Pedro Henrique and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {LiwTERM-r: a Revised Lightweight Transformer-based Model for Multimodal Skin Lesion Detection Robust to Incomplete Input}, series = {Journal of the Brazilian Computer Society}, volume = {32}, journal = {Journal of the Brazilian Computer Society}, number = {1}, publisher = {Brazilian Computer Society}, doi = {10.5753/jbcs.2026.5871}, pages = {11}, abstract = {As the most common type of cancer in the world, skin cancer accounts for approximately 30\% of all diagnosed tumor-based lesions. Early diagnosis can reduce mortality and prevent disfiguring in different skin regions.
With the application of machine learning techniques in recent years, especially deep learning, promising results in this task could be achieved, presenting studies demonstrating that the combination of patients' clinical anamneses and images of the injured lesion is essential for improving the correct classification of skin lesions. Despite that, meaningful use of anamneses with multiple collected images of the same skin lesion is mandatory, requiring further investigation. Thus, this project aims to contribute to developing multimodal machine learning-based models to solve the skin lesion classification problem by employing a lightweight transformer model that is robust to missing clinical information input. As a main hypothesis, models can be fed by multiple images from different sources as input along with clinical anamneses from the patient's historical evaluations, leading to a more factual and trustworthy diagnosis. Our model deals with the not-trivial task of combining images and clinical information concerning the skin lesions in a lightweight transformer architecture that does not demand high computation resources or even all the information from the anamneses but still presents competitive classification results.}, language = {en} } @misc{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD's classification models for the MIDOG, PhaKIR, and OASIS-3 benchmarks [Data set]}, doi = {10.5281/zenodo.14982267}, abstract = {These models are provided for evaluating post-hoc out-of-distribution methods on the three OpenMIBOOD benchmarks: MIDOG, PhaKIR, and OASIS-3. When using these models, make sure to give appropriate credit and cite the OpenMIBOOD publication.}, language = {en} } @misc{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {Cropped single instrument frames subset from Cholec80 [Data set]}, doi = {10.5281/zenodo.14921670}, abstract = {This dataset is a subset of the original Cholec80 dataset and is used by the OpenMIBOOD framework to evaluate a specific out-of-distribution setting. When using this dataset, it is mandatory to cite the corresponding publication (OpenMIBOOD) and to follow the acknowledgement and citation requirements of the original dataset (Cholec80). The original Cholec80 dataset (associated paper,Homepage) consists of 80 cholecystectomy surgery videos recorded at 25 fps, performed by 13 surgeons. It includes phase annotations (25 fps) and tool presence labels (1 fps), with phase definitions provided by a senior surgeon. A tool is considered present if at least half of its tip is visible. The dataset categorizes tools into seven types: Grasper, Bipolar, Hook, Scissors, Clipper, Irrigator, and Specimen bag. Multiple tools may be present in each frame. Additionally, 76 of the 80 videos exhibit a strong black vignette. For this dataset subset, frames were extracted based on tool presence labels, selecting only those containing Grasper, Bipolar, Hook, or Clipper while ensuring that only a single tool appears per frame. 
To enhance visual consistency, the black vignette was removed by extracting an inner rectangular region, where applicable.}, language = {en} } @misc{RueckertRauberKlausmannetal., author = {Rueckert, Tobias and Rauber, David and Klausmann, Leonard and Gutbrod, Max and Rueckert, Daniel and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {PhaKIR Dataset - Surgical Procedure Phase, Keypoint, and Instrument Recognition [Data set]}, doi = {10.5281/zenodo.15740620}, abstract = {Note: A script for extracting the individual frames from the video files while preserving the challenge-compliant directory structure and frame-to-mask naming conventions is available on GitHub and can be accessed here: https://github.com/remic-othr/PhaKIR_Dataset. The dataset is described in the following publications: Rueckert, Tobias et al.: Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge. arXiv preprint, https://arxiv.org/abs/2507.16559. 2025. Rueckert, Tobias et al.: Video Dataset for Surgical Phase, Keypoint, and Instrument Recognition in Laparoscopic Surgery (PhaKIR). arXiv preprint, https://arxiv.org/abs/2511.06549. 2025. The proposed dataset was used as the training dataset in the PhaKIR challenge (https://phakir.re-mic.de/) as part of EndoVis-2024 at MICCAI 2024 and consists of eight real-world videos of human cholecystectomies ranging from 23 to 60 minutes in duration. The procedures were performed by experienced physicians, and the videos were recorded in three hospitals. In addition to existing datasets, our annotations provide pixel-wise instance segmentation masks of surgical instruments for a total of 19 categories, coordinates of relevant instrument keypoints (instrument tip(s), shaft-tip transition, shaft), both at an interval of one frame per second, and specifications regarding the intervention phases for a total of eight different phase categories for each individual frame in one dataset and thus comprehensively cover instrument localization and the context of the operation. Furthermore, the provision of the complete video sequences offers the opportunity to include the temporal information regarding the respective tasks and thus further optimize the resulting methods and outcomes.}, language = {en} } @misc{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {A cleaned subset of the first five CATARACTS test videos [Data set]}, doi = {10.5281/zenodo.14924735}, abstract = {This dataset is a subset of the original CATARACTS test dataset and is used by the OpenMIBOOD framework to evaluate a specific out-of-distribution setting. When using this dataset, it is mandatory to cite the corresponding publication (OpenMIBOOD (10.1109/CVPR52734.2025.02410)) and follow the acknowledgement and citation requirements of the original dataset (CATARACTS). The original CATARACTS dataset (associated publication,Homepage) consists of 50 videos of cataract surgeries, split into 25 train and 25 test videos. This subset contains the frames of the first 5 test videos. Further, black frames at the beginning of each video were removed.}, language = {en} } @article{RueckertRauberMaerkletal., author = {Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R. 
and Gutbrod, Max and Nunes, Danilo Weber and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and Wang, An and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\´i}guez, Santiago and Aparicio, Nicol{\´a}s and Manrique, Leonardo and Palm, Christoph and Wilhelm, Dirk and Feussner, Hubertus and Rueckert, Daniel and Speidel, Stefanie and Nasirihaghighi, Sahar and Al Khalil, Yasmina and Li, Yiping and Arbel{\´a}ez, Pablo and Ayobi, Nicol{\´a}s and Hosie, Olivia and Lyons, Juan Camilo}, title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge}, series = {Medical Image Analysis}, volume = {109}, journal = {Medical Image Analysis}, publisher = {Elsevier}, issn = {1361-8415}, doi = {10.1016/j.media.2026.103945}, pages = {31}, abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context - such as the current procedural phase - has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024. We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges. The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.}, language = {en} } @inproceedings{KlausmannRueckertRauberetal., author = {Klausmann, Leonard and Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Yildiran, Suemeyye R. 
and Gutbrod, Max and Palm, Christoph}, title = {DIY challenge blueprint: from organization to technical realization in biomedical image analysis}, series = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2025 ; Proceedings Part XI}, booktitle = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2025 ; Proceedings Part XI}, publisher = {Springer}, address = {Cham}, isbn = {978-3-032-05141-7}, doi = {10.1007/978-3-032-05141-7_9}, pages = {85 -- 95}, abstract = {Biomedical image analysis challenges have become the de facto standard for publishing new datasets and benchmarking different state-of-the-art algorithms. Most challenges use commercial cloud-based platforms, which can limit custom options and involve disadvantages such as reduced data control and increased costs for extended functionalities. In contrast, Do-It-Yourself (DIY) approaches have the capability to emphasize reliability, compliance, and custom features, providing a solid basis for low-cost, custom designs in self-hosted systems. Our approach emphasizes cost efficiency, improved data sovereignty, and strong compliance with regulatory frameworks, such as the GDPR. This paper presents a blueprint for DIY biomedical imaging challenges, designed to provide institutions with greater autonomy over their challenge infrastructure. Our approach comprehensively addresses both organizational and technical dimensions, including key user roles, data management strategies, and secure, efficient workflows. Key technical contributions include a modular, containerized infrastructure based on Docker, integration of open-source identity management, and automated solution evaluation workflows. Practical deployment guidelines are provided to facilitate implementation and operational stability. The feasibility and adaptability of the proposed framework are demonstrated through the MICCAI 2024 PhaKIR challenge with multiple international teams submitting and validating their solutions through our self-hosted platform. This work can be used as a baseline for future self-hosted DIY implementations and our results encourage further studies in the area of biomedical image analysis challenges.}, language = {en} } @article{MaerklRueckertRauberetal., author = {Maerkl, Raphaela and Rueckert, Tobias and Rauber, David and Gutbrod, Max and Weber Nunes, Danilo and Palm, Christoph}, title = {Enhancing generalization in zero-shot multi-label endoscopic instrument classification}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {20}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer Nature}, doi = {10.1007/s11548-025-03439-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-85674}, pages = {1577 -- 1587}, abstract = {Purpose Recognizing previously unseen classes with neural networks is a significant challenge due to their limited generalization capabilities. This issue is particularly critical in safety-critical domains such as medical applications, where accurate classification is essential for reliability and patient safety. Zero-shot learning methods address this challenge by utilizing additional semantic data, with their performance relying heavily on the quality of the generated embeddings. Methods This work investigates the use of full descriptive sentences, generated by a Sentence-BERT model, as class representations, compared to simpler category-based word embeddings derived from a BERT model. 
Additionally, the impact of z-score normalization as a post-processing step on these embeddings is explored. The proposed approach is evaluated on a multi-label generalized zero-shot learning task, focusing on the recognition of surgical instruments in endoscopic images from minimally invasive cholecystectomies. Results The results demonstrate that combining sentence embeddings and z-score normalization significantly improves model performance. For unseen classes, the AUROC improves from 43.9\% to 64.9\%, and the multi-label accuracy from 26.1\% to 79.5\%. Overall performance measured across both seen and unseen classes improves from 49.3\% to 64.9\% in AUROC and from 37.3\% to 65.1\% in multi-label accuracy, highlighting the effectiveness of our approach. Conclusion These findings demonstrate that sentence embeddings and z-score normalization can substantially enhance the generalization performance of zero-shot learning models. However, as the study is based on a single dataset, future work should validate the method across diverse datasets and application domains to establish its robustness and broader applicability.}, language = {en} } @unpublished{RueckertRauberMaerkletal., author = {R{\"u}ckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R. and Gutbrod, Max and Nunes, Danilo Weber and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and an Wang, and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\´i}guez, Santiago and Aparicio, Nicol{\´a}s and Manrique, Leonardo and Lyons, Juan Camilo and Hosie, Olivia and Ayobi, Nicol{\´a}s and Arbel{\´a}ez, Pablo and Li, Yiping and Khalil, Yasmina Al and Nasirihaghighi, Sahar and Speidel, Stefanie and R{\"u}ckert, Daniel and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge}, pages = {36}, abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context - such as the current procedural phase - has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024. 
We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges. The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.}, language = {en} } @inproceedings{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD: Open Medical Imaging Benchmarks for Out-Of-Distribution Detection}, series = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 10.-17. June 2025, Nashville}, booktitle = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 10.-17. June 2025, Nashville}, publisher = {IEEE}, isbn = {979-8-3315-4364-8}, doi = {10.1109/CVPR52734.2025.02410}, pages = {25874 -- 25886}, abstract = {The growing reliance on Artificial Intelligence (AI) in critical domains such as healthcare demands robust mechanisms to ensure the trustworthiness of these systems, especially when faced with unexpected or anomalous inputs. This paper introduces the Open Medical Imaging Benchmarks for Out-Of-Distribution Detection (OpenMIBOOD), a comprehensive framework for evaluating out-of-distribution (OOD) detection methods specifically in medical imaging contexts. OpenMIBOOD includes three benchmarks from diverse medical domains, encompassing 14 datasets divided into covariate-shifted in-distribution, near-OOD, and far-OOD categories. We evaluate 24 post-hoc methods across these benchmarks, providing a standardized reference to advance the development and fair comparison of OOD detection methods. Results reveal that findings from broad-scale OOD benchmarks in natural image domains do not translate to medical applications, underscoring the critical need for such benchmarks in the medical field. By mitigating the risk of exposing AI models to inputs outside their training distribution, OpenMIBOOD aims to support the advancement of reliable and trustworthy AI systems in healthcare. The repository is available at https://github.com/remic-othr/OpenMIBOOD.}, language = {en} } @article{RoserMeinikheimMuzalyovaetal., author = {Roser, David and Meinikheim, Michael and Muzalyova, Anna and Mendel, Robert and Palm, Christoph and Probst, Andreas and Nagl, Sandra and Scheppach, Markus W. and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Parsa, Nasim and Byrne, Michael F.
and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial intelligence-assisted endoscopy and examiner confidence : a study on human-artificial intelligence interaction in Barrett's Esophagus (With Video)}, series = {DEN Open}, volume = {6}, journal = {DEN Open}, number = {1}, publisher = {Wiley}, doi = {10.1002/deo2.70150}, pages = {8}, abstract = {Objective Despite high stand-alone performance, studies demonstrate that artificial intelligence (AI)-supported endoscopic diagnostics often fall short in clinical applications due to human-AI interaction factors. This video-based trial on Barrett's esophagus aimed to investigate how examiner behavior, their levels of confidence, and system usability influence the diagnostic outcomes of AI-assisted endoscopy. Methods The present analysis employed data from a multicenter randomized controlled tandem video trial involving 22 endoscopists with varying degrees of expertise. Participants were tasked with evaluating a set of 96 endoscopic videos of Barrett's esophagus in two distinct rounds, with and without AI assistance. Diagnostic confidence levels were recorded, and decision changes were categorized according to the AI prediction. Additional surveys assessed user experience and system usability ratings. Results AI assistance significantly increased examiner confidence levels (p < 0.001) and accuracy. Withdrawing AI assistance decreased confidence (p < 0.001), but not accuracy. Experts consistently reported higher confidence than non-experts (p < 0.001), regardless of performance. Despite improved confidence, correct AI guidance was disregarded in 16\% of all cases, and 9\% of initially correct diagnoses were changed to incorrect ones. Overreliance on AI, algorithm aversion, and uncertainty in AI predictions were identified as key factors influencing outcomes. The System Usability Scale questionnaire scores indicated good to excellent usability, with non-experts scoring 73.5 and experts 85.6. Conclusions Our findings highlight the pivotal function of examiner behavior in AI-assisted endoscopy. To fully realize the benefits of AI, implementing explainable AI, improving user interfaces, and providing targeted training are essential. Addressing these factors could enhance diagnostic accuracy and confidence in clinical practice.}, language = {en} } @article{SouzaPachecodeSouzaetal., author = {Souza, Luis A. and Pacheco, Andr{\´e} G.C. and de Souza, Alberto F. and Oliveira-Santos, Thiago and Badue, Claudine and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {TransConv: a lightweight architecture based on transformers and convolutional neural networks for adenocarcinoma and Barrett's esophagus identification}, series = {Neural Computing and Applications}, journal = {Neural Computing and Applications}, number = {37}, publisher = {Springer}, doi = {10.1007/s00521-025-11299-y}, pages = {15535 -- 15546}, abstract = {Barrett's esophagus, also known as BE, is commonly associated with repeated exposure to stomach acid. If not treated properly, it may evolve into esophageal adenocarcinoma, aka esophageal cancer. This paper proposes TransConv, a hybrid architecture that benefits from features learned by pre-trained vision transformers (ViTs) and convolutional neural networks (CNNs), followed by a shallow neural network composed of three normalizations, ReLU activations, and fully connected layers, and a SoftMax head to distinguish between BE and esophageal cancer. 
TransConv is designed to be training-lightweight, and for the ViT and CNN backbone models, weights are kept frozen during training, i.e., the primary goal of TransConv is to learn the weights of the fully connected layer from both backbones only, avoiding the burden of updating their weights but still learning their final descriptions for the lightweight convolutional model. We report promising results with low computational training costs in two datasets, one public and another private. From our achievements, TransConv was able to deliver balanced accuracy results around 85\% and 86\% for each evaluated dataset, respectively, in a design that required only 50 epochs of model training, a very reduced number compared to state-of-the-art conducted studies in the same domain.}, language = {en} } @misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Single frame workflow recognition during endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {57}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806324}, pages = {S511}, abstract = {Aims Precise surgical phase recognition and evaluation may improve our understanding of complex endoscopic procedures. Furthermore, quality control measurements and endoscopy training could benefit from objective descriptions of surgical phase distributions. Therefore, we aimed to develop an artificial intelligence algorithm for frame-by-frame operational phase recognition during endoscopic submucosal dissection (ESD). Methods Full length ESD-videos from 31 patients comprising 6.297.782 single images were collected retrospectively. Videos were annotated on a frame-by-frame basis for the operational macro-phases diagnostics, marking, injection, dissection and bleeding. Further subphases were the application of electrical current, visible injection of fluid into the submucosal space and scope manipulation, leading to 11 phases in total. 4.975.699 frames (21 patients) were used for training of a video swin transformer using uniform frame sampling for temporal information. Hyperparameter tuning was performed with 897.325 further frames (6 patients), while 424.758 frames (4 patients) were used for validation. Results The overall F1 scores on the test dataset for the macro-phases and all 11 phases were 0.96 and 0.90, respectively. The recall values for diagnostics, marking, injection, dissection and bleeding were 1.00, 1.00, 0.95, 0.96 and 0.93, respectively. Conclusions The algorithm classified operational phases during ESD with high accuracy. A precise evaluation of phase distribution may allow for the development of objective quality metrics for quality control and training.}, language = {en} } @inproceedings{WeberNunesRauberPalm, author = {Weber Nunes, Danilo and Rauber, David and Palm, Christoph}, title = {Self-supervised 3D Vision Transformer Pre-training for Robust Brain Tumor Classification}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. 
and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_69}, pages = {298 -- 303}, abstract = {Brain tumors pose significant challenges in neurology, making precise classification crucial for prognosis and treatment planning. This work investigates the effectiveness of a self-supervised learning approach-masked autoencoding (MAE)-to pre-train a vision transformer (ViT) model for brain tumor classification. Our method uses non-domain specific data, leveraging the ADNI and OASIS-3 MRI datasets, which primarily focus on degenerative diseases, for pretraining. The model is subsequently fine-tuned and evaluated on the BraTS glioma and meningioma datasets, representing a novel use of these datasets for tumor classification. The pre-trained MAE ViT model achieves an average F1 score of 0.91 in a 5-fold cross-validation setting, outperforming the nnU-Net encoder trained from scratch, particularly under limited data conditions. These findings highlight the potential of self-supervised MAE in enhancing brain tumor classification accuracy, even with restricted labeled data.}, language = {en} } @inproceedings{WeiherervonRiedheimBrebantetal., author = {Weiherer, Maximilian and von Riedheim, Antonia and Br{\´e}bant, Vanessa and Egger, Bernhard and Palm, Christoph}, title = {iRBSM: A Deep Implicit 3D Breast Shape Model}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_11}, pages = {38 -- 43}, abstract = {We present the first deep implicit 3D shape model of the female breast, building upon and improving the recently proposed Regensburg Breast Shape Model (RBSM). Compared to its PCA-based predecessor, our model employs implicit neural representations; hence, it can be trained on raw 3D breast scans and eliminates the need for computationally demanding non-rigid registration, a task that is particularly difficult for feature-less breast shapes. The resulting model, dubbed iRBSM, captures detailed surface geometry including fine structures such as nipples and belly buttons, is highly expressive, and outperforms the RBSM on different surface reconstruction tasks. Finally, leveraging the iRBSM, we present a prototype application to 3D reconstruct breast shapes from just a single image. Model and code publicly available at https://rbsm.re-mic.de/implicit.}, language = {en} } @unpublished{GutbrodRauberWeberNunesetal., author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD: Open Medical Imaging Benchmarks for Out-Of-Distribution Detection}, doi = {10.48550/arXiv.2503.16247}, pages = {18}, abstract = {The growing reliance on Artificial Intelligence (AI) in critical domains such as healthcare demands robust mechanisms to ensure the trustworthiness of these systems, especially when faced with unexpected or anomalous inputs. 
This paper introduces the Open Medical Imaging Benchmarks for Out-Of-Distribution Detection (OpenMIBOOD), a comprehensive framework for evaluating out-of-distribution (OOD) detection methods specifically in medical imaging contexts. OpenMIBOOD includes three benchmarks from diverse medical domains, encompassing 14 datasets divided into covariate-shifted in-distribution, near-OOD, and far-OOD categories. We evaluate 24 post-hoc methods across these benchmarks, providing a standardized reference to advance the development and fair comparison of OOD detection methods. Results reveal that findings from broad-scale OOD benchmarks in natural image domains do not translate to medical applications, underscoring the critical need for such benchmarks in the medical field. By mitigating the risk of exposing AI models to inputs outside their training distribution, OpenMIBOOD aims to support the advancement of reliable and trustworthy AI systems in healthcare. The repository is available at https://github.com/remic-othr/OpenMIBOOD.}, language = {en} } @article{ScheppachMendelMuzalyovaetal., author = {Scheppach, Markus W. and Mendel, Robert and Muzalyova, Anna and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Yip, Hon Chi and Lau, Louis Ho Shing and G{\"o}lder, Stefan Karl and Schmidt, Arthur and Kouladouros, Konstantinos and Abdelhafez, Mohamed and Walter, Benjamin M. and Meinikheim, Michael and Chiu, Philip Wai Yan and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial intelligence improves submucosal vessel detection during third space endoscopy}, series = {Endoscopy}, journal = {Endoscopy}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-2534-1164}, abstract = {Background and study aims: While artificial intelligence (AI) shows high potential in decision support for diagnostic gastrointestinal endoscopy, its role in therapeutic endoscopy remains unclear. Third space endoscopic procedures pose the risk of intraprocedural bleeding. Therefore, we aimed to develop an AI algorithm for intraprocedural blood vessel detection. Patients and Methods: Using a test dataset with 101 standardized video clips containing 200 predefined submucosal blood vessels, 19 endoscopists were evaluated for the vessel detection rate (VDR) and time (VDT) with and without support of an AI algorithm. Test subjects were grouped according to experience in ESD. Results: With AI support, endoscopists' VDR increased from 56.4\% [CI 54.1-58.6] to 72.4\% [CI 70.3-74.4]. Endoscopists' VDT dropped from 6.7 sec [CI 6.2-7.1] to 5.2 sec [CI 4.8-5.7]. False positive (FP) readings appeared in 4.5\% of frames and were marked significantly shorter than true positives (6.0 sec [CI 5.28-6.70] vs. 0.7 sec [CI 0.55-0.87]). Conclusions: AI improved the vessel detection rate and time of endoscopists during third space endoscopy. While these data need to be corroborated by clinical trials, AI may prove to be an invaluable tool for the improvement of endoscopic interventions.}, language = {en} } @article{HartmannWeihererNieberleetal., author = {Hartmann, Robin and Weiherer, Maximilian and Nieberle, Felix and Palm, Christoph and Br{\'e}bant, Vanessa and Prantl, Lukas and Lamby, Philipp and Reichert, Torsten E.
and Taxis, J{\"u}rgen and Ettl, Tobias}, title = {Evaluating smartphone-based 3D imaging techniques for clinical application in oral and maxillofacial surgery: A comparative study with the vectra M5}, series = {Oral and Maxillofacial Surgery}, volume = {29}, journal = {Oral and Maxillofacial Surgery}, publisher = {Springer Nature}, doi = {10.1007/s10006-024-01322-2}, pages = {17}, abstract = {PURPOSE This study aimed to clarify the applicability of smartphone-based three-dimensional (3D) surface imaging for clinical use in oral and maxillofacial surgery, comparing two smartphone-based approaches to the gold standard. METHODS Facial surface models (SMs) were generated for 30 volunteers (15 men, 15 women) using the Vectra M5 (Canfield Scientific, USA), the TrueDepth camera of the iPhone 14 Pro (Apple Inc., USA), and the iPhone 14 Pro with photogrammetry. Smartphone-based SMs were superimposed onto Vectra-based SMs. Linear measurements and volumetric evaluations were performed to evaluate surface-to-surface deviation. To assess inter-observer reliability, all measurements were performed independently by a second observer. Statistical analyses included Bland-Altman analyses, the Wilcoxon signed-rank test for paired samples, and Intraclass correlation coefficients. RESULTS Photogrammetry-based SMs exhibited an overall landmark-to-landmark deviation of M = 0.8 mm (SD =  ± 0.58 mm, n = 450), while TrueDepth-based SMs displayed a deviation of M = 1.1 mm (SD =  ± 0.72 mm, n = 450). The mean volumetric difference for photogrammetry-based SMs was M = 1.8 cc (SD =  ± 2.12 cc, n = 90), and M = 3.1 cc (SD =  ± 2.64 cc, n = 90) for TrueDepth-based SMs. When comparing the two approaches, most landmark-to-landmark measurements demonstrated 95\% Bland-Altman limits of agreement (LoA) of ≤ 2 mm. Volumetric measurements revealed LoA > 2 cc. Photogrammetry-based measurements demonstrated higher inter-observer reliability for overall landmark-to-landmark deviation. CONCLUSION Both approaches for smartphone-based 3D surface imaging exhibit potential in capturing the face. Photogrammetry-based SMs demonstrated superior alignment and volumetric accuracy with Vectra-based SMs than TrueDepth-based SMs.}, language = {en} } @unpublished{WeiherervonRiedheimBrebantetal., author = {Weiherer, Maximilian and von Riedheim, Antonia and Br{\´e}bant, Vanessa and Egger, Bernhard and Palm, Christoph}, title = {iRBSM: A Deep Implicit 3D Breast Shape Model}, doi = {10.48550/arXiv.2412.13244}, pages = {6}, abstract = {We present the first deep implicit 3D shape model of the female breast, building upon and improving the recently proposed Regensburg Breast Shape Model (RBSM). Compared to its PCA-based predecessor, our model employs implicit neural representations; hence, it can be trained on raw 3D breast scans and eliminates the need for computationally demanding non-rigid registration -- a task that is particularly difficult for feature-less breast shapes. The resulting model, dubbed iRBSM, captures detailed surface geometry including fine structures such as nipples and belly buttons, is highly expressive, and outperforms the RBSM on different surface reconstruction tasks. Finally, leveraging the iRBSM, we present a prototype application to 3D reconstruct breast shapes from just a single image. Model and code publicly available at this https URL.}, language = {en} } @inproceedings{SouzaPachecodeAngeloetal., author = {Souza, Luis A. and Pacheco, Andr{\´e} G.C. and de Angelo, Gabriel G. 
and Oliveira-Santos, Thiago and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {LiwTERM: A Lightweight Transformer-Based Model for Dermatological Multimodal Lesion Detection}, series = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, booktitle = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, publisher = {IEEE}, isbn = {979-8-3503-7603-6}, doi = {10.1109/SIBGRAPI62404.2024.10716324}, pages = {1 -- 6}, abstract = {Skin cancer is the most common type of cancer in the world, accounting for approximately 30\% of all diagnosed tumors. Early diagnosis reduces mortality rates and prevents disfiguring effects in different body regions. In recent years, machine learning techniques, particularly deep learning, have shown promising results in this task, presenting studies that have demonstrated that combining a patient's clinical information with images of the lesion is crucial for improving the classification of skin lesions. Despite that, meaningful use of clinical information with multiple images is mandatory, requiring further investigation. Thus, this project aims to contribute to developing multimodal machine learning-based models to cope with the skin lesion classification task employing a lightweight transformer model. As a main hypothesis, models can take multiple images from different sources as input, along with clinical information from the patient's history, leading to a more reliable diagnosis. Our model deals with the not-trivial task of combining images and clinical information (from anamneses) concerning the skin lesions in a lightweight transformer architecture that does not demand high computation resources but still presents competitive classification results.}, language = {en} } @article{SouzaJrPassosSantanaetal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Rauber, David and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Layer-selective deep representation to improve esophageal cancer classification}, series = {Medical \& Biological Engineering \& Computing}, volume = {62}, journal = {Medical \& Biological Engineering \& Computing}, publisher = {Springer Nature}, address = {Heidelberg}, doi = {10.1007/s11517-024-03142-8}, pages = {3355 -- 3372}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis. For this task, the deep learning techniques' black-box nature must somehow be lightened up to clarify its promising results. Hence, we aim to investigate the impact of the ResNet-50 deep convolutional design for Barrett's esophagus and adenocarcinoma classification. For such a task, and aiming at proposing a two-step learning technique, the output of each convolutional layer that composes the ResNet-50 architecture was trained and classified for further definition of layers that would provide more impact in the architecture. We showed that local information and high-dimensional features are essential to improve the classification for our task.
Besides, we observed a significant improvement when the most discriminative layers expressed more impact in the training and classification of ResNet-50 for Barrett's esophagus and adenocarcinoma classification, demonstrating that both human knowledge and computational processing may influence the correct learning of such a problem.}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {641 -- 649}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Nunes, Danilo Weber and Arizi, X. 
and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests the possibility of intraprocedural phase recognition in surgical operations as well as endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD) by AI-algorithms. The intricate measurement of intraprocedural phase distribution may deepen the understanding of the procedure. Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI-algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase. Phases included scope manipulation, marking, injection, application of electrical current and bleeding. Allocation of each frame was only possible to one category. This training dataset was used to train a Video Swin transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29801 single frames. Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\% and 87.55\% for scope manipulation, marking, injection, electric application and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\% and 84.76\% were measured for the same parameters. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} } @misc{ScheppachMendelRauberetal., author = {Scheppach, Markus W. and Mendel, Robert and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) improves endoscopists' vessel detection during endoscopic submucosal dissection (ESD)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1782891}, pages = {S93}, abstract = {Aims While AI has been successfully implemented in detecting and characterizing colonic polyps, its role in therapeutic endoscopy remains to be elucidated. Especially third space endoscopy procedures like ESD and peroral endoscopic myotomy (POEM) pose a technical challenge and the risk of operator-dependent complications like intraprocedural bleeding and perforation. Therefore, we aimed at developing an AI-algorithm for intraprocedural real time vessel detection during ESD and POEM. Methods A training dataset consisting of 5470 annotated still images from 59 full-length videos (47 ESD, 12 POEM) and 179681 unlabeled images was used to train a DeepLabV3+ neural network with the ECMT semi-supervised learning method.
Evaluation for vessel detection rate (VDR) and time (VDT) of 19 endoscopists with and without AI-support was performed using a testing dataset of 101 standardized video clips with 200 predefined blood vessels. Endoscopists were stratified into trainees and experts in third space endoscopy. Results The AI algorithm had a mean VDR of 93.5\% and a median VDT of 0.32 seconds. AI support was associated with a statistically significant increase in VDR from 54.9\% to 73.0\% and from 59.0\% to 74.1\% for trainees and experts, respectively. VDT significantly decreased from 7.21 sec to 5.09 sec for trainees and from 6.10 sec to 5.38 sec for experts in the AI-support group. False positive (FP) readings occurred in 4.5\% of frames. FP structures were detected significantly shorter than true positives (0.71 sec vs. 5.99 sec). Conclusions AI improved VDR and VDT of trainees and experts in third space endoscopy and may reduce performance variability during training. Further research is needed to evaluate the clinical impact of this new technology.}, language = {en} } @misc{EbigboRauberAyoubetal., author = {Ebigbo, Alanna and Rauber, David and Ayoub, Mousa and Birzle, Lisa and Matsumura, Tomoaki and Probst, Andreas and Steinbr{\"u}ck, Ingo and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Scheppach, Markus W. and Palm, Christoph and Messmann, Helmut}, title = {Early Esophageal Cancer and the Generalizability of Artificial Intelligence}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783775}, pages = {S428}, abstract = {Aims Artificial Intelligence (AI) systems in gastrointestinal endoscopy are narrow because they are trained to solve only one specific task. Unlike Narrow-AI, general AI systems may be able to solve multiple and unrelated tasks. We aimed to understand whether an AI system trained to detect, characterize, and segment early Barrett's neoplasia (Barrett's AI) is only capable of detecting this pathology or can also detect and segment other diseases like early squamous cell cancer (SCC). Methods 120 white light (WL) and narrow-band endoscopic images (NBI) from 60 patients (1 WL and 1 NBI image per patient) were extracted from the endoscopic database of the University Hospital Augsburg. Images were annotated by three expert endoscopists with extensive experience in the diagnosis and endoscopic resection of early esophageal neoplasias. An AI system based on the DeepLabV3+ architecture dedicated to early Barrett's neoplasia was tested on these images. The AI system was neither trained with SCC images nor had it seen the test images prior to evaluation. The overlap between the three expert annotations („expert-agreement") was the ground truth for evaluating AI performance. Results Barrett's AI detected early SCC with a mean intersection over reference (IoR) of 92\% when at least 1 pixel of the AI prediction overlapped with the expert-agreement. When the threshold was increased to 5\%, 10\%, and 20\% overlap with the expert-agreement, the IoR was 88\%, 85\% and 82\%, respectively. The mean Intersection Over Union (IoU) - a metric for segmentation quality between the AI prediction and the expert-agreement - was 0.45. The mean expert IoU as a measure of agreement between the three experts was 0.60. Conclusions In the context of this pilot study, the predictions of SCC by a Barrett's dedicated AI showed some overlap to the expert-agreement.
Therefore, features learned from Barrett's cancer-related training might also be helpful for SCC prediction. Our results allow for different possible explanations. On the one hand, some Barrett's cancer features may generalize toward the related task of assessing early SCC. On the other hand, the Barrett's AI may be less a Barrett's-specific detector than a general predictor of pathological tissue. However, we expect to enhance the detection quality significantly by extending the training to SCC-specific data. The insights of this study open the way towards a transfer learning approach for more efficient training of AI to solve tasks in other domains.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and R{\"o}mmele, Christoph and Nagl, Sandra and Schnoy, Elisabeth and Messmann, Helmut and Ebigbo, Alanna and Palm, Christoph}, title = {Artificial intelligence as a tool in the detection of the papillary ostium during ERCP}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783138}, pages = {S198}, abstract = {Aims Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard in the diagnosis as well as treatment of diseases of the pancreatobiliary tract. However, it is technically complex and has a relatively high complication rate. In particular, cannulation of the papillary ostium remains challenging. The aim of this study is to examine whether a deep-learning algorithm can be used to detect the major duodenal papilla and in particular the papillary ostium reliably and could therefore be a valuable tool for inexperienced endoscopists, particularly in training situations. Methods We analyzed a total of 654 retrospectively collected images of 85 patients. Both the major duodenal papilla and the ostium were then segmented. Afterwards, a neural network was trained using a deep-learning algorithm. A 5-fold cross-validation was performed. Subsequently, we ran the algorithm on 5 prospectively collected videos of ERCPs. Results 5-fold cross-validation on the 654 labeled images resulted in an F1 value of 0.8007, a sensitivity of 0.8409 and a specificity of 0.9757 for the class papilla, and an F1 value of 0.5724, a sensitivity of 0.5456 and a specificity of 0.9966 for the class ostium. Regardless of the class, the average F1 value (class papilla and class ostium) was 0.6866, the sensitivity 0.6933 and the specificity 0.9861. In 100\% of cases, the AI-detected localization of the papillary ostium in the prospectively collected videos corresponded to the localization of the cannulation performed by the endoscopist. Conclusions In the present study, the neural network was able to identify the major duodenal papilla with a high sensitivity and high specificity. In detecting the papillary ostium, the sensitivity was notably lower. However, when used on videos, the AI was able to identify the location of the subsequent cannulation with 100\% accuracy. In the future, the neural network will be trained with more data. Thus, a suitable tool for ERCP could be established, especially for training situations.}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W.
and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fernandez-Esparrach, G. and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not achieve the level of the stand-alone performance of AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed ninety-six standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized in two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, irrespective of comparable outcomes. These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @unpublished{MendelRueckertWilhelmetal., author = {Mendel, Robert and R{\"u}ckert, Tobias and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Motion-Corrected Moving Average: Including Post-Hoc Temporal Information for Improved Video Segmentation}, doi = {10.48550/arXiv.2403.03120}, pages = {9}, abstract = {Real-time computational speed and a high degree of precision are requirements for computer-assisted interventions. 
Applying a segmentation network to a medical video processing task can introduce significant inter-frame prediction noise. Existing approaches can reduce inconsistencies by including temporal information but often impose requirements on the architecture or dataset. This paper proposes a method to include temporal information in any segmentation model and, thus, a technique to improve video segmentation performance without alterations during training or additional labeling. With Motion-Corrected Moving Average, we refine the exponential moving average between the current and previous predictions. Using optical flow to estimate the movement between consecutive frames, we can shift the prior term in the moving-average calculation to align with the geometry of the current frame. The optical flow calculation does not require the output of the model and can therefore be performed in parallel, leading to no significant runtime penalty for our approach. We evaluate our approach on two publicly available segmentation datasets and two proprietary endoscopic datasets and show improvements over a baseline approach.}, subject = {Deep Learning}, language = {en} } @article{SouzaJrPachecoPassosetal., author = {Souza Jr., Luis Antonio de and Pacheco, Andr{\´e} G.C. and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {DeepCraftFuse: visual and deeply-learnable features work better together for esophageal cancer detection in patients with Barrett's esophagus}, series = {Neural Computing and Applications}, volume = {36}, journal = {Neural Computing and Applications}, publisher = {Springer}, address = {London}, doi = {10.1007/s00521-024-09615-z}, pages = {10445 -- 10459}, abstract = {Limitations in computer-assisted diagnosis include lack of labeled data and inability to model the relation between what experts see and what computers learn. Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis. While deep learning techniques are broad so that unseen information might help learn patterns of interest, human insights to describe objects of interest help in decision-making. This paper proposes a novel approach, DeepCraftFuse, to address the challenge of combining information provided by deep networks with visual-based features to significantly enhance the correct identification of cancerous tissues in patients affected with Barrett's esophagus (BE). 
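As a reading aid for the Motion-Corrected Moving Average preprint summarized above (Mendel et al., arXiv:2403.03120), not part of the bibliographic record: the core idea, an exponential moving average over per-frame predictions whose history term is warped by optical flow into the geometry of the current frame, could look roughly like the sketch below. The Farneback flow estimator, the bilinear remapping and the smoothing factor are generic stand-ins and not taken from the authors' code.

import cv2
import numpy as np

# Rough sketch of a flow-corrected moving average over per-frame class probabilities.
# 'predict' stands for any frozen segmentation model returning an (H, W, C) float map;
# OpenCV's remap handles up to four channels per call.

def warp_to_current(prev_probs, prev_gray, cur_gray):
    # dense flow from the current frame back to the previous one (backward warping)
    flow = cv2.calcOpticalFlowFarneback(cur_gray, prev_gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    h, w = cur_gray.shape
    grid_x, grid_y = np.meshgrid(np.arange(w), np.arange(h))
    map_x = (grid_x + flow[..., 0]).astype(np.float32)
    map_y = (grid_y + flow[..., 1]).astype(np.float32)
    # sample the previous average at the positions the current pixels came from
    return cv2.remap(prev_probs, map_x, map_y, interpolation=cv2.INTER_LINEAR)

def motion_corrected_average(frames, predict, alpha=0.8):
    avg, prev_gray = None, None
    for frame in frames:                              # frames: iterable of BGR uint8 images
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        probs = np.asarray(predict(frame), dtype=np.float32)
        if avg is None:
            avg = probs
        else:
            shifted = warp_to_current(avg, prev_gray, gray)
            avg = alpha * probs + (1.0 - alpha) * shifted
        prev_gray = gray
        yield avg.argmax(axis=-1)                     # temporally smoothed label map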
We demonstrate that DeepCraftFuse outperforms state-of-the-art techniques on private and public datasets, reaching results of around 95\% when distinguishing BE patients who are either positive or negative for esophageal cancer.}, subject = {Deep Learning}, language = {en} } @inproceedings{RueckertRiederFeussneretal., author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Feussner, Hubertus and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Smoke Classification in Laparoscopic Cholecystectomy Videos Incorporating Spatio-temporal Information}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_78}, pages = {298 -- 303}, abstract = {Heavy smoke development represents an important challenge for operating physicians during laparoscopic procedures and can potentially affect the success of an intervention due to reduced visibility and orientation. Reliable and accurate recognition of smoke is therefore a prerequisite for the use of downstream systems such as automated smoke evacuation systems. Current approaches distinguish between non-smoked and smoked frames but often ignore the temporal context inherent in endoscopic video data. In this work, we therefore present a method that determines the pixel-wise displacement from randomly sampled images to the preceding frames using optical flow and provides the transformed magnitude of this displacement as an additional input to the network. Further, we incorporate the temporal context at evaluation time by applying an exponential moving average on the estimated class probabilities of the model output to obtain more stable and robust results over time. We evaluate our method on two convolutional-based and one state-of-the-art transformer architecture and show improvements in the classification results over a baseline approach, regardless of the network used.}, language = {en} } @inproceedings{GutbrodGeislerRauberetal., author = {Gutbrod, Max and Geisler, Benedikt and Rauber, David and Palm, Christoph}, title = {Data Augmentation for Images of Chronic Foot Wounds}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_71}, pages = {261 -- 266}, abstract = {Training data for Neural Networks is often scarce in the medical domain, which frequently results in models that struggle to generalize and consequently show poor performance on unseen datasets. Generally, adding augmentation methods to the training pipeline considerably enhances a model's performance.
Using the dataset of the Foot Ulcer Segmentation Challenge, we analyze two additional augmentation methods in the domain of chronic foot wounds - local warping of wound edges along with projection and blurring of shapes inside wounds. Our experiments show that improvements in the Dice similarity coefficient and Normalized Surface Distance metrics depend on a sensible selection of those augmentation methods.}, language = {en} } @article{HartmannNieberlePalmetal., author = {Hartmann, Robin and Nieberle, Felix and Palm, Christoph and Br{\´e}bant, Vanessa and Prantl, Lukas and Kuehle, Reinald and Reichert, Torsten E. and Taxis, Juergen and Ettl, Tobias}, title = {Utility of Smartphone-based Three-dimensional Surface Imaging for Digital Facial Anthropometry}, series = {JPRAS Open}, volume = {39}, journal = {JPRAS Open}, publisher = {Elsevier}, doi = {10.1016/j.jpra.2024.01.014}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70348}, pages = {330 -- 343}, abstract = {Background The utilization of three-dimensional (3D) surface imaging for facial anthropometry is a significant asset for patients undergoing maxillofacial surgery. Notably, there have been recent advancements in smartphone technology that enable 3D surface imaging. In this study, anthropometric assessments of the face were performed using a smartphone and a sophisticated 3D surface imaging system. Methods 30 healthy volunteers (15 females and 15 males) were included in the study. An iPhone 14 Pro (Apple Inc., USA) using the application 3D Scanner App (Laan Consulting Corp., USA) and the Vectra M5 (Canfield Scientific, USA) were employed to create 3D surface models. For each participant, 19 anthropometric measurements were conducted on the 3D surface models. Subsequently, the anthropometric measurements generated by the two approaches were compared. The statistical techniques employed included the paired t-test, paired Wilcoxon signed-rank test, Bland-Altman analysis, and calculation of the intraclass correlation coefficient (ICC). Results All measurements showed excellent agreement between smartphone-based and Vectra M5-based measurements (ICC between 0.85 and 0.97). Statistical analysis revealed no statistically significant differences in the central tendencies for 17 of the 19 linear measurements. Despite the excellent agreement found, Bland-Altman analysis revealed that the 95\% limits of agreement between the two methods exceeded ±3 mm for the majority of measurements. Conclusion Digital facial anthropometry using smartphones can serve as a valuable supplementary tool for surgeons, enhancing their communication with patients. However, the proposed data suggest that digital facial anthropometry using smartphones may not yet be suitable for certain diagnostic purposes that require high accuracy.}, language = {en} } @misc{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Corrigendum to "Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art" [Comput. Biol. Med. 
169 (2024) 107929]}, series = {Computers in Biology and Medicine}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2024.108027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70337}, pages = {1}, abstract = {The authors regret that the SAR-RARP50 dataset is missing from the description of publicly available datasets presented in Chapter 4.}, language = {en} } @article{HammerNunesHammeretal., author = {Hammer, Simone and Nunes, Danilo Weber and Hammer, Michael and Zeman, Florian and Akers, Michael and G{\"o}tz, Andrea and Balla, Annika and Doppler, Michael Christian and Fellner, Claudia and Platz Batista da Silva, Natascha and Thurn, Sylvia and Verloh, Niklas and Stroszczynski, Christian and Wohlgemuth, Walter Alexander and Palm, Christoph and Uller, Wibke}, title = {Deep learning-based differentiation of peripheral high-flow and low-flow vascular malformations in T2-weighted short tau inversion recovery MRI}, series = {Clinical Hemorheology and Microcirculation}, journal = {Clinical Hemorheology and Microcirculation}, edition = {Pre-press}, publisher = {IOS Press}, doi = {10.3233/CH-232071}, pages = {1 -- 15}, abstract = {BACKGROUND Differentiation of high-flow from low-flow vascular malformations (VMs) is crucial for therapeutic management of this orphan disease. OBJECTIVE A convolutional neural network (CNN) was evaluated for differentiation of peripheral vascular malformations (VMs) on T2-weighted short tau inversion recovery (STIR) MRI. METHODS 527 MRIs (386 low-flow and 141 high-flow VMs) were randomly divided into training, validation and test sets for this single-center study. 1) The CNN's diagnostic performance was compared with that of two expert and four junior radiologists. 2) The influence of the CNN's prediction on the radiologists' performance and diagnostic certainty was evaluated. 3) Junior radiologists' performance after self-training was compared with that of the CNN. RESULTS Compared with the expert radiologists, the CNN achieved similar accuracy (92\% vs. 97\%, p = 0.11), sensitivity (80\% vs. 93\%, p = 0.16) and specificity (97\% vs. 100\%, p = 0.50). In comparison to the junior radiologists, the CNN had a higher specificity and accuracy (97\% vs. 80\%, p < 0.001; 92\% vs. 77\%, p < 0.001). CNN assistance had no significant influence on their diagnostic performance and certainty. After self-training, the junior radiologists' specificity and accuracy improved and were comparable to those of the CNN. CONCLUSIONS Diagnostic performance of the CNN for differentiating high-flow from low-flow VMs was comparable to that of expert radiologists.
The CNN did not significantly improve the simulated daily practice of junior radiologists; self-training was more effective.}, language = {en} } @article{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, series = {Computers in Biology and Medicine}, volume = {169}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.compbiomed.2024.107929}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-69830}, pages = {24}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images and videos. In particular, the determination of the position and type of instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify and characterize datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images and videos. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame semantic and instance segmentation approaches, as well as those that incorporate temporal information. The publications analyzed were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in a total of 741 articles published between 01/2015 and 07/2023, of which 123 were included using systematic selection criteria.
A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments.}, subject = {Deep Learning}, language = {en} } @misc{RueckertRiederRauberetal., author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Rauber, David and Xiao, Michel and Humolli, Eg and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {Augmenting instrument segmentation in video sequences of minimally invasive surgery by synthetic smoky frames}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {18}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {Suppl 1}, publisher = {Springer Nature}, doi = {10.1007/s11548-023-02878-2}, pages = {S54 -- S56}, language = {en} } @inproceedings{MendelRauberPalm, author = {Mendel, Robert and Rauber, David and Palm, Christoph}, title = {Exploring the Effects of Contrastive Learning on Homogeneous Medical Image Data}, series = {Bildverarbeitung f{\"u}r die Medizin 2023: Proceedings, German Workshop on Medical Image Computing, July 2- 4, 2023, Braunschweig}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2023: Proceedings, German Workshop on Medical Image Computing, July 2- 4, 2023, Braunschweig}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-41657-7}, pages = {128 -- 13}, abstract = {We investigate contrastive learning in a multi-task learning setting classifying and segmenting early Barrett's cancer. How can contrastive learning be applied in a domain with few classes and low inter-class and inter-sample variance, potentially enabling image retrieval or image attribution? We introduce a data sampling strategy that mines per-lesion data for positive samples and keeps a queue of the recent projections as negative samples. We propose a masking strategy for the NT-Xent loss that keeps the negative set pure and removes samples from the same lesion. We show cohesion and uniqueness improvements of the proposed method in feature space. The introduction of the auxiliary objective does not affect the performance but adds the ability to indicate similarity between lesions. Therefore, the approach could enable downstream auto-documentation tasks on homogeneous medical image data.}, language = {en} } @misc{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD), which can easily be missed if pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). 
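The villous atrophy study just described trains a ResNet18 classifier on labeled endoscopic still images. For orientation only, a generic fine-tuning setup of an ImageNet-pretrained ResNet-18 for such a two-class image problem is sketched below; the folder layout, hyperparameters and the torchvision weights API (version 0.13 or newer) are assumptions for illustration, not details from the cited study.

import torch
import torch.nn as nn
from torchvision import datasets, models, transforms

# Generic two-class fine-tuning of an ImageNet-pretrained ResNet-18
# (e.g. villous atrophy vs. normal duodenal mucosa); paths and settings are placeholders.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics
                         std=[0.229, 0.224, 0.225]),
])

# expects a folder structure like data/train/<class_name>/<image>.png
train_set = datasets.ImageFolder("data/train", transform=transform)
loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)

model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
model.fc = nn.Linear(model.fc.in_features, 2)           # replace the 1000-class head

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.CrossEntropyLoss()

model.train()
for epoch in range(10):
    for images, labels in loader:
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()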
A separate dataset was used for external validation, as well as determination of detection performance of experts, trainees and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\% and 84\% for sensitivity, specificity and accuracy. External validation showed 90\%, 76\% and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support and experts in endoscopy. AI support in trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit. Stable performance of the algorithm in "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Schnoy, Elisabeth and Nagl, Sandra and R{\"o}mmele, Christoph and Prinz, Friederike and Schlottmann, Jakob and Golger, Daniela and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {AI-assisted detection and characterization of early Barrett's neoplasia: Results of an Interim analysis}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765437}, pages = {S169}, abstract = {Aims Evaluation of the add-on effect that an artificial intelligence (AI)-based clinical decision support system has on the performance of endoscopists with different degrees of expertise in the field of Barrett's esophagus (BE) and Barrett's esophagus-related neoplasia (BERN). Methods The support system is based on a multi-task deep learning model trained to solve a segmentation and several classification tasks. The training approach represents an extension of the ECMT semi-supervised learning algorithm. The complete system evaluates a decision tree between estimated motion, classification, segmentation, and temporal constraints to decide when and how the prediction is highlighted to the observer. In our current study, ninety-six video cases of patients with BE and BERN were prospectively collected and assessed by Barrett's specialists and non-specialists. All video cases were evaluated twice - with and without AI assistance. The order of appearance, either with or without AI support, was assigned randomly. Participants were asked to detect and characterize regions of dysplasia or early neoplasia within the video sequences. Results Standalone sensitivity, specificity, and accuracy of the AI system were 92.16\%, 68.89\%, and 81.25\%, respectively. Mean sensitivity, specificity, and accuracy of expert endoscopists without AI support were 83.33\%, 58.20\%, and 71.48\%, respectively. Gastroenterologists without Barrett's expertise but with AI support had a comparable performance with a mean sensitivity, specificity, and accuracy of 76.63\%, 65.35\%, and 71.36\%, respectively. Conclusions Non-Barrett's experts with AI support had a performance similar to that of experts in a video-based study.}, language = {en} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W.
and Mendel, Robert and Probst, Andreas and Rauber, David and R{\"u}ckert, Tobias and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Real-time detection and delineation of tissue during third-space endoscopy using artificial intelligence (AI)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765128}, pages = {S53 -- S54}, abstract = {Aims AI has proven great potential in assisting endoscopists in diagnostics, however its role in therapeutic endoscopy remains unclear. Endoscopic submucosal dissection (ESD) is a technically demanding intervention with a slow learning curve and relevant risks like bleeding and perforation. Therefore, we aimed to develop an algorithm for the real-time detection and delineation of relevant structures during third-space endoscopy. Methods 5470 still images from 59 full length videos (47 ESD, 12 POEM) were annotated. 179681 additional unlabeled images were added to the training dataset. Consequently, a DeepLabv3+ neural network architecture was trained with the ECMT semi-supervised algorithm (under review elsewhere). Evaluation of vessel detection was performed on a dataset of 101 standardized video clips from 15 separate third-space endoscopy videos with 200 predefined blood vessels. Results Internal validation yielded an overall mean Dice score of 85\% (68\% for blood vessels, 86\% for submucosal layer, 88\% for muscle layer). On the video test data, the overall vessel detection rate (VDR) was 94\% (96\% for ESD, 74\% for POEM). The median overall vessel detection time (VDT) was 0.32 sec (0.3 sec for ESD, 0.62 sec for POEM). Conclusions Evaluation of the developed algorithm on a video test dataset showed high VDR and quick VDT, especially for ESD. Further research will focus on a possible clinical benefit of the AI application for VDR and VDT during third-space endoscopy.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @unpublished{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, doi = {10.48550/arXiv.2304.13014}, pages = {25}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images. Especially the determination of the position and type of the instruments is of great interest here. Current work involves both spatial and temporal information with the idea, that the prediction of movement of surgical tools over time may improve the quality of final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify datasets used for method development and evaluation, as well as quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images. The paper focuses on methods that work purely visually without attached markers of any kind on the instruments, taking into account both single-frame segmentation approaches as well as those involving temporal information. 
A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing available potential for future developments. The publications considered were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in 408 articles published between 2015 and 2022, of which 109 were included using systematic selection criteria.}, language = {en} } @article{RueweEigenbergerKleinetal., author = {Ruewe, Marc and Eigenberger, Andreas and Klein, Silvan and von Riedheim, Antonia and Gugg, Christine and Prantl, Lukas and Palm, Christoph and Weiherer, Maximilian and Zeman, Florian and Anker, Alexandra}, title = {Precise Monitoring of Returning Sensation in Digital Nerve Lesions by 3-D Imaging: A Proof-of-Concept Study}, series = {Plastic and Reconstructive Surgery}, volume = {152}, journal = {Plastic and Reconstructive Surgery}, number = {4}, publisher = {Lippincott Williams \& Wilkins}, address = {Philadelphia, Pa.}, organization = {American Society of Plastic Surgeons}, issn = {1529-4242}, doi = {10.1097/PRS.0000000000010456}, pages = {670e -- 674e}, abstract = {Digital nerve lesions result in a loss of tactile sensation reflected by an anesthetic area (AA) at the radial or ulnar aspect of the respective digit. Yet, available tools to monitor the recovery of tactile sense have been criticized for their lack of validity. However, the precise quantification of AA dynamics by three-dimensional (3-D) imaging could serve as an accurate surrogate to monitor recovery following digital nerve repair. For validation, AAs were marked on digits of healthy volunteers to simulate the AA of an impaired cutaneous innervation. Three-dimensional models were composed from raw images that had been acquired with a 3-D camera (Vectra H2) to precisely quantify relative AA for each digit (3-D models, n = 80). Operator properties varied regarding individual experience in 3-D imaging and image processing. Additionally, the concept was applied in a clinical case study. Images taken by experienced photographers were rated as having better quality (p < 0.001) and needed less processing time (p = 0.020). Quantification of the relative AA was not significantly altered by the experience level of either the photographer (p = 0.425) or the image assembler (p = 0.749). The proposed concept allows precise and reliable surface quantification of digits and can be performed consistently without relevant distortion by lack of examiner experience. Routine 3-D imaging of the AA has the great potential to provide visual evidence of various returning states of sensation and to convert sensory nerve recovery into a metric variable with high responsiveness to temporal progress.}, language = {en} } @incollection{Palm, author = {Palm, Christoph}, title = {History, Core Concepts, and Role of AI in Clinical Medicine}, series = {AI in Clinical Medicine: A Practical Guide for Healthcare Professionals}, booktitle = {AI in Clinical Medicine: A Practical Guide for Healthcare Professionals}, editor = {Byrne, Michael F. and Parsa, Nasim and Greenhill, Alexandra T. and Chahal, Daljeet and Ahmad, Omer and Bagci, Ulas}, edition = {1st ed.}, publisher = {Wiley}, isbn = {978-1-119-79064-8}, doi = {10.1002/9781119790686.ch5}, pages = {49 -- 55}, abstract = {The field of AI is characterized by robust promises, astonishing successes, and remarkable breakthroughs.
AI will play a major role in all domains of clinical medicine, but the role of AI in relation to the physician is not yet completely determined. The term artificial intelligence or AI is broad, and several different terms are used in this context that must be organized and demystified. This chapter will review the key concepts and methods of AI, and will introduce some of the different roles for AI in relation to the physician.}, language = {en} } @article{MendelRauberSouzaJretal., author = {Mendel, Robert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Error-Correcting Mean-Teacher: Corrections instead of consistency-targets applied to semi-supervised medical image segmentation}, series = {Computers in Biology and Medicine}, volume = {154}, journal = {Computers in Biology and Medicine}, number = {March}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2023.106585}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-57790}, pages = {13}, abstract = {Semantic segmentation is an essential task in medical imaging research. Many powerful deep-learning-based approaches can be employed for this problem, but they are dependent on the availability of an expansive labeled dataset. In this work, we augment such supervised segmentation models to be suitable for learning from unlabeled data. Our semi-supervised approach, termed Error-Correcting Mean-Teacher, uses an exponential moving average model like the original Mean Teacher but introduces our new paradigm of error correction. The original segmentation network is augmented to handle this secondary correction task. Both tasks build upon the core feature extraction layers of the model. For the correction task, features detected in the input image are fused with features detected in the predicted segmentation and further processed with task-specific decoder layers. The combination of image and segmentation features allows the model to correct present mistakes in the given input pair. The correction task is trained jointly on the labeled data. On unlabeled data, the exponential moving average of the original network corrects the student's prediction. The combined outputs of the students' prediction with the teachers' correction form the basis for the semi-supervised update. We evaluate our method with the 2017 and 2018 Robotic Scene Segmentation data, the ISIC 2017 and the BraTS 2020 Challenges, a proprietary Endoscopic Submucosal Dissection dataset, Cityscapes, and Pascal VOC 2012. Additionally, we analyze the impact of the individual components and examine the behavior when the amount of labeled data varies, with experiments performed on two distinct segmentation architectures. Our method shows improvements in terms of the mean Intersection over Union over the supervised baseline and competing methods. Code is available at https://github.com/CloneRob/ECMT.}, language = {en} } @article{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. 
and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Stallmach, Andreas and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection of duodenal villous atrophy on endoscopic images using a deep learning algorithm}, series = {Gastrointestinal Endoscopy}, journal = {Gastrointestinal Endoscopy}, publisher = {Elsevier}, doi = {10.1016/j.gie.2023.01.006}, abstract = {Background and aims Celiac disease with its endoscopic manifestation of villous atrophy is underdiagnosed worldwide. The application of artificial intelligence (AI) for the macroscopic detection of villous atrophy at routine esophagogastroduodenoscopy may improve diagnostic performance. Methods A dataset of 858 endoscopic images of 182 patients with villous atrophy and 846 images from 323 patients with normal duodenal mucosa was collected and used to train a ResNet 18 deep learning model to detect villous atrophy. An external data set was used to test the algorithm, in addition to six fellows and four board certified gastroenterologists. Fellows could consult the AI algorithm's result during the test. From their consultation distribution, a stratification of test images into "easy" and "difficult" was performed and used for classified performance measurement. Results External validation of the AI algorithm yielded values of 90 \%, 76 \%, and 84 \% for sensitivity, specificity, and accuracy, respectively. Fellows scored values of 63 \%, 72 \% and 67 \%, while the corresponding values in experts were 72 \%, 69 \% and 71 \%, respectively. AI consultation significantly improved all trainee performance statistics. While fellows and experts showed significantly lower performance for "difficult" images, the performance of the AI algorithm was stable. Conclusion In this study, an AI algorithm outperformed endoscopy fellows and experts in the detection of villous atrophy on endoscopic still images. AI decision support significantly improved the performance of non-expert endoscopists. The stable performance on "difficult" images suggests a further positive add-on effect in challenging cases.}, language = {en} } @article{KnoedlerBaecherKaukeNavarroetal., author = {Kn{\"o}dler, Leonard and Baecher, Helena and Kauke-Navarro, Martin and Prantl, Lukas and Machens, Hans-G{\"u}nther and Scheuermann, Philipp and Palm, Christoph and Baumann, Raphael and Kehrer, Andreas and Panayi, Adriana C. and Knoedler, Samuel}, title = {Towards a Reliable and Rapid Automated Grading System in Facial Palsy Patients: Facial Palsy Surgery Meets Computer Science}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {17}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/jcm11174998}, abstract = {Background: Reliable, time- and cost-effective, and clinician-friendly diagnostic tools are cornerstones in facial palsy (FP) patient management. Different automated FP grading systems have been developed but revealed persisting downsides such as insufficient accuracy and cost-intensive hardware. We aimed to overcome these barriers and programmed an automated grading system for FP patients utilizing the House and Brackmann scale (HBS). 
Methods: Image datasets of 86 patients seen at the Department of Plastic, Hand, and Reconstructive Surgery at the University Hospital Regensburg, Germany, between June 2017 and May 2021, were used to train the neural network and evaluate its accuracy. Nine facial poses per patient were analyzed by the algorithm. Results: The algorithm showed an accuracy of 100\%. Oversampling did not result in altered outcomes, while the direct form displayed superior accuracy levels when compared to the modular classification form (n = 86; 100\% vs. 99\%). The Early Fusion technique was linked to improved accuracy outcomes in comparison to the Late Fusion and sequential method (n = 86; 100\% vs. 96\% vs. 97\%). Conclusions: Our automated FP grading system combines high-level accuracy with cost- and time-effectiveness. Our algorithm may accelerate the grading process in FP patients and facilitate the FP surgeon's workflow.}, language = {en} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) - assisted vessel and tissue recognition during third space endoscopy (Smart ESD)}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1755110}, abstract = {Clinical setting Third space procedures such as endoscopic submucosal dissection (ESD) and peroral endoscopic myotomy (POEM) are complex minimally invasive techniques with an elevated risk for operator-dependent adverse events such as bleeding and perforation. This risk arises from accidental dissection into the muscle layer or through submucosal blood vessels as the submucosal cutting plane within the expanding resection site is not always apparent. Deep learning algorithms have shown considerable potential for the detection and characterization of gastrointestinal lesions. So-called AI - clinical decision support solutions (AI-CDSS) are commercially available for polyp detection during colonoscopy. Until now, these computer programs have concentrated on diagnostics whereas an AI-CDSS for interventional endoscopy has not yet been introduced. We aimed to develop an AI-CDSS („Smart ESD") for real-time intra-procedural detection and delineation of blood vessels, tissue structures and endoscopic instruments during third-space endoscopic procedures. Characteristics of Smart ESD An AI-CDSS was invented that delineates blood vessels, tissue structures and endoscopic instruments during third-space endoscopy in real-time. The output can be displayed by an overlay over the endoscopic image with different modes of visualization, such as a color-coded semitransparent area overlay, or border tracing (demonstration video). Hereby the optimal layer for dissection can be visualized, which is close above or directly at the muscle layer, depending on the applied technique (ESD or POEM). Furthermore, relevant blood vessels (thickness> 1mm) are delineated. Spatial proximity between the electrosurgical knife and a blood vessel triggers a warning signal. By this guidance system, inadvertent dissection through blood vessels could be averted. Technical specifications A DeepLabv3+ neural network architecture with KSAC and a 101-layer ResNeSt backbone was used for the development of Smart ESD. 
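The Smart ESD system described above uses a DeepLabv3+ network with KSAC and a 101-layer ResNeSt backbone; that exact variant is not available off the shelf. Purely as a stand-in to make the setup concrete, and not the authors' implementation, the closely related torchvision DeepLabV3/ResNet-101 model can be instantiated for the listed annotation classes as follows (torchvision 0.13 or newer assumed; class list and input size are illustrative).

import torch
from torchvision.models.segmentation import deeplabv3_resnet101

# Stand-in for the segmentation setup described above; the cited work uses a
# DeepLabv3+/KSAC/ResNeSt-101 variant, which torchvision does not ship.
CLASSES = ["background", "blood_vessel", "submucosal_layer",
           "muscle_layer", "electrosurgical_knife", "instrument_shaft"]

model = deeplabv3_resnet101(weights=None, weights_backbone=None,
                            num_classes=len(CLASSES))
model.eval()

frame = torch.rand(1, 3, 512, 512)           # one dummy RGB frame
with torch.no_grad():
    logits = model(frame)["out"]              # (1, len(CLASSES), 512, 512)
mask = logits.argmax(dim=1)                   # per-pixel class indices
print(mask.shape)                             # torch.Size([1, 512, 512])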
It was trained and validated with 2565 annotated still images from 27 full length third-space endoscopic videos. The annotation classes were blood vessel, submucosal layer, muscle layer, electrosurgical knife and endoscopic instrument shaft. A test on a separate data set yielded an intersection over union (IoU) of 68\%, a Dice Score of 80\% and a pixel accuracy of 87\%, demonstrating a high overlap between expert and AI segmentation. Further experiments on standardized video clips showed a mean vessel detection rate (VDR) of 85\% with values of 92\%, 70\% and 95\% for POEM, rectal ESD and esophageal ESD respectively. False positive measurements occurred 0.75 times per minute. 7 out of 9 vessels which caused intraprocedural bleeding were caught by the algorithm, as well as both vessels which required hemostasis via hemostatic forceps. Future perspectives Smart ESD performed well for vessel and tissue detection and delineation on still images, as well as on video clips. During a live demonstration in the endoscopy suite, clinical applicability of the innovation was examined. The lag time for processing of the live endoscopic image was too short to be visually detectable for the interventionist. Even though the algorithm could not be applied during actual dissection by the interventionist, Smart ESD appeared readily deployable during visual assessment by ESD experts. Therefore, we plan to conduct a clinical trial in order to obtain CE-certification of the algorithm. This new technology may improve procedural safety and speed, as well as training of modern minimally invasive endoscopic resection techniques.}, subject = {Bildgebendes Verfahren}, language = {en} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy, for example, bleeding and perforation. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. 
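Both abstracts above report overlap statistics (Intersection over Union, Dice score, pixel accuracy) between AI output and expert annotation. For reference, and not taken from the cited works, these pixel-level overlap metrics for a pair of binary masks reduce to the few lines below.

import numpy as np

# Overlap metrics for two binary segmentation masks (prediction vs. reference).
def overlap_metrics(pred, ref):
    pred, ref = pred.astype(bool), ref.astype(bool)
    inter = np.logical_and(pred, ref).sum()
    union = np.logical_or(pred, ref).sum()
    iou = inter / union if union else 1.0            # Intersection over Union
    denom = pred.sum() + ref.sum()
    dice = 2 * inter / denom if denom else 1.0       # Dice score
    acc = (pred == ref).mean()                       # pixel accuracy
    return iou, dice, acc

# toy example: two 3-pixel stripes shifted by one pixel
a = np.zeros((4, 4), dtype=int); a[0, :3] = 1
b = np.zeros((4, 4), dtype=int); b[0, 1:4] = 1
print(overlap_metrics(a, b))   # IoU = 0.5, Dice ~ 0.67, pixel accuracy = 0.875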
These performance statistics suggest a potential clinical benefit for procedural safety, procedure time and also training.}, language = {en} } @unpublished{AllanKondoBodenstedtetal., author = {Allan, Max and Kondo, Satoshi and Bodenstedt, Sebastian and Leger, Stefan and Kadkhodamohammadi, Rahim and Luengo, Imanol and Fuentes, Felix and Flouty, Evangello and Mohammed, Ahmed and Pedersen, Marius and Kori, Avinash and Alex, Varghese and Krishnamurthi, Ganapathy and Rauber, David and Mendel, Robert and Palm, Christoph and Bano, Sophia and Saibro, Guinther and Shih, Chi-Sheng and Chiang, Hsun-An and Zhuang, Juntang and Yang, Junlin and Iglovikov, Vladimir and Dobrenkii, Anton and Reddiboina, Madhu and Reddy, Anubhav and Liu, Xingtong and Gao, Cong and Unberath, Mathias and Kim, Myeonghyeon and Kim, Chanho and Kim, Chaewon and Kim, Hyejin and Lee, Gyeongmin and Ullah, Ihsan and Luna, Miguel and Park, Sang Hyun and Azizian, Mahdi and Stoyanov, Danail and Maier-Hein, Lena and Speidel, Stefanie}, title = {2018 Robotic Scene Segmentation Challenge}, doi = {10.48550/arXiv.2001.11190}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-50049}, pages = {11}, abstract = {In 2015 we began a sub-challenge at the EndoVis workshop at MICCAI in Munich using endoscope images of ex vivo tissue with automatically generated annotations from robot forward kinematics and instrument CAD models. However, the limited background variation and simple motion rendered the dataset uninformative in learning about which techniques would be suitable for segmentation in real surgery. In 2017, at the same workshop in Quebec, we introduced the robotic instrument segmentation dataset with 10 teams participating in the challenge to perform binary, articulating parts and type segmentation of da Vinci instruments. This challenge included realistic instrument motion and more complex porcine tissue as background and was widely addressed with modifications of U-Nets and other popular CNN architectures [1]. In 2018 we added to the complexity by introducing a set of anatomical objects and medical devices to the segmented classes. To avoid over-complicating the challenge, we continued with porcine data which is dramatically simpler than human tissue due to the lack of fatty tissue occluding many organs.}, subject = {Minimal-invasive Chirurgie}, language = {en} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS).
An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg. In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the performance of the AI algorithm (AI-EoE-EREFS) improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores. The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with a lower or medium experience level.}, language = {en} } @misc{EbigboMendelTziatziosetal., author = {Ebigbo, Alanna and Mendel, Robert and Tziatzios, Georgios and Probst, Andreas and Palm, Christoph and Messmann, Helmut}, title = {Real-Time Diagnosis of an Early Barrett's Carcinoma using Artificial Intelligence (AI) - Video Case Demonstration}, series = {Endoscopy}, volume = {52}, journal = {Endoscopy}, number = {S 01}, publisher = {Thieme}, doi = {10.1055/s-0040-1704075}, pages = {S23}, abstract = {Introduction We present a clinical case showing the real-time detection, characterization and delineation of an early Barrett's cancer using AI. Patients and methods A 70-year-old patient with a long-segment Barrett's esophagus (C5M7) was assessed with an AI algorithm. Results The AI system detected a 10 mm focal lesion and AI characterization predicted cancer with a probability of >90\%. After ESD resection, histopathology showed mucosal adenocarcinoma (T1a (m), R0), confirming the AI diagnosis. Conclusion We demonstrate the real-time AI detection, characterization and delineation of a small and early mucosal Barrett's cancer.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @article{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {ARTIFICIAL INTELLIGENCE (AI) - ASSISTED VESSEL AND TISSUE RECOGNITION IN THIRD-SPACE ENDOSCOPY}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {S01}, publisher = {Thieme}, doi = {10.1055/s-0042-1745037}, pages = {S175}, abstract = {Aims Third-space endoscopy procedures such as endoscopic submucosal dissection (ESD) and peroral endoscopic myotomy (POEM) are complex interventions with elevated risk of operator-dependent adverse events, such as intra-procedural bleeding and perforation. We aimed to design an artificial intelligence clinical decision support solution (AI-CDSS, "Smart ESD") for the detection and delineation of vessels, tissue structures, and instruments during third-space endoscopy procedures. Methods Twelve full-length third-space endoscopy videos were extracted from the Augsburg University Hospital database.
1686 frames were annotated for the following categories: submucosal layer, blood vessels, electrosurgical knife and endoscopic instrument. A DeepLabv3+ neural network with a 101-layer ResNet backbone was trained and validated internally. Finally, the ability of the AI system to detect visible vessels during ESD and POEM was determined on 24 separate video clips of 7 to 46 seconds duration, showing 33 predefined vessels. These video clips were also assessed by an expert in third-space endoscopy. Results Smart ESD showed a vessel detection rate (VDR) of 93.94\%, while an average of 1.87 false positive signals was recorded per minute. The VDR of the expert endoscopist was 90.1\%, with no false positive findings. On the internal validation data set using still images, the AI system demonstrated a mean Intersection over Union (IoU), Dice score and pixel accuracy of 63.47\%, 76.18\% and 86.61\%, respectively. Conclusions This is the first AI-CDSS aiming to mitigate operator-dependent limitations during third-space endoscopy. Further clinical trials are underway to better understand the role of AI in such procedures.}, language = {en} } @misc{MeinikheimMendelScheppachetal., author = {Meinikheim, Michael and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Prinz, Friederike and Schwamberger, Tanja and Schlottmann, Jakob and G{\"o}lder, Stefan Karl and Walter, Benjamin and Steinbr{\"u}ck, Ingo and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {INFLUENCE OF AN ARTIFICIAL INTELLIGENCE (AI) BASED DECISION SUPPORT SYSTEM (DSS) ON THE DIAGNOSTIC PERFORMANCE OF NON-EXPERTS IN BARRETT'S ESOPHAGUS RELATED NEOPLASIA (BERN)}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {S 01}, publisher = {Thieme}, doi = {10.1055/s-00000012}, pages = {S39}, abstract = {Aims Barrett's esophagus-related neoplasia (BERN) is difficult to detect and characterize during endoscopy, even for expert endoscopists. We aimed to assess the add-on effect of an Artificial Intelligence (AI) algorithm (Barrett-Ampel) as a decision support system (DSS) for non-expert endoscopists in the evaluation of Barrett's esophagus (BE) and BERN. Methods Twelve videos with multimodal imaging (white light (WL), narrow-band imaging (NBI), and texture and color enhancement imaging (TXI)) of histologically confirmed BE and BERN were assessed by expert and non-expert endoscopists. For each video, endoscopists were asked to identify the area of BERN and decide on the biopsy spot. Videos were assessed by the AI algorithm and regions of BERN were highlighted in real time by a transparent overlay. Finally, endoscopists were shown the AI videos and asked to either confirm or change their initial decision based on the AI support. Results Barrett-Ampel correctly identified all areas of BERN, irrespective of the imaging modality (WL, NBI, TXI), but misinterpreted two inflammatory lesions (accuracy = 75\%). Expert endoscopists had a similar performance (accuracy = 70.8\%), while non-experts had an accuracy of 58.3\%. When AI was implemented as a DSS, non-expert endoscopists improved their diagnostic accuracy to 75\%. Conclusions AI may have the potential to support non-expert endoscopists in the assessment of videos of BE and BERN. Limitations of this study include the low number of videos used.
Randomized clinical trials in a real-life setting should be performed to confirm these results.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{RauberMendelScheppachetal., author = {Rauber, David and Mendel, Robert and Scheppach, Markus W. and Ebigbo, Alanna and Messmann, Helmut and Palm, Christoph}, title = {Analysis of Celiac Disease with Multimodal Deep Learning}, series = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-36932-3_25}, pages = {115 -- 120}, abstract = {Celiac disease is an autoimmune disorder caused by gluten that results in an inflammatory response of the small intestine.We investigated whether celiac disease can be detected using endoscopic images through a deep learning approach. The results show that additional clinical parameters can improve the classification accuracy. In this work, we distinguished between healthy tissue and Marsh III, according to the Marsh score system. We first trained a baseline network to classify endoscopic images of the small bowel into these two classes and then augmented the approach with a multimodality component that took the antibody status into account.}, language = {en} } @inproceedings{NunesHammerHammeretal., author = {Nunes, Danilo Weber and Hammer, Michael and Hammer, Simone and Uller, Wibke and Palm, Christoph}, title = {Classification of Vascular Malformations Based on T2 STIR Magnetic Resonance Imaging}, series = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-36932-3_57}, pages = {267 -- 272}, abstract = {Vascular malformations (VMs) are a rare condition. They can be categorized into high-flow and low-flow VMs, which is a challenging task for radiologists. In this work, a very heterogeneous set of MRI images with only rough annotations are used for classification with a convolutional neural network. The main focus is to describe the challenging data set and strategies to deal with such data in terms of preprocessing, annotation usage and choice of the network architecture. We achieved a classification result of 89.47 \% F1-score with a 3D ResNet 18.}, language = {en} } @article{WeihererEigenbergerEggeretal., author = {Weiherer, Maximilian and Eigenberger, Andreas and Egger, Bernhard and Br{\´e}bant, Vanessa and Prantl, Lukas and Palm, Christoph}, title = {Learning the shape of female breasts: an open-access 3D statistical shape model of the female breast built from 110 breast scans}, series = {The Visual Computer}, volume = {39}, journal = {The Visual Computer}, number = {4}, publisher = {Springer Nature}, doi = {10.1007/s00371-022-02431-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-30506}, pages = {1597 -- 1616}, abstract = {We present the Regensburg Breast Shape Model (RBSM)—a 3D statistical shape model of the female breast built from 110 breast scans acquired in a standing position, and the first publicly available. 
Together with the model, a fully automated, pairwise surface registration pipeline used to establish dense correspondence among 3D breast scans is introduced. Our method is computationally efficient and requires only four landmarks to guide the registration process. A major challenge when modeling female breasts from surface-only 3D breast scans is the non-separability of breast and thorax. In order to weaken the strong coupling between breast and surrounding areas, we propose to minimize the variance outside the breast region as much as possible. To achieve this goal, a novel concept called breast probability masks (BPMs) is introduced. A BPM assigns probabilities to each point of a 3D breast scan, telling how likely it is that a particular point belongs to the breast area. During registration, we use BPMs to align the template to the target as accurately as possible inside the breast region and only roughly outside. This simple yet effective strategy significantly reduces the unwanted variance outside the breast region, leading to better statistical shape models in which breast shapes are quite well decoupled from the thorax. The RBSM is thus able to produce a variety of different breast shapes as independently as possible from the shape of the thorax. Our systematic experimental evaluation reveals a generalization ability of 0.17 mm and a specificity of 2.8 mm. To underline the expressiveness of the proposed model, we finally demonstrate in two showcase applications how the RBSM can be used for surgical outcome simulation and the prediction of a missing breast from the remaining one. Our model is available at https://www.rbsm.re-mic.de/.}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Byrne, Michael F. and Messmann, Helmut and Palm, Christoph}, title = {Multimodal imaging for detection and segmentation of Barrett's esophagus-related neoplasia using artificial intelligence}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {10}, edition = {E-Video}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1704-7885}, pages = {1}, abstract = {The early diagnosis of cancer in Barrett's esophagus is crucial for improving the prognosis. However, identifying Barrett's esophagus-related neoplasia (BERN) is challenging, even for experts [1]. Four-quadrant biopsies may improve the detection of neoplasia, but they can be associated with sampling errors. The application of artificial intelligence (AI) to the assessment of Barrett's esophagus could improve the diagnosis of BERN, and this has been demonstrated in both preclinical and clinical studies [2] [3]. In this video demonstration, we show the accurate detection and delineation of BERN in two patients ([Video 1]). In part 1, the AI system detects a mucosal cancer about 20 mm in size and accurately delineates the lesion in both white-light and narrow-band imaging. In part 2, a small island of BERN with high-grade dysplasia is detected and delineated in white-light, narrow-band, and texture and color enhancement imaging. The video shows the results using a transparent overlay of the mucosal cancer in real time as well as a full segmentation preview. Additionally, the optical flow allows for the assessment of endoscope movement, something which is inversely related to the reliability of the AI prediction. 
We demonstrate that multimodal imaging can be applied to the AI-assisted detection and segmentation of even small focal lesions in real time.}, language = {en} } @inproceedings{MaierHaugHuberetal., author = {Maier, Johannes and Haug, Sonja and Huber, Michaela and Katzky, Uwe and Neumann, Sabine and Perret, J{\´e}r{\^o}me and Prinzen, Martin and Weber, Karsten and Wittenberg, Thomas and W{\"o}hl, Rebecca and Scorna, Ulrike and Palm, Christoph}, title = {Development of a haptic and visual assisted training simulation concept for complex bone drilling in minimally invasive hand surgery}, series = {CARS Conference, 5.10.-7.10.2017}, booktitle = {CARS Conference, 5.10.-7.10.2017}, language = {en} } @article{MaierDesernoHandelsetal., author = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, title = {IJCARS: BVM 2021 special issue}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer}, doi = {10.1007/s11548-021-02534-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-21666}, pages = {2067 -- 2068}, abstract = {The German workshop on medical image computing (BVM) has been held in different locations in Germany for more than 20 years. In terms of content, BVM focused on the computer-aided analysis of medical image data with a wide range of applications, e.g. in the area of imaging, diagnostics, operation planning, computer-aided intervention and visualization. During this time, there have been remarkable methodological developments and upheavals, on which the BVM community has worked intensively. The area of machine learning should be emphasized, which has led to significant improvements, especially for tasks of classification and segmentation, but increasingly also in image formation and registration. As a result, work in connection with deep learning now dominates the BVM. These developments have also contributed to the establishment of medical image processing at the interface between computer science and medicine as one of the key technologies for the digitization of the health system. In addition to the presentation of current research results, a central aspect of the BVM is primarily the promotion of young scientists from the diverse BVM community, covering not only Germany but also Austria, Switzerland, The Netherland and other European neighbors. The conference serves primarily doctoral students and postdocs, but also students with excellent bachelor and master theses as a platform to present their work, to enter into professional discourse with the community, and to establish networks with specialist colleagues. Despite the many conferences and congresses that are also relevant for medical image processing, the BVM has therefore lost none of its importance and attractiveness and has retained its permanent place in the annual conference rhythm. Building on this foundation, there are some innovations and changes this year. The BVM 2021 was organized for the first time at the Ostbayerische Technische Hochschule Regensburg (OTH Regensburg, a technical university of applied sciences). After Aachen, Berlin, Erlangen, Freiburg, Hamburg, Heidelberg, Leipzig, L{\"u}beck, and Munich, Regensburg is not just a new venue. 
OTH Regensburg is the first representative of the universities of applied sciences (HAW) to organize the conference, which differs to universities, university hospitals, or research centers like Fraunhofer or Helmholtz. This also considers the further development of the research landscape in Germany, where HAWs increasingly contribute to applied research in addition to their focus on teaching. This development is also reflected in the contributions submitted to the BVM in recent years. At BVM 2021, which was held in a virtual format for the first time due to the Corona pandemic, an attractive and high-quality program was offered. Fortunately, the number of submissions increased significantly. Out of 97 submissions, 26 presentations, 51 posters and 5 software demonstrations were accepted via an anonymized reviewing process with three reviews each. The three best works have been awarded BVM prizes, selected by a separate committee. Based on these high-quality submissions, we are able to present another special issue in the International Journal of Computer Assisted Radiology and Surgery (IJCARS). Out of the 97 submissions, the ones with the highest scores have been invited to submit an extended version of their paper to be presented in IJCARS. As a result, we are now able to present this special issue with seven excellent articles. Many submissions focus on machine learning in a medical context.}, subject = {Bildgebendes Verfahren}, language = {en} } @article{MaierDesernoHandelsetal., author = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, title = {Guest editorial of the IJCARS - BVM 2018 special issue}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {14}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer}, doi = {10.1007/s11548-018-01902-0}, pages = {1 -- 2}, language = {en} } @misc{ScheppachRauberMendeletal., author = {Scheppach, Markus W. and Rauber, David and Mendel, Robert and Palm, Christoph and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection Of Celiac Disease Using A Deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724970}, abstract = {Aims Celiac disease (CD) is a complex condition caused by an autoimmune reaction to ingested gluten. Due to its polymorphic manifestation and subtle endoscopic presentation, the diagnosis is difficult and thus the disorder is underreported. We aimed to use deep learning to identify celiac disease on endoscopic images of the small bowel. Methods Patients with small intestinal histology compatible with CD (MARSH classification I-III) were extracted retrospectively from the database of Augsburg University hospital. They were compared to patients with no clinical signs of CD and histologically normal small intestinal mucosa. In a first step MARSH III and normal small intestinal mucosa were differentiated with the help of a deep learning algorithm. For this, the endoscopic white light images were divided into five equal-sized subsets. We avoided splitting the images of one patient into several subsets. A ResNet-50 model was trained with the images from four subsets and then validated with the remaining subset. This process was repeated for each subset, such that each subset was validated once. 
Sensitivity, specificity, and harmonic mean (F1) of the algorithm were determined. Results The algorithm showed values of 0.83, 0.88, and 0.84 for sensitivity, specificity, and F1, respectively. Further data showing a comparison between the detection rate of the AI model and that of experienced endoscopists will be available at the time of the upcoming conference. Conclusions We present the first clinical report on the use of a deep learning algorithm for the detection of celiac disease using endoscopic images. Further evaluation on an external data set, as well as in the detection of CD in real-time, will follow. However, this work at least suggests that AI can assist endoscopists in the endoscopic diagnosis of CD, and ultimately may be able to do a true optical biopsy in live-time.}, language = {en} } @misc{RoemmeleMendelRauberetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Rauber, David and R{\"u}ckert, Tobias and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Endoscopic Diagnosis of Eosinophilic Esophagitis Using a deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724274}, abstract = {Aims Eosinophilic esophagitis (EoE) is easily missed during endoscopy, either because physicians are not familiar with its endoscopic features or the morphologic changes are too subtle. In this preliminary paper, we present the first attempt to detect EoE in endoscopic white light (WL) images using a deep learning network (EoE-AI). Methods 401 WL images of eosinophilic esophagitis and 871 WL images of normal esophageal mucosa were evaluated. All images were assessed for the Endoscopic Reference score (EREFS) (edema, rings, exudates, furrows, strictures). Images with strictures were excluded. EoE was defined as the presence of at least 15 eosinophils per high power field on biopsy. A convolutional neural network based on the ResNet architecture with several five-fold cross-validation runs was used. Adding auxiliary EREFS-classification branches to the neural network allowed the inclusion of the scores as optimization criteria during training. EoE-AI was evaluated for sensitivity, specificity, and F1-score. In addition, two human endoscopists evaluated the images. Results EoE-AI showed a mean sensitivity, specificity, and F1 of 0.759, 0.976, and 0.834 respectively, averaged over the five distinct cross-validation runs. With the EREFS-augmented architecture, a mean sensitivity, specificity, and F1-score of 0.848, 0.945, and 0.861 could be demonstrated respectively. In comparison, the two human endoscopists had an average sensitivity, specificity, and F1-score of 0.718, 0.958, and 0.793. Conclusions To the best of our knowledge, this is the first application of deep learning to endoscopic images of EoE which were also assessed after augmentation with the EREFS-score. The next step is the evaluation of EoE-AI using an external dataset. We then plan to assess the EoE-AI tool on endoscopic videos, and also in real-time. 
This preliminary work is encouraging regarding the ability for AI to enhance physician detection of EoE, and potentially to do a true "optical biopsy" but more work is needed.}, language = {en} } @unpublished{WeihererEigenbergerBrebantetal., author = {Weiherer, Maximilian and Eigenberger, Andreas and Br{\´e}bant, Vanessa and Prantl, Lukas and Palm, Christoph}, title = {Learning the shape of female breasts: an open-access 3D statistical shape model of the female breast built from 110 breast scans}, pages = {15}, abstract = {We present the Regensburg Breast Shape Model (RBSM) - a 3D statistical shape model of the female breast built from 110 breast scans, and the first ever publicly available. Together with the model, a fully automated, pairwise surface registration pipeline used to establish correspondence among 3D breast scans is introduced. Our method is computationally efficient and requires only four landmarks to guide the registration process. In order to weaken the strong coupling between breast and thorax, we propose to minimize the variance outside the breast region as much as possible. To achieve this goal, a novel concept called breast probability masks (BPMs) is introduced. A BPM assigns probabilities to each point of a 3D breast scan, telling how likely it is that a particular point belongs to the breast area. During registration, we use BPMs to align the template to the target as accurately as possible inside the breast region and only roughly outside. This simple yet effective strategy significantly reduces the unwanted variance outside the breast region, leading to better statistical shape models in which breast shapes are quite well decoupled from the thorax. The RBSM is thus able to produce a variety of different breast shapes as independently as possible from the shape of the thorax. Our systematic experimental evaluation reveals a generalization ability of 0.17 mm and a specificity of 2.8 mm for the RBSM. Ultimately, our model is seen as a first step towards combining physically motivated deformable models of the breast and statistical approaches in order to enable more realistic surgical outcome simulation.}, language = {en} } @article{SouzaJrMendelStrasseretal., author = {Souza Jr., Luis Antonio de and Mendel, Robert and Strasser, Sophia and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Convolutional Neural Networks for the evaluation of cancer in Barrett's esophagus: Explainable AI to lighten up the black-box}, series = {Computers in Biology and Medicine}, volume = {135}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2021.104578}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-20126}, pages = {1 -- 14}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their level of accountability and transparency must be provided in such evaluations. The reliability related to machine learning predictions must be explained and interpreted, especially if diagnosis support is addressed. For this task, the black-box nature of deep learning techniques must be lightened up to transfer its promising results into clinical practice. 
Hence, we aim to investigate the use of explainable artificial intelligence techniques to quantitatively highlight discriminative regions during the classification of early cancerous tissues in Barrett's esophagus-diagnosed patients. Four Convolutional Neural Network models (AlexNet, SqueezeNet, ResNet50, and VGG16) were analyzed using five different interpretation techniques (saliency, guided backpropagation, integrated gradients, input × gradients, and DeepLIFT) to compare their agreement with experts' previous annotations of cancerous tissue. We could show that saliency attributes match best with the manual experts' delineations. Moreover, there is moderate to high correlation between the sensitivity of a model and the human-and-computer agreement. The results also showed that the higher the model's sensitivity, the stronger the correlation between human and computational segmentation agreement. We observed a relevant relation between computational learning and experts' insights, demonstrating how human knowledge may influence the correct computational learning.}, subject = {Deep Learning}, language = {en} } @inproceedings{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Fine-tuning Generative Adversarial Networks using Metaheuristics}, series = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, doi = {10.1007/978-3-658-33198-6_50}, pages = {205 -- 210}, abstract = {Barrett's esophagus denotes a disorder in the digestive system that affects the esophagus' mucosal cells, causing reflux, and showing potential convergence to esophageal adenocarcinoma if not treated in initial stages. Thus, fast and reliable computer-aided diagnosis becomes considerably welcome. Nevertheless, such approaches usually suffer from imbalanced datasets, which can be addressed through Generative Adversarial Networks (GANs). Such techniques generate realistic images based on observed samples, albeit at the cost of a proper selection of their hyperparameters. Many works employed a class of nature-inspired algorithms called metaheuristics to tackle the problem, considering distinct deep learning approaches. Therefore, this paper's main contribution is to introduce metaheuristic techniques to fine-tune GANs in the context of Barrett's esophagus identification, as well as to investigate the feasibility of generating high-quality synthetic images for early-cancer assisted identification.}, subject = {Endoskopie}, language = {en} } @misc{MendelSouzaJrRauberetal., author = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Abstract: Semi-supervised Segmentation Based on Error-correcting Supervision}, series = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, journal = {Bildverarbeitung f{\"u}r die Medizin 2021.
Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, doi = {10.1007/978-3-658-33198-6_43}, pages = {178}, abstract = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data. Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network.}, subject = {Deep Learning}, language = {en} } @article{EbigboPalmMessmann, author = {Ebigbo, Alanna and Palm, Christoph and Messmann, Helmut}, title = {Barrett esophagus: What to expect from Artificial Intelligence?}, series = {Best Practice \& Research Clinical Gastroenterology}, volume = {52-53}, journal = {Best Practice \& Research Clinical Gastroenterology}, number = {June-August}, publisher = {Elsevier}, issn = {1521-6918}, doi = {10.1016/j.bpg.2021.101726}, abstract = {The evaluation and assessment of Barrett's esophagus is challenging for both expert and nonexpert endoscopists. However, the early diagnosis of cancer in Barrett's esophagus is crucial for its prognosis, and could save costs. Pre-clinical and clinical studies on the application of Artificial Intelligence (AI) in Barrett's esophagus have shown promising results. In this review, we focus on the current challenges and future perspectives of implementing AI systems in the management of patients with Barrett's esophagus.}, subject = {Deep Learning}, language = {en} } @article{EbigboMendelRueckertetal., author = {Ebigbo, Alanna and Mendel, Robert and R{\"u}ckert, Tobias and Schuster, Laurin and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Mende, Matthias and Steinbr{\"u}ck, Ingo and Faiss, Siegbert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Deprez, Pierre and Oyama, Tsuneo and Takahashi, Akiko and Seewald, Stefan and Sharma, Prateek and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Endoscopic prediction of submucosal invasion in Barrett's cancer with the use of Artificial Intelligence: A pilot Study}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {09}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-1311-8570}, pages = {878 -- 883}, abstract = {Background and aims: The accurate differentiation between T1a and T1b Barrett's cancer has both therapeutic and prognostic implications but is challenging even for experienced physicians. We trained an Artificial Intelligence (AI) system on the basis of deep artificial neural networks (deep learning) to differentiate between T1a and T1b Barrett's cancer white-light images. Methods: Endoscopic images from three tertiary care centres in Germany were collected retrospectively. A deep learning system was trained and tested using the principles of cross-validation. A total of 230 white-light endoscopic images (108 T1a and 122 T1b) was evaluated with the AI-system. For comparison, the images were also classified by experts specialized in endoscopic diagnosis and treatment of Barrett's cancer. 
Results: The sensitivity, specificity, F1 and accuracy of the AI-system in the differentiation between T1a and T1b cancer lesions were 0.77, 0.64, 0.73 and 0.71, respectively. There was no statistically significant difference between the performance of the AI-system and that of human experts, with sensitivity, specificity, F1 and accuracy of 0.63, 0.78, 0.67 and 0.70, respectively. Conclusion: This pilot study demonstrates the first multicenter application of an AI-based system in the prediction of submucosal invasion in endoscopic images of Barrett's cancer. The AI scored on par with international experts in the field, but more work is necessary to improve the system and apply it to video sequences and in a real-life setting. Nevertheless, the correct prediction of submucosal invasion in Barrett's cancer remains challenging for both experts and AI.}, subject = {Maschinelles Lernen}, language = {en} } @article{HartmannWeihererSchiltzetal., author = {Hartmann, Robin and Weiherer, Maximilian and Schiltz, Daniel and Baringer, Magnus and Noisser, Vivien and H{\"o}sl, Vanessa and Eigenberger, Andreas and Seitz, Stefan and Palm, Christoph and Prantl, Lukas and Br{\´e}bant, Vanessa}, title = {New aspects in digital breast assessment: further refinement of a method for automated digital anthropometry}, series = {Archives of Gynecology and Obstetrics}, volume = {303}, journal = {Archives of Gynecology and Obstetrics}, publisher = {Springer Nature}, address = {Heidelberg}, issn = {1432-0711}, doi = {10.1007/s00404-020-05862-2}, pages = {721 -- 728}, abstract = {Purpose: In this trial, we used a previously developed prototype software to assess aesthetic results after reconstructive surgery for congenital breast asymmetry using automated anthropometry. To assess the consensus between the manual and automatic digital measurements, we evaluated the software by comparing the manual and automatic measurements of 46 breasts. Methods: Twenty-three patients who underwent reconstructive surgery for congenital breast asymmetry at our institution were examined and underwent 3D surface imaging. Per patient, 14 manual and 14 computer-based anthropometric measurements were obtained according to a standardized protocol. Manual and automatic measurements, as well as the previously proposed Symmetry Index (SI), were compared. Results: The Wilcoxon signed-rank test revealed no significant differences in six of the seven measurements between the automatic and manual assessments. The SI showed robust agreement between the automatic and manual methods. Conclusion: The present trial validates our method for digital anthropometry. Despite the discrepancy in one measurement, all remaining measurements, including the SI, showed high agreement between the manual and automatic methods. The proposed data bring us one step closer to the long-term goal of establishing robust instruments to evaluate the results of breast surgery.}, language = {en} } @article{ArribasAntonelliFrazzonietal., author = {Arribas, Julia and Antonelli, Giulio and Frazzoni, Leonardo and Fuccio, Lorenzo and Ebigbo, Alanna and van der Sommen, Fons and Ghatwary, Noha and Palm, Christoph and Coimbra, Miguel and Renna, Francesco and Bergman, Jacques J.G.H.M.
and Sharma, Prateek and Messmann, Helmut and Hassan, Cesare and Dinis-Ribeiro, Mario J.}, title = {Standalone performance of artificial intelligence for upper GI neoplasia: a meta-analysis}, series = {Gut}, volume = {70}, journal = {Gut}, number = {8}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2020-321922}, pages = {1458 -- 1468}, abstract = {Objective: Artificial intelligence (AI) may reduce underdiagnosed or overlooked upper GI (UGI) neoplastic and preneoplastic conditions, due to subtle appearance and low disease prevalence. Only disease-specific AI performances have been reported, generating uncertainty on its clinical value. Design: We searched PubMed, Embase and Scopus until July 2020, for studies on the diagnostic performance of AI in detection and characterisation of UGI lesions. Primary outcomes were pooled diagnostic accuracy, sensitivity and specificity of AI. Secondary outcomes were pooled positive (PPV) and negative (NPV) predictive values. We calculated pooled proportion rates (\%), designed summary receiving operating characteristic curves with respective area under the curves (AUCs) and performed metaregression and sensitivity analysis. Results: Overall, 19 studies on detection of oesophageal squamous cell neoplasia (ESCN) or Barrett's esophagus-related neoplasia (BERN) or gastric adenocarcinoma (GCA) were included with 218, 445, 453 patients and 7976, 2340, 13 562 images, respectively. AI-sensitivity/specificity/PPV/NPV/positive likelihood ratio/negative likelihood ratio for UGI neoplasia detection were 90\% (CI 85\% to 94\%)/89\% (CI 85\% to 92\%)/87\% (CI 83\% to 91\%)/91\% (CI 87\% to 94\%)/8.2 (CI 5.7 to 11.7)/0.111 (CI 0.071 to 0.175), respectively, with an overall AUC of 0.95 (CI 0.93 to 0.97). No difference in AI performance across ESCN, BERN and GCA was found, AUC being 0.94 (CI 0.52 to 0.99), 0.96 (CI 0.95 to 0.98), 0.93 (CI 0.83 to 0.99), respectively. Overall, study quality was low, with high risk of selection bias. No significant publication bias was found. Conclusion: We found a high overall AI accuracy for the diagnosis of any neoplastic lesion of the UGI tract that was independent of the underlying condition. This may be expected to substantially reduce the miss rate of precancerous lesions and early cancer when implemented in clinical practice.}, language = {en} } @article{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Assisting Barrett's esophagus identification using endoscopic data augmentation based on Generative Adversarial Networks}, series = {Computers in Biology and Medicine}, volume = {126}, journal = {Computers in Biology and Medicine}, number = {November}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2020.104029}, pages = {12}, abstract = {Barrett's esophagus figured a swift rise in the number of cases in the past years. Although traditional diagnosis methods offered a vital role in early-stage treatment, they are generally time- and resource-consuming. In this context, computer-aided approaches for automatic diagnosis emerged in the literature since early detection is intrinsically related to remission probabilities. However, they still suffer from drawbacks because of the lack of available data for machine learning purposes, thus implying reduced recognition rates. 
This work introduces Generative Adversarial Networks to generate high-quality endoscopic images, thereby identifying Barrett's esophagus and adenocarcinoma more precisely. Further, Convolutional Neural Networks are used for feature extraction and classification purposes. The proposed approach is validated over two datasets of endoscopic images, with the experiments conducted over the full and patch-split images. The application of Deep Convolutional Generative Adversarial Networks for the data augmentation step and LeNet-5 and AlexNet for the classification step allowed us to validate the proposed methodology over an extensive set of datasets (based on original and augmented sets), reaching 90\% accuracy for the patch-based approach and 85\% for the image-based approach. Both results are based on augmented datasets and are statistically different from the ones obtained in the original datasets of the same kind. Moreover, the impact of data augmentation was evaluated in the context of image description and classification, and the results obtained using synthetic images outperformed the ones over the original datasets, as well as other recent approaches from the literature. Such results suggest promising insights related to the importance of proper data for the accurate classification concerning computer-assisted Barrett's esophagus and adenocarcinoma detection.}, subject = {Maschinelles Lernen}, language = {en} } @inproceedings{MendelSouzaJrRauberetal., author = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Semi-supervised Segmentation Based on Error-Correcting Supervision}, series = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX}, booktitle = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-58525-9}, doi = {10.1007/978-3-030-58526-6_9}, pages = {141 -- 157}, abstract = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data. Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network. The secondary correction network learns on the labeled data to optimally spot correct predictions, as well as to amend incorrect ones. As an auxiliary regularization term, the corrector directly influences the supervised training of the segmentation network. On unlabeled data, the output of the correction network is essential to create a proxy for the unknown truth. The corrector's output is combined with the segmentation network's prediction to form the new target. We propose a loss function that incorporates both the pseudo-labels as well as the predictive certainty of the correction network. Our approach can easily be added to supervised segmentation models.
We show consistent improvements over a supervised baseline in experiments on both the Pascal VOC 2012 and the Cityscapes datasets with varying amounts of labeled data.}, subject = {Semi-Supervised Learning}, language = {en} } @article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task. Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. Our pipeline offers greater flexibility and a lower entrance barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @inproceedings{HassanIlgnerPalmetal., author = {Hassan, H. and Ilgner, Justus F. R. and Palm, Christoph and Lehmann, Thomas M. and Spitzer, Klaus and Westhofen, Martin}, title = {Objective Judgement of Endoscopic Laryngeal Images}, series = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Proceedings of the 3rd International Workshop, RWTH Aachen}, booktitle = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Proceedings of the 3rd International Workshop, RWTH Aachen}, editor = {Lehmann, Thomas M. and Spitzer, Klaus and Tolxdorff, Thomas}, pages = {135 -- 142}, abstract = {Video documentation of endoscopic findings simplifies diagnostic counseling of the patient and aids pre-operative discussion among the medical team. Judgment of such images is still subjective and cannot give a quantitative evaluation of the disease process regarding diagnosis or response to treatment. Modern treatment of early laryngeal cancer with laser ablation requires intensive follow-up and frequent direct laryngoscopy under general anesthesia with blind biopsies to detect any residual tumor or recurrence. Inflammatory conditions of the larynx are frequently confused with other causes of dysphonia. Mapping and digital analysis of the documented image will suggest the tumor site and avoid undue blind biopsies under anesthesia. However, varying illumination results in different colors reflected from the same object. To achieve quantitative analysis, color constancy has to be assured.
In this paper, an environment is presented which allows the objective judgment of laryngoscopies.}, language = {en} } @inproceedings{PalmPelkmannLehmannetal., author = {Palm, Christoph and Pelkmann, Annegret and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Distortion Correction of Laryngoscopic Images}, series = {Advances in quantitative laryngoscopy, voice and speech research, Proceedings of the 3rd international workshop Aachen, RWTH}, booktitle = {Advances in quantitative laryngoscopy, voice and speech research, Proceedings of the 3rd international workshop Aachen, RWTH}, pages = {117 -- 125}, abstract = {Laryngoscopic images of the vocal tract are used for diagnostic purposes. Quantitative measurements like changes of the glottis size or the surface of the vocal cords during an image sequence can be helpful to describe the healing process or to compare the findings of different patients. Typically, the endoscopic images are circularly symmetrically distorted (barrel distortion). Therefore, measurements of geometric dimensions depend on the object's position in the image. In this paper, an algorithm is presented which allows the computation of the translation-invariant "real" object size by correcting the image distortion without using additional calibration of the optical environment.}, language = {en} } @article{HartmannWeihererSchiltzetal., author = {Hartmann, Robin and Weiherer, Maximilian and Schiltz, Daniel and Seitz, Stephan and Lotter, Luisa and Anker, Alexandra and Palm, Christoph and Prantl, Lukas and Br{\´e}bant, Vanessa}, title = {A Novel Method of Outcome Assessment in Breast Reconstruction Surgery: Comparison of Autologous and Alloplastic Techniques Using Three-Dimensional Surface Imaging}, series = {Aesthetic Plastic Surgery}, volume = {44}, journal = {Aesthetic Plastic Surgery}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s00266-020-01749-4}, pages = {1980 -- 1987}, abstract = {Background Breast reconstruction is an important coping tool for patients undergoing a mastectomy. There are numerous surgical techniques in breast reconstruction surgery (BRS). Regardless of the technique used, creating a symmetric outcome is crucial for patients and plastic surgeons. Three-dimensional surface imaging enables surgeons and patients to assess the outcome's symmetry in BRS. To discriminate between autologous and alloplastic techniques, we analyzed both techniques using objective optical computerized symmetry analysis. Software was developed that enables clinicians to assess optical breast symmetry using three-dimensional surface imaging. Methods Twenty-seven patients who had undergone autologous (n = 12) or alloplastic (n = 15) BRS received three-dimensional surface imaging. Anthropomorphic data were collected digitally using semiautomatic measurements and automatic measurements. Automatic measurements were taken using the newly developed software. To quantify symmetry, a Symmetry Index is proposed. Results Statistical analysis revealed that there is no difference in the outcome symmetry between the two groups (t test for independent samples; p = 0.48, two-tailed). Conclusion This study's findings provide a foundation for qualitative symmetry assessment in BRS using automatized digital anthropometry.
In the present trial, no difference in the outcomes' optical symmetry was detected between autologous and alloplastic approaches.}, subject = {Mammoplastik}, language = {en} } @inproceedings{PalmKeysersLehmannetal., author = {Palm, Christoph and Keysers, Daniel and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Gabor Filtering of Complex Hue/Saturation Images for Color Texture Classification}, series = {Proceedings of the 5th Joint Conference on Information Science (JCIS) 2, The Association for Intelligent Machinery, Atlantic City, NJ, 2000}, booktitle = {Proceedings of the 5th Joint Conference on Information Science (JCIS) 2, The Association for Intelligent Machinery, Atlantic City, NJ, 2000}, pages = {45 -- 49}, abstract = {Objective: Complex hue/saturation images as a new approach for color texture classification using Gabor filters are introduced and compared with common techniques. Method: The interpretation of hue and saturation as polar coordinates allows direct use of the HSV-colorspace for Fourier transform. This technique is applied for Gabor feature extraction of color textures. In contrast to other color features based on the RGB-colorspace [1], the combination of color bands is done prior to the filtering. Results: The performance of the new HS-features is compared with that of RGB-based as well as grayscale Gabor features by evaluating the classification of 30 natural textures. The new HS-features show the same results as the best RGB features but allow a more compact representation. On average, the color features improve the results of grayscale features. Conclusion: The consideration of the color information enhances the classification of color texture. The choice of colorspace cannot be judged conclusively, but the introduced features suggest the use of the HSV-colorspace with fewer features than RGB.}, language = {en} } @inproceedings{MetzlerAachPalmetal., author = {Metzler, V. and Aach, T. and Palm, Christoph and Lehmann, Thomas M.}, title = {Texture Classification of Graylevel Images by Multiscale Cross-Co-Occurrence Matrices}, series = {Proceedings 15th International Conference on Pattern Recognition (ICPR-2000)}, booktitle = {Proceedings 15th International Conference on Pattern Recognition (ICPR-2000)}, doi = {10.1109/ICPR.2000.906133}, pages = {549 -- 552}, abstract = {Local gray level dependencies of natural images can be modelled by means of co-occurrence matrices containing joint probabilities of gray-level pairs. Texture, however, is a resolution-dependent phenomenon and hence, classification depends on the chosen scale. Since there is no optimal scale for all textures, we employ a multiscale approach that acquires textural features at several scales. Thus, linear and nonlinear scale-spaces are analyzed by multiscale co-occurrence matrices that describe the statistical behavior of a texture in scale-space. Classification is then performed on the basis of texture features taken from the individual scale with the highest discriminatory power. By considering cross-scale occurrences of gray level pairs, the impact of filters on the feature is described and used for classification of natural textures. This novel method was found to improve classification rates of the common co-occurrence matrix approach on standard textures significantly.}, language = {en} } @inproceedings{PalmLehmannSpitzer, author = {Palm, Christoph and Lehmann, Thomas M.
and Spitzer, Klaus}, title = {Color Texture Analysis of Moving Vocal Cords Using Approaches from Statistics and Signal Theory}, series = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 4th International Workshop, Friedrich Schiller University, Jena}, booktitle = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 4th International Workshop, Friedrich Schiller University, Jena}, pages = {49 -- 56}, abstract = {Textural features are applied for detection of morphological pathologies of vocal cords. Cooccurrence matrices as statistical features are presented as well as filter bank analysis by Gabor filters. Both methods are extended to handle color images. Their robustness against camera movement and vibration of vocal cords is evaluated. Classification results due to three in vivo sequences are in between 94.4 \% and 98.9\%. The classification errors decrease if color features are used instead of grayscale features for both statistical and Fourier features}, language = {en} } @article{IlgnerPalmSchuetzetal., author = {Ilgner, Justus F. R. and Palm, Christoph and Sch{\"u}tz, Andreas G. and Spitzer, Klaus and Westhofen, Martin and Lehmann, Thomas M.}, title = {Colour Texture Analysis for Quantitative Laryngoscopy}, series = {Acta Otolaryngologica}, volume = {123}, journal = {Acta Otolaryngologica}, doi = {10.1080/00016480310000412}, pages = {730 -- 734}, abstract = {Whilst considerable progress has been made in enhancing the quality of indirect laryngoscopy and image processing, the evaluation of clinical findings is still based on the clinician's judgement. The aim of this paper was to examine the feasibility of an objective computer-based method for evaluating laryngeal disease. Digitally recorded images obtained by 90 degree- and 70 degree-angled indirect rod laryngoscopy using standardized white balance values were made of 16 patients and 19 healthy subjects. The digital images were evaluated manually by the clinician based on a standardized questionnaire, and suspect lesions were marked and classified on the image. Following colour separation, normal vocal cord areas as well as suspect lesions were analyzed automatically using co-occurrence matrices, which compare colour differences between neighbouring pixels over a predefined distance. Whilst colour histograms did not provide sufficient information for distinguishing between healthy and diseased tissues, consideration of the blue content of neighbouring pixels enabled a correct classification in 81.4\% of cases. If all colour channels (red, green and blue) were regarded simultaneously, the best classification correctness obtained was 77.1\%. Although only a very basic classification differentiating between healthy and diseased tissue was attempted, the results showed progress compared to grey-scale histograms, which have been evaluated before. The results document a first step towards an objective, machine-based classification of laryngeal images, which could provide the basis for further development of an expert system for use in indirect laryngoscopy.}, language = {en} } @inproceedings{PalmLehmannBrednoetal., author = {Palm, Christoph and Lehmann, Thomas M. and Bredno, J. and Neuschaefer-Rube, C. and Klajman, S. and Spitzer, Klaus}, title = {Automated Analysis of Stroboscopic Image Sequences by Vibration Profiles}, series = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 
5th International Workshop}, booktitle = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 5th International Workshop}, abstract = {A method for automated segmentation of vocal cords in stroboscopic video sequences is presented. In contrast to earlier approaches, the inner and outer contours of the vocal cords are independently delineated. Automatic segmentation of the low-contrast images is carried out by connecting the shape constraint of a point distribution model to a multi-channel region-based balloon model. This enables us to robustly compute a vibration profile that is used as a new diagnostic tool to visualize several vibration parameters in only one graphic. The vibration profiles are studied in two cases: one physiological vibration and one functional pathology.}, language = {en} } @inproceedings{PietrzykPalmBeyer, author = {Pietrzyk, Uwe and Palm, Christoph and Beyer, Thomas}, title = {Investigation of fusion strategies of multi-modality images}, series = {IEEE Nuclear Science Symposium Conference Record}, volume = {4}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2004.1462740}, pages = {2399 -- 2401}, abstract = {Presenting images from different modalities seems to be a trivial task considering the challenges to obtain registered images as a pre-requisite for image fusion. In combined tomographs like PET/CT, image registration is intrinsic. However, informative image fusion mandates careful preparation owing to the large amount of information that is presented to the observer. In complex imaging situations it is required to provide tools that are easy to handle and still powerful enough to help the observer discriminating important details from background patterns. We investigated several options for color tables applied to brain and non-brain images obtained with PET, MRI and CT.}, language = {en} } @inproceedings{PietrzykBauerVietenetal., author = {Pietrzyk, Uwe and Bauer, Dagmar and Vieten, Andrea and Bauer, Andreas and Langen, Karl-Josef and Zilles, Karl and Palm, Christoph}, title = {Creating consistent 3D multi-modality data sets from autoradiographic and histological images of the rat brain}, series = {IEEE Nuclear Science Symposium Conference Record}, volume = {6}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2004.1466754}, pages = {4001 -- 4003}, abstract = {Volumetric representations of autoradiographic and histological images gain ever more interest as a base to interpret data obtained with µ-imaging devices like microPET. Beyond supporting spatial orientation within rat brains, especially autoradiographic images may serve as a base to quantitatively evaluate the complex uptake patterns of microPET studies with receptor ligands or tumor tracers. They may also serve for the development of rat brain atlases or data models, which can be explored during further image analysis or simulation studies. In all cases a consistent spatial representation of the rat brain, i.e. its anatomy and the corresponding quantitative uptake pattern, is required. This includes both a restacking of the individual two-dimensional images and the exact registration of the respective volumes. We propose strategies for how these volumes can be created in a consistent way while trying to limit the requirements on the circumstances during data acquisition, i.e.
being independent from other sources like video imaging of the block face prior to cutting or high resolution micro-X-ray CT or micro MRI.}, language = {en} } @article{Palm, author = {Palm, Christoph}, title = {Color Texture Classification by Integrative Co-Occurrence Matrices}, series = {Pattern Recognition}, volume = {37}, journal = {Pattern Recognition}, number = {5}, doi = {10.1016/j.patcog.2003.09.010}, pages = {965 -- 976}, abstract = {Integrative Co-occurrence matrices are introduced as novel features for color texture classification. The extended Co-occurrence notation allows the comparison between integrative and parallel color texture concepts. The information profit of the new matrices is shown quantitatively using the Kolmogorov distance and by extensive classification experiments on two datasets. Applying them to the RGB and the LUV color space the combined color and intensity textures are studied and the existence of intensity independent pure color patterns is demonstrated. The results are compared with two baselines: gray-scale texture analysis and color histogram analysis. The novel features improve the classification results up to 20\% and 32\% for the first and second baseline, respectively.}, language = {en} } @inproceedings{PietrzykPalmBeyer, author = {Pietrzyk, Uwe and Palm, Christoph and Beyer, Thomas}, title = {Fusion strategies in multi-modality imaging}, series = {Medical Physics, Vol 2. Proceedings of the jointly held Congresses: ICMP 2005, 14th International Conference of Medical Physics of the International Organization for Medical Physics (IOMP), the European Federation of Organizations in Medical Physics (EFOMP) and the German Society of Medical Physics (DGMP) ; BMT 2005, 39th Annual Congress of the German Society for Biomedical Engineering (DGBMT) within VDE ; 14th - 17th September 2005, Nuremberg, Germany}, booktitle = {Medical Physics, Vol 2. Proceedings of the jointly held Congresses: ICMP 2005, 14th International Conference of Medical Physics of the International Organization for Medical Physics (IOMP), the European Federation of Organizations in Medical Physics (EFOMP) and the German Society of Medical Physics (DGMP) ; BMT 2005, 39th Annual Congress of the German Society for Biomedical Engineering (DGBMT) within VDE ; 14th - 17th September 2005, Nuremberg, Germany}, pages = {1446 -- 1447}, subject = {Bildgebendes Verfahren}, language = {en} } @inproceedings{PalmDehnhardtVietenetal., author = {Palm, Christoph and Dehnhardt, Markus and Vieten, Andrea and Pietrzyk, Uwe}, title = {3D rat brain tumor reconstruction}, series = {Biomedizinische Technik}, volume = {50}, booktitle = {Biomedizinische Technik}, number = {Suppl. 1, Part 1}, pages = {597 -- 598}, subject = {Dreidimensionale Rekonstruktion}, language = {en} } @inproceedings{MiddelPalmErdt, author = {Middel, Luise and Palm, Christoph and Erdt, Marius}, title = {Synthesis of Medical Images Using GANs}, series = {Uncertainty for safe utilization of machine learning in medical imaging and clinical image-based procedures. First International Workshop, UNSURE 2019, and 8th International Workshop, CLIP 2019, held in conjunction with MICCAI 2019, Shenzhen, China, October 17, 2019}, booktitle = {Uncertainty for safe utilization of machine learning in medical imaging and clinical image-based procedures. 
First International Workshop, UNSURE 2019, and 8th International Workshop, CLIP 2019, held in conjunction with MICCAI 2019, Shenzhen, China, October 17, 2019}, publisher = {Springer Nature}, address = {Cham}, isbn = {978-3-030-32688-3}, issn = {0302-9743}, doi = {10.1007/978-3-030-32689-0_13}, pages = {125 -- 134}, abstract = {The success of artificial intelligence in medicine depends on the availability of large amounts of high-quality training data. Sharing of medical image data, however, is often restricted by laws such as doctor-patient confidentiality. Although there are publicly available medical datasets, their quality and quantity are often low. Moreover, datasets are often imbalanced and only represent a fraction of the images generated in hospitals or clinics and can thus usually only be used as training data for specific problems. The introduction of generative adversarial networks (GANs) provides a means to generate artificial images by training two convolutional networks. This paper proposes a method which uses GANs trained on medical images in order to generate a large number of artificial images that could be used to train other artificial intelligence algorithms. This work is a first step towards alleviating data privacy concerns and being able to publicly share data that still contains a substantial amount of the information in the original private data. The method has been evaluated on several public datasets, with quantitative and qualitative tests showing promising results.}, subject = {Neuronale Netze}, language = {en} } @inproceedings{WeihererZornWittenbergetal., author = {Weiherer, Maximilian and Zorn, Martin and Wittenberg, Thomas and Palm, Christoph}, title = {Retrospective Color Shading Correction for Endoscopic Images}, series = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {10.1007/978-3-658-29267-6}, pages = {14 -- 19}, abstract = {In this paper, we address the problem of retrospective color shading correction. An extension of the established gray-level shading correction algorithm based on signal envelope (SE) estimation to color images is developed using principal color components. Compared to the probably most general shading correction algorithm based on entropy minimization, SE estimation does not need any computationally expensive optimization and thus can be implemented more efficiently. We tested our new shading correction scheme on artificial as well as real endoscopic images and observed promising results. Additionally, an in-depth analysis of the stop criterion used in the SE estimation algorithm is provided, leading to the conclusion that a fixed, user-defined threshold is generally not feasible.
Thus, we present new ideas on how to develop a non-parametric version of the SE estimation algorithm using entropy.}, subject = {Endoskopie}, language = {en} } @inproceedings{ChangLinLeeetal., author = {Chang, Ching-Sheng and Lin, Jin-Fa and Lee, Ming-Ching and Palm, Christoph}, title = {Semantic Lung Segmentation Using Convolutional Neural Networks}, series = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {10.1007/978-3-658-29267-6_17}, pages = {75 -- 80}, abstract = {Chest X-Ray (CXR) images as part of a non-invasive diagnosis method are commonly used in today's medical workflow. In traditional methods, physicians usually use their experience to interpret CXR images; however, there is a large interobserver variance. Computer vision may be used as a standard for assisted diagnosis. In this study, we applied an encoder-decoder neural network architecture for automatic lung region detection. We compared a three-class approach (left lung, right lung, background) and a two-class approach (lung, background). The differentiation of left and right lungs as a direct result of a semantic segmentation on the basis of neural nets, rather than post-processing a lung-background segmentation, is done here for the first time. Our evaluation was done on the NIH Chest X-ray dataset, from which 1736 images were extracted and manually annotated. We achieved 94.9\% mIoU and 92\% mIoU as segmentation quality measures for the two-class model and the three-class model, respectively. This result is very promising for the segmentation of lung regions, having the simultaneous classification of left and right lung in mind.}, subject = {Neuronales Netz}, language = {en} } @misc{MaierWeihererHuberetal., author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Abstract: Imitating Human Soft Tissue with Dual-Material 3D Printing}, series = {Bildverarbeitung f{\"u}r die Medizin 2019, Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 17. bis 19. M{\"a}rz 2019 in L{\"u}beck}, journal = {Bildverarbeitung f{\"u}r die Medizin 2019, Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 17. bis 19. M{\"a}rz 2019 in L{\"u}beck}, editor = {Handels, Heinz and Deserno, Thomas M. and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-25325-7}, doi = {10.1007/978-3-658-25326-4_48}, pages = {218}, abstract = {Currently, it is common practice to use three-dimensional (3D) printers not only for rapid prototyping in industry, but also in the medical area to create medical applications for training inexperienced surgeons. In a clinical training simulator for minimally invasive bone drilling to fix hand fractures with Kirschner-wires (K-wires), a 3D printed hand phantom must not only be geometrically but also haptically correct.
Due to a limited view during an operation, surgeons need to perfectly localize underlying risk structures only by feeling specific bony protrusions of the human hand.}, subject = {Handchirurgie}, language = {en} } @article{BrownConsortiumZhouetal., author = {Brown, Peter and Consortium, RELISH and Zhou, Yaoqi and Palm, Christoph}, title = {Large expert-curated database for benchmarking document similarity detection in biomedical literature search}, series = {Database}, volume = {2019}, journal = {Database}, publisher = {Oxford University Press}, doi = {10.1093/database/baz085}, pages = {1 -- 66}, abstract = {Document recommendation systems for locating relevant literature have mostly relied on methods developed a decade ago. This is largely due to the lack of a large offline gold-standard benchmark of relevant documents that cover a variety of research fields such that newly developed literature search techniques can be compared, improved and translated into practice. To overcome this bottleneck, we have established the RElevant LIterature SearcH consortium consisting of more than 1500 scientists from 84 countries, who have collectively annotated the relevance of over 180 000 PubMed-listed articles with regard to their respective seed (input) article/s. The majority of annotations were contributed by highly experienced, original authors of the seed articles. The collected data cover 76\% of all unique PubMed Medical Subject Headings descriptors. No systematic biases were observed across different experience levels, research fields or time spent on annotations. More importantly, annotations of the same document pairs contributed by different scientists were highly concordant. We further show that the three representative baseline methods used to generate recommended articles for evaluation (Okapi Best Matching 25, Term Frequency-Inverse Document Frequency and PubMed Related Articles) had similar overall performances. Additionally, we found that these methods each tend to produce distinct collections of recommended articles, suggesting that a hybrid method may be required to completely capture all relevant articles. The established database server located at https://relishdb.ict.griffith.edu.au is freely available for the downloading of annotation data and the blind testing of new methods. We expect that this benchmark will be useful for stimulating the development of new powerful techniques for title and title/abstract-based search engines for relevant articles in biomedical research.}, subject = {Information Retrieval}, language = {en} } @misc{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Artificial Intelligence in Early Barrett's Cancer: The Segmentation Task}, series = {Endoscopy}, volume = {51}, journal = {Endoscopy}, number = {04}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0039-1681187}, pages = {6}, abstract = {Aims: The delineation of outer margins of early Barrett's cancer can be challenging even for experienced endoscopists. Artificial intelligence (AI) could assist endoscopists faced with this task. To date, there is very limited experience in this domain. In this study, we demonstrate the measure of overlap (Dice coefficient = D) between highly experienced Barrett endoscopists and an AI system in the delineation of cancer margins (segmentation task).
Methods: An AI system with a deep convolutional neural network (CNN) was trained and tested on high-definition endoscopic images of early Barrett's cancer (n = 33) and normal Barrett's mucosa (n = 41). The reference standard for the segmentation task was the manual delineation of tumor margins by three highly experienced Barrett endoscopists. Training of the AI system included patch generation, patch augmentation and adjustment of the CNN weights. Then, the segmentation results were obtained from patch classification and thresholding of the class probabilities. Segmentation results were evaluated using the Dice coefficient (D). Results: The Dice coefficient (D), which can range between 0 (no overlap) and 1 (complete overlap), was computed only for images correctly classified by the AI system as cancerous. At a threshold of t = 0.5, a mean value of D = 0.72 was computed. Conclusions: AI with CNN performed reasonably well in the segmentation of the tumor region in Barrett's cancer, at least when compared with expert Barrett's endoscopists. AI holds a lot of promise as a tool for better visualization of tumor margins but may need further improvement and enhancement, especially in real-time settings.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Real-time use of artificial intelligence in the evaluation of cancer in Barrett's oesophagus}, series = {Gut}, volume = {69}, journal = {Gut}, number = {4}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2019-319460}, pages = {615 -- 616}, abstract = {Based on previous work by our group with manual annotation of visible Barrett oesophagus (BE) cancer images, a real-time deep learning artificial intelligence (AI) system was developed. While an expert endoscopist conducts the endoscopic assessment of BE, our AI system captures random images from the real-time camera livestream and provides a global prediction (classification), as well as a dense prediction (segmentation) differentiating accurately between normal BE and early oesophageal adenocarcinoma (EAC). The AI system showed an accuracy of 89.9\% on 14 cases with neoplastic BE.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{EbigboPalmProbstetal., author = {Ebigbo, Alanna and Palm, Christoph and Probst, Andreas and Mendel, Robert and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Siersema, Peter and Messmann, Helmut}, title = {A technical review of artificial intelligence as applied to gastrointestinal endoscopy: clarifying the terminology}, series = {Endoscopy International Open}, volume = {07}, journal = {Endoscopy International Open}, number = {12}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1010-5705}, pages = {1616 -- 1623}, abstract = {The growing number of publications on the application of artificial intelligence (AI) in medicine underlines the enormous importance and potential of this emerging field of research. In gastrointestinal endoscopy, AI has been applied to all segments of the gastrointestinal tract, most importantly in the detection and characterization of colorectal polyps. However, AI research has also been published for the stomach and esophagus, covering both neoplastic and non-neoplastic disorders.
The various technical as well as medical aspects of AI, however, remain confusing, especially for non-expert physicians. This physician-engineer co-authored review explains the basic technical aspects of AI and provides a comprehensive overview of recent publications on AI in gastrointestinal endoscopy. Finally, a basic insight is offered into understanding publications on AI in gastrointestinal endoscopy.}, subject = {Diagnose}, language = {en} } @article{MaierWeihererHuberetal., author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Optically tracked and 3D printed haptic phantom hand for surgical training system}, series = {Quantitative Imaging in Medicine and Surgery}, volume = {10}, journal = {Quantitative Imaging in Medicine and Surgery}, number = {02}, publisher = {AME Publishing Company}, address = {Hong Kong, China}, doi = {10.21037/qims.2019.12.03}, pages = {340 -- 455}, abstract = {Background: For surgical fixation of bone fractures of the human hand, so-called Kirschner-wires (K-wires) are drilled through bone fragments. Due to the minimally invasive drilling procedures without a view of risk structures like vessels and nerves, a thorough training of young surgeons is necessary. For the development of a virtual reality (VR) based training system, a three-dimensional (3D) printed phantom hand is required. To ensure an intuitive operation, this phantom hand has to be realistic both in its position relative to the drill and in its haptic features. The softest 3D printing material available on the market, however, is too hard to imitate human soft tissue. Therefore, a support-material (SUP) filled metamaterial is used to soften the raw material. Realistic haptic features are important to palpate protrusions of the bone to determine the drilling starting point and angle. Optical real-time tracking is used to transfer position and rotation to the training system. Methods: A metamaterial already developed in previous work is further improved by use of a new unit cell. Thus, the amount of SUP within the volume can be increased and the tissue is softened further. In addition, the human anatomy is transferred to the entire hand model. A subcutaneous fat layer and penetration of air through pores into the volume simulate the shiftability of skin layers. For optical tracking, a rotationally symmetrical marker attached to the phantom hand with a corresponding reference marker is developed. In order to ensure trouble-free position transmission, various types of marker point applications are tested. Results: Several cuboid and forearm sample prints led to a final 30-centimeter-long hand model. The whole haptic phantom could be printed faultlessly within about 17 hours. The metamaterial consisting of the new unit cell results in an increased SUP share of 4.32\%. Validated by an expert surgeon study, this allows, in combination with a displacement of the uppermost skin layer, good palpability of the bones. Tracking of the hand marker in a dodecahedron design works trouble-free in conjunction with a reference marker attached to the worktop of the training system.
Conclusions: In this work, an optically tracked and haptically correct phantom hand was developed using dual-material 3D printing, which can be easily integrated into a surgical training system.}, subject = {Handchirurgie}, language = {en} } @inproceedings{SouzaJrEbigboProbstetal., author = {Souza Jr., Luis Antonio de and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Mendel, Robert and Palm, Christoph}, title = {Barrett's Esophagus Identification Using Color Co-occurrence Matrices}, series = {31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Parana, 2018}, booktitle = {31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Parana, 2018}, doi = {10.1109/SIBGRAPI.2018.00028}, pages = {166 -- 173}, abstract = {In this work, we propose the use of single-channel Color Co-occurrence Matrices for texture description of Barrett's Esophagus (BE) and adenocarcinoma images. Further classification using supervised learning techniques, such as Optimum-Path Forest (OPF), Support Vector Machines with Radial Basis Function (SVM-RBF) and a Bayesian classifier, supports the context of automatic BE and adenocarcinoma diagnosis. We validated three approaches of classification based on patches, patients and images in two datasets (MICCAI 2015 and Augsburg) using the color-and-texture descriptors and the machine learning techniques. Concerning the MICCAI 2015 dataset, the best results were obtained using the blue channel for the descriptors and the supervised OPF for classification purposes in the patch-based approach, with sensitivity of nearly 73\% for positive adenocarcinoma identification and specificity close to 77\% for BE (non-cancerous) patch classification. Regarding the Augsburg dataset, the most accurate results were also obtained using both the OPF classifier and the blue-channel descriptor for the feature extraction, with sensitivity close to 67\% and specificity around 76\%. Our work highlights new advances in the related research area and provides a promising technique that combines color and texture information, allied to three different approaches of dataset pre-processing, aiming to configure robust scenarios for the classification step.}, language = {en} } @article{LehmannPalm, author = {Lehmann, Thomas M. and Palm, Christoph}, title = {Color Line Search for Illuminant Estimation in Real World Scenes}, series = {Journal of the Optical Society of America (JOSA) A}, volume = {18}, journal = {Journal of the Optical Society of America (JOSA) A}, number = {11}, doi = {10.1364/JOSAA.18.002679}, pages = {2679 -- 2691}, abstract = {The estimation of illuminant color is mandatory for many applications in the field of color image quantification. However, it is an unresolved problem if no additional heuristics or restrictive assumptions apply. Assuming uniformly colored and roundly shaped objects, Lee has presented a theory and a method for computing the scene-illuminant chromaticity from specular highlights [H. C. Lee, J. Opt. Soc. Am. A 3, 1694 (1986)]. However, Lee's method, called image path search, is less robust to noise and is limited in the handling of microtextured surfaces. We introduce a novel approach to estimate the color of a single illuminant for noisy and microtextured images, which frequently occur in real-world scenes. Using dichromatic regions of different colored surfaces, our approach, named color line search, reverses Lee's strategy of image path search.
Reliable color lines are determined directly in the domain of the color diagrams in three steps. First, regions of interest are automatically detected around specular highlights, and local color diagrams are computed. Second, color lines are determined according to the dichromatic reflection model by Hough transform of the color diagrams. Third, a consistency check is applied by a corresponding path search in the image domain. Our method is evaluated on 40 natural images of fruit and vegetables. In comparison with those of Lee's method, accuracy and stability are substantially improved. In addition, the color line search approach can easily be extended to scenes of objects with macrotextured surfaces.}, language = {en} }