@misc{MendelSouzaJrRauberetal., author = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Abstract: Semi-supervised Segmentation Based on Error-correcting Supervision}, series = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, journal = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, doi = {10.1007/978-3-658-33198-6_43}, pages = {178}, abstract = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data. Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network.}, subject = {Deep Learning}, language = {en} } @article{EbigboPalmMessmann, author = {Ebigbo, Alanna and Palm, Christoph and Messmann, Helmut}, title = {Barrett esophagus: What to expect from Artificial Intelligence?}, series = {Best Practice \& Research Clinical Gastroenterology}, volume = {52-53}, journal = {Best Practice \& Research Clinical Gastroenterology}, number = {June-August}, publisher = {Elsevier}, issn = {1521-6918}, doi = {10.1016/j.bpg.2021.101726}, abstract = {The evaluation and assessment of Barrett's esophagus is challenging for both expert and nonexpert endoscopists. However, the early diagnosis of cancer in Barrett's esophagus is crucial for its prognosis, and could save costs. Pre-clinical and clinical studies on the application of Artificial Intelligence (AI) in Barrett's esophagus have shown promising results. In this review, we focus on the current challenges and future perspectives of implementing AI systems in the management of patients with Barrett's esophagus.}, subject = {Deep Learning}, language = {en} } @article{MaierDesernoHandelsetal., author = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, title = {Guest editorial of the IJCARS - BVM 2018 special issue}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {14}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer}, doi = {10.1007/s11548-018-01902-0}, pages = {1 -- 2}, language = {en} } @article{PassosSouzaJrMendeletal., author = {Passos, Leandro A. and Souza Jr., Luis Antonio de and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Barrett's esophagus analysis using infinity Restricted Boltzmann Machines}, series = {Journal of Visual Communication and Image Representation}, volume = {59}, journal = {Journal of Visual Communication and Image Representation}, publisher = {Elsevier}, doi = {10.1016/j.jvcir.2019.01.043}, pages = {475 -- 485}, abstract = {The number of patients with Barret's esophagus (BE) has increased in the last decades. Considering the dangerousness of the disease and its evolution to adenocarcinoma, an early diagnosis of BE may provide a high probability of cancer remission. 
However, limitations regarding traditional methods of detection and management of BE demand alternative solutions. As such, computer-aided tools have been recently used to assist in this problem, but the challenge still persists. To manage the problem, we introduce the infinity Restricted Boltzmann Machines (iRBMs) to the task of automatic identification of Barrett's esophagus from endoscopic images of the lower esophagus. Moreover, since iRBM requires a proper selection of its meta-parameters, we also present a discriminative iRBM fine-tuning using six meta-heuristic optimization techniques. We showed that iRBMs are suitable for this context, providing competitive results, and that the meta-heuristic techniques are appropriate for this task.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{SouzaJrAfonsoPalmetal, author = {Souza Jr., Luis Antonio de and Afonso, Luis Claudio Sugi and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Barrett's Esophagus Identification Using Optimum-Path Forest}, series = {Proceedings of the 30th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T 2017), Niter{\'o}i, Rio de Janeiro, Brazil, 2017, 17-20 October}, booktitle = {Proceedings of the 30th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T 2017), Niter{\'o}i, Rio de Janeiro, Brazil, 2017, 17-20 October}, doi = {10.1109/SIBGRAPI.2017.47}, pages = {308 -- 314}, abstract = {Computer-assisted analysis of endoscopic images can be helpful for the automatic diagnosis and classification of neoplastic lesions. Barrett's esophagus (BE) is a common type of reflux that is not straightforward to detect by endoscopic surveillance, thus being susceptible to erroneous diagnosis, which can cause cancer when not treated properly. In this work, we introduce the Optimum-Path Forest (OPF) classifier to the task of automatic identification of Barrett's esophagus, with promising results and outperforming the well-known Support Vector Machines (SVM) in the aforementioned context. We consider describing endoscopic images by means of feature extractors based on key point information, such as the Speeded up Robust Features (SURF) and Scale-Invariant Feature Transform (SIFT), for further designing a bag-of-visual-words that is used to feed both OPF and SVM classifiers. The best results were obtained by means of the OPF classifier for both feature extractors, with values of 0.732 (SURF) and 0.735 (SIFT) for sensitivity, 0.782 (SURF) and 0.806 (SIFT) for specificity, and 0.738 (SURF) and 0.732 (SIFT) for accuracy.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{ZehnerSzaloPalm, author = {Zehner, Alexander and Szalo, Alexander Eduard and Palm, Christoph}, title = {GraphMIC: Easy Prototyping of Medical Image Computing Applications}, series = {Interactive Medical Image Computing (IMIC), Workshop at the Medical Image Computing and Computer Assisted Interventions (MICCAI 2015), 2015, Munich}, booktitle = {Interactive Medical Image Computing (IMIC), Workshop at the Medical Image Computing and Computer Assisted Interventions (MICCAI 2015), 2015, Munich}, doi = {10.13140/RG.2.1.3718.4725}, pages = {395 -- 400}, abstract = {GraphMIC is a cross-platform image processing application utilizing the libraries ITK and OpenCV.
The abstract structure of image processing pipelines is visually represented by user interface components based on modern QtQuick technology, allowing users to focus on the arrangement and parameterization of operations rather than on implementing the equivalent functionality natively in C++. The application's central goal is to improve and simplify the typical workflow by providing various high-level features and functions like multithreading, image sequence processing and advanced error handling. A built-in Python interpreter allows the creation of custom nodes, where user-defined algorithms can be integrated to extend the basic functionality. An embedded 2D/3D visualizer gives feedback on the resulting image of an operation or of the whole pipeline. User inputs like seed points, contours or regions are forwarded to the processing pipeline as parameters to offer semi-automatic image computing. We report the main concept of the application and introduce several features and their implementation. Finally, the current state of development as well as future perspectives of GraphMIC are discussed.}, subject = {Bildverarbeitung}, language = {en} } @inproceedings{WeberDoenitzBrawanskietal, author = {Weber, Joachim and Doenitz, Christian and Brawanski, Alexander and Palm, Christoph}, title = {Data-Parallel MRI Brain Segmentation in Clinical Use}, series = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-46224-9_67}, pages = {389 -- 394}, abstract = {Structural MRI brain analysis and segmentation is a crucial part of the daily routine in neurosurgery for intervention planning. As an example, the free software FSL-FAST (FMRIB Software Library - FMRIB's Automated Segmentation Tool) in version 4 is used for segmentation of brain tissue types. To speed up the segmentation procedure by parallel execution, we transferred FSL-FAST to a General Purpose Graphics Processing Unit (GPGPU) using the Open Computing Language (OpenCL) [1]. The necessary steps for parallelization resulted in substantially different and less useful results. Therefore, the underlying methods were revised and adapted, at the cost of computational overhead. Nevertheless, we achieved a speed-up factor of 3.59 from CPU to GPGPU execution, while providing similarly useful or even better results.}, subject = {Kernspintomografie}, language = {en} } @misc{MaierWeihererHuberetal, author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Abstract: Imitating Human Soft Tissue with Dual-Material 3D Printing}, series = {Bildverarbeitung f{\"u}r die Medizin 2019, Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 17. bis 19. M{\"a}rz 2019 in L{\"u}beck}, journal = {Bildverarbeitung f{\"u}r die Medizin 2019, Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 17. bis 19. M{\"a}rz 2019 in L{\"u}beck}, editor = {Handels, Heinz and Deserno, Thomas M. and Maier, Andreas and Maier-Hein, Klaus H.
and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-25325-7}, doi = {10.1007/978-3-658-25326-4_48}, pages = {218}, abstract = {Currently, it is common practice to use three-dimensional (3D) printers not only for rapid prototyping in industry, but also in the medical area to create medical applications for training inexperienced surgeons. In a clinical training simulator for minimally invasive bone drilling to fix hand fractures with Kirschner-wires (K-wires), a 3D printed hand phantom must not only be geometrically but also haptically correct. Due to a limited view during an operation, surgeons need to perfectly localize underlying risk structures only by feeling specific bony protrusions of the human hand.}, subject = {Handchirurgie}, language = {en} } @misc{RueckertRiederRauberetal, author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Rauber, David and Xiao, Michel and Humolli, Eg and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph}, title = {Augmenting instrument segmentation in video sequences of minimally invasive surgery by synthetic smoky frames}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {18}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {Suppl 1}, publisher = {Springer Nature}, doi = {10.1007/s11548-023-02878-2}, pages = {S54 -- S56}, language = {en} } @article{MaerklRueckertRauberetal, author = {Maerkl, Raphaela and Rueckert, Tobias and Rauber, David and Gutbrod, Max and Weber Nunes, Danilo and Palm, Christoph}, title = {Enhancing generalization in zero-shot multi-label endoscopic instrument classification}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {20}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer Nature}, doi = {10.1007/s11548-025-03439-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-85674}, pages = {1577 -- 1587}, abstract = {Purpose: Recognizing previously unseen classes with neural networks is a significant challenge due to their limited generalization capabilities. This issue is particularly critical in safety-critical domains such as medical applications, where accurate classification is essential for reliability and patient safety. Zero-shot learning methods address this challenge by utilizing additional semantic data, with their performance relying heavily on the quality of the generated embeddings. Methods: This work investigates the use of full descriptive sentences, generated by a Sentence-BERT model, as class representations, compared to simpler category-based word embeddings derived from a BERT model. Additionally, the impact of z-score normalization as a post-processing step on these embeddings is explored. The proposed approach is evaluated on a multi-label generalized zero-shot learning task, focusing on the recognition of surgical instruments in endoscopic images from minimally invasive cholecystectomies. Results: The results demonstrate that combining sentence embeddings and z-score normalization significantly improves model performance. For unseen classes, the AUROC improves from 43.9\% to 64.9\%, and the multi-label accuracy from 26.1\% to 79.5\%. Overall performance measured across both seen and unseen classes improves from 49.3\% to 64.9\% in AUROC and from 37.3\% to 65.1\% in multi-label accuracy, highlighting the effectiveness of our approach.
Conclusion: These findings demonstrate that sentence embeddings and z-score normalization can substantially enhance the generalization performance of zero-shot learning models. However, as the study is based on a single dataset, future work should validate the method across diverse datasets and application domains to establish its robustness and broader applicability.}, language = {en} } @inproceedings{KlausmannRueckertRauberetal, author = {Klausmann, Leonard and Rueckert, Tobias and Rauber, David and Maerkl, Raphaela and Yildiran, Suemeyye R. and Gutbrod, Max and Palm, Christoph}, title = {DIY challenge blueprint: from organization to technical realization in biomedical image analysis}, series = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2025; Proceedings Part XI}, booktitle = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2025; Proceedings Part XI}, publisher = {Springer}, address = {Cham}, isbn = {978-3-032-05141-7}, doi = {10.1007/978-3-032-05141-7_9}, pages = {85 -- 95}, abstract = {Biomedical image analysis challenges have become the de facto standard for publishing new datasets and benchmarking different state-of-the-art algorithms. Most challenges use commercial cloud-based platforms, which can limit custom options and involve disadvantages such as reduced data control and increased costs for extended functionalities. In contrast, Do-It-Yourself (DIY) approaches have the capability to emphasize reliability, compliance, and custom features, providing a solid basis for low-cost, custom designs in self-hosted systems. Our approach emphasizes cost efficiency, improved data sovereignty, and strong compliance with regulatory frameworks, such as the GDPR. This paper presents a blueprint for DIY biomedical imaging challenges, designed to provide institutions with greater autonomy over their challenge infrastructure. Our approach comprehensively addresses both organizational and technical dimensions, including key user roles, data management strategies, and secure, efficient workflows. Key technical contributions include a modular, containerized infrastructure based on Docker, integration of open-source identity management, and automated solution evaluation workflows. Practical deployment guidelines are provided to facilitate implementation and operational stability. The feasibility and adaptability of the proposed framework are demonstrated through the MICCAI 2024 PhaKIR challenge, with multiple international teams submitting and validating their solutions through our self-hosted platform. This work can be used as a baseline for future self-hosted DIY implementations and our results encourage further studies in the area of biomedical image analysis challenges.}, language = {en} } @unpublished{GutbrodRauberWeberNunesetal, author = {Gutbrod, Max and Rauber, David and Weber Nunes, Danilo and Palm, Christoph}, title = {OpenMIBOOD: Open Medical Imaging Benchmarks for Out-Of-Distribution Detection}, doi = {10.48550/arXiv.2503.16247}, pages = {18}, abstract = {The growing reliance on Artificial Intelligence (AI) in critical domains such as healthcare demands robust mechanisms to ensure the trustworthiness of these systems, especially when faced with unexpected or anomalous inputs. This paper introduces the Open Medical Imaging Benchmarks for Out-Of-Distribution Detection (OpenMIBOOD), a comprehensive framework for evaluating out-of-distribution (OOD) detection methods specifically in medical imaging contexts.
OpenMIBOOD includes three benchmarks from diverse medical domains, encompassing 14 datasets divided into covariate-shifted in-distribution, near-OOD, and far-OOD categories. We evaluate 24 post-hoc methods across these benchmarks, providing a standardized reference to advance the development and fair comparison of OOD detection methods. Results reveal that findings from broad-scale OOD benchmarks in natural image domains do not translate to medical applications, underscoring the critical need for such benchmarks in the medical field. By mitigating the risk of exposing AI models to inputs outside their training distribution, OpenMIBOOD aims to support the advancement of reliable and trustworthy AI systems in healthcare. The repository is publicly available.}, language = {en} } @article{WeihererEigenbergerEggeretal, author = {Weiherer, Maximilian and Eigenberger, Andreas and Egger, Bernhard and Br{\'e}bant, Vanessa and Prantl, Lukas and Palm, Christoph}, title = {Learning the shape of female breasts: an open-access 3D statistical shape model of the female breast built from 110 breast scans}, series = {The Visual Computer}, volume = {39}, journal = {The Visual Computer}, number = {4}, publisher = {Springer Nature}, doi = {10.1007/s00371-022-02431-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-30506}, pages = {1597 -- 1616}, abstract = {We present the Regensburg Breast Shape Model (RBSM)—a 3D statistical shape model of the female breast built from 110 breast scans acquired in a standing position, and the first publicly available. Together with the model, a fully automated, pairwise surface registration pipeline used to establish dense correspondence among 3D breast scans is introduced. Our method is computationally efficient and requires only four landmarks to guide the registration process. A major challenge when modeling female breasts from surface-only 3D breast scans is the non-separability of breast and thorax. In order to weaken the strong coupling between breast and surrounding areas, we propose to minimize the variance outside the breast region as much as possible. To achieve this goal, a novel concept called breast probability masks (BPMs) is introduced. A BPM assigns probabilities to each point of a 3D breast scan, telling how likely it is that a particular point belongs to the breast area. During registration, we use BPMs to align the template to the target as accurately as possible inside the breast region and only roughly outside. This simple yet effective strategy significantly reduces the unwanted variance outside the breast region, leading to better statistical shape models in which breast shapes are quite well decoupled from the thorax. The RBSM is thus able to produce a variety of different breast shapes as independently as possible from the shape of the thorax. Our systematic experimental evaluation reveals a generalization ability of 0.17 mm and a specificity of 2.8 mm. To underline the expressiveness of the proposed model, we finally demonstrate in two showcase applications how the RBSM can be used for surgical outcome simulation and the prediction of a missing breast from the remaining one.
Our model is available at https://www.rbsm.re-mic.de/.}, language = {en} } @article{MaierWeihererHuberetal, author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Imitating human soft tissue on basis of a dual-material 3D print using a support-filled metamaterial to provide bimanual haptic for a hand surgery training system}, series = {Quantitative Imaging in Medicine and Surgery}, volume = {9}, journal = {Quantitative Imaging in Medicine and Surgery}, number = {1}, publisher = {AME Publishing Company}, doi = {10.21037/qims.2018.09.17}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-979}, pages = {30 -- 42}, abstract = {Background: Currently, it is common practice to use three-dimensional (3D) printers not only for rapid prototyping in industry, but also in the medical area to create medical applications for training inexperienced surgeons. In a clinical training simulator for minimally invasive bone drilling to fix hand fractures with Kirschner-wires (K-wires), a 3D-printed hand phantom must not only be geometrically but also haptically correct. Due to a limited view during an operation, surgeons need to perfectly localize underlying risk structures only by feeling specific bony protrusions of the human hand. Methods: The goal of this experiment is to imitate human soft tissue with its haptics and elasticity for a realistic hand phantom fabrication, using only a dual-material 3D printer and support-material-filled metamaterial between skin and bone. We present our workflow to generate lattice structures between hard bone and soft skin with iterative cube edge (CE) or cube face (CF) unit cells. Cuboid and finger-shaped sample prints with and without an inner hard bone in different lattice thicknesses are constructed and 3D printed. Results: The most elastic available rubber-like material is too firm to imitate soft tissue. By reducing the amount of rubber in the inner volume through support material (SUP), objects become significantly softer. Without metamaterial, after disintegration, the SUP can be shifted through the volume and thus the body loses its original shape. Although the CE design increases the elasticity, it cannot restore the fabric form. In contrast to CE, the CF design not only increases the elasticity but also guarantees a local limitation of the SUP. Therefore, the body retains its shape and internal bones remain in their intended place. Various unit cell sizes, lattice thickening and skin thickness regulate the rubber material and SUP ratio. Test prints with a higher SUP and lower rubber material percentage appear softer and vice versa. This was confirmed by an expert surgeon evaluation. Subjects judged pure rubber-like material as too firm and samples only filled with SUP or lattice structures in the CE design as not suitable for imitating tissue. 3D-printed finger samples in the CF design were rated as realistic compared to the haptics of human tissue, with a good palpable bone structure. Conclusions: We developed a new dual-material 3D print technique to imitate soft tissue of the human hand with its haptic properties.
Blowy SUP is trapped within a lattice structure to soften rubber-like 3D print material, which makes it possible to reproduce a realistic replica of human hand soft tissue.}, subject = {Handchirurgie}, language = {en} } @article{MaierWeihererHuberetal, author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Optically tracked and 3D printed haptic phantom hand for surgical training system}, series = {Quantitative Imaging in Medicine and Surgery}, volume = {10}, journal = {Quantitative Imaging in Medicine and Surgery}, number = {02}, publisher = {AME Publishing Company}, address = {Hong Kong, China}, doi = {10.21037/qims.2019.12.03}, pages = {340 -- 455}, abstract = {Background: For surgical fixation of bone fractures of the human hand, so-called Kirschner-wires (K-wires) are drilled through bone fragments. Due to the minimally invasive drilling procedures without a view of risk structures like vessels and nerves, a thorough training of young surgeons is necessary. For the development of a virtual reality (VR) based training system, a three-dimensional (3D) printed phantom hand is required. To ensure an intuitive operation, this phantom hand has to be realistic in both its position relative to the drill and its haptic features. The softest 3D printing material available on the market, however, is too hard to imitate human soft tissue. Therefore, a support-material (SUP) filled metamaterial is used to soften the raw material. Realistic haptic features are important to palpate protrusions of the bone to determine the drilling starting point and angle. Optical real-time tracking is used to transfer position and rotation to the training system. Methods: A metamaterial already developed in previous work is further improved by use of a new unit cell. Thus, the amount of SUP within the volume can be increased and the tissue is softened further. In addition, the human anatomy is transferred to the entire hand model. A subcutaneous fat layer and penetration of air through pores into the volume simulate the shiftability of skin layers. For optical tracking, a rotationally symmetrical marker attached to the phantom hand with a corresponding reference marker is developed. In order to ensure trouble-free position transmission, various types of marker point applications are tested. Results: Several cuboid and forearm sample prints led to a final 30-centimeter-long hand model. The whole haptic phantom could be printed faultlessly within about 17 hours. The metamaterial consisting of the new unit cell results in an increased SUP share of 4.32\%. Validated by an expert surgeon study, this, in combination with a displacement of the uppermost skin layer, allows good palpability of the bones. Tracking of the hand marker in dodecahedron design works trouble-free in conjunction with a reference marker attached to the worktop of the training system. Conclusions: In this work, an optically tracked and haptically correct phantom hand was developed using dual-material 3D printing, which can be easily integrated into a surgical training system.}, subject = {Handchirurgie}, language = {en} } @inproceedings{FranzDreherPrinzenetal, author = {Franz, Daniela and Dreher, Maria and Prinzen, Martin and Teßmann, Matthias and Palm, Christoph and Katzky, Uwe and Perret, Jerome and Hofer, Mathias and Wittenberg, Thomas}, title = {CT-basiertes virtuelles Fr{\"a}sen am Felsenbein}, series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen.
Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-56537-7}, doi = {10.1007/978-3-662-56537-7_51}, pages = {176 -- 181}, abstract = {As part of the development of a haptic-visual training system for milling at the petrosal bone, a haptic arm and an autostereoscopic 3D monitor are used to enable surgeons to virtually manipulate bony structures in the context of a so-called serious game. Among other things, resident physicians will be able to practice milling at the petrosal bone for the surgical insertion of a cochlear implant as part of their training. To this end, the visualization of the virtual milling has to be modeled, implemented, and evaluated in real time and as realistically as possible. We use different raycasting methods with linear and nearest-neighbor interpolation and compare the visual quality and frame rates of the methods. All compared methods are real-time capable but differ in their visual quality.}, subject = {Felsenbein}, language = {de} } @inproceedings{MaierHuberKatzkyetal, author = {Maier, Johannes and Huber, Michaela and Katzky, Uwe and Perret, Jerome and Wittenberg, Thomas and Palm, Christoph}, title = {Force-Feedback-assisted Bone Drilling Simulation Based on CT Data}, series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-56537-7_78}, pages = {291 -- 296}, abstract = {In order to fix a fracture using minimally invasive surgery approaches, surgeons drill complex and tiny bones with a two-dimensional X-ray as the single imaging modality in the operating room. Our novel haptic force-feedback and visually assisted training system will potentially help hand surgeons to learn the drilling procedure in a realistic visual environment. Within the simulation, the collision detection as well as the interaction between the virtual drill, bone voxels and surfaces are important. In this work, the CHAI3D collision detection and force calculation algorithms are combined with a physics engine to simulate the bone drilling process. The chosen Bullet physics engine provides a stable simulation of rigid bodies if the collision model of the drill and the tool holder is generated as a compound shape. Three haptic points are added to the K-wire tip for removing single voxels from the bone. For the drilling process, three modes are proposed to emulate the different phases of drilling by restricting the movement of the haptic device.}, subject = {Handchirurgie}, language = {en} } @inproceedings{EixelbergerWittenbergPerretetal, author = {Eixelberger, Thomas and Wittenberg, Thomas and Perret, Jerome and Katzky, Uwe and Simon, Martina and Schmitt-R{\"u}th, Stephanie and Hofer, Mathias and Sorge, M. and Jacob, R. and Engel, Felix B. and Gostian, A. and Palm, Christoph and Franz, Daniela}, title = {A haptic model for virtual petrosal bone milling}, series = {17.
Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2018), Tagungsband, 2018, Leipzig, 13.-15. September}, volume = {17}, booktitle = {17. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2018), Tagungsband, 2018, Leipzig, 13.-15. September}, pages = {214 -- 219}, abstract = {Virtual training of bone milling requires real-time and realistic haptics of the interaction between the "virtual mill" and a "virtual bone". We propose an exponential abrasion model between the virtual bone and the mill bit and combine it with a coarse representation of the virtual bone and the mill shaft for collision detection using the Bullet Physics Engine. We compare our exponential abrasion model to a widely used linear abrasion model and evaluate it quantitatively and qualitatively. The evaluation results show that we can provide virtual milling in real time, with an abrasion behavior similar to that proposed in the literature and with a feel rated as realistic by five different surgeons.}, subject = {Osteosynthese}, language = {en} } @inproceedings{PalmSchanze, author = {Palm, Christoph and Schanze, Thomas}, title = {Biomedical Image and Signal Computing (BISC 2013)}, series = {58. Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e.V. (GMDS 2013), L{\"u}beck, 01.-05.09.2013}, booktitle = {58. Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e.V. (GMDS 2013), L{\"u}beck, 01.-05.09.2013}, number = {DocAbstr. 324}, publisher = {German Medical Science GMS Publishing House}, address = {D{\"u}sseldorf}, doi = {10.3205/13gmds257}, language = {en} } @article{EbigboMendelProbstetal, author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Computer-aided diagnosis using deep learning in the evaluation of early oesophageal adenocarcinoma}, series = {Gut}, volume = {68}, journal = {Gut}, number = {7}, publisher = {British Society of Gastroenterology}, doi = {10.1136/gutjnl-2018-317573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-68}, pages = {1143 -- 1145}, abstract = {Computer-aided diagnosis using deep learning (CAD-DL) may be an instrument to improve endoscopic assessment of Barrett's oesophagus (BE) and early oesophageal adenocarcinoma (EAC). Based on still images from two databases, the diagnosis of EAC by CAD-DL reached sensitivities/specificities of 97\%/88\% (Augsburg data) and 92\%/100\% (Medical Image Computing and Computer-Assisted Intervention [MICCAI] data) for white light (WL) images and 94\%/80\% for narrow band imaging (NBI) (Augsburg data), respectively. Tumour margins delineated in the images by experts were detected satisfactorily with a Dice coefficient (D) of 0.72. This could be a first step towards CAD-DL for BE assessment.
If developed further, it could become a useful adjunctive tool for patient management.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @article{WoehlMaierGehmertetal, author = {W{\"o}hl, Rebecca and Maier, Johannes and Gehmert, Sebastian and Palm, Christoph and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Huber, Michaela}, title = {3D Analysis of Osteosyntheses Material using semi-automated CT Segmentation}, series = {BMC Musculoskeletal Disorders}, volume = {19}, journal = {BMC Musculoskeletal Disorders}, publisher = {Springer Nature}, doi = {10.1186/s12891-018-1975-0}, pages = {1 -- 8}, abstract = {Background: Scaphoidectomy and midcarpal fusion can be performed using traditional fixation methods like K-wires, staples, screws or different dorsal (non)locking arthrodesis systems. The aim of this study is to test the Aptus four corner locking plate and to compare the clinical findings to the data revealed by CT scans and semi-automated segmentation. Methods: This is a retrospective review of eleven patients suffering from scapholunate advanced collapse (SLAC) or scaphoid non-union advanced collapse (SNAC) wrist, who received a four corner fusion between August 2011 and July 2014. The clinical evaluation consisted of measuring the range of motion (ROM), strength and pain on a visual analogue scale (VAS). Additionally, the Disabilities of the Arm, Shoulder and Hand (QuickDASH) and the Mayo Wrist Score were assessed. A computerized tomography (CT) of the wrist was obtained six weeks postoperatively. After semi-automated segmentation of the CT scans, the models were post-processed and surveyed. Results: During the six-month follow-up, the mean range of motion (ROM) of the operated wrist was 60°, consisting of 30° extension and 30° flexion. While pain levels decreased significantly, 54\% of grip strength and 89\% of pinch strength were preserved compared to the contralateral healthy wrist. Union could be detected in all CT scans of the wrist. While X-ray pictures obtained postoperatively revealed no pathology, two user-related technical complications were found through the 3D analysis, which correlated with the clinical outcome. Conclusion: Semi-automated segmentation and 3D analysis proved that the plate design lives up to the manufacturers' promises. Overall, this case series confirmed that the plate can compete with the coexisting techniques concerning clinical outcome, union and complication rate.}, subject = {Handchirurgie}, language = {en} } @misc{ScheppachMendelProbstetal, author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Rauber, David and R{\"u}ckert, Tobias and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Real-time detection and delineation of tissue during third-space endoscopy using artificial intelligence (AI)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765128}, pages = {S53 -- S54}, abstract = {Aims: AI has shown great potential in assisting endoscopists in diagnostics; however, its role in therapeutic endoscopy remains unclear. Endoscopic submucosal dissection (ESD) is a technically demanding intervention with a slow learning curve and relevant risks like bleeding and perforation. Therefore, we aimed to develop an algorithm for the real-time detection and delineation of relevant structures during third-space endoscopy. Methods: 5470 still images from 59 full-length videos (47 ESD, 12 POEM) were annotated.
179,681 additional unlabeled images were added to the training dataset. Consequently, a DeepLabv3+ neural network architecture was trained with the ECMT semi-supervised algorithm (under review elsewhere). Evaluation of vessel detection was performed on a dataset of 101 standardized video clips from 15 separate third-space endoscopy videos with 200 predefined blood vessels. Results: Internal validation yielded an overall mean Dice score of 85\% (68\% for blood vessels, 86\% for the submucosal layer, 88\% for the muscle layer). On the video test data, the overall vessel detection rate (VDR) was 94\% (96\% for ESD, 74\% for POEM). The median overall vessel detection time (VDT) was 0.32 sec (0.3 sec for ESD, 0.62 sec for POEM). Conclusions: Evaluation of the developed algorithm on a video test dataset showed a high VDR and quick VDT, especially for ESD. Further research will focus on a possible clinical benefit of the AI application for VDR and VDT during third-space endoscopy.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{FranzKatzkyNeumannetal, author = {Franz, Daniela and Katzky, Uwe and Neumann, Sabine and Perret, Jerome and Hofer, Mathias and Huber, Michaela and Schmitt-R{\"u}th, Stephanie and Haug, Sonja and Weber, Karsten and Prinzen, Martin and Palm, Christoph and Wittenberg, Thomas}, title = {Haptisches Lernen f{\"u}r Cochlea Implantationen}, series = {15. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2016), Tagungsband, 2016, Bern, 29.09. - 01.10.}, booktitle = {15. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2016), Tagungsband, 2016, Bern, 29.09. - 01.10.}, pages = {21 -- 26}, abstract = {The implantation of a cochlear implant requires surgical access through the petrosal bone and the tympanic cavity of the patient. The surgeon has a limited view of the operating area, which moreover contains many risk structures. To perform a cochlear implantation safely and without errors, extensive theoretical and practical (partly on-the-job) training as well as many years of experience are necessary. Using real clinical CT/MRI data of the inner and middle ear and the interactive segmentation of the structures depicted therein (nerves, cochlea, auditory ossicles, ...), the HaptiVisT project realizes a haptic-visual training system for the implantation of inner and middle ear implants, designed as a so-called serious game with immersive didactics. The demonstrator is evaluated for fitness of purpose both during the development process and based on outcomes, in order to uncover possible technical or didactic flaws before completion of the system. Three staggered evaluations focus on surgical, didactic and haptic-ergonomic acceptance criteria.}, subject = {Cochlea-Implantat}, language = {de} } @inproceedings{SouzaJrHookPapaetal, author = {Souza Jr., Luis Antonio de and Hook, Christian and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Barrett's Esophagus Analysis Using SURF Features}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14.
M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_34}, pages = {141 -- 146}, abstract = {The development of adenocarcinoma in Barrett's esophagus is difficult to detect by endoscopic surveillance of patients with signs of dysplasia. Computer assisted diagnosis of endoscopic images (CAD) could therefore be most helpful in the demarcation and classification of neoplastic lesions. In this study we tested the feasibility of a CAD method based on Speeded up Robust Feature Detection (SURF). A given database containing 100 images from 39 patients served as benchmark for feature based classification models. Half of the images had previously been diagnosed by five clinical experts as being "cancerous", the other half as "non-cancerous". Cancerous image regions had been visibly delineated (masked) by the clinicians. SURF features acquired from full images as well as from masked areas were utilized for the supervised training and testing of an SVM classifier. The predictive accuracy of the developed CAD system is illustrated by sensitivity and specificity values. The results based on full image matching where 0.78 (sensitivity) and 0.82 (specificity) were achieved, while the masked region approach generated results of 0.90 and 0.95, respectively.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} }