@article{DesernoHandelsMaierHeinetal.,
  author = {Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Mersmann, Sven and Palm, Christoph and Tolxdorff, Thomas and Wagenknecht, Gudrun and Wittenberg, Thomas},
  title = {Viewpoints on Medical Image Processing},
  series = {Current Medical Imaging Reviews},
  volume = {9},
  journal = {Current Medical Imaging Reviews},
  number = {2},
  doi = {10.2174/1573405611309020002},
  pages = {79 -- 88},
  abstract = {Medical image processing provides core innovation for medical imaging. This paper focuses on recent developments from science to applications, analyzing the past fifteen years of the proceedings of the German annual meeting on medical image processing (BVM). Furthermore, some members of the program committee present their personal points of view: (i) multi-modality for imaging and diagnosis, (ii) analysis of diffusion-weighted imaging, (iii) model-based image analysis, (iv) registration of section images, (v) from images to information in digital endoscopy, and (vi) virtual reality and robotics. Medical imaging and medical image computing are seen as a field of rapid development with clear trends toward integrated applications in diagnostics, treatment planning and treatment.},
  subject = {Bildgebendes Verfahren},
  language = {en}
}

@inproceedings{WeberDoenitzBrawanskietal.,
  author = {Weber, Joachim and Doenitz, Christian and Brawanski, Alexander and Palm, Christoph},
  title = {Data-Parallel MRI Brain Segmentation in Clinical Use},
  series = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck},
  publisher = {Springer},
  address = {Berlin},
  doi = {10.1007/978-3-662-46224-9_67},
  pages = {389 -- 394},
  abstract = {Structural MRI brain analysis and segmentation is a crucial part of the daily routine in neurosurgery for intervention planning. As an example, the free software FSL-FAST (FMRIB Software Library - FMRIB's Automated Segmentation Tool), version 4, is used for segmentation of brain tissue types. To speed up the segmentation procedure by parallel execution, we transferred FSL-FAST to a General Purpose Graphics Processing Unit (GPGPU) using the Open Computing Language (OpenCL) [1]. The steps necessary for parallelization initially resulted in substantially different and less useful results. Therefore, the underlying methods were revised and adapted, at the cost of computational overhead. Nevertheless, we achieved a speed-up factor of 3.59 from CPU to GPGPU execution while providing similarly useful or even better results.},
  subject = {Kernspintomografie},
  language = {en}
}

@inproceedings{SzaloZehnerPalm,
  author = {Szalo, Alexander Eduard and Zehner, Alexander and Palm, Christoph},
  title = {GraphMIC: Medizinische Bildverarbeitung in der Lehre},
  series = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck},
  publisher = {Springer},
  address = {Berlin},
  doi = {10.1007/978-3-662-46224-9_68},
  pages = {395 -- 400},
  abstract = {Teaching medical image processing conveys knowledge across a broad spectrum of methods.
Besides the fundamentals of the individual techniques, students should develop a feeling for a suitable execution order and its effect on medical image data. The complexity of the methods requires advanced programming skills, so even simple operations involve considerable programming effort. The software GraphMIC provides image processing operations in the form of interactive nodes and allows arranging, parameterizing and executing complex processing sequences in a graph. By focusing on the design of a pipeline rather than on language- and framework-specific implementation details, fundamental principles of image processing can be learned in an illustrative way. In this contribution, we compare visual programming with GraphMIC to the native implementation of equivalent functions. The application, developed in C++, is based on Qt, ITK, OpenCV, VTK and MITK.},
  subject = {Bildverarbeitung},
  language = {de}
}

@inproceedings{WoehlHuberLoibletal.,
  author = {W{\"o}hl, Rebecca and Huber, Michaela and Loibl, Markus and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Palm, Christoph},
  title = {The Impact of Semi-Automated Segmentation and 3D Analysis on Testing New Osteosynthesis Material},
  series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg},
  publisher = {Springer},
  address = {Berlin},
  doi = {10.1007/978-3-662-54345-0_30},
  pages = {122 -- 127},
  abstract = {A new protocol for testing osteosynthesis material postoperatively, combining semi-automated segmentation and 3D analysis of surface meshes, is proposed. Through various steps of transformation and measuring, objective data can be collected. In this study the specifications of a locking plate used for mediocarpal arthrodesis of the wrist were examined. The results show that union of the lunate, triquetrum, hamate and capitate was achieved and that the plate is comparable to coexisting arthrodesis systems. Additionally, it was shown that the complications detected correlate with the clinical outcome. In summary, this protocol is considered beneficial and should be taken into account in further studies.},
  subject = {Osteosynthese},
  language = {en}
}

@inproceedings{MendelEbigboProbstetal.,
  author = {Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph},
  title = {Barrett's Esophagus Analysis Using Convolutional Neural Networks},
  series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg},
  publisher = {Springer},
  address = {Berlin},
  doi = {10.1007/978-3-662-54345-0_23},
  pages = {80 -- 85},
  abstract = {We propose an automatic approach for early detection of adenocarcinoma in the esophagus. High-definition endoscopic images (50 cancer, 50 Barrett) are partitioned into a dataset containing approximately equal amounts of patches showing cancerous and non-cancerous regions. A deep convolutional neural network is adapted to the data using a transfer learning approach.
The final classification of an image is determined by at least one patch for which the probability of being a cancer patch exceeds a given threshold. The model was evaluated with leave-one-patient-out cross-validation. With a sensitivity and specificity of 0.94 and 0.88, respectively, our findings improve considerably on recently published results for the same image database. Furthermore, the visualization of the class probabilities of each individual patch indicates that our approach might be extensible to the segmentation domain.},
  subject = {Speiser{\"o}hrenkrebs},
  language = {en}
}

@inproceedings{SouzaJrHookPapaetal.,
  author = {Souza Jr., Luis Antonio de and Hook, Christian and Papa, Jo{\~a}o Paulo and Palm, Christoph},
  title = {Barrett's Esophagus Analysis Using SURF Features},
  series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg},
  publisher = {Springer},
  address = {Berlin},
  doi = {10.1007/978-3-662-54345-0_34},
  pages = {141 -- 146},
  abstract = {The development of adenocarcinoma in Barrett's esophagus is difficult to detect by endoscopic surveillance of patients with signs of dysplasia. Computer-assisted diagnosis of endoscopic images (CAD) could therefore be most helpful in the demarcation and classification of neoplastic lesions. In this study we tested the feasibility of a CAD method based on Speeded Up Robust Features (SURF). A given database containing 100 images from 39 patients served as benchmark for feature-based classification models. Half of the images had previously been diagnosed by five clinical experts as "cancerous", the other half as "non-cancerous". Cancerous image regions had been visibly delineated (masked) by the clinicians. SURF features acquired from full images as well as from masked areas were utilized for the supervised training and testing of an SVM classifier. The predictive accuracy of the developed CAD system is illustrated by sensitivity and specificity values. With full-image matching, 0.78 (sensitivity) and 0.82 (specificity) were achieved, while the masked-region approach yielded 0.90 and 0.95, respectively.},
  subject = {Speiser{\"o}hrenkrankheit},
  language = {en}
}

@inproceedings{FranzDreherPrinzenetal.,
  author = {Franz, Daniela and Dreher, Maria and Prinzen, Martin and Teßmann, Matthias and Palm, Christoph and Katzky, Uwe and Perret, Jerome and Hofer, Mathias and Wittenberg, Thomas},
  title = {CT-basiertes virtuelles Fr{\"a}sen am Felsenbein},
  series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen},
  publisher = {Springer},
  address = {Berlin},
  isbn = {978-3-662-56537-7},
  doi = {10.1007/978-3-662-56537-7_51},
  pages = {176 -- 181},
  abstract = {As part of the development of a haptic-visual training system for milling at the petrous bone, a haptic arm and an autostereoscopic 3D monitor are used to allow surgeons the virtual manipulation of bony structures in the context of a so-called serious game.
Among other things, resident physicians should be able to practice milling at the petrous bone for the surgical placement of a cochlear implant as part of their training. For this purpose, the visualization of the virtual milling has to be modeled, implemented and evaluated in real time and as realistically as possible. We use different raycasting methods with linear and nearest-neighbor interpolation and compare the visual quality and frame rates of the methods. All compared methods are real-time capable but differ in their visual quality.},
  subject = {Felsenbein},
  language = {de}
}

@article{WoehlMaierGehmertetal.,
  author = {W{\"o}hl, Rebecca and Maier, Johannes and Gehmert, Sebastian and Palm, Christoph and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Huber, Michaela},
  title = {3D Analysis of Osteosyntheses Material using semi-automated CT Segmentation},
  series = {BMC Musculoskeletal Disorders},
  volume = {19},
  journal = {BMC Musculoskeletal Disorders},
  publisher = {Springer Nature},
  doi = {10.1186/s12891-018-1975-0},
  pages = {1 -- 8},
  abstract = {Background: Scaphoidectomy and midcarpal fusion can be performed using traditional fixation methods like K-wires, staples, screws or different dorsal (non)locking arthrodesis systems. The aim of this study is to test the Aptus four-corner locking plate and to compare the clinical findings to the data revealed by CT scans and semi-automated segmentation. Methods: This is a retrospective review of eleven patients suffering from scapholunate advanced collapse (SLAC) or scaphoid non-union advanced collapse (SNAC) wrist, who received a four-corner fusion between August 2011 and July 2014. The clinical evaluation consisted of measuring the range of motion (ROM), strength and pain on a visual analogue scale (VAS). Additionally, the Disabilities of the Arm, Shoulder and Hand (QuickDASH) and the Mayo Wrist Score were assessed. A computerized tomography (CT) scan of the wrist was obtained six weeks postoperatively. After semi-automated segmentation of the CT scans, the models were post-processed and surveyed. Results: During the six-month follow-up, the mean range of motion (ROM) of the operated wrist was 60°, consisting of 30° extension and 30° flexion. While pain levels decreased significantly, 54\% of grip strength and 89\% of pinch strength were preserved compared to the contralateral healthy wrist. Union could be detected in all CT scans of the wrist. While X-ray pictures obtained postoperatively revealed no pathology, two user-related technical complications were found through the 3D analysis, which correlated with the clinical outcome. Conclusion: Semi-automated segmentation and 3D analysis showed that the plate design lives up to the manufacturer's promises. Overall, this case series confirmed that the plate can compete with the coexisting techniques concerning clinical outcome, union and complication rate.},
  subject = {Handchirurgie},
  language = {en}
}

@inproceedings{MaierHuberKatzkyetal.,
  author = {Maier, Johannes and Huber, Michaela and Katzky, Uwe and Perret, Jerome and Wittenberg, Thomas and Palm, Christoph},
  title = {Force-Feedback-assisted Bone Drilling Simulation Based on CT Data},
  series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13.
M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-56537-7_78}, pages = {291 -- 296}, abstract = {In order to fix a fracture using minimally invasive surgery approaches, surgeons are drilling complex and tiny bones with a 2 dimensional X-ray as single imaging modality in the operating room. Our novel haptic force-feedback and visual assisted training system will potentially help hand surgeons to learn the drilling procedure in a realistic visual environment. Within the simulation, the collision detection as well as the interaction between virtual drill, bone voxels and surfaces are important. In this work, the chai3d collision detection and force calculation algorithms are combined with a physics engine to simulate the bone drilling process. The chosen Bullet-Physics-Engine provides a stable simulation of rigid bodies, if the collision model of the drill and the tool holder is generated as a compound shape. Three haptic points are added to the K-wire tip for removing single voxels from the bone. For the drilling process three modes are proposed to emulate the different phases of drilling in restricting the movement of a haptic device.}, subject = {Handchirurgie}, language = {en} } @article{GrassmannMengelkampBrandletal., author = {Graßmann, Felix and Mengelkamp, Judith and Brandl, Caroline and Harsch, Sebastian and Zimmermann, Martina E. and Linkohr, Birgit and Peters, Annette and Heid, Iris M. and Palm, Christoph and Weber, Bernhard H. F.}, title = {A Deep Learning Algorithm for Prediction of Age-Related Eye Disease Study Severity Scale for Age-Related Macular Degeneration from Color Fundus Photography}, series = {Ophtalmology}, volume = {125}, journal = {Ophtalmology}, number = {9}, publisher = {Elsevier}, doi = {10.1016/j.ophtha.2018.02.037}, pages = {1410 -- 1420}, abstract = {Purpose Age-related macular degeneration (AMD) is a common threat to vision. While classification of disease stages is critical to understanding disease risk and progression, several systems based on color fundus photographs are known. Most of these require in-depth and time-consuming analysis of fundus images. Herein, we present an automated computer-based classification algorithm. Design Algorithm development for AMD classification based on a large collection of color fundus images. Validation is performed on a cross-sectional, population-based study. Participants. We included 120 656 manually graded color fundus images from 3654 Age-Related Eye Disease Study (AREDS) participants. AREDS participants were >55 years of age, and non-AMD sight-threatening diseases were excluded at recruitment. In addition, performance of our algorithm was evaluated in 5555 fundus images from the population-based Kooperative Gesundheitsforschung in der Region Augsburg (KORA; Cooperative Health Research in the Region of Augsburg) study. Methods. We defined 13 classes (9 AREDS steps, 3 late AMD stages, and 1 for ungradable images) and trained several convolution deep learning architectures. An ensemble of network architectures improved prediction accuracy. An independent dataset was used to evaluate the performance of our algorithm in a population-based study. Main Outcome Measures. κ Statistics and accuracy to evaluate the concordance between predicted and expert human grader classification. Results. 
A network ensemble of 6 different neural net architectures predicted the 13 classes in the AREDS test set with a quadratic weighted κ of 92\% (95\% confidence interval, 89\%-92\%) and an overall accuracy of 63.3\%. In the independent KORA dataset, images wrongly classified as AMD were mainly the result of a macular reflex observed in young individuals. By restricting the KORA analysis to individuals >55 years of age and prior exclusion of other retinopathies, the weighted and unweighted κ increased to 50\% and 63\%, respectively. Importantly, the algorithm detected 84.2\% of all fundus images with definite signs of early or late AMD. Overall, 94.3\% of healthy fundus images were classified correctly. Conclusions: Our deep learning algorithm achieved a weighted κ outperforming human graders in the AREDS study and is suitable to classify AMD fundus images in other datasets of individuals >55 years of age.},
  subject = {Senile Makuladegeneration},
  language = {en}
}