@misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and R{\"o}mmele, Christoph and Nagl, Sandra and Schnoy, Elisabeth and Messmann, Helmut and Ebigbo, Alanna and Palm, Christoph}, title = {Artificial intelligence as a tool in the detection of the papillary ostium during ERCP}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783138}, pages = {S198}, abstract = {Aims Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard in the diagnosis as well as treatment of diseases of the pancreatobiliary tract. However, it is technically complex and has a relatively high complication rate. In particular, cannulation of the papillary ostium remains challenging. The aim of this study is to examine whether a deep-learning algorithm can reliably detect the major duodenal papilla and, in particular, the papillary ostium, and could therefore be a valuable tool for inexperienced endoscopists, particularly in training situations. Methods We analyzed a total of 654 retrospectively collected images of 85 patients. Both the major duodenal papilla and the ostium were then segmented. Afterwards, a neural network was trained using a deep-learning algorithm. A 5-fold cross-validation was performed. Subsequently, we ran the algorithm on 5 prospectively collected videos of ERCPs. Results 5-fold cross-validation on the 654 labeled images resulted in an F1 value of 0.8007, a sensitivity of 0.8409 and a specificity of 0.9757 for the class papilla, and an F1 value of 0.5724, a sensitivity of 0.5456 and a specificity of 0.9966 for the class ostium. Across both classes (papilla and ostium), the average F1 value was 0.6866, the sensitivity 0.6933 and the specificity 0.9861. In 100\% of cases, the AI-detected localization of the papillary ostium in the prospectively collected videos corresponded to the localization of the cannulation performed by the endoscopist. Conclusions In the present study, the neural network was able to identify the major duodenal papilla with high sensitivity and high specificity. In detecting the papillary ostium, the sensitivity was notably lower. However, when used on videos, the AI was able to identify the location of the subsequent cannulation with 100\% accuracy. In the future, the neural network will be trained with more data. Thus, a suitable tool for ERCP could be established, especially for training situations.}, language = {en} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests the possibility of intraprocedural phase recognition in surgical operations as well as in endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD) by AI algorithms. The precise measurement of intraprocedural phase distribution may deepen the understanding of the procedure.
Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364,385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase; phases included scope manipulation, marking, injection, application of electrical current and bleeding. Each frame could be allocated to only one category. This training dataset was used to train a Video Swin transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29,801 single frames. Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\% and 87.55\% for scope manipulation, marking, injection, application of electrical current and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\% and 84.76\% were measured for the same phases. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} } @misc{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD) that can easily be missed if the pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). A separate dataset was used for external validation, as well as for determining the detection performance of experts, trainees and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\% and 84\% for sensitivity, specificity and accuracy, respectively. External validation showed 90\%, 76\% and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support and experts in endoscopy. AI support in trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection rates for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit.
Stable performance of the algorithm on "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Single frame workflow recognition during endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {57}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806324}, pages = {S511}, abstract = {Aims Precise surgical phase recognition and evaluation may improve our understanding of complex endoscopic procedures. Furthermore, quality control measurements and endoscopy training could benefit from objective descriptions of surgical phase distributions. Therefore, we aimed to develop an artificial intelligence algorithm for frame-by-frame operational phase recognition during endoscopic submucosal dissection (ESD). Methods Full-length ESD videos from 31 patients, comprising 6,297,782 single images, were collected retrospectively. Videos were annotated on a frame-by-frame basis for the operational macro-phases diagnostics, marking, injection, dissection and bleeding. Further subphases were the application of electrical current, visible injection of fluid into the submucosal space and scope manipulation, leading to 11 phases in total. 4,975,699 frames (21 patients) were used to train a Video Swin transformer, with uniform frame sampling for temporal information. Hyperparameter tuning was performed with a further 897,325 frames (6 patients), while 424,758 frames (4 patients) were used for validation. Results The overall F1 scores on the test dataset for the macro-phases and all 11 phases were 0.96 and 0.90, respectively. The recall values for diagnostics, marking, injection, dissection and bleeding were 1.00, 1.00, 0.95, 0.96 and 0.93, respectively. Conclusions The algorithm classified operational phases during ESD with high accuracy. A precise evaluation of phase distribution may allow for the development of objective quality metrics for quality control and training.}, language = {en} } @article{WeigertPietrzykMuelleretal., author = {Weigert, Markus and Pietrzyk, Uwe and M{\"u}ller, Stefan P. and Palm, Christoph and Beyer, Thomas}, title = {Whole-body PET/CT imaging}, series = {Zeitschrift f{\"u}r Medizinische Physik}, volume = {18}, journal = {Zeitschrift f{\"u}r Medizinische Physik}, number = {1}, doi = {10.1016/j.zemedi.2007.07.004}, pages = {59 -- 66}, abstract = {Aim Combined whole-body (WB) PET/CT imaging provides better overall co-registration compared to separate CT and PET. However, in clinical routine, local PET-CT mis-registration cannot be avoided. Thus, the reconstructed PET tracer distribution may be biased when the misaligned CT transmission data are used for CT-based attenuation correction (CT-AC). We investigate the feasibility of retrospective co-registration techniques to align CT and PET images prior to CT-AC, thus potentially improving the quality of combined PET/CT imaging in clinical routine. Methods First, using a commercial software registration package, CT images were aligned to the uncorrected PET data by rigid and non-rigid registration methods.
Co-registration accuracy of both alignment approaches was assessed by reviewing the PET tracer uptake patterns (visual, linked cursor display) following attenuation correction based on the original and co-registered CT. Second, we investigated non-rigid registration based on a prototype ITK implementation of the B-spline algorithm on a similarly targeted MR-CT registration task, where it showed promising results. Results Manual rigid, landmark-based co-registration introduced unacceptable misalignment, in particular in peripheral areas of the whole-body images. Manual, non-rigid landmark-based co-registration prior to CT-AC was successful with minor loco-regional distortions. Nevertheless, neither rigid nor non-rigid automatic co-registration based on the mutual information image-to-image metric succeeded in co-registering the CT and noAC-PET images. In contrast to widely available commercial registration software, our implementation of an alternative automated, non-rigid B-spline co-registration technique yielded promising results in this setting with MR-CT data. Conclusion In clinical PET/CT imaging, retrospective registration of CT and uncorrected PET images may improve the quality of the AC-PET images. As of today, no validated and clinically viable commercial registration software is in routine use. This has triggered our efforts in pursuing new approaches to a validated, non-rigid co-registration algorithm applicable to whole-body PET/CT imaging, of which first results are presented here. This approach appears suitable for applications in retrospective WB-PET/CT alignment.
}, subject = {Positronen-Emissions-Tomografie}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fernandez-Esparrach, G. and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not reach the stand-alone performance of the AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust in and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed 96 standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized into two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI, with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI.
Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, despite comparable outcomes. These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @misc{ZellmerRauberProbstetal2., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Birzle, Lisa and Aehling, Niklas and Schulz, Dominik Andreas Helmut Otto and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {K{\"u}nstliche Intelligenz als Hilfsmittel zur Detektion der Papilla duodeni major und des papill{\"a}ren Ostiums w{\"a}hrend der ERCP}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {63}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {5}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806882}, pages = {e295}, abstract = {Introduction Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard in the endoscopic treatment of diseases of the pancreatobiliary tract. However, it is technically demanding, difficult to learn and associated with a relatively high complication rate. This feasibility study therefore examines whether a deep-learning algorithm can reliably detect the papilla and the ostium, and whether it could be a suitable aid for endoscopists, particularly in training situations. Materials and methods A total of 1534 ERCP images from 134 patients were analyzed; both the major duodenal papilla and the ostium were segmented. A neural network was then trained using a deep-learning algorithm. The algorithm was tested by five-fold cross-validation. Results On the 1534 labeled images, an F1 value of 0.7996, a sensitivity of 0.8488 and a specificity of 0.9822 were achieved for the class papilla. For the class ostium, an F1 value of 0.5198, a sensitivity of 0.5945 and a specificity of 0.9974 were obtained. Across both classes (papilla and ostium), the F1 value was 0.6593, the sensitivity 0.7216 and the specificity 0.9898. Conclusion In this feasibility study, the neural network identified the major duodenal papilla with high sensitivity and very high specificity. The ostium, however, was detected with considerably lower sensitivity. In the future, the training dataset will be extended with videos and clinical data to improve the network's performance.
In the long term, this could establish a suitable assistance system for ERCP, particularly for training situations.}, language = {de} } @article{DehnhardtPalmVietenetal., author = {Dehnhardt, Markus and Palm, Christoph and Vieten, Andrea and Bauer, Andreas and Pietrzyk, Uwe}, title = {Quantifying the A1AR distribution in peritumoral zones around experimental F98 and C6 rat brain tumours}, series = {Journal of Neuro-Oncology}, volume = {85}, journal = {Journal of Neuro-Oncology}, doi = {10.1007/s11060-007-9391-6}, pages = {49 -- 63}, abstract = {Quantification of growth in experimental F98 and C6 rat brain tumours was performed on 51 rat brains, 17 of which were further assessed by 3D tumour reconstruction. Brains were cryosliced and radio-labelled by receptor autoradiography with 3H-PK11195 [(1-(2-chlorophenyl)-N-methyl-N-(1-methyl-propylene)-3-isoquinoline-carboxamide)], a ligand of the peripheral-type benzodiazepine receptor (pBR). Manually segmented and automatically registered tumours were 3D-reconstructed for volumetric comparison on the basis of 3H-PK11195-based tumour recognition. Furthermore, automatically computed areas of a 300 μm inner (marginal) zone as well as 300 μm and 600 μm outer tumour zones were quantified. These three regions were transferred onto adjacent slices that had been labelled by receptor autoradiography with the A1 adenosine receptor (A1AR) ligand 3H-CPFPX (3H-8-cyclopentyl-3-(3-fluorpropyl)-1-propylxanthine) for quantitative assessment of A1AR in the three tumour zones. Hence, a method is described for quantifying various receptor protein systems in the tumour as well as in the marginal invasive zones around experimentally implanted rat brain tumours, and for representing them in the tumour microenvironment and in 3D space. Furthermore, a tool was developed for automatically reading out radio-labelled rat brain slices from autoradiographic films; the slices were reconstructed into a consistent 3D tumour model and the zones around the tumour were visualized. A1AR expression was found to depend on the tumour volume in C6 animals, but to be independent of the time of tumour development. In F98 animals, a significant increase in A1AR receptor protein was found in the peritumoural zone as a function of the time of tumour development and tumour volume.}, subject = {Hirntumor}, language = {en} } @article{MangSchnabelCrumetal., author = {Mang, Andreas and Schnabel, Julia A. and Crum, William R. and Modat, Marc and Camara-Rey, Oscar and Palm, Christoph and Caseiras, Gisele Brasil and J{\"a}ger, H. Rolf and Ourselin, S{\´e}bastien and Buzug, Thorsten M. and Hawkes, David J.}, title = {Consistency of parametric registration in serial MRI studies of brain tumor progression}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {3}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {3-4}, doi = {10.1007/s11548-008-0234-5}, pages = {201 -- 211}, abstract = {Object The consistency of parametric registration in multi-temporal magnetic resonance (MR) imaging studies was evaluated. Materials and methods Serial MRI scans of adult patients with a brain tumor (glioma) were aligned by parametric registration. The performance of low-order spatial alignment (6/9/12 degrees of freedom) of different serial 3D weighted MR images is evaluated. A registration protocol for the alignment of all images to one reference coordinate system at baseline is presented.
Registration results were evaluated for both multimodal intra-timepoint and mono-modal multi-temporal registration. The latter case might present a challenge to automatic intensity-based registration algorithms due to ill-defined correspondences. The performance of our algorithm was assessed by testing the inverse registration consistency. Four different similarity measures were evaluated to assess consistency. Results Careful visual inspection suggests that images are well aligned, but their consistency may be imperfect. Sub-voxel inconsistency within the brain was found for all similarity measures used for parametric multi-temporal registration. T1-weighted images were most reliable for establishing spatial correspondence between different timepoints. Conclusions The parametric registration algorithm is feasible for use in this application. The sub-voxel resolution mean displacement error of the registration transformations demonstrates that the algorithm converges to an almost identical solution for forward and reverse registration.}, subject = {Kernspintomografie}, language = {en} } @inproceedings{PalmGraemeCrumetal., author = {Palm, Christoph and Penney, Graeme P. and Crum, William R. and Schnabel, Julia A. and Pietrzyk, Uwe and Hawkes, David J.}, title = {Fusion of Rat Brain Histology and MRI using Weighted Multi-Image Mutual Information}, series = {Proceedings of the SPIE Medical Imaging 6914: Image Processing 69140M}, booktitle = {Proceedings of the SPIE Medical Imaging 6914: Image Processing 69140M}, number = {6914}, doi = {10.1117/12.770605}, pages = {69140M-1 -- 69140M-9}, abstract = {Fusion of histology and MRI is frequently demanded in biomedical research to study in vitro tissue properties in an in vivo reference space. Distortions and artifacts caused by cutting and staining of histological slices, as well as differences in spatial resolution, make even the rigid fusion a difficult task. State-of-the-art methods start with a mono-modal restacking yielding a histological pseudo-3D volume. The 3D information of the MRI reference is considered subsequently. However, consistency of the histology volume and consistency with the corresponding MRI seem to be diametrically opposed goals. Therefore, we propose a novel fusion framework optimizing histology/histology and histology/MRI consistency at the same time, finding a balance between both goals. Method - Direct slice-to-slice correspondence even in irregularly-spaced cutting sequences is achieved by registration-based interpolation of the MRI. Introducing a weighted multi-image mutual information metric (WI), adjacent histology and corresponding MRI are taken into account at the same time. Therefore, the reconstruction of the histological volume as well as the fusion with the MRI is done in a single step. Results - Based on two data sets with more than 110 single registrations in all, the results are evaluated quantitatively based on Tanimoto overlap measures and qualitatively by showing the fused volumes. In comparison to other multi-image metrics, the reconstruction based on WI is significantly improved. We evaluated different parameter settings with emphasis on the weighting term steering the balance between intra- and inter-modality consistency.}, subject = {Kernspintomografie}, language = {en} } @article{DesernoHandelsMaierHeinetal., author = {Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H.
and Mersmann, Sven and Palm, Christoph and Tolxdorff, Thomas and Wagenknecht, Gudrun and Wittenberg, Thomas}, title = {Viewpoints on Medical Image Processing}, series = {Current Medical Imaging Reviews}, volume = {9}, journal = {Current Medical Imaging Reviews}, number = {2}, doi = {10.2174/1573405611309020002}, pages = {79 -- 88}, abstract = {Medical image processing provides core innovation for medical imaging. This paper focuses on recent developments from science to applications, analyzing the past fifteen years of the proceedings of the German annual meeting on medical image processing (BVM). Furthermore, some members of the program committee present their personal points of view: (i) multi-modality for imaging and diagnosis, (ii) analysis of diffusion-weighted imaging, (iii) model-based image analysis, (iv) registration of section images, (v) from images to information in digital endoscopy, and (vi) virtual reality and robotics. Medical imaging and medical image computing are seen as a field of rapid development, with clear trends toward integrated applications in diagnostics, treatment planning and treatment.}, subject = {Bildgebendes Verfahren}, language = {en} }