@misc{TackKobayashiGaueretal2015a, author = {Tack, Alexander and Kobayashi, Yuske and Gauer, Tobias and Schlaefer, Alexander and Werner, Ren{\'e}}, title = {Bewegungsfeldsch{\"a}tzung in artefaktbehafteten 4D-CT-Bilddaten: Vergleich von paar- und gruppenweiser Registrierung}, series = {21st Annual Meeting of the German Society for Radiation Oncology}, journal = {Strahlentherapie und Onkologie}, volume = {191}, number = {Supplement 1}, publisher = {Springer}, doi = {10.1007/s00066-015-0847-x}, pages = {65 -- 65}, abstract = {In der Strahlentherapie von Lungentumoren kann mittels Dosisakkumulation der Einfluss von Atembewegungen auf statisch geplante Dosisverteilungen abgesch{\"a}tzt werden. Grundlage sind 4D-CT-Daten des Patienten, aus denen mittels nicht-linearer Bildregistrierung eine Sequenz von Bewegungsfeldern berechnet wird. Typischerweise werden Methoden der paarweisen Bildregistrierung eingesetzt, d.h. konsekutiv zwei Atemphasen aufeinander registriert. Hierbei erfolgt i.d.R. eine physiologisch nicht plausible Anpassung der Felder an CT-Bewegungsartefakte. Gruppenweise Registrierungsans{\"a}tze ber{\"u}cksichtigen hingegen gleichzeitig s{\"a}mtliche Bilddaten des 4D-CT-Scans und erm{\"o}glichen die Integration von zeitlichen Konsistenzbetrachtungen. In diesem Beitrag wird der potentielle Vorteil der gruppen- im Vergleich zur paarweisen Registrierung in artefaktbehafteten 4D-CT-Daten untersucht.}, language = {de} } @inproceedings{TackKobayashiGaueretal2015b, author = {Tack, Alexander and Kobayashi, Yuske and Gauer, Tobias and Schlaefer, Alexander and Werner, Ren{\'e}}, title = {Groupwise Registration for Robust Motion Field Estimation in Artifact-Affected 4D CT Images}, series = {ICART: Imaging and Computer Assistance in Radiation Therapy. A workshop held on October 9, 2015 as part of MICCAI 2015, Munich, Germany}, booktitle = {ICART: Imaging and Computer Assistance in Radiation Therapy. A workshop held on October 9, 2015 as part of MICCAI 2015, Munich, Germany}, pages = {18 -- 25}, abstract = {Precise voxel trajectory estimation in 4D CT images is a prerequisite for reliable dose accumulation during 4D treatment planning. 4D CT image data is, however, often affected by motion artifacts, and applying standard pairwise registration to such data sets bears the risk of aligning anatomical structures to artifacts - with physiologically unrealistic trajectories being the consequence. In this work, the potential of a novel non-linear hybrid intensity- and feature-based groupwise registration method for robust motion field estimation in artifact-affected 4D CT image data is investigated. The overall registration performance is evaluated on the DIR-lab datasets; its robustness when applied to artifact-affected data sets is analyzed using clinically acquired data sets with and without artifacts. The proposed registration approach achieves an accuracy comparable to the state-of-the-art (subvoxel accuracy), but smoother voxel trajectories compared to pairwise registration. Even more importantly, it maintained accuracy and trajectory smoothness in the presence of image artifacts - in contrast to standard pairwise registration, which yields higher landmark-based registration errors and a loss of trajectory smoothness when applied to artifact-affected data sets.}, language = {en} } @article{SekuboyinaHusseiniBayatetal., author = {Sekuboyina, Anjany and Husseini, Malek E.
and Bayat, Amirhossein and L{\"o}ffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\'a}s P{\'e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, {\'E}lodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H. and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J. and Mumme, Raymond P. and Court, Laurence E. and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huynh, L{\^e} Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C. and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.}, title = {VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images}, series = {Medical Image Analysis}, volume = {73}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2021.102166}, abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.}, language = {en} } @article{SekuboyinaBayatHusseinietal., author = {Sekuboyina, Anjany and Bayat, Amirhossein and Husseini, Malek E.
and L{\"o}ffler, Maximilian and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\´a}s P{\´e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Wei, Qingyue and Brown, Kevin and Wolf, Matthias and Kirszenberg, Alexandre and Puybareau, {\´E}lodie and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.}, title = {VerSe: A Vertebrae Labelling and Segmentation Benchmark for Multi-detector CT Images}, series = {arXiv}, journal = {arXiv}, language = {en} } @inproceedings{AmbellanTackWilsonetal., author = {Ambellan, Felix and Tack, Alexander and Wilson, Dave and Anglin, Carolyn and Lamecker, Hans and Zachow, Stefan}, title = {Evaluating two methods for Geometry Reconstruction from Sparse Surgical Navigation Data}, series = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, volume = {16}, booktitle = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65339}, pages = {24 -- 30}, abstract = {In this study we investigate methods for fitting a Statistical Shape Model (SSM) to intraoperatively acquired point cloud data from a surgical navigation system. We validate the fitted models against the pre-operatively acquired Magnetic Resonance Imaging (MRI) data from the same patients. We consider a cohort of 10 patients who underwent navigated total knee arthroplasty. As part of the surgical protocol the patients' distal femurs were partially digitized. All patients had an MRI scan two months pre-operatively. The MRI data were manually segmented and the reconstructed bone surfaces used as ground truth against which the fit was compared. Two methods were used to fit the SSM to the data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM). For both approaches, the difference between model fit and ground truth surface averaged less than 1.7 mm and excellent correspondence with the distal femoral morphology can be demonstrated.}, language = {en} } @article{BernardSalamancaThunbergetal., author = {Bernard, Florian and Salamanca, Luis and Thunberg, Johan and Tack, Alexander and Jentsch, Dennis and Lamecker, Hans and Zachow, Stefan and Hertel, Frank and Goncalves, Jorge and Gemmar, Peter}, title = {Shape-aware Surface Reconstruction from Sparse Data}, series = {arXiv}, journal = {arXiv}, pages = {1602.08425v1}, abstract = {The reconstruction of an object's shape or surface from a set of 3D points is a common topic in materials and life sciences, computationally handled in computer graphics. Such points usually stem from optical or tactile 3D coordinate measuring equipment. Surface reconstruction also appears in medical image analysis, e.g. in anatomy reconstruction from tomographic measurements or the alignment of intra-operative navigation and preoperative planning data. 
In contrast to mere 3D point clouds, medical imaging yields contextual information on the 3D point data that can be used to adopt prior information on the shape that is to be reconstructed from the measurements. In this work we propose to use a statistical shape model (SSM) as a prior for surface reconstruction. The prior knowledge is represented by a point distribution model (PDM) that is associated with a surface mesh. Using the shape distribution that is modelled by the PDM, we reformulate the problem of surface reconstruction from a probabilistic perspective based on a Gaussian Mixture Model (GMM). In order to do so, the given measurements are interpreted as samples of the GMM. By using mixture components with anisotropic covariances that are oriented according to the surface normals at the PDM points, a surface-based fitting is accomplished. By estimating the parameters of the GMM in a maximum a posteriori manner, the reconstruction of the surface from the given measurements is achieved. Extensive experiments suggest that our proposed approach leads to superior surface reconstructions compared to Iterative Closest Point (ICP) methods.}, language = {en} } @article{BernardSalamancaThunbergetal2017, author = {Bernard, Florian and Salamanca, Luis and Thunberg, Johan and Tack, Alexander and Jentsch, Dennis and Lamecker, Hans and Zachow, Stefan and Hertel, Frank and Goncalves, Jorge and Gemmar, Peter}, title = {Shape-aware Surface Reconstruction from Sparse 3D Point-Clouds}, series = {Medical Image Analysis}, volume = {38}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2017.02.005}, pages = {77 -- 89}, abstract = {The reconstruction of an object's shape or surface from a set of 3D points plays an important role in medical image analysis, e.g. in anatomy reconstruction from tomographic measurements or in the process of aligning intra-operative navigation and preoperative planning data. In such scenarios, one usually has to deal with sparse data, which significantly aggravates the problem of reconstruction. However, medical applications often provide contextual information about the 3D point data that allow to incorporate prior knowledge about the shape that is to be reconstructed. To this end, we propose the use of a statistical shape model (SSM) as a prior for surface reconstruction. The SSM is represented by a point distribution model (PDM), which is associated with a surface mesh. Using the shape distribution that is modelled by the PDM, we formulate the problem of surface reconstruction from a probabilistic perspective based on a Gaussian Mixture Model (GMM). In order to do so, the given points are interpreted as samples of the GMM. By using mixture components with anisotropic covariances that are "oriented" according to the surface normals at the PDM points, a surface-based fitting is accomplished. Estimating the parameters of the GMM in a maximum a posteriori manner yields the reconstruction of the surface from the given data points.
We compare our method to the extensively used Iterative Closest Points method on several different anatomical datasets/SSMs (brain, femur, tibia, hip, liver) and demonstrate superior accuracy and robustness on sparse data.}, language = {en} } @misc{Tack, type = {Master Thesis}, author = {Tack, Alexander}, title = {Gruppenweise Registrierung zur robusten Bewegungsfeldsch{\"a}tzung in artefaktbehafteten 4D-CT-Bilddaten}, abstract = {Das Ziel der Strahlentherapie ist, eine m{\"o}glichst hohe Dosis in den Tumor zu applizieren und zeitgleich die Strahlenexposition des Normalgewebes zu minimieren. Insbesondere bei thorakalen und abdominalen Tumoren treten aufgrund der Atmung w{\"a}hrend der Bestrahlung große, komplexe und patientenspezifisch unterschiedliche Bewegungen der Gewebe auf. Um den Einfluss dieser Bewegung auf die i.d.R. statisch geplante Dosisverteilung abzusch{\"a}tzen, k{\"o}nnen unter Verwendung der nicht-linearen Bildregistrierung anhand von 3D-CT-Aufnahmen eines Atmungszyklus - also 4D-CT-Daten - zun{\"a}chst die Bewegungsfelder f{\"u}r die strahlentherapeutisch relevanten Strukturen, beispielsweise f{\"u}r die Lunge, berechnet werden. Diese Informationen bilden die Grundlage f{\"u}r sogenannte 4D-Dosisberechnungs- oder Dosisakkumulationsverfahren. Deren Genauigkeit h{\"a}ngt aber wesentlich von der Genauigkeit der Bewegungsfeldsch{\"a}tzung ab. Klassisch erfolgt die Berechnung der Bewegungsfelder mittels paarweiser Bildregistrierung, womit f{\"u}r die Berechnung des Bewegungsfeldes zwischen zwei Bildern im Allgemeinen eine sehr hohe Genauigkeit erreicht wird. Auch f{\"u}r CT-Bilder, die Bewegungsartefakte, wie beispielsweise doppelte oder unvollst{\"a}ndige Strukturen, enthalten, wird unter Verwendung der paarweisen Bildregistrierung im Kontext der Registrierung eine exakte Abbildung der anatomischen Strukturen zwischen den beiden Bildern erreicht. Dabei erfolgt aber eine physiologisch unplausible Anpassung der Felder an die Artefakte. Bei Verwendung der paarweisen Bildregistrierung m{\"u}ssen weiterhin f{\"u}r einen Atemzyklus die Voxel-Trajektorien aus Bewegungsfeldern zwischen mehreren dreidimensionalen Bildern zusammengesetzt werden. Durch Bewegungsartefakte entsprechen diese Trajektorien dann teilweise keiner nat{\"u}rlichen Bewegung der anatomischen Strukturen. Diese Ungenauigkeit stellt in der klinischen Anwendung ein Problem dar; dies gilt umso mehr, wenn Bewegungsartefakte im Bereich eines Tumors vorliegen. Im Gegensatz zu der paarweisen Registrierung kann mit der gruppenweisen Registrierung das Problem der durch Bewegungsartefakte hervorgerufenen ungenauen Abbildung der physiologischen Gegebenheiten dadurch reduziert werden, dass im Registrierungsprozess Bildinformationen aller Bilder, also in diesem Kontext der CT-Daten zu unterschiedlichen Atemphasen, gleichzeitig genutzt werden. Es kann bereits im Registrierungsprozess eine zeitliche Glattheit der Voxel-Trajektorien gefordert werden. In dieser Arbeit wird eine Methode zur B-Spline-basierten zeitlich regularisierten gruppenweisen Registrierung entwickelt. Die Genauigkeit der entwickelten Methode wird f{\"u}r frei zug{\"a}ngliche klinische Datens{\"a}tze landmarkenbasiert evaluiert. Dabei wird mit dem Target Registration Error (TRE) die durchschnittliche dreidimensionale euklidische Distanz zwischen den korrespondierenden Landmarken nach Transformation der Landmarken bezeichnet. 
Eine Genauigkeit in der Gr{\"o}ßenordnung von aktuellen paarweisen Registrierungen verdeutlicht die Qualit{\"a}t des vorgestellten Registrierungs-Algorithmus. Anschließend werden die Vorteile der gruppenweisen Registrierung durch Experimente an einem Lungenphantom und an manipulierten, artefaktbehafteten klinischen 4D-CT-Bilddaten demonstriert. Dabei werden unter Verwendung der gruppenweisen Registrierung im Vergleich zu der paarweisen Registrierung glattere Trajektorien berechnet, die der realen Bewegung der anatomischen Strukturen st{\"a}rker entsprechen. F{\"u}r die Patientendaten wird außerdem anhand von automatisch detektierten Landmarken der TRE ausgewertet. Der TRE verschlechterte sich f{\"u}r die paarweise Bildregistrierung unter Vorliegen von Bewegungsartefakten von durchschnittlich 1,30 mm auf 3,94 mm. Auch hier zeigte sich f{\"u}r die gruppenweise Registrierung die Robustheit gegen{\"u}ber Bewegungsartefakten und der TRE verschlechterte sich nur geringf{\"u}gig von 1,45 mm auf 1,71 mm.}, language = {de} } @inproceedings{TackZachow, author = {Tack, Alexander and Zachow, Stefan}, title = {Accurate Automated Volumetry of Cartilage of the Knee using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}, booktitle = {IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}, doi = {10.1109/ISBI.2019.8759201}, pages = {40 -- 43}, abstract = {Volumetry of cartilage of the knee is needed for knee osteoarthritis (KOA) assessment. It is typically performed manually in a tedious and subjective process. We developed a method for an automated, segmentation-based quantification of cartilage volume by employing 3D Convolutional Neural Networks (CNNs). CNNs were trained in a supervised manner using magnetic resonance imaging data and cartilage volumetry readings performed by clinical experts for 1378 subjects provided by the Osteoarthritis Initiative. It was shown that 3D CNNs are able to achieve volume measures comparable to the magnitude of variation between expert readings and the real in vivo situation. In the future, accurate automated cartilage volumetry might support both diagnosis of KOA and longitudinal analysis of KOA progression.}, language = {en} } @inproceedings{EstacioEhlkeTacketal., author = {Estacio, Laura and Ehlke, Moritz and Tack, Alexander and Castro-Gutierrez, Eveling and Lamecker, Hans and Mora, Rensso and Zachow, Stefan}, title = {Unsupervised Detection of Disturbances in 2D Radiographs}, series = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, booktitle = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, doi = {10.1109/ISBI48211.2021.9434091}, pages = {367 -- 370}, abstract = {We present a method based on a generative model for detection of disturbances such as prostheses, screws, zippers, and metals in 2D radiographs. The generative model is trained in an unsupervised fashion using clinical radiographs as well as simulated data, none of which contain disturbances. Our approach employs a latent space consistency loss which has the benefit of identifying similarities and is enforced to reconstruct X-rays without disturbances. In order to detect images with disturbances, an anomaly score is computed, also employing the Fr{\'e}chet distance between the input X-ray and its reconstruction by our generative model. Validation was performed using clinical pelvis radiographs.
We achieved an AUC of 0.77 and 0.83 with clinical and synthetic data, respectively. The results demonstrated good accuracy of our method for detecting outliers as well as the advantage of utilizing synthetic data.}, language = {en} }