@misc{TackShestakovLuedkeetal., author = {Tack, Alexander and Shestakov, Alexey and L{\"u}dke, David and Zachow, Stefan}, title = {A deep multi-task learning method for detection of meniscal tears in MRI data from the Osteoarthritis Initiative database}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-84415}, abstract = {We present a novel and computationally efficient method for the detection of meniscal tears in Magnetic Resonance Imaging (MRI) data. Our method is based on a Convolutional Neural Network (CNN) that operates on a complete 3D MRI scan. Our approach detects the presence of meniscal tears in three anatomical sub-regions (anterior horn, meniscal body, posterior horn) for both the Medial Meniscus (MM) and the Lateral Meniscus (LM) individually. For optimal performance of our method, we investigate how to preprocess the MRI data or how to train the CNN such that only relevant information within a Region of Interest (RoI) of the data volume is taken into account for meniscal tear detection. We propose meniscal tear detection combined with a bounding box regressor in a multi-task deep learning framework to let the CNN implicitly consider the corresponding RoIs of the menisci. We evaluate the accuracy of our CNN-based meniscal tear detection approach on 2,399 Double Echo Steady-State (DESS) MRI scans from the Osteoarthritis Initiative database. In addition, to show that our method is capable of generalizing to other MRI sequences, we also adapt our model to Intermediate-Weighted Turbo Spin-Echo (IW TSE) MRI scans. To judge the quality of our approaches, Receiver Operating Characteristic (ROC) curves and Area Under the Curve (AUC) values are evaluated for both MRI sequences. For the detection of tears in DESS MRI, our method reaches AUC values of 0.94, 0.93, 0.93 (anterior horn, body, posterior horn) in MM and 0.96, 0.94, 0.91 in LM. For the detection of tears in IW TSE MRI data, our method yields AUC values of 0.84, 0.88, 0.86 in MM and 0.95, 0.91, 0.90 in LM. In conclusion, the presented method achieves high accuracy for detecting meniscal tears in both DESS and IW TSE MRI data. Furthermore, our method can be easily trained and applied to other MRI sequences.}, language = {en} } @article{TackShestakovLuedkeetal., author = {Tack, Alexander and Shestakov, Alexey and L{\"u}dke, David and Zachow, Stefan}, title = {A deep multi-task learning method for detection of meniscal tears in MRI data from the Osteoarthritis Initiative database}, series = {Frontiers in Bioengineering and Biotechnology, section Biomechanics}, journal = {Frontiers in Bioengineering and Biotechnology, section Biomechanics}, doi = {10.3389/fbioe.2021.747217}, pages = {28 -- 41}, abstract = {We present a novel and computationally efficient method for the detection of meniscal tears in Magnetic Resonance Imaging (MRI) data. Our method is based on a Convolutional Neural Network (CNN) that operates on a complete 3D MRI scan. Our approach detects the presence of meniscal tears in three anatomical sub-regions (anterior horn, meniscal body, posterior horn) for both the Medial Meniscus (MM) and the Lateral Meniscus (LM) individually. For optimal performance of our method, we investigate how to preprocess the MRI data or how to train the CNN such that only relevant information within a Region of Interest (RoI) of the data volume is taken into account for meniscal tear detection. 
We propose meniscal tear detection combined with a bounding box regressor in a multi-task deep learning framework to let the CNN implicitly consider the corresponding RoIs of the menisci. We evaluate the accuracy of our CNN-based meniscal tear detection approach on 2,399 Double Echo Steady-State (DESS) MRI scans from the Osteoarthritis Initiative database. In addition, to show that our method is capable of generalizing to other MRI sequences, we also adapt our model to Intermediate-Weighted Turbo Spin-Echo (IW TSE) MRI scans. To judge the quality of our approaches, Receiver Operating Characteristic (ROC) curves and Area Under the Curve (AUC) values are evaluated for both MRI sequences. For the detection of tears in DESS MRI, our method reaches AUC values of 0.94, 0.93, 0.93 (anterior horn, body, posterior horn) in MM and 0.96, 0.94, 0.91 in LM. For the detection of tears in IW TSE MRI data, our method yields AUC values of 0.84, 0.88, 0.86 in MM and 0.95, 0.91, 0.90 in LM. In conclusion, the presented method achieves high accuracy for detecting meniscal tears in both DESS and IW TSE MRI data. Furthermore, our method can be easily trained and applied to other MRI sequences.}, language = {en} } @inproceedings{LuedkeAmiranashviliAmbellanetal., author = {L{\"u}dke, David and Amiranashvili, Tamaz and Ambellan, Felix and Ezhov, Ivan and Menze, Bjoern and Zachow, Stefan}, title = {Landmark-free Statistical Shape Modeling via Neural Flow Deformations}, series = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2022}, volume = {13432}, booktitle = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2022}, publisher = {Springer, Cham}, doi = {10.1007/978-3-031-16434-7_44}, abstract = {Statistical shape modeling aims at capturing shape variations of an anatomical structure that occur within a given population. Shape models are employed in many tasks, such as shape reconstruction and image segmentation, but also shape generation and classification. Existing shape priors either require dense correspondence between training examples or lack robustness and topological guarantees. We present FlowSSM, a novel shape modeling approach that learns shape variability without requiring dense correspondence between training instances. It relies on a hierarchy of continuous deformation flows, which are parametrized by a neural network. Our model outperforms state-of-the-art methods in providing an expressive and robust shape prior for distal femur and liver. We show that the emerging latent representation is discriminative by separating healthy from pathological shapes. Ultimately, we demonstrate its effectiveness on two shape reconstruction tasks from partial data. Our source code is publicly available (https://github.com/davecasp/flowssm).}, language = {en} } @article{TackAmbellanZachow, author = {Tack, Alexander and Ambellan, Felix and Zachow, Stefan}, title = {Towards novel osteoarthritis biomarkers: Multi-criteria evaluation of 46,996 segmented knee MRI data from the Osteoarthritis Initiative}, series = {PLOS One}, volume = {16}, journal = {PLOS One}, number = {10}, doi = {10.1371/journal.pone.0258855}, abstract = {Convolutional neural networks (CNNs) are the state-of-the-art for automated assessment of knee osteoarthritis (KOA) from medical image data. However, these methods lack interpretability, mainly focus on image texture, and cannot completely grasp the analyzed anatomies' shapes. 
In this study we assess the informative value of quantitative features derived from segmentations in order to assess their potential as an alternative or extension to CNN-based approaches regarding multiple aspects of KOA. Six anatomical structures around the knee (femoral and tibial bones, femoral and tibial cartilages, and both menisci) are segmented in 46,996 MRI scans. Based on these segmentations, quantitative features are computed, i.e., measurements such as cartilage volume, meniscal extrusion and tibial coverage, as well as geometric features based on a statistical shape encoding of the anatomies. The feature quality is assessed by investigating their association to the Kellgren-Lawrence grade (KLG), joint space narrowing (JSN), incident KOA, and total knee replacement (TKR). Using gold standard labels from the Osteoarthritis Initiative database, the balanced accuracy (BA), the area under the Receiver Operating Characteristic curve (AUC), and weighted kappa statistics are evaluated. Features based on shape encodings of femur, tibia, and menisci plus the performed measurements showed most potential as KOA biomarkers. Differentiation between non-arthritic and severely arthritic knees yielded BAs of up to 99\%; 84\% was achieved for diagnosis of early KOA. Weighted kappa values of 0.73, 0.72, and 0.78 were achieved for classification of the grade of medial JSN, lateral JSN, and KLG, respectively. The AUC was 0.61 and 0.76 for prediction of incident KOA and TKR within one year, respectively. Quantitative features from automated segmentations provide novel biomarkers for KLG and JSN classification and show potential for incident KOA and TKR prediction. The validity of these features should be further evaluated, especially as extensions of CNN-based approaches. To foster such developments we make all segmentations publicly available together with this publication.}, language = {en} } @inproceedings{SiqueiraRodriguesNyakaturaZachowetal., author = {Siqueira Rodrigues, Lucas and Nyakatura, John and Zachow, Stefan and Israel, Johann Habakuk}, title = {An Immersive Virtual Paleontology Application}, series = {13th International Conference on Human Haptic Sensing and Touch Enabled Computer Applications, EuroHaptics 2022}, booktitle = {13th International Conference on Human Haptic Sensing and Touch Enabled Computer Applications, EuroHaptics 2022}, doi = {10.1007/978-3-031-06249-0}, pages = {478 -- 481}, abstract = {Virtual paleontology studies digital fossils through data analysis and visualization systems. The discipline is growing in relevance for the evident advantages of non-destructive imaging techniques over traditional paleontological methods, and it has made significant advancements during the last few decades. However, virtual paleontology still faces a number of technological challenges, amongst which are interaction shortcomings of image segmentation applications. Whereas automated segmentation methods are seldom applicable to fossil datasets, manual exploration of these specimens is extremely time-consuming as it impractically delves into three-dimensional data through two-dimensional visualization and interaction means. This paper presents an application that employs virtual reality and haptics to virtual paleontology in order to evolve its interaction paradigms and address some of its limitations.
We provide a brief overview of the challenges faced by virtual paleontology practitioners, a description of our immersive virtual paleontology prototype, and the results of a heuristic evaluation of our design.}, language = {en} } @article{SekuboyinaHusseiniBayatetal., author = {Sekuboyina, Anjany and Husseini, Malek E. and Bayat, Amirhossein and L{\"o}ffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\'a}s P{\'e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, {\'E}lodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H. and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J. and Mumme, Raymond P. and Court, Laurence E. and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huynh, L{\^e} Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C. and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.}, title = {VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images}, series = {Medical Image Analysis}, volume = {73}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2021.102166}, abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration.
The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.}, language = {en} } @article{GlatzederKomnikAmbellanetal., author = {Glatzeder, Korbinian and Komnik, Igor and Ambellan, Felix and Zachow, Stefan and Potthast, Wolfgang}, title = {Dynamic pressure analysis of novel interpositional knee spacer implants in 3D-printed human knee models}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, doi = {10.1038/s41598-022-20463-6}, abstract = {Alternative treatment methods for knee osteoarthritis (OA) are in demand, to delay the young (< 50 Years) patient's need for osteotomy or knee replacement. Novel interpositional knee spacers, shaped based on a statistical shape model (SSM) approach and made of polyurethane (PU), were developed to present a minimally invasive method to treat medial OA in the knee. The implant is supposed to reduce peak strains and pain, restore the stability of the knee, correct the malalignment of a varus knee and improve joint function and gait. Firstly, the spacers were tested in artificial knee models. It is assumed that by application of a spacer, a significant reduction in stress values and a significant increase in the contact area in the medial compartment of the knee will be registered. Biomechanical analysis of the effect of novel interpositional knee spacer implants on pressure distribution in 3D-printed knee model replicas: the primary purpose was to examine the medial joint contact stress-related biomechanics. A secondary purpose was a better understanding of medial/lateral redistribution of joint loading. Six 3D printed knee models were reproduced from cadaveric leg computed tomography. Each of four spacer implants was tested in each knee geometry under realistic arthrokinematic dynamic loading conditions, to examine the pressure distribution in the knee joint. All spacers showed reduced mean stress values by 84-88\% and peak stress values by 524-704\% in the medial knee joint compartment compared to the non-spacer test condition. The contact area was enlarged by 462-627\% as a result of the inserted spacers. Concerning the appreciable contact stress reduction and enlargement of the contact area in the medial knee joint compartment, the premises are in place for testing the implants directly on human knee cadavers to gain further insights into a possible tool for treating medial knee osteoarthritis.}, language = {en} } @article{SekuboyinaBayatHusseinietal., author = {Sekuboyina, Anjany and Bayat, Amirhossein and Husseini, Malek E. and L{\"o}ffler, Maximilian and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\'a}s P{\'e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Wei, Qingyue and Brown, Kevin and Wolf, Matthias and Kirszenberg, Alexandre and Puybareau, {\'E}lodie and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H.
and Kirschke, Jan S.}, title = {VerSe: A Vertebrae Labelling and Segmentation Benchmark for Multi-detector CT Images}, series = {arXiv}, journal = {arXiv}, language = {en} } @inproceedings{AmiranashviliLuedkeLietal., author = {Amiranashvili, Tamaz and L{\"u}dke, David and Li, Hongwei and Menze, Bjoern and Zachow, Stefan}, title = {Learning Shape Reconstruction from Sparse Measurements with Neural Implicit Functions}, series = {Medical Imaging with Deep Learning}, booktitle = {Medical Imaging with Deep Learning}, abstract = {Reconstructing anatomical shapes from sparse or partial measurements relies on prior knowledge of shape variations that occur within a given population. Such shape priors are learned from example shapes, obtained by segmenting volumetric medical images. For existing models, the resolution of a learned shape prior is limited to the resolution of the training data. However, in clinical practice, volumetric images are often acquired with highly anisotropic voxel sizes, e.g. to reduce image acquisition time in MRI or radiation exposure in CT imaging. The missing shape information between the slices prohibits existing methods from learning a high-resolution shape prior. We introduce a method for high-resolution shape reconstruction from sparse measurements without relying on high-resolution ground truth for training. Our method is based on neural implicit shape representations and learns a continuous shape prior only from highly anisotropic segmentations. Furthermore, it is able to learn from shapes with a varying field of view and can reconstruct from various sparse input configurations. We demonstrate its effectiveness on two anatomical structures: vertebra and femur, and successfully reconstruct high-resolution shapes from sparse segmentations, using as few as three orthogonal slices.}, language = {en} } @article{WilsonAnglinAmbellanetal., author = {Wilson, David and Anglin, Carolyn and Ambellan, Felix and Grewe, Carl Martin and Tack, Alexander and Lamecker, Hans and Dunbar, Michael and Zachow, Stefan}, title = {Validation of three-dimensional models of the distal femur created from surgical navigation point cloud data for intraoperative and postoperative analysis of total knee arthroplasty}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {12}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {12}, publisher = {Springer}, doi = {10.1007/s11548-017-1630-5}, pages = {2097 -- 2105}, abstract = {Purpose: Despite the success of total knee arthroplasty there continues to be a significant proportion of patients who are dissatisfied. One explanation may be a shape mismatch between pre- and post-operative distal femurs. The purpose of this study was to investigate a method to match a statistical shape model (SSM) to intra-operatively acquired point cloud data from a surgical navigation system, and to validate it against the pre-operative magnetic resonance imaging (MRI) data from the same patients. Methods: A total of 10 patients who underwent navigated total knee arthroplasty also had an MRI scan less than 2 months pre-operatively. The standard surgical protocol was followed which included partial digitization of the distal femur. Two different methods were employed to fit the SSM to the digitized point cloud data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM).
The available MRI data were manually segmented and the reconstructed three-dimensional surfaces used as ground truth against which the statistical shape model fit was compared. Results: For both approaches, the difference between the statistical shape model-generated femur and the surface generated from MRI segmentation averaged less than 1.7 mm, with maximum errors occurring in less clinically important areas. Conclusion: The results demonstrated good correspondence with the distal femoral morphology even in cases of sparse data sets. Application of this technique will allow for measurement of mismatch between pre- and post-operative femurs retrospectively on any case done using the surgical navigation system and could be integrated into the surgical navigation unit to provide real-time feedback.}, language = {en} } @misc{GreweZachow, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Face to Face-Interface}, series = {+ultra. Knowledge \& Gestaltung}, journal = {+ultra. Knowledge \& Gestaltung}, editor = {Doll, Nikola and Bredekamp, Horst and Sch{\"a}ffner, Wolfgang}, publisher = {Seemann Henschel}, pages = {320 -- 321}, language = {en} } @inproceedings{GreweZachow, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Fully Automated and Highly Accurate Dense Correspondence for Facial Surfaces}, series = {Computer Vision - ECCV 2016 Workshops}, volume = {9914}, booktitle = {Computer Vision - ECCV 2016 Workshops}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-319-48881-3_38}, pages = {552 -- 568}, abstract = {We present a novel framework for fully automated and highly accurate determination of facial landmarks and dense correspondence, e.g. a topologically identical mesh of arbitrary resolution, across the entire surface of 3D face models. For robustness and reliability of the proposed approach, we are combining 2D landmark detectors and 3D statistical shape priors with a variational matching method. Instead of matching faces in the spatial domain only, we employ image registration to align the 2D parametrization of the facial surface to a planar template we call the Unified Facial Parameter Domain (ufpd). This allows us to simultaneously match salient photometric and geometric facial features using robust image similarity measures while reasonably constraining geometric distortion in regions with less significant features.
We demonstrate the accuracy of the dense correspondence established by our framework on the BU3DFE database with 2500 facial surfaces and show that our framework outperforms current state-of-the-art methods with respect to the fully automated location of facial landmarks.}, language = {en} } @misc{WilsonBuecherGreweetal., author = {Wilson, David and B{\"u}cher, Pia and Grewe, Carl Martin and Anglin, Carolyn and Zachow, Stefan and Dunbar, Michael}, title = {Validation of Three Dimensional Models of the Distal Femur Created from Surgical Navigation Point Cloud Data}, series = {15th Annual Meeting of the International Society for Computer Assisted Orthopaedic Surgery (CAOS)}, journal = {15th Annual Meeting of the International Society for Computer Assisted Orthopaedic Surgery (CAOS)}, language = {en} } @article{GreweSchreiberZachow, author = {Grewe, Carl Martin and Schreiber, Lisa and Zachow, Stefan}, title = {Fast and Accurate Digital Morphometry of Facial Expressions}, series = {Facial Plastic Surgery}, volume = {31}, journal = {Facial Plastic Surgery}, number = {05}, publisher = {Thieme Medical Publishers}, address = {New York}, doi = {10.1055/s-0035-1564720}, pages = {431 -- 438}, language = {en} } @misc{GreweLameckerZachow2013, author = {Grewe, Carl Martin and Lamecker, Hans and Zachow, Stefan}, title = {Landmark-based Statistical Shape Analysis}, series = {Auxology - Studying Human Growth and Development}, journal = {Auxology - Studying Human Growth and Development}, editor = {Hermanussen, Michael}, publisher = {Schweizerbart Verlag, Stuttgart}, pages = {199 -- 201}, year = {2013}, language = {en} } @misc{WilsonBuecherGreweetal., author = {Wilson, David and B{\"u}cher, Pia and Grewe, Carl Martin and Mocanu, Valentin and Anglin, Carolyn and Zachow, Stefan and Dunbar, Michael}, title = {Validation of Three Dimensional Models of the Distal Femur Created from Surgical Navigation Data}, series = {Orthopedic Research Society Annual Meeting}, journal = {Orthopedic Research Society Annual Meeting}, address = {Las Vegas, Nevada}, language = {en} } @misc{GreweLameckerZachow2011, author = {Grewe, Carl Martin and Lamecker, Hans and Zachow, Stefan}, title = {Digital morphometry: The Potential of Statistical Shape Models}, series = {Anthropologischer Anzeiger. Journal of Biological and Clinical Anthropology}, journal = {Anthropologischer Anzeiger. Journal of Biological and Clinical Anthropology}, pages = {506 -- 506}, year = {2011}, language = {en} } @misc{EhlkeHeylandMaerdianetal., author = {Ehlke, Moritz and Heyland, Mark and M{\"a}rdian, Sven and Duda, Georg and Zachow, Stefan}, title = {Assessing the Relative Positioning of an Osteosynthesis Plate to the Patient-Specific Femoral Shape from Plain 2D Radiographs}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-54268}, abstract = {We present a novel method to derive the surface distance of an osteosynthesis plate w.r.t. the patient-specific surface of the distal femur based on 2D X-ray images. Our goal is to study from clinical data how the plate-to-bone distance affects bone healing. The patient-specific 3D shape of the femur is, however, seldom recorded for cases of femoral osteosynthesis since this typically requires Computed Tomography (CT), which comes at high cost and radiation dose. Our method instead utilizes two postoperative X-ray images to derive the femoral shape and thus can be applied on radiographs that are taken in clinical routine for follow-up.
First, the implant geometry is used as a calibration object to relate the implant and the individual X-ray images spatially in a virtual X-ray setup. In a second step, the patient-specific femoral shape and pose are reconstructed in the virtual setup by fitting a deformable statistical shape and intensity model (SSIM) to the images. The relative positioning between femur and implant is then assessed in terms of displacement between the reconstructed 3D shape of the femur and the plate. A preliminary evaluation based on 4 cadaver datasets shows that the method derives the plate-to-bone distance with a mean absolute error of less than 1 mm and a maximum error of 4.7 mm compared to ground truth from CT. We believe that the approach presented in this paper constitutes a meaningful tool to elucidate the effect of implant positioning on fracture healing.}, language = {en} } @article{TaylorPoepplauKoenigetal.2011, author = {Taylor, William R. and P{\"o}pplau, Berry M. and K{\"o}nig, Christian and Ehrig, Rainald and Zachow, Stefan and Duda, Georg and Heller, Markus O.}, title = {The medial-lateral force distribution in the ovine stifle joint during walking}, series = {Journal of Orthopaedic Research}, volume = {29}, journal = {Journal of Orthopaedic Research}, number = {4}, doi = {10.1002/jor.21254}, pages = {567 -- 571}, year = {2011}, language = {en} } @misc{GreweZachow, author = {Grewe, C. Martin and Zachow, Stefan}, title = {Release of the FexMM for the Open Virtual Mirror Framework}, doi = {10.12752/8532}, abstract = {THIS MODEL IS FOR NON-COMMERCIAL RESEARCH PURPOSES. ONLY MEMBERS OF UNIVERSITIES OR NON-COMMERCIAL RESEARCH INSTITUTES ARE ELIGIBLE TO APPLY. 1. Download, fill, and sign the form available from: https://media.githubusercontent.com/media/mgrewe/ovmf/main/data/fexmm_license_agreement.pdf 2. Send the signed form to: fexmm@zib.de NOTE: Use an official email address of your institution for the request.}, language = {en} } @article{GreweLiuHildebrandtetal., author = {Grewe, Carl Martin and Liu, Tuo and Hildebrandt, Andrea and Zachow, Stefan}, title = {The Open Virtual Mirror Framework for Enfacement Illusions - Enhancing the Sense of Agency With Avatars That Imitate Facial Expressions}, series = {Behavior Research Methods}, journal = {Behavior Research Methods}, publisher = {Springer}, doi = {10.3758/s13428-021-01761-9}, language = {en} } @article{GreweLiuKahletal., author = {Grewe, Carl Martin and Liu, Tuo and Kahl, Christoph and Hildebrandt, Andrea and Zachow, Stefan}, title = {Statistical Learning of Facial Expressions Improves Realism of Animated Avatar Faces}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, publisher = {Frontiers}, doi = {10.3389/frvir.2021.619811}, pages = {1 -- 13}, language = {en} } @misc{GreweLeRouxPilzetal., author = {Grewe, Carl Martin and Le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-67696}, abstract = {3D Morphable Models (MM) are a popular tool for analysis and synthesis of facial expressions. They represent plausible variations in facial shape and appearance within a low-dimensional parameter space. Fitted to a face scan, the model's parameters compactly encode its expression patterns. This expression code can be used, for instance, as a feature in automatic facial expression recognition.
For accurate classification, an MM that can adequately represent the various characteristic facets and variants of each expression is necessary. Currently available MMs are limited in the diversity of expression patterns. We present a novel high-quality Facial Expression Morphable Model built from a large-scale face database as a tool for expression analysis and synthesis. Establishment of accurate dense correspondence, up to finest skin features, enables a detailed statistical analysis of facial expressions. Various characteristic shape patterns are identified for each expression. The results of our analysis give rise to a new facial expression code. We demonstrate the advantages of such a code for the automatic recognition of expressions, and compare the accuracy of our classifier to the state-of-the-art.}, language = {en} } @inproceedings{GreweleRouxPilzetal., author = {Grewe, Carl Martin and le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, series = {IEEE International Conference on Automatic Face and Gesture Recognition}, booktitle = {IEEE International Conference on Automatic Face and Gesture Recognition}, doi = {10.1109/FG.2018.00049}, pages = {286 -- 293}, language = {en} } @inproceedings{SiqueiraRodriguesRiehmZachowetal., author = {Siqueira Rodrigues, Lucas and Riehm, Felix and Zachow, Stefan and Israel, Johann Habakuk}, title = {VoxSculpt: An Open-Source Voxel Library for Tomographic Volume Sculpting in Virtual Reality}, series = {2023 9th International Conference on Virtual Reality (ICVR), Xianyang, China, 2023}, booktitle = {2023 9th International Conference on Virtual Reality (ICVR), Xianyang, China, 2023}, doi = {10.1109/ICVR57957.2023.10169420}, pages = {515 -- 523}, abstract = {Manual processing of tomographic data volumes, such as interactive image segmentation in medicine or paleontology, is considered a time-consuming and cumbersome endeavor. Immersive volume sculpting stands as a potential solution to improve its efficiency and intuitiveness. However, current open-source software solutions do not yield the required performance and functionalities. We address this issue by contributing a novel open-source game engine voxel library that supports real-time immersive volume sculpting. Our design leverages GPU instancing, parallel computing, and a chunk-based data structure to optimize collision detection and rendering. We have implemented features that enable fast voxel interaction and improve precision. Our benchmark evaluation indicates that our implementation offers a significant improvement over the state-of-the-art and can render and modify millions of visible voxels while maintaining stable performance for real-time interaction in virtual reality.}, language = {en} } @article{WagendorfNahlesVachetal., author = {Wagendorf, Oliver and Nahles, Susanne and Vach, Kirstin and Kernen, Florian and Zachow, Stefan and Heiland, Max and Fl{\"u}gge, Tabea}, title = {The impact of teeth and dental restorations on gray value distribution in cone-beam computed tomography - a pilot study}, series = {International Journal of Implant Dentistry}, volume = {9}, journal = {International Journal of Implant Dentistry}, number = {27}, doi = {10.1186/s40729-023-00493-z}, abstract = {Purpose: To investigate the influence of teeth and dental restorations on the facial skeleton's gray value distributions in cone-beam computed tomography (CBCT).
Methods: Gray value selection for the upper and lower jaw segmentation was performed in 40 patients. In total, CBCT data of 20 maxillae and 20 mandibles, ten partially edentulous and ten fully edentulous in each jaw, respectively, were evaluated using two different gray value selection procedures: manual lower threshold selection and automated lower threshold selection. Two-sample t tests, linear regression models, linear mixed models, and Pearson's correlation coefficients were computed to evaluate the influence of teeth, dental restorations, and threshold selection procedures on gray value distributions. Results: Manual threshold selection resulted in significantly different gray values in the fully and partially edentulous mandible (p = 0.015, difference: 123). In automated threshold selection, only tendencies to different gray values in fully edentulous compared to partially edentulous jaws were observed (difference: 58-75). Significantly different gray values were evaluated for threshold selection approaches, independent of the dental situation of the analyzed jaw. No significant correlation between the number of teeth and gray values was assessed, but a trend towards higher gray values in patients with more teeth was noted. Conclusions: Standard gray values derived from CT imaging do not apply for threshold-based bone segmentation in CBCT. Teeth influence gray values and segmentation results. Inaccurate bone segmentation may result in ill-fitting surgical guides produced on CBCT data and misinterpreting bone density, which is crucial for selecting surgical protocols.}, language = {en} } @misc{EhlkeHeylandMaerdianetal., author = {Ehlke, Moritz and Heyland, Mark and M{\"a}rdian, Sven and Duda, Georg and Zachow, Stefan}, title = {3D Assessment of Osteosynthesis based on 2D Radiographs}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-56217}, abstract = {We present a novel method to derive the surface distance of an osteosynthesis plate w.r.t. the patient-specific surface of the distal femur based on postoperative 2D radiographs. In a first step, the implant geometry is used as a calibration object to relate the implant and the individual X-ray images spatially in a virtual X-ray setup. Second, the patient-specific femoral shape and pose are reconstructed by fitting a deformable statistical shape and intensity model (SSIM) to the X-rays. The relative positioning between femur and implant is then assessed in terms of the displacement between the reconstructed 3D shape of the femur and the plate.
We believe that the approach presented in this paper constitutes a meaningful tool to elucidate the effect of implant positioning on fracture healing and, ultimately, to derive load recommendations after surgery.}, language = {en} } @inproceedings{KraemerHerrmannBoethetal., author = {Kr{\"a}mer, Martin and Herrmann, Karl-Heinz and Boeth, Heide and Tycowicz, Christoph von and K{\"o}nig, Christian and Zachow, Stefan and Ehrig, Rainald and Hege, Hans-Christian and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {Measuring 3D knee dynamics using center out radial ultra-short echo time trajectories with a low cost experimental setup}, series = {ISMRM (International Society for Magnetic Resonance in Medicine), 23rd Annual Meeting 2015, Toronto, Canada}, booktitle = {ISMRM (International Society for Magnetic Resonance in Medicine), 23rd Annual Meeting 2015, Toronto, Canada}, language = {en} } @inproceedings{EhlkeHeylandMaerdianetal., author = {Ehlke, Moritz and Heyland, Mark and M{\"a}rdian, Sven and Duda, Georg and Zachow, Stefan}, title = {Assessing the relative positioning of an osteosynthesis plate to the patient-specific femoral shape from plain 2D radiographs}, series = {Proceedings of the 15th Annual Meeting of CAOS-International (CAOS)}, booktitle = {Proceedings of the 15th Annual Meeting of CAOS-International (CAOS)}, abstract = {We present a novel method to derive the surface distance of an osteosynthesis plate w.r.t. the patient-specific surface of the distal femur based on 2D X-ray images. Our goal is to study from clinical data how the plate-to-bone distance affects bone healing. The patient-specific 3D shape of the femur is, however, seldom recorded for cases of femoral osteosynthesis since this typically requires Computed Tomography (CT), which comes at high cost and radiation dose. Our method instead utilizes two postoperative X-ray images to derive the femoral shape and thus can be applied on radiographs that are taken in clinical routine for follow-up. First, the implant geometry is used as a calibration object to relate the implant and the individual X-ray images spatially in a virtual X-ray setup. In a second step, the patient-specific femoral shape and pose are reconstructed in the virtual setup by fitting a deformable statistical shape and intensity model (SSIM) to the images. The relative positioning between femur and implant is then assessed in terms of displacement between the reconstructed 3D shape of the femur and the plate. A preliminary evaluation based on 4 cadaver datasets shows that the method derives the plate-to-bone distance with a mean absolute error of less than 1 mm and a maximum error of 4.7 mm compared to ground truth from CT. We believe that the approach presented in this paper constitutes a meaningful tool to elucidate the effect of implant positioning on fracture healing.}, language = {en} } @inproceedings{EhlkeHeylandMaerdianetal., author = {Ehlke, Moritz and Heyland, Mark and M{\"a}rdian, Sven and Duda, Georg and Zachow, Stefan}, title = {3D Assessment of Osteosynthesis based on 2D Radiographs}, series = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, booktitle = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, pages = {317 -- 321}, abstract = {We present a novel method to derive the surface distance of an osteosynthesis plate w.r.t.
the patient-specific surface of the distal femur based on postoperative 2D radiographs. In a first step, the implant geometry is used as a calibration object to relate the implant and the individual X-ray images spatially in a virtual X-ray setup. Second, the patient-specific femoral shape and pose are reconstructed by fitting a deformable statistical shape and intensity model (SSIM) to the X-rays. The relative positioning between femur and implant is then assessed in terms of the displacement between the reconstructed 3D shape of the femur and the plate. We believe that the approach presented in this paper constitutes a meaningful tool to elucidate the effect of implant positioning on fracture healing and, ultimately, to derive load recommendations after surgery.}, language = {en} } @inproceedings{KraemerMaggioniTycowiczetal., author = {Kr{\"a}mer, Martin and Maggioni, Marta and Tycowicz, Christoph von and Brisson, Nick and Zachow, Stefan and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {Ultra-short echo-time (UTE) imaging of the knee with curved surface reconstruction-based extraction of the patellar tendon}, series = {ISMRM (International Society for Magnetic Resonance in Medicine), 26th Annual Meeting 2018, Paris, France}, booktitle = {ISMRM (International Society for Magnetic Resonance in Medicine), 26th Annual Meeting 2018, Paris, France}, abstract = {Due to very short T2 relaxation times, imaging of tendons is typically performed using ultra-short echo-time (UTE) acquisition techniques. In this work, we combined an echo-train shifted multi-echo 3D UTE imaging sequence with a 3D curved surface reconstruction to virtually extract the patellar tendon from an acquired 3D UTE dataset. Based on the analysis of the acquired multi-echo data, a T2* relaxation time parameter map was calculated and interpolated to the curved surface of the patellar tendon.}, language = {en} } @inproceedings{SiqueiraRodriguesNyakaturaZachowetal., author = {Siqueira Rodrigues, Lucas and Nyakatura, John and Zachow, Stefan and Israel, Johann Habakuk}, title = {Design Challenges and Opportunities of Fossil Preparation Tools and Methods}, series = {Proceedings of the 20th International Conference on Culture and Computer Science: Code and Materiality}, booktitle = {Proceedings of the 20th International Conference on Culture and Computer Science: Code and Materiality}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, doi = {10.1145/3623462.3623470}, abstract = {Fossil preparation is the activity of processing paleontological specimens for research and exhibition purposes. In addition to traditional mechanical extraction of fossils, preparation presently comprises non-destructive digital methods that are part of a relatively new field, namely virtual paleontology. Despite significant technological advances, both traditional and digital preparation remain cumbersome and time-consuming endeavors. However, this field has received scarce attention from a human-computer interaction perspective. The present study aims to elucidate the state-of-the-art for paleontological fossil preparation in order to determine its main challenges and start a conversation regarding opportunities for creating novel designs that tackle the field's current issues. We conducted a qualitative study involving both technical preparators and virtual paleontologists.
The study was divided into two parts: First, we assembled technical preparators and paleontology researchers in a focus group session to discuss their workflows, obtain a preliminary understanding of their issues, and ideate solutions based on their counterparts' workflows. Next, we conducted a series of contextual inquiries involving direct observation and semi-structured in-depth interviews. We transcribed our recordings and examined the data through theoretical and inductive thematic analysis, clustering emerging themes and applying concepts from human-computer interaction and related fields. Our findings report on challenges faced by traditional and digital fossil preparators and potential opportunities to improve their tools and workflows. We contribute with a novel analysis of fossil preparation from an HCI perspective.}, language = {en} } @article{AmiranashviliLuedkeLietal., author = {Amiranashvili, Tamaz and L{\"u}dke, David and Li, Hongwei Bran and Zachow, Stefan and Menze, Bjoern}, title = {Learning continuous shape priors from sparse data with neural implicit functions}, series = {Medical Image Analysis}, volume = {94}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2024.103099}, pages = {103099}, abstract = {Statistical shape models are an essential tool for various tasks in medical image analysis, including shape generation, reconstruction and classification. Shape models are learned from a population of example shapes, which are typically obtained through segmentation of volumetric medical images. In clinical practice, highly anisotropic volumetric scans with large slice distances are prevalent, e.g., to reduce radiation exposure in CT or image acquisition time in MR imaging. For existing shape modeling approaches, the resolution of the emerging model is limited to the resolution of the training shapes. Therefore, any missing information between slices prohibits existing methods from learning a high-resolution shape prior. We propose a novel shape modeling approach that can be trained on sparse, binary segmentation masks with large slice distances. This is achieved through employing continuous shape representations based on neural implicit functions. After training, our model can reconstruct shapes from various sparse inputs at high target resolutions beyond the resolution of individual training examples. We successfully reconstruct high-resolution shapes from as few as three orthogonal slices. Furthermore, our shape model allows us to embed various sparse segmentation masks into a common, low-dimensional latent space — independent of the acquisition direction, resolution, spacing, and field of view. We show that the emerging latent representation discriminates between healthy and pathological shapes, even when provided with sparse segmentation masks. Lastly, we qualitatively demonstrate that the emerging latent space is smooth and captures characteristic modes of shape variation. We evaluate our shape model on two anatomical structures: the lumbar vertebra and the distal femur, both from publicly available datasets.}, language = {en} }