@inproceedings{ZachowHierlErdmann2004, author = {Zachow, Stefan and Hierl, Thomas and Erdmann, Bodo}, title = {{\"U}ber die Genauigkeit einer 3D Weichgewebepr{\"a}diktion in der MKG-Chirurgie}, series = {Workshop 'Bildverarbeitung f{\"u}r die Medizin' (BVM)}, booktitle = {Workshop 'Bildverarbeitung f{\"u}r die Medizin' (BVM)}, address = {Berlin, Germany}, pages = {75 -- 79}, year = {2004}, language = {en} } @inproceedings{SiqueiraRodriguesRiehmZachowetal., author = {Siqueira Rodrigues, Lucas and Riehm, Felix and Zachow, Stefan and Israel, Johann Habakuk}, title = {VoxSculpt: An Open-Source Voxel Library for Tomographic Volume Sculpting in Virtual Reality}, series = {2023 9th International Conference on Virtual Reality (ICVR), Xianyang, China, 2023}, booktitle = {2023 9th International Conference on Virtual Reality (ICVR), Xianyang, China, 2023}, doi = {10.1109/ICVR57957.2023.10169420}, pages = {515 -- 523}, abstract = {Manual processing of tomographic data volumes, such as interactive image segmentation in medicine or paleontology, is considered a time-consuming and cumbersome endeavor. Immersive volume sculpting stands as a potential solution to improve its efficiency and intuitiveness. However, current open-source software solutions do not yield the required performance and functionalities. We address this issue by contributing a novel open-source game engine voxel library that supports real-time immersive volume sculpting. Our design leverages GPU instancing, parallel computing, and a chunk-based data structure to optimize collision detection and rendering. We have implemented features that enable fast voxel interaction and improve precision. Our benchmark evaluation indicates that our implementation offers a significant improvement over the state-of-the-art and can render and modify millions of visible voxels while maintaining stable performance for real-time interaction in virtual reality.}, language = {en} } @inproceedings{HierlWollnyZachowetal.2002, author = {Hierl, Thomas and Wollny, Gert and Zachow, Stefan and Kl{\"o}ppel, Rainer}, title = {Visualisierung von Knochen und Weichteilver{\"a}nderungen in der Distraktionsosteogenese des Mittelgesichtes}, series = {Proc. 10. Jahrestagung der Deutschen Gesellschaft f{\"u}r Sch{\"a}delbasischirurgie}, booktitle = {Proc. 10. Jahrestagung der Deutschen Gesellschaft f{\"u}r Sch{\"a}delbasischirurgie}, address = {Heidelberg}, pages = {111 -- 116}, year = {2002}, language = {en} } @inproceedings{RammVictoriaMorilloTodtetal.2013, author = {Ramm, Heiko and Victoria Morillo, Oscar Salvador and Todt, Ingo and Schirmacher, Hartmut and Ernst, Arneborg and Zachow, Stefan and Lamecker, Hans}, title = {Visual Support for Positioning Hearing Implants}, series = {Proceedings of the 12th annual meeting of the CURAC society}, booktitle = {Proceedings of the 12th annual meeting of the CURAC society}, editor = {Freysinger, Wolfgang}, pages = {116 -- 120}, year = {2013}, language = {en} } @misc{RammMorilloVictoriaTodtetal., author = {Ramm, Heiko and Morillo Victoria, Oscar Salvador and Todt, Ingo and Schirmacher, Hartmut and Ernst, Arneborg and Zachow, Stefan and Lamecker, Hans}, title = {Visual Support for Positioning Hearing Implants}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42495}, abstract = {We present a software planning tool that provides intuitive visual feedback for finding suitable positions of hearing implants in the human temporal bone.
After an automatic reconstruction of the temporal bone anatomy the tool pre-positions the implant and allows the user to adjust its position interactively with simple 2D dragging and rotation operations on the bone's surface. During this procedure, visual elements like warning labels on the implant or color encoded bone density information on the bone geometry provide guidance for the determination of a suitable fit.}, language = {en} } @article{ZachowMuiggHildebrandtetal.2009, author = {Zachow, Stefan and Muigg, Philipp and Hildebrandt, Thomas and Doleisch, Helmut and Hege, Hans-Christian}, title = {Visual Exploration of Nasal Airflow}, series = {IEEE Transactions on Visualization and Computer Graphics}, volume = {15}, journal = {IEEE Transactions on Visualization and Computer Graphics}, number = {8}, doi = {10.1109/TVCG.2009.198}, pages = {1407 -- 1414}, year = {2009}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2001, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Virtual Fibers: A Robust Approach for Muscle Simulation}, series = {IX Mediterranean Conference on Medical and Biological Engineering and Computing (MEDICON)}, booktitle = {IX Mediterranean Conference on Medical and Biological Engineering and Computing (MEDICON)}, address = {Pula, Croatia}, pages = {961 -- 964}, year = {2001}, language = {en} } @article{SekuboyinaHusseiniBayatetal., author = {Sekuboyina, Anjany and Husseini, Malek E. and Bayat, Amirhossein and L{\"o}ffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\´a}s P{\´e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, {\´E}lodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H. and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J. and Mumme, Raymond P. and Court, Laurence E. and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huynh, L{\^e} Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C. and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.}, title = {VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images}, series = {Medical Image Analysis}, volume = {73}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2021.102166}, abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. 
However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.}, language = {en} } @article{SekuboyinaBayatHusseinietal., author = {Sekuboyina, Anjany and Bayat, Amirhossein and Husseini, Malek E. and L{\"o}ffler, Maximilian and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\´a}s P{\´e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Wei, Qingyue and Brown, Kevin and Wolf, Matthias and Kirszenberg, Alexandre and Puybareau, {\´E}lodie and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. 
and Kirschke, Jan S.}, title = {VerSe: A Vertebrae Labelling and Segmentation Benchmark for Multi-detector CT Images}, series = {arXiv}, journal = {arXiv}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2001, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Validierung eines linear elastischen Modells f{\"u}r die Weichgewebesimulation in der Mund-Kiefer-Gesichtschirurgie}, series = {Bildverarbeitung f{\"u}r die Medizin (BVM)}, booktitle = {Bildverarbeitung f{\"u}r die Medizin (BVM)}, address = {L{\"u}beck, Germany}, pages = {57 -- 61}, year = {2001}, language = {en} } @article{WilsonAnglinAmbellanetal., author = {Wilson, David and Anglin, Carolyn and Ambellan, Felix and Grewe, Carl Martin and Tack, Alexander and Lamecker, Hans and Dunbar, Michael and Zachow, Stefan}, title = {Validation of three-dimensional models of the distal femur created from surgical navigation point cloud data for intraoperative and postoperative analysis of total knee arthroplasty}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {12}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {12}, publisher = {Springer}, doi = {10.1007/s11548-017-1630-5}, pages = {2097 -- 2105}, abstract = {Purpose: Despite the success of total knee arthroplasty there continues to be a significant proportion of patients who are dissatisfied. One explanation may be a shape mismatch between pre and post-operative distal femurs. The purpose of this study was to investigate a method to match a statistical shape model (SSM) to intra-operatively acquired point cloud data from a surgical navigation system, and to validate it against the pre-operative magnetic resonance imaging (MRI) data from the same patients. Methods: A total of 10 patients who underwent navigated total knee arthroplasty also had an MRI scan less than 2 months pre-operatively. The standard surgical protocol was followed which included partial digitization of the distal femur. Two different methods were employed to fit the SSM to the digitized point cloud data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM). The available MRI data were manually segmented and the reconstructed three-dimensional surfaces used as ground truth against which the statistical shape model fit was compared. Results: For both approaches, the difference between the statistical shape model-generated femur and the surface generated from MRI segmentation averaged less than 1.7 mm, with maximum errors occurring in less clinically important areas. Conclusion: The results demonstrated good correspondence with the distal femoral morphology even in cases of sparse data sets. 
Application of this technique will allow for measurement of mismatch between pre and post-operative femurs retrospectively on any case done using the surgical navigation system and could be integrated into the surgical navigation unit to provide real-time feedback.}, language = {en} } @misc{WilsonBuecherGreweetal., author = {Wilson, David and B{\"u}cher, Pia and Grewe, Carl Martin and Anglin, Carolyn and Zachow, Stefan and Dunbar, Michael}, title = {Validation of Three Dimensional Models of the Distal Femur Created from Surgical Navigation Point Cloud Data}, series = {15th Annual Meeting of the International Society for Computer Assisted Orthopaedic Surgery (CAOS)}, journal = {15th Annual Meeting of the International Society for Computer Assisted Orthopaedic Surgery (CAOS)}, language = {en} } @misc{WilsonBuecherGreweetal., author = {Wilson, David and B{\"u}cher, Pia and Grewe, Carl Martin and Mocanu, Valentin and Anglin, Carolyn and Zachow, Stefan and Dunbar, Michael}, title = {Validation of Three Dimensional Models of the Distal Femur Created from Surgical Navigation Data}, series = {Orthopedic Research Society Annual Meeting}, journal = {Orthopedic Research Society Annual Meeting}, address = {Las Vegas, Nevada}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2001, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Validation of a Linear Elastic Model for Soft Tissue Simulation in Craniofacial Surgery}, series = {Proc. SPIE Medical Imaging 2001}, volume = {4319}, booktitle = {Proc. SPIE Medical Imaging 2001}, editor = {Mun, Seong}, address = {San Diego, USA}, doi = {10.1117/12.428061}, pages = {27 -- 35}, year = {2001}, language = {en} } @inproceedings{EstacioEhlkeTacketal., author = {Estacio, Laura and Ehlke, Moritz and Tack, Alexander and Castro-Gutierrez, Eveling and Lamecker, Hans and Mora, Rensso and Zachow, Stefan}, title = {Unsupervised Detection of Disturbances in 2D Radiographs}, series = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, booktitle = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, doi = {10.1109/ISBI48211.2021.9434091}, pages = {367 -- 370}, abstract = {We present a method based on a generative model for detection of disturbances such as prosthesis, screws, zippers, and metals in 2D radiographs. The generative model is trained in an unsupervised fashion using clinical radiographs as well as simulated data, none of which contain disturbances. Our approach employs a latent space consistency loss which has the benefit of identifying similarities, and is enforced to reconstruct X-rays without disturbances. In order to detect images with disturbances, an anomaly score is computed by employing the Frechet distance between the input X-ray and the reconstructed one using our generative model. Validation was performed using clinical pelvis radiographs. We achieved an AUC of 0.77 and 0.83 with clinical and synthetic data, respectively. The results demonstrated a good accuracy of our method for detecting outliers as well as the advantage of utilizing synthetic data.}, language = {en} } @article{ZachowSteinmannHildebrandtetal.2007, author = {Zachow, Stefan and Steinmann, Alexander and Hildebrandt, Thomas and Heppt, Werner}, title = {Understanding nasal airflow via CFD simulation and visualization}, series = {Proc. Computer Aided Surgery around the Head}, journal = {Proc.
Computer Aided Surgery around the Head}, pages = {173 -- 176}, year = {2007}, language = {en} } @misc{WeiserErdmannSchenkletal.2017, author = {Weiser, Martin and Erdmann, Bodo and Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Mall, Gita and Zachow, Stefan}, title = {Uncertainty in Temperature-Based Determination of Time of Death}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63818}, year = {2017}, abstract = {Temperature-based estimation of time of death (ToD) can be performed either with the help of simple phenomenological models of corpse cooling or with detailed mechanistic (thermodynamic) heat transfer models. The latter are much more complex, but allow a higher accuracy of ToD estimation as in principle all relevant cooling mechanisms can be taken into account. The potentially higher accuracy depends on the accuracy of tissue and environmental parameters as well as on the geometric resolution. We investigate the impact of parameter variations and geometry representation on the estimated ToD based on a highly detailed 3D corpse model, that has been segmented and geometrically reconstructed from a computed tomography (CT) data set, differentiating various organs and tissue types. From that we identify the most crucial parameters to measure or estimate, and obtain a local uncertainty quantification for the ToD.}, language = {en} } @article{WeiserErdmannSchenkletal., author = {Weiser, Martin and Erdmann, Bodo and Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Mall, Gita and Zachow, Stefan}, title = {Uncertainty in Temperature-Based Determination of Time of Death}, series = {Heat and Mass Transfer}, volume = {54}, journal = {Heat and Mass Transfer}, number = {9}, publisher = {Springer}, doi = {10.1007/s00231-018-2324-4}, pages = {2815 -- 2826}, abstract = {Temperature-based estimation of time of death (ToD) can be performed either with the help of simple phenomenological models of corpse cooling or with detailed mechanistic (thermodynamic) heat transfer models. The latter are much more complex, but allow a higher accuracy of ToD estimation as in principle all relevant cooling mechanisms can be taken into account. The potentially higher accuracy depends on the accuracy of tissue and environmental parameters as well as on the geometric resolution. We investigate the impact of parameter variations and geometry representation on the estimated ToD based on a highly detailed 3D corpse model, that has been segmented and geometrically reconstructed from a computed tomography (CT) data set, differentiating various organs and tissue types.}, language = {en} } @inproceedings{KraemerMaggioniTycowiczetal., author = {Kr{\"a}mer, Martin and Maggioni, Marta and Tycowicz, Christoph von and Brisson, Nick and Zachow, Stefan and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {Ultra-short echo-time (UTE) imaging of the knee with curved surface reconstruction-based extraction of the patellar tendon}, series = {ISMRM (International Society for Magnetic Resonance in Medicine), 26th Annual Meeting 2018, Paris, France}, booktitle = {ISMRM (International Society for Magnetic Resonance in Medicine), 26th Annual Meeting 2018, Paris, France}, abstract = {Due to very short T2 relaxation times, imaging of tendons is typically performed using ultra-short echo-time (UTE) acquisition techniques.
In this work, we combined an echo-train shifted multi-echo 3D UTE imaging sequence with a 3D curved surface reconstruction to virtually extract the patellar tendon from an acquired 3D UTE dataset. Based on the analysis of the acquired multi-echo data, a T2* relaxation time parameter map was calculated and interpolated to the curved surface of the patellar tendon.}, language = {en} } @article{ZeilhoferZachowFairleyetal.2000, author = {Zeilhofer, Hans-Florian and Zachow, Stefan and Fairley, Jeffrey and Sader, Robert and Deuflhard, Peter}, title = {Treatment Planning and Simulation in Craniofacial Surgery with Virtual Reality Techniques}, series = {Journal of Cranio-Maxillofacial Surgery}, volume = {28 (Suppl. 1)}, journal = {Journal of Cranio-Maxillofacial Surgery}, pages = {82}, year = {2000}, language = {en} } @article{HoffmannLemanisWulffetal., author = {Hoffmann, Rene and Lemanis, Robert and Wulff, Lena and Zachow, Stefan and Lukeneder, Alexander and Klug, Christian and Keupp, Helmut}, title = {Traumatic events in the life of the deep-sea cephalopod mollusc, the coleoid Spirula spirula}, series = {ScienceDirect: Deep Sea Research Part I - Oceanographic Research}, volume = {142}, journal = {ScienceDirect: Deep Sea Research Part I - Oceanographic Research}, number = {12}, doi = {10.1016/j.dsr.2018.10.007}, pages = {127 -- 144}, abstract = {Here, we report on different types of shell pathologies of the enigmatic deep-sea (mesopelagic) cephalopod Spirula spirula. For the first time, we apply non-invasive imaging methods to: document trauma-induced changes in shell shapes, reconstruct the different causes and effects of these pathologies, unravel the etiology, and attempt to quantify the efficiency of the buoyancy apparatus. We have analysed 2D and 3D shell parameters from eleven shells collected as beach findings from the Canary Islands (Gran Canaria and Fuerteventura), West-Australia, and the Maldives. All shells were scanned with a nanotom-m computer tomograph. Seven shells were likely injured by predator attacks (fishes, cephalopods or crustaceans), one specimen was infested by an endoparasite (potentially Digenea), one shell shows signs of inflammation, and one shell shows large fluctuations of chamber volumes without any signs of pathology. These fluctuations are potential indicators of a stressed environment. Pathological shells represent the most deviant morphologies of a single species and can therefore be regarded as morphological end-members. The changes in the shell volume / chamber volume ratio were assessed in order to evaluate the functional tolerance of the buoyancy apparatus showing that these had little effect.}, language = {en} } @inproceedings{EhlkeFrenzelRammetal., author = {Ehlke, Moritz and Frenzel, Thomas and Ramm, Heiko and Shandiz, Mohsen Akbari and Anglin, Carolyn and Zachow, Stefan}, title = {Towards Robust Measurement Of Pelvic Parameters From AP Radiographs Using Articulated 3D Models}, series = {Computer Assisted Radiology and Surgery (CARS)}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, abstract = {Patient-specific parameters such as the orientation of the acetabulum or pelvic tilt are useful for custom planning for total hip arthroplasty (THA) and for evaluating the outcome of surgical interventions. The gold standard in obtaining pelvic parameters is from three-dimensional (3D) computed tomography (CT) imaging.
However, this adds time and cost, exposes the patient to a substantial radiation dose, and does not allow for imaging under load (e.g. while the patient is standing). If pelvic parameters could be reliably derived from the standard anteroposterior (AP) radiograph, preoperative planning would be more widespread, and research analyses could be applied to retrospective data, after a postoperative issue is discovered. The goal of this work is to enable robust measurement of two surgical parameters of interest: the tilt of the anterior pelvic plane (APP) and the orientation of the natural acetabulum. We present a computer-aided reconstruction method to determine the APP and natural acetabular orientation from a single, preoperative X-ray. It can easily be extended to obtain other important preoperative and postoperative parameters solely based on a single AP radiograph.}, language = {en} } @misc{EhlkeFrenzelRammetal., author = {Ehlke, Moritz and Frenzel, Thomas and Ramm, Heiko and Shandiz, Mohsen Akbari and Anglin, Carolyn and Zachow, Stefan}, title = {Towards Robust Measurement of Pelvic Parameters from AP Radiographs using Articulated 3D Models}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53707}, abstract = {Patient-specific parameters such as the orientation of the acetabulum or pelvic tilt are useful for custom planning for total hip arthroplasty (THA) and for evaluating the outcome of surgical interventions. The gold standard in obtaining pelvic parameters is from three-dimensional (3D) computed tomography (CT) imaging. However, this adds time and cost, exposes the patient to a substantial radiation dose, and does not allow for imaging under load (e.g. while the patient is standing). If pelvic parameters could be reliably derived from the standard anteroposterior (AP) radiograph, preoperative planning would be more widespread, and research analyses could be applied to retrospective data, after a postoperative issue is discovered. The goal of this work is to enable robust measurement of two surgical parameters of interest: the tilt of the anterior pelvic plane (APP) and the orientation of the natural acetabulum. We present a computer-aided reconstruction method to determine the APP and natural acetabular orientation from a single, preoperative X-ray. It can easily be extended to obtain other important preoperative and postoperative parameters solely based on a single AP radiograph.}, language = {en} } @inproceedings{ZachowGladilinHegeetal.2002, author = {Zachow, Stefan and Gladilin, Evgeny and Hege, Hans-Christian and Deuflhard, Peter}, title = {Towards Patient Specific, Anatomy Based Simulation of Facial Mimics for Surgical Nerve Rehabilitation}, series = {Computer Assisted Radiology and Surgery (CARS)}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, publisher = {Springer Verlag}, pages = {3 -- 6}, year = {2002}, language = {en} } @misc{TackAmbellanZachow2021, author = {Tack, Alexander and Ambellan, Felix and Zachow, Stefan}, title = {Towards novel osteoarthritis biomarkers: Multi-criteria evaluation of 46,996 segmented knee MRI data from the Osteoarthritis Initiative (Supplementary Material)}, series = {PLOS One}, volume = {16}, journal = {PLOS One}, number = {10}, doi = {10.12752/8328}, year = {2021}, abstract = {Convolutional neural networks (CNNs) are the state-of-the-art for automated assessment of knee osteoarthritis (KOA) from medical image data. 
However, these methods lack interpretability, mainly focus on image texture, and cannot completely grasp the analyzed anatomies' shapes. In this study we assess the informative value of quantitative features derived from segmentations in order to assess their potential as an alternative or extension to CNN-based approaches regarding multiple aspects of KOA. A fully automated method is employed to segment six anatomical structures around the knee (femoral and tibial bones, femoral and tibial cartilages, and both menisci) in 46,996 MRI scans. Based on these segmentations, quantitative features are computed, i.e., measurements such as cartilage volume, meniscal extrusion and tibial coverage, as well as geometric features based on a statistical shape encoding of the anatomies. The feature quality is assessed by investigating their association to the Kellgren-Lawrence grade (KLG), joint space narrowing (JSN), incident KOA, and total knee replacement (TKR). Using gold standard labels from the Osteoarthritis Initiative database the balanced accuracy (BA), the area under the Receiver Operating Characteristic curve (AUC), and weighted kappa statistics are evaluated. Features based on shape encodings of femur, tibia, and menisci plus the performed measurements showed most potential as KOA biomarkers. Differentiation between healthy and severely arthritic knees yielded BAs of up to 99\%, 84\% were achieved for diagnosis of early KOA. Substantial agreement with weighted kappa values of 0.73, 0.73, and 0.79 were achieved for classification of the grade of medial JSN, lateral JSN, and KLG, respectively. The AUC was 0.60 and 0.75 for prediction of incident KOA and TKR within 5 years, respectively. Quantitative features from automated segmentations yield excellent results for KLG and JSN classification and show potential for incident KOA and TKR prediction. The validity of these features as KOA biomarkers should be further evaluated, especially as extensions of CNN-based approaches. To foster such developments we make all segmentations publicly available together with this publication.}, language = {en} } @article{TackAmbellanZachow, author = {Tack, Alexander and Ambellan, Felix and Zachow, Stefan}, title = {Towards novel osteoarthritis biomarkers: Multi-criteria evaluation of 46,996 segmented knee MRI data from the Osteoarthritis Initiative}, series = {PLOS One}, volume = {16}, journal = {PLOS One}, number = {10}, doi = {10.1371/journal.pone.0258855}, abstract = {Convolutional neural networks (CNNs) are the state-of-the-art for automated assessment of knee osteoarthritis (KOA) from medical image data. However, these methods lack interpretability, mainly focus on image texture, and cannot completely grasp the analyzed anatomies' shapes. In this study we assess the informative value of quantitative features derived from segmentations in order to assess their potential as an alternative or extension to CNN-based approaches regarding multiple aspects of KOA. Six anatomical structures around the knee (femoral and tibial bones, femoral and tibial cartilages, and both menisci) are segmented in 46,996 MRI scans. Based on these segmentations, quantitative features are computed, i.e., measurements such as cartilage volume, meniscal extrusion and tibial coverage, as well as geometric features based on a statistical shape encoding of the anatomies.
The feature quality is assessed by investigating their association to the Kellgren-Lawrence grade (KLG), joint space narrowing (JSN), incident KOA, and total knee replacement (TKR). Using gold standard labels from the Osteoarthritis Initiative database the balanced accuracy (BA), the area under the Receiver Operating Characteristic curve (AUC), and weighted kappa statistics are evaluated. Features based on shape encodings of femur, tibia, and menisci plus the performed measurements showed most potential as KOA biomarkers. Differentiation between non-arthritic and severely arthritic knees yielded BAs of up to 99\%, 84\% were achieved for diagnosis of early KOA. Weighted kappa values of 0.73, 0.72, and 0.78 were achieved for classification of the grade of medial JSN, lateral JSN, and KLG, respectively. The AUC was 0.61 and 0.76 for prediction of incident KOA and TKR within one year, respectively. Quantitative features from automated segmentations provide novel biomarkers for KLG and JSN classification and show potential for incident KOA and TKR prediction. The validity of these features should be further evaluated, especially as extensions of CNN- based approaches. To foster such developments we make all segmentations publicly available together with this publication.}, language = {en} } @inproceedings{DworzakLameckervonBergetal.2008, author = {Dworzak, Jalda and Lamecker, Hans and von Berg, Jens and Klinder, Tobias and Lorenz, Cristian and Kainm{\"u}ller, Dagmar and Seim, Heiko and Hege, Hans-Christian and Zachow, Stefan}, title = {Towards model-based 3-D reconstruction of the human rib cage from radiographs}, series = {Proc. 7. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer-Roboterassistierte Chirurgie (CURAC)}, booktitle = {Proc. 7. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer-Roboterassistierte Chirurgie (CURAC)}, pages = {193 -- 196}, year = {2008}, language = {en} } @inproceedings{GladilinZachowHegeetal.2001, author = {Gladilin, Evgeny and Zachow, Stefan and Hege, Hans-Christian and Deuflhard, Peter}, title = {Towards a Realistic Simulation of Individual Facial Mimics}, series = {Vision Modeling and Visualization 2001 (VMV)}, booktitle = {Vision Modeling and Visualization 2001 (VMV)}, address = {Stuttgart, Germany}, pages = {129 -- 134}, year = {2001}, language = {en} } @misc{KamerNoserLameckeretal.2006, author = {Kamer, Lukas and Noser, Hansrudi and Lamecker, Hans and Zachow, Stefan and Wittmers, Antonia and Kaup, Thomas and Schramm, Alexander and Hammer, Beat}, title = {Three-dimensional statistical shape analysis - A useful tool for developing a new type of orbital implant?}, publisher = {AO Development Institute, New Products Brochure 2/06}, pages = {20 -- 21}, year = {2006}, language = {en} } @inproceedings{BaumMahlowLameckeretal., author = {Baum, Daniel and Mahlow, Kristin and Lamecker, Hans and Zachow, Stefan and M{\"u}ller, Johannes and Hege, Hans-Christian}, title = {The Potential of Surface-based Geometric Morphometrics for Evolutionary Studies: An Example using Dwarf Snakes (Eirenis)}, series = {Abstract in DigitalSpecimen 2014}, booktitle = {Abstract in DigitalSpecimen 2014}, abstract = {Geometric morphometrics plays an important role in evolutionary studies. The state-of-the-art in this field are landmark-based methods. Since the landmarks usually need to be placed manually, only a limited number of landmarks are generally used to represent the shape of an anatomical structure. 
As a result, shape characteristics that cannot be properly represented by small sets of landmarks are disregarded. In this study, we present a method that is free of this limitation. The method takes into account the whole shape of an anatomical structure, which is represented as a surface, hence the term 'surface-based morphometrics'. Correspondence between two surfaces is established by defining a partitioning of the surfaces into homologous surface patches. The first step for the generation of a surface partitioning is to place landmarks on the surface. Subsequently, the landmarks are connected by curves lying on the surface. The curves, called 'surface paths', might either follow specific anatomical features or they can be geodesics, that is, shortest paths on the surface. One important requirement, however, is that the resulting surface path networks are topologically equivalent across all surfaces. Once the surface path networks have been defined, the surfaces are decomposed into patches according to the path networks. This approach has several advantages. One of them is that we can discretize the surface by as many points as desired. Thus, even fine shape details can be resolved if this is of interest for the study. Since a point discretization is used, another advantage is that well-established analysis methods for landmark-based morphometrics can be utilized. Finally, the shapes can be easily morphed into one another, thereby greatly supporting the understanding of shape changes across all considered specimens. To show the potential of the described method for evolutionary studies of biological specimens, we applied the method to the para-basisphenoid complex of the snake genus Eirenis. By using this anatomical structure as example, we present all the steps that are necessary for surface-based morphometrics, including the segmentation of the para-basisphenoid complex from micro-CT data sets. We also show some first results using statistical analysis as well as classification methods based on the presented technique.}, language = {en} } @article{GreweLiuHildebrandtetal., author = {Grewe, Carl Martin and Liu, Tuo and Hildebrandt, Andrea and Zachow, Stefan}, title = {The Open Virtual Mirror Framework for Enfacement Illusions - Enhancing the Sense of Agency With Avatars That Imitate Facial Expressions}, series = {Behavior Research Methods}, journal = {Behavior Research Methods}, publisher = {Springer}, doi = {10.3758/s13428-021-01761-9}, language = {de} } @article{TaylorPoepplauKoenigetal.2011, author = {Taylor, William R. and P{\"o}pplau, Berry M. 
and K{\"o}nig, Christian and Ehrig, Rainald and Zachow, Stefan and Duda, Georg and Heller, Markus O.}, title = {The medial-lateral force distribution in the ovine stifle joint during walking}, series = {Journal of Orthopaedic Research}, volume = {29}, journal = {Journal of Orthopaedic Research}, number = {4}, doi = {10.1002/jor.21254}, pages = {567 -- 571}, year = {2011}, language = {en} } @article{WagendorfNahlesVachetal., author = {Wagendorf, Oliver and Nahles, Susanne and Vach, Kirstin and Kernen, Florian and Zachow, Stefan and Heiland, Max and Fl{\"u}gge, Tabea}, title = {The impact of teeth and dental restorations on gray value distribution in cone-beam computer tomography - a pilot study}, series = {International Journal of Implant Dentistry}, volume = {9}, journal = {International Journal of Implant Dentistry}, number = {27}, doi = {10.1186/s40729-023-00493-z}, abstract = {Purpose: To investigate the influence of teeth and dental restorations on the facial skeleton's gray value distributions in cone-beam computed tomography (CBCT). Methods: Gray value selection for the upper and lower jaw segmentation was performed in 40 patients. In total, CBCT data of 20 maxillae and 20 mandibles, ten partial edentulous and ten fully edentulous in each jaw, respectively, were evaluated using two different gray value selection procedures: manual lower threshold selection and automated lower threshold selection. Two sample t tests, linear regression models, linear mixed models, and Pearson's correlation coefficients were computed to evaluate the influence of teeth, dental restorations, and threshold selection procedures on gray value distributions. Results: Manual threshold selection resulted in significantly different gray values in the fully and partially edentulous mandible (p = 0.015, difference 123). In automated threshold selection, only tendencies to different gray values in fully edentulous compared to partially edentulous jaws were observed (difference: 58-75). Significantly different gray values were evaluated for threshold selection approaches, independent of the dental situation of the analyzed jaw. No significant correlation between the number of teeth and gray values was assessed, but a trend towards higher gray values in patients with more teeth was noted. Conclusions: Standard gray values derived from CT imaging do not apply for threshold-based bone segmentation in CBCT. Teeth influence gray values and segmentation results. Inaccurate bone segmentation may result in ill-fitting surgical guides produced on CBCT data and misinterpreting bone density, which is crucial for selecting surgical protocols.}, language = {en} } @article{HildebrandtBrueningSchmidtetal., author = {Hildebrandt, Thomas and Bruening, Jan Joris and Schmidt, Nora Laura and Lamecker, Hans and Heppt, Werner and Zachow, Stefan and Goubergrits, Leonid}, title = {The Healthy Nasal Cavity - Characteristics of Morphology and Related Airflow Based on a Statistical Shape Model Viewed from a Surgeon's Perspective}, series = {Facial Plastic Surgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677721}, pages = {9 -- 13}, abstract = {Functional surgery on the nasal framework requires referential criteria to objectively assess nasal breathing for indication and follow-up. This motivated us to generate a mean geometry of the nasal cavity based on a statistical shape model.
In this study, the authors could demonstrate that the introduced nasal cavity's mean geometry features characteristics of the inner shape and airflow, which are commonly observed in symptom-free subjects. Therefore, the mean geometry might serve as a reference-like model when one considers qualitative aspects. However, to facilitate quantitative considerations and statistical inference, further research is necessary. Additionally, the authors were able to obtain details about the importance of the isthmus nasi and the inferior turbinate for the intranasal airstream.}, language = {en} } @article{ZachowHeppt, author = {Zachow, Stefan and Heppt, Werner}, title = {The Facial Profile}, series = {Facial Plastic Surgery}, volume = {31}, journal = {Facial Plastic Surgery}, number = {5}, doi = {10.1055/s-0035-1566132}, pages = {419 -- 420}, abstract = {Facial appearance in our societies is often associated with notions of attractiveness, juvenileness, beauty, success, and so forth. Hence, the role of facial plastic surgery is highly interrelated to a patient's desire to feature many of these positively connoted attributes, which of course, are subject of different cultural perceptions or social trends. To judge about somebody's facial appearance, appropriate quantitative measures as well as methods to obtain and compare individual facial features are required. This special issue on facial profile is intended to provide an overview on how facial characteristics are surgically managed in an interdisciplinary way based on experience, instrumentation, and modern technology to obtain an aesthetic facial appearance with harmonious facial proportions. The facial profile will be discussed within the context of facial aesthetics. Latest concepts for capturing facial morphology in high speed and impressive detail are presented for quantitative analysis of even subtle changes, aging effects, or facial expressions. In addition, the perception of facial profiles is evaluated based on eye tracking technology.}, language = {en} } @article{LemanisKornZachowetal., author = {Lemanis, Robert and Korn, Dieter and Zachow, Stefan and Rybacki, Erik and Hoffmann, Ren{\´e}}, title = {The Evolution and Development of Cephalopod Chambers and Their Shape}, series = {PLOS ONE}, volume = {11}, journal = {PLOS ONE}, number = {3}, doi = {10.1371/journal.pone.0151404}, abstract = {The Ammonoidea is a group of extinct cephalopods ideal to study evolution through deep time. The evolution of the planispiral shell and complexly folded septa in ammonoids has been thought to have increased the functional surface area of the chambers permitting enhanced metabolic functions such as: chamber emptying, rate of mineralization and increased growth rates throughout ontogeny. Using nano-computed tomography and synchrotron radiation based micro-computed tomography, we present the first study of ontogenetic changes in surface area to volume ratios in the phragmocone chambers of several phylogenetically distant ammonoids and extant cephalopods. Contrary to the initial hypothesis, ammonoids do not possess a persistently high relative chamber surface area. Instead, the functional surface area of the chambers is higher in earliest ontogeny when compared to Spirula spirula. The higher the functional surface area the quicker the potential emptying rate of the chamber; quicker chamber emptying rates would theoretically permit faster growth.
This is supported by the persistently higher siphuncular surface area to chamber volume ratio we collected for the ammonite Amauroceras sp. compared to either S. spirula or nautilids. We demonstrate that the curvature of the surface of the chamber increases with greater septal complexity increasing the potential refilling rates. We further show a unique relationship between ammonoid chamber shape and size that does not exist in S. spirula or nautilids. This view of chamber function also has implications for the evolution of the internal shell of coleoids, relating this event to the decoupling of soft-body growth and shell growth.}, language = {en} } @article{RybakKussLameckeretal.2010, author = {Rybak, J{\"u}rgen and Kuß, Anja and Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian and Lienhard, Matthias and Singer, Jochen and Neubert, Kerstin and Menzel, Randolf}, title = {The Digital Bee Brain: Integrating and Managing Neurons in a Common 3D Reference System}, series = {Front. Syst. Neurosci.}, volume = {4}, journal = {Front. Syst. Neurosci.}, number = {30}, doi = {10.3389/fnsys.2010.00030}, year = {2010}, language = {en} } @inproceedings{vonBergDworzakKlinderetal.2011, author = {von Berg, Jens and Dworzak, Jalda and Klinder, Tobias and Manke, Dirk and Lamecker, Hans and Zachow, Stefan and Lorenz, Cristian}, title = {Temporal Subtraction of Chest Radiographs Compensating Pose Differences}, series = {SPIE Medical Imaging}, booktitle = {SPIE Medical Imaging}, year = {2011}, language = {en} } @article{HaberlHellZoeckleretal.2004, author = {Haberl, Hannes and Hell, Bertold and Z{\"o}ckler, Maja and Zachow, Stefan and Lamecker, Hans and Sarrafzadeh, Asita and Riecke, B. and Langsch, Wolfgang and Deuflhard, Peter and Bier, J{\"u}rgen and Brock, Mario}, title = {Technical aspects and results of surgery for craniosynostosis}, series = {Zentralblatt f{\"u}r Neurochirurgie}, volume = {65}, journal = {Zentralblatt f{\"u}r Neurochirurgie}, number = {2}, pages = {65 -- 74}, year = {2004}, language = {en} } @article{KraemerMaggioniBrissonetal., author = {Kr{\"a}mer, Martin and Maggioni, Marta and Brisson, Nicholas and Zachow, Stefan and Teichgr{\"a}ber, Ulf and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {T1 and T2* mapping of the human quadriceps and patellar tendons using ultra-short echo-time (UTE) imaging and bivariate relaxation parameter-based volumetric visualization}, series = {Magnetic Resonance Imaging}, volume = {63}, journal = {Magnetic Resonance Imaging}, number = {11}, doi = {10.1016/j.mri.2019.07.015}, pages = {29 -- 36}, abstract = {Quantification of magnetic resonance (MR)-based relaxation parameters of tendons and ligaments is challenging due to their very short transverse relaxation times, requiring application of ultra-short echo-time (UTE) imaging sequences. We quantify both T1 and T2⁎ in the quadriceps and patellar tendons of healthy volunteers at a field strength of 3 T and visualize the results based on 3D segmentation by using bivariate histogram analysis. We applied a 3D ultra-short echo-time imaging sequence with either variable repetition times (VTR) or variable flip angles (VFA) for T1 quantification in combination with multi-echo acquisition for extracting T2⁎. The values of both relaxation parameters were subsequently binned for bivariate histogram analysis and corresponding cluster identification, which were subsequently visualized. 
Based on manually-drawn regions of interest in the tendons on the relaxation parameter maps, T1 and T2⁎ boundaries were selected in the bivariate histogram to segment the quadriceps and patellar tendons and visualize the relaxation times by 3D volumetric rendering. Segmentation of bone marrow, fat, muscle and tendons was successfully performed based on the bivariate histogram analysis. Based on the segmentation results mean T2⁎ relaxation times, over the entire tendon volumes averaged over all subjects, were 1.8 ms ± 0.1 ms and 1.4 ms ± 0.2 ms for the patellar and quadriceps tendons, respectively. The mean T1 value of the patellar tendon, averaged over all subjects, was 527 ms ± 42 ms and 476 ms ± 40 ms for the VFA and VTR acquisitions, respectively. The quadriceps tendon had higher mean T1 values of 662 ms ± 97 ms (VFA method) and 637 ms ± 40 ms (VTR method) compared to the patellar tendon. 3D volumetric visualization of the relaxation times revealed that T1 values are not constant over the volume of both tendons, but vary locally. This work provided additional data to build upon the scarce literature available on relaxation times in the quadriceps and patellar tendons. We were able to segment both tendons and to visualize the relaxation parameter distributions over the entire tendon volumes.}, language = {en} } @article{LameckerZachowHegeetal.2006, author = {Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian and Z{\"o}ckler, Maja}, title = {Surgical treatment of craniosynostosis based on a statistical 3D-shape model}, series = {Int. J. Computer Assisted Radiology and Surgery}, volume = {1(1)}, journal = {Int. J. Computer Assisted Radiology and Surgery}, doi = {10.1007/s11548-006-0024-x}, pages = {253 -- 254}, year = {2006}, language = {en} } @misc{SahuDillMukhopadyayetal., author = {Sahu, Manish and Dill, Sabrina and Mukhopadyay, Anirban and Zachow, Stefan}, title = {Surgical Tool Presence Detection for Cataract Procedures}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69110}, abstract = {This article outlines the submission to the CATARACTS challenge for automatic tool presence detection [1]. Our approach for this multi-label classification problem comprises labelset-based sampling, a CNN architecture and temporal smoothing as described in [3], which we call ZIB-Res-TS.}, language = {en} } @article{SahuSzengelMukhopadhyayetal.2020, author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Surgical phase recognition by learning phase transitions}, series = {Current Directions in Biomedical Engineering (CDBME)}, volume = {6}, journal = {Current Directions in Biomedical Engineering (CDBME)}, number = {1}, publisher = {De Gruyter}, doi = {10.1515/cdbme-2020-0037}, pages = {20200037}, year = {2020}, abstract = {Automatic recognition of surgical phases is an important component for developing an intra-operative context-aware system. Prior work in this area focuses on recognizing short-term tool usage patterns within surgical phases. However, the difference between intra- and inter-phase tool usage patterns has not been investigated for automatic phase recognition. We developed a Recurrent Neural Network (RNN), in particular a state-preserving Long Short Term Memory (LSTM) architecture to utilize the long-term evolution of tool usage within complete surgical procedures.
For fully automatic tool presence detection from surgical video frames, a Convolutional Neural Network (CNN) based architecture namely ZIBNet is employed. Our proposed approach outperformed EndoNet by 8.1\% on overall precision for phase detection tasks and 12.5\% on meanAP for tool recognition tasks.}, language = {en} } @inproceedings{NkenkeHaeuslerNeukametal.2005, author = {Nkenke, Emeka and H{\"a}usler, Gerd and Neukam, Friedrich and Zachow, Stefan}, title = {Streak artifact correction of CT data by optical 3D imaging in the simulation of orthognathic surgery}, series = {Computer Assisted Radiology and Surgery (CARS)}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, address = {Berlin Germany}, doi = {doi:10.1016/j.ics.2005.03.278}, year = {2005}, language = {en} } @misc{AmbellanLameckervonTycowiczetal., author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, issn = {1438-0064}, doi = {10.1007/978-3-030-19385-0_5}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72699}, abstract = {In our chapter we are describing how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also a visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence, illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape also the appearance of objects will be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are for instance the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance lead to articulated Statistical Shape and Appearance Models (a-SSAMs).After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will shortly describe clinical applications where such models have been successfully employed. Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. 
SSMs consisting of several thousands of objects offer, in combination with statistical methods ormachine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @incollection{AmbellanLameckervonTycowiczetal., author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, series = {Biomedical Visualisation}, volume = {3}, booktitle = {Biomedical Visualisation}, number = {1156}, editor = {Rea, Paul M.}, edition = {1}, publisher = {Springer Nature Switzerland AG}, isbn = {978-3-030-19384-3}, doi = {10.1007/978-3-030-19385-0_5}, pages = {67 -- 84}, abstract = {In our chapter we are describing how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also a visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence, illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape also the appearance of objects will be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are for instance the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance lead to articulated Statistical Shape and Appearance Models (a-SSAMs).After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will shortly describe clinical applications where such models have been successfully employed. Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. 
SSMs consisting of several thousands of objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @incollection{LameckerZachow, author = {Lamecker, Hans and Zachow, Stefan}, title = {Statistical Shape Modeling of Musculoskeletal Structures and Its Applications}, series = {Computational Radiology for Orthopaedic Interventions}, volume = {23}, booktitle = {Computational Radiology for Orthopaedic Interventions}, publisher = {Springer}, isbn = {978-3-319-23481-6}, doi = {10.1007/978-3-319-23482-3}, pages = {1 -- 23}, abstract = {Statistical shape models (SSM) describe the shape variability contained in a given population. They are able to describe large populations of complex shapes with few degrees of freedom. This makes them a useful tool for a variety of tasks that arise in computer-aided medicine. In this chapter we are going to explain the basic methodology of SSMs and present a variety of examples, where SSMs have been successfully applied.}, language = {en} } @inproceedings{LameckerZoecklerHaberletal.2005, author = {Lamecker, Hans and Z{\"o}ckler, Maja and Haberl, Hannes and Zachow, Stefan and Hege, Hans-Christian}, title = {Statistical shape modeling for craniosynostosis planning}, series = {2nd International Conference Advanced Digital Technology in Head and Neck Reconstruction 2005, Abstract Volume}, booktitle = {2nd International Conference Advanced Digital Technology in Head and Neck Reconstruction 2005, Abstract Volume}, address = {Banff, Alberta}, pages = {64}, year = {2005}, language = {en} } @article{GreweLiuKahletal., author = {Grewe, Carl Martin and Liu, Tuo and Kahl, Christoph and Hildebrandt, Andrea and Zachow, Stefan}, title = {Statistical Learning of Facial Expressions Improves Realism of Animated Avatar Faces}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, publisher = {Frontiers}, doi = {10.3389/frvir.2021.619811}, pages = {1 -- 13}, language = {en} } @misc{GreweLeRouxPilzetal., author = {Grewe, Carl Martin and Le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-67696}, abstract = {3D Morphable Models (MM) are a popular tool for analysis and synthesis of facial expressions. They represent plausible variations in facial shape and appearance within a low-dimensional parameter space. Fitted to a face scan, the model's parameters compactly encode its expression patterns. This expression code can be used, for instance, as a feature in automatic facial expression recognition. For accurate classification, an MM that can adequately represent the various characteristic facets and variants of each expression is necessary. Currently available MMs are limited in the diversity of expression patterns. We present a novel high-quality Facial Expression Morphable Model built from a large-scale face database as a tool for expression analysis and synthesis. Establishment of accurate dense correspondence, up to finest skin features, enables a detailed statistical analysis of facial expressions. Various characteristic shape patterns are identified for each expression. The results of our analysis give rise to a new facial expression code.
We demonstrate the advantages of such a code for the automatic recognition of expressions, and compare the accuracy of our classifier to the state of the art.}, language = {en} }