@inproceedings{GladilinZachowDeuflhardetal.2001, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Validierung eines linear elastischen Modells f{\"u}r die Weichgewebesimulation in der Mund-Kiefer-Gesichtschirurgie}, booktitle = {Bildverarbeitung f{\"u}r die Medizin (BVM)}, address = {L{\"u}beck, Germany}, pages = {57 -- 61}, year = {2001}, language = {de} } @inproceedings{GladilinZachowDeuflhardetal.2003, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Realistic prediction of individual facial emotion expressions for craniofacial surgery simulations}, volume = {5029}, booktitle = {Proc. SPIE Medical Imaging 2003}, editor = {Galloway, Robert}, address = {San Diego, CA, USA}, doi = {10.1117/12.479584}, pages = {520 -- 527}, year = {2003}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2003, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {On constitutive modeling of soft tissue for the long-term prediction of cranio-maxillofacial surgery outcome}, volume = {1256}, booktitle = {International Congress Series, CARS2003, Computer Assisted Radiology and Surgery, Proceedings of the 17th International Congress and Exhibition}, doi = {10.1016/S0531-5131(03)00500-4}, pages = {343 -- 348}, year = {2003}, language = {en} } @inproceedings{ZachowGladilinSaderetal.2003, author = {Zachow, Stefan and Gladilin, Evgeny and Sader, Robert and Zeilhofer, Hans-Florian}, title = {Draw \& Cut: Intuitive 3D Osteotomy Planning on Polygonal Bone Models}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, address = {London, UK}, doi = {10.1016/S0531-5131(03)00272-3}, pages = {362 -- 369}, year = {2003}, language = {en} } @article{GladilinZachowDeuflhardetal.2004, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Anatomy- and physics-based facial animation for craniofacial surgery simulations}, volume = {42(2)}, journal = {Med Biol Eng Comput.}, doi = {10.1007/BF02344627}, pages = {167 -- 170}, year = {2004}, language = {en} } @inproceedings{ZachowGladilinHegeetal.2002, author = {Zachow, Stefan and Gladilin, Evgeny and Hege, Hans-Christian and Deuflhard, Peter}, title = {Towards Patient Specific, Anatomy Based Simulation of Facial Mimics for Surgical Nerve Rehabilitation}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, publisher = {Springer Verlag}, pages = {3 -- 6}, year = {2002}, language = {en} } @article{ZachowGladilinTrepczynskietal.2002, author = {Zachow, Stefan and Gladilin, Evgeny and Trepczynski, Adam and Sader, Robert and Zeilhofer, Hans-Florian}, title = {3D Osteotomy Planning in Cranio-Maxillofacial Surgery: Experiences and Results of Surgery Planning and Volumetric Finite-Element Soft Tissue Prediction in Three Clinical Cases}, journal = {Computer Assisted Radiology and Surgery (CARS)}, publisher = {Springer Verlag}, pages = {983 -- 987}, year = {2002}, language = {en} } @inproceedings{ZachowErdmannHegeetal.2004, author = {Zachow, Stefan and Erdmann, Bodo and Hege, Hans-Christian and Deuflhard, Peter}, title = {Advances in 3D osteotomy planning with 3D soft tissue prediction}, booktitle = {Proc.
2nd International Symposium on Computer Aided Surgery around the Head, Abstract}, address = {Bern}, pages = {31}, year = {2004}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2002, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Adaptive Nonlinear Elastic FEM for Realistic Prediction of Soft Tissue in Craniofacial Surgery Simulations}, volume = {4681}, booktitle = {Proc. SPIE Medical Imaging 2002}, editor = {Mun, Seong K.}, address = {San Diego, USA}, doi = {10.1117/12.466906}, pages = {1 -- 8}, year = {2002}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2002, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Shape-based Modeling Approach for the Estimation of Individual Facial Mimics in Craniofacial Surgery Planning}, volume = {4681}, booktitle = {Proc. SPIE Medical Imaging 2002: Visualization, Image-Guided Procedures, and Display}, editor = {Mun, Seong}, address = {San Diego, USA}, pages = {242 -- 248}, year = {2002}, language = {en} } @inproceedings{HegeSchirmacherWesterhoffetal.2002, author = {Hege, Hans-Christian and Schirmacher, Hartmut and Westerhoff, Malte and Lamecker, Hans and Prohaska, Steffen and Zachow, Stefan}, title = {From Image Data to Three-Dimensional Models - Case Studies on the Impact of 3D Patient Models}, booktitle = {Proceedings of the Japan Korea Computer Graphics Conference 2002}, publisher = {Kanazawa University}, address = {Kanazawa City, Ishikawa, Japan}, year = {2002}, language = {en} } @article{LameckerZachowWittmersetal.2006, author = {Lamecker, Hans and Zachow, Stefan and Wittmers, Antonia and Weber, Britta and Hege, Hans-Christian and Elsholtz, Barbara and Stiller, Michael}, title = {Automatic segmentation of mandibles in low-dose CT-data}, volume = {1(1)}, journal = {Int. J. Computer Assisted Radiology and Surgery}, pages = {393 -- 395}, year = {2006}, language = {en} } @article{LameckerZachowHegeetal.2006, author = {Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian and Z{\"o}ckler, Maja}, title = {Surgical treatment of craniosynostosis based on a statistical 3D-shape model}, volume = {1(1)}, journal = {Int. J. Computer Assisted Radiology and Surgery}, doi = {10.1007/s11548-006-0024-x}, pages = {253 -- 254}, year = {2006}, language = {en} } @article{ZachowHegeDeuflhard2006, author = {Zachow, Stefan and Hege, Hans-Christian and Deuflhard, Peter}, title = {Computer assisted planning in cranio-maxillofacial surgery}, volume = {14(1)}, journal = {Journal of Computing and Information Technology}, pages = {53 -- 64}, year = {2006}, language = {en} } @article{ZachowLameckerElsholtzetal.2006, author = {Zachow, Stefan and Lamecker, Hans and Elsholtz, Barbara and Stiller, Michael}, title = {Is the course of the mandibular nerve deducible from the shape of the mandible?}, journal = {Int. J. of Computer Assisted Radiology and Surgery}, publisher = {Springer}, pages = {415 -- 417}, year = {2006}, language = {en} } @article{ZachowSteinmannHildebrandtetal.2006, author = {Zachow, Stefan and Steinmann, Alexander and Hildebrandt, Thomas and Weber, Rainer and Heppt, Werner}, title = {CFD simulation of nasal airflow: Towards treatment planning for functional rhinosurgery}, journal = {Int. J.
of Computer Assisted Radiology and Surgery}, publisher = {Springer}, pages = {165 -- 167}, year = {2006}, language = {en} } @inproceedings{HierlWollnyZachowetal.2002, author = {Hierl, Thomas and Wollny, Gert and Zachow, Stefan and Kl{\"o}ppel, Rainer}, title = {Visualisierung von Knochen und Weichteilver{\"a}nderungen in der Distraktionsosteogenese des Mittelgesichtes}, booktitle = {Proc. 10. Jahrestagung der Deutschen Gesellschaft f{\"u}r Sch{\"a}delbasischirurgie}, address = {Heidelberg}, pages = {111 -- 116}, year = {2002}, language = {de} } @inproceedings{ZachowGladilinHegeetal.2000, author = {Zachow, Stefan and Gladilin, Evgeny and Hege, Hans-Christian and Deuflhard, Peter}, title = {Finite-Element Simulation of Soft Tissue Deformation}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, publisher = {Elsevier Science B.V.}, pages = {23 -- 28}, year = {2000}, language = {en} } @inproceedings{ZachowLuethStallingetal.1999, author = {Zachow, Stefan and Lueth, Tim and Stalling, Detlev and Hein, Andreas and Klein, Martin and Menneking, Horst}, title = {Optimized Arrangement of Osseointegrated Implants: A Surgical Planning System for the Fixation of Facial Prostheses}, booktitle = {Computer Assisted Radiology and Surgery (CARS'99)}, publisher = {Elsevier Science B.V.}, pages = {942 -- 946}, year = {1999}, language = {en} } @inproceedings{HeinLuethZachowetal.1999, author = {Hein, Andreas and Lueth, Tim and Zachow, Stefan and Stien, Malte}, title = {A 2D Planning System for Robot-Assisted Interventions}, booktitle = {Computer Assisted Radiology and Surgery}, publisher = {Elsevier Science B.V.}, pages = {1049}, year = {1999}, language = {en} } @inproceedings{StallingSeebassZachow1999, author = {Stalling, Detlev and Seebaß, Martin and Zachow, Stefan}, title = {Mehrschichtige Oberfl{\"a}chenmodelle zur computergest{\"u}tzten Planung in der Chirurgie}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 1999 - Algorithmen, Anwendungen}, publisher = {Springer-Verlag, Berlin}, pages = {203 -- 207}, year = {1999}, language = {de} } @phdthesis{Zachow2005, author = {Zachow, Stefan}, title = {Computer assisted osteotomy planning in cranio-maxillofacial surgery under consideration of facial soft tissue changes}, year = {2005}, language = {en} } @misc{EhlkeRammLameckeretal.2012, author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Zachow, Stefan}, title = {Efficient projection and deformation of volumetric intensity models for accurate simulation of X-ray images}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-16580}, year = {2012}, abstract = {We present an efficient GPU-based method to generate virtual X-ray images from tetrahedral meshes which are associated with attenuation values. In addition, a novel approach is proposed that performs the model deformation on the GPU. The tetrahedral grids are derived from volumetric statistical shape and intensity models (SSIMs) and describe anatomical structures. Our research targets at reconstructing 3D anatomical shapes by comparing virtual X-ray images generated using our novel approach with clinical data while varying the shape and density of the SSIM in an optimization process. We assume that a deformed SSIM adequately represents an anatomy of interest when the similarity between the virtual and the clinical X-ray image is maximized.
The OpenGL implementation presented here generates accurate (virtual) X-ray images at interactive rates, thus qualifying it for its use in the reconstruction process.}, language = {en} } @misc{RammMorilloVictoriaTodtetal.2013, author = {Ramm, Heiko and Morillo Victoria, Oscar Salvador and Todt, Ingo and Schirmacher, Hartmut and Ernst, Arneborg and Zachow, Stefan and Lamecker, Hans}, title = {Visual Support for Positioning Hearing Implants}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42495}, year = {2013}, abstract = {We present a software planning tool that provides intuitive visual feedback for finding suitable positions of hearing implants in the human temporal bone. After an automatic reconstruction of the temporal bone anatomy the tool pre-positions the implant and allows the user to adjust its position interactively with simple 2D dragging and rotation operations on the bone's surface. During this procedure, visual elements like warning labels on the implant or color encoded bone density information on the bone geometry provide guidance for the determination of a suitable fit.}, language = {en} } @misc{AmbellanLameckervonTycowiczetal.2019, author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, issn = {1438-0064}, doi = {10.1007/978-3-030-19385-0_5}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72699}, year = {2019}, abstract = {In our chapter we are describing how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also a visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence, illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape also the appearance of objects will be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are for instance the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance lead to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will shortly describe clinical applications where such models have been successfully employed.
Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. SSMs consisting of several thousands of objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @misc{AmbellanTackEhlkeetal.2019, author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72704}, year = {2019}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging (MRI) that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data from the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets from the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers is achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We make the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining localized classification via CNNs with statistical anatomical knowledge via SSMs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @incollection{AmbellanLameckervonTycowiczetal.2019, author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, volume = {3}, booktitle = {Biomedical Visualisation}, number = {1156}, editor = {Rea, Paul M.}, edition = {1}, publisher = {Springer Nature Switzerland AG}, isbn = {978-3-030-19384-3}, doi = {10.1007/978-3-030-19385-0_5}, pages = {67 -- 84}, year = {2019}, abstract = {In our chapter we are describing how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also a visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence, illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way.
SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape also the appearance of objects will be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are for instance the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance lead to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will shortly describe clinical applications where such models have been successfully employed. Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. SSMs consisting of several thousands of objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @article{HildebrandtBrueningSchmidtetal.2019, author = {Hildebrandt, Thomas and Bruening, Jan Joris and Schmidt, Nora Laura and Lamecker, Hans and Heppt, Werner and Zachow, Stefan and Goubergrits, Leonid}, title = {The Healthy Nasal Cavity - Characteristics of Morphology and Related Airflow Based on a Statistical Shape Model Viewed from a Surgeon's Perspective}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677721}, pages = {9 -- 13}, year = {2019}, abstract = {Functional surgery on the nasal framework requires referential criteria to objectively assess nasal breathing for indication and follow-up. This motivated us to generate a mean geometry of the nasal cavity based on a statistical shape model. In this study, the authors could demonstrate that the introduced nasal cavity's mean geometry features characteristics of the inner shape and airflow, which are commonly observed in symptom-free subjects. Therefore, the mean geometry might serve as a reference-like model when one considers qualitative aspects. However, to facilitate quantitative considerations and statistical inference, further research is necessary.
Additionally, the authors were able to obtain details about the importance of the isthmus nasi and the inferior turbinate for the intranasal airstream.}, language = {en} } @article{HildebrandtBrueningLameckeretal.2019, author = {Hildebrandt, Thomas and Bruening, Jan Joris and Lamecker, Hans and Zachow, Stefan and Heppt, Werner and Schmidt, Nora and Goubergrits, Leonid}, title = {Digital Analysis of Nasal Airflow Facilitating Decision Support in Rhinosurgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677720}, pages = {1 -- 8}, year = {2019}, abstract = {Successful functional surgery on the nasal framework requires reliable and comprehensive diagnosis. In this regard, the authors introduce a new methodology: Digital Analysis of Nasal Airflow (diANA). It is based on computational fluid dynamics, a statistical shape model of the healthy nasal cavity and rhinologic expertise. diANA necessitates an anonymized tomographic dataset of the paranasal sinuses including the complete nasal cavity and, when available, clinical information. The principle of diANA is to compare the morphology and the respective airflow of an individual nose with those of a reference. This enables morphometric aberrations and consecutive flow field anomalies to localize and quantify within a patient's nasal cavity. Finally, an elaborated expert opinion with instructive visualizations is provided. Using diANA might support surgeons in decision-making, avoiding unnecessary surgery, gaining more precision, and target-orientation for indicated operations.}, language = {en} } @inproceedings{TackZachow2019, author = {Tack, Alexander and Zachow, Stefan}, title = {Accurate Automated Volumetry of Cartilage of the Knee using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, booktitle = {IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}, doi = {10.1109/ISBI.2019.8759201}, pages = {40 -- 43}, year = {2019}, abstract = {Volumetry of cartilage of the knee is needed for knee osteoarthritis (KOA) assessment. It is typically performed manually in a tedious and subjective process. We developed a method for an automated, segmentation-based quantification of cartilage volume by employing 3D Convolutional Neural Networks (CNNs). CNNs were trained in a supervised manner using magnetic resonance imaging data and cartilage volumetry readings performed by clinical experts for 1378 subjects provided by the Osteoarthritis Initiative. It was shown that 3D CNNs are able to achieve volume measures comparable to the magnitude of variation between expert readings and the real in vivo situation. In the future, accurate automated cartilage volumetry might support both, diagnosis of KOA as well as longitudinal analysis of KOA progression.}, language = {en} } @misc{AmbellanTackEhlkeetal.2019, author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative (Supplementary Material)}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, doi = {10.12752/4.ATEZ.1.0}, pages = {109 -- 118}, year = {2019}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs).
The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data of the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets of the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We made the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining statistical anatomical knowledge via SSMs with the localized classification via CNNs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{AmbellanTackEhlkeetal.2019, author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, doi = {10.1016/j.media.2018.11.009}, pages = {109 -- 118}, year = {2019}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data of the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets of the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We made the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. 
In conclusion, combining statistical anatomical knowledge via SSMs with the localized classification via CNNs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{AlHajjSahuLamardetal.2019, author = {Al Hajj, Hassan and Sahu, Manish and Lamard, Mathieu and Conze, Pierre-Henri and Roychowdhury, Soumali and Hu, Xiaowei and Marsalkaite, Gabija and Zisimopoulos, Odysseas and Dedmari, Muneer Ahmad and Zhao, Fenqiang and Prellberg, Jonas and Galdran, Adrian and Araujo, Teresa and Vo, Duc My and Panda, Chandan and Dahiya, Navdeep and Kondo, Satoshi and Bian, Zhengbing and Bialopetravicius, Jonas and Qiu, Chenghui and Dill, Sabrina and Mukhopadyay, Anirban and Costa, Pedro and Aresta, Guilherme and Ramamurthy, Senthil and Lee, Sang-Woong and Campilho, Aurelio and Zachow, Stefan and Xia, Shunren and Conjeti, Sailesh and Armaitis, Jogundas and Heng, Pheng-Ann and Vahdat, Arash and Cochener, Beatrice and Quellec, Gwenole}, title = {CATARACTS: Challenge on Automatic Tool Annotation for cataRACT Surgery}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, publisher = {Elsevier}, doi = {10.1016/j.media.2018.11.008}, pages = {24 -- 41}, year = {2019}, abstract = {Surgical tool detection is attracting increasing attention from the medical image analysis community. The goal generally is not to precisely locate tools in images, but rather to indicate which tools are being used by the surgeon at each instant. The main motivation for annotating tool usage is to design efficient solutions for surgical workflow analysis, with potential applications in report generation, surgical training and even real-time decision support. Most existing tool annotation algorithms focus on laparoscopic surgeries. However, with 19 million interventions per year, the most common surgical procedure in the world is cataract surgery. The CATARACTS challenge was organized in 2017 to evaluate tool annotation algorithms in the specific context of cataract surgery. It relies on more than nine hours of videos, from 50 cataract surgeries, in which the presence of 21 surgical tools was manually annotated by two experts. With 14 participating teams, this challenge can be considered a success. As might be expected, the submitted solutions are based on deep learning. This paper thoroughly evaluates these solutions: in particular, the quality of their annotations are compared to that of human interpretations. Next, lessons learnt from the differential analysis of these solutions are discussed. We expect that they will guide the design of efficient surgery monitoring tools in the near future.}, language = {en} } @misc{TackZachow2019, author = {Tack, Alexander and Zachow, Stefan}, title = {Accurate Automated Volumetry of Cartilage of the Knee using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-71439}, year = {2019}, abstract = {Volumetry of the cartilage of the knee, as needed for the assessment of knee osteoarthritis (KOA), is typically performed in a tedious and subjective process. We present an automated segmentation-based method for the quantification of cartilage volume by employing 3D Convolutional Neural Networks (CNNs). CNNs were trained in a supervised manner using magnetic resonance imaging data as well as cartilage volumetry readings given by clinical experts for 1378 subjects. 
It was shown that 3D CNNs can be employed for cartilage volumetry with an accuracy similar to expert volumetry readings. In future, accurate automated cartilage volumetry might support both, diagnosis of KOA as well as assessment of KOA progression via longitudinal analysis.}, language = {en} } @article{HoffmannLemanisWulffetal.2018, author = {Hoffmann, Rene and Lemanis, Robert and Wulff, Lena and Zachow, Stefan and Lukeneder, Alexander and Klug, Christian and Keupp, Helmut}, title = {Traumatic events in the life of the deep-sea cephalopod mollusc, the coleoid Spirula spirula}, volume = {142}, journal = {Deep Sea Research Part I - Oceanographic Research}, number = {12}, doi = {10.1016/j.dsr.2018.10.007}, pages = {127 -- 144}, year = {2018}, abstract = {Here, we report on different types of shell pathologies of the enigmatic deep-sea (mesopelagic) cephalopod Spirula spirula. For the first time, we apply non-invasive imaging methods to: document trauma-induced changes in shell shapes, reconstruct the different causes and effects of these pathologies, unravel the etiology, and attempt to quantify the efficiency of the buoyancy apparatus. We have analysed 2D and 3D shell parameters from eleven shells collected as beach findings from the Canary Islands (Gran Canaria and Fuerteventura), West-Australia, and the Maldives. All shells were scanned with a nanotom-m computer tomograph. Seven shells were likely injured by predator attacks: fishes, cephalopods or crustaceans, one specimen was infested by an endoparasite (potentially Digenea) and one shell shows signs of inflammation and one shell shows large fluctuations of chamber volumes without any signs of pathology. These fluctuations are potential indicators of a stressed environment. Pathological shells represent the most deviant morphologies of a single species and can therefore be regarded as morphological end-members. The changes in the shell volume / chamber volume ratio were assessed in order to evaluate the functional tolerance of the buoyancy apparatus showing that these had little effect.}, language = {en} } @misc{TycowiczAmbellanMukhopadhyayetal.2016, author = {Tycowicz, Christoph von and Ambellan, Felix and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {A Riemannian Statistical Shape Model using Differential Coordinates}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61175}, year = {2016}, abstract = {We propose a novel Riemannian framework for statistical analysis of shapes that is able to account for the nonlinearity in shape variation. By adopting a physical perspective, we introduce a differential representation that puts the local geometric variability into focus. We model these differential coordinates as elements of a Lie group thereby endowing our shape space with a non-Euclidian structure. A key advantage of our framework is that statistics in a manifold shape space become numerically tractable improving performance by several orders of magnitude over state-of-the-art. We show that our Riemannian model is well suited for the identification of intra-population variability as well as inter-population differences. In particular, we demonstrate the superiority of the proposed model in experiments on specificity and generalization ability.
We further derive a statistical shape descriptor that outperforms the standard Euclidian approach in terms of shape-based classification of morphological disorders.}, language = {en} } @article{LemanisZachowHoffmann2016, author = {Lemanis, Robert and Zachow, Stefan and Hoffmann, Ren{\´e}}, title = {Comparative cephalopod shell strength and the role of septum morphology on stress distribution}, volume = {4}, journal = {PeerJ}, doi = {10.7717/peerj.2434}, pages = {e2434}, year = {2016}, abstract = {The evolution of complexly folded septa in ammonoids has long been a controversial topic. Explanations of the function of these folded septa can be divided into physiological and mechanical hypotheses with the mechanical functions tending to find widespread support. The complexity of the cephalopod shell has made it difficult to directly test the mechanical properties of these structures without oversimplification of the septal morphology or extraction of a small sub-domain. However, the power of modern finite element analysis now permits direct testing of mechanical hypothesis on complete, empirical models of the shells taken from computed tomographic data. Here we compare, for the first time using empirical models, the capability of the shells of extant Nautilus pompilius, Spirula spirula, and the extinct ammonite Cadoceras sp. to withstand hydrostatic pressure and point loads. Results show hydrostatic pressure imparts highest stress on the final septum with the rest of the shell showing minimal compression. S. spirula shows the lowest stress under hydrostatic pressure while N. pompilius shows the highest stress. Cadoceras sp. shows the development of high stress along the attachment of the septal saddles with the shell wall. Stress due to point loads decreases when the point force is directed along the suture as opposed to the unsupported chamber wall. Cadoceras sp. shows the greatest decrease in stress between the point loads compared to all other models. Greater amplitude of septal flutes corresponds with greater stress due to hydrostatic pressure; however, greater amplitude decreases the stress magnitude of point loads directed along the suture. In our models, sutural complexity does not predict greater resistance to hydrostatic pressure but it does seem to increase resistance to point loads, such as would be from predators. This result permits discussion of palaeoecological reconstructions on the basis of septal morphology. We further suggest that the ratio used to characterize septal morphology in the septal strength index and in calculations of tensile strength of nacre are likely insufficient. A better understanding of the material properties of cephalopod nacre may allow the estimation of maximum depth limits of shelled cephalopods through finite element analysis.}, language = {en} } @inproceedings{GreweZachow2016, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Fully Automated and Highly Accurate Dense Correspondence for Facial Surfaces}, volume = {9914}, booktitle = {Computer Vision - ECCV 2016 Workshops}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-319-48881-3_38}, pages = {552 -- 568}, year = {2016}, abstract = {We present a novel framework for fully automated and highly accurate determination of facial landmarks and dense correspondence, e.g. a topologically identical mesh of arbitrary resolution, across the entire surface of 3D face models. 
For robustness and reliability of the proposed approach, we are combining 2D landmark detectors and 3D statistical shape priors with a variational matching method. Instead of matching faces in the spatial domain only, we employ image registration to align the 2D parametrization of the facial surface to a planar template we call the Unified Facial Parameter Domain (ufpd). This allows us to simultaneously match salient photometric and geometric facial features using robust image similarity measures while reasonably constraining geometric distortion in regions with less significant features. We demonstrate the accuracy of the dense correspondence established by our framework on the BU3DFE database with 2500 facial surfaces and show, that our framework outperforms current state-of-the-art methods with respect to the fully automated location of facial landmarks.}, language = {en} } @article{BernardSalamancaThunbergetal.2016, author = {Bernard, Florian and Salamanca, Luis and Thunberg, Johan and Tack, Alexander and Jentsch, Dennis and Lamecker, Hans and Zachow, Stefan and Hertel, Frank and Goncalves, Jorge and Gemmar, Peter}, title = {Shape-aware Surface Reconstruction from Sparse Data}, journal = {arXiv}, arxiv = {http://arxiv.org/abs/arXiv:1602.08425v1}, pages = {1602.08425v1}, year = {2016}, abstract = {The reconstruction of an object's shape or surface from a set of 3D points is a common topic in materials and life sciences, computationally handled in computer graphics. Such points usually stem from optical or tactile 3D coordinate measuring equipment. Surface reconstruction also appears in medical image analysis, e.g. in anatomy reconstruction from tomographic measurements or the alignment of intra-operative navigation and preoperative planning data. In contrast to mere 3D point clouds, medical imaging yields contextual information on the 3D point data that can be used to adopt prior information on the shape that is to be reconstructed from the measurements. In this work we propose to use a statistical shape model (SSM) as a prior for surface reconstruction. The prior knowledge is represented by a point distribution model (PDM) that is associated with a surface mesh. Using the shape distribution that is modelled by the PDM, we reformulate the problem of surface reconstruction from a probabilistic perspective based on a Gaussian Mixture Model (GMM). In order to do so, the given measurements are interpreted as samples of the GMM. By using mixture components with anisotropic covariances that are oriented according to the surface normals at the PDM points, a surface-based fitting is accomplished. By estimating the parameters of the GMM in a maximum a posteriori manner, the reconstruction of the surface from the given measurements is achieved. Extensive experiments suggest that our proposed approach leads to superior surface reconstructions compared to Iterative Closest Point (ICP) methods.}, language = {en} } @article{BernardSalamancaThunbergetal.2017, author = {Bernard, Florian and Salamanca, Luis and Thunberg, Johan and Tack, Alexander and Jentsch, Dennis and Lamecker, Hans and Zachow, Stefan and Hertel, Frank and Goncalves, Jorge and Gemmar, Peter}, title = {Shape-aware Surface Reconstruction from Sparse 3D Point-Clouds}, volume = {38}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2017.02.005}, pages = {77 -- 89}, year = {2017}, abstract = {The reconstruction of an object's shape or surface from a set of 3D points plays an important role in medical image analysis, e.g.
in anatomy reconstruction from tomographic measurements or in the process of aligning intra-operative navigation and preoperative planning data. In such scenarios, one usually has to deal with sparse data, which significantly aggravates the problem of reconstruction. However, medical applications often provide contextual information about the 3D point data that allow to incorporate prior knowledge about the shape that is to be reconstructed. To this end, we propose the use of a statistical shape model (SSM) as a prior for surface reconstruction. The SSM is represented by a point distribution model (PDM), which is associated with a surface mesh. Using the shape distribution that is modelled by the PDM, we formulate the problem of surface reconstruction from a probabilistic perspective based on a Gaussian Mixture Model (GMM). In order to do so, the given points are interpreted as samples of the GMM. By using mixture components with anisotropic covariances that are "oriented" according to the surface normals at the PDM points, a surface-based fitting is accomplished. Estimating the parameters of the GMM in a maximum a posteriori manner yields the reconstruction of the surface from the given data points. We compare our method to the extensively used Iterative Closest Points method on several different anatomical datasets/SSMs (brain, femur, tibia, hip, liver) and demonstrate superior accuracy and robustness on sparse data.}, language = {en} } @article{GladilinZachowDeuflhardetal.2002, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {A nonlinear elastic soft tissue model for craniofacial surgery simulations}, volume = {12}, journal = {ESAIM, Proc.}, doi = {10.1051/proc:2002011}, pages = {61 -- 66}, year = {2002}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2002, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {A nonlinear soft tissue model for craniofacial surgery simulations}, booktitle = {Proc. of Modeling and Simulation for Computer-aided Medicine and Surgery (MS4CMS)}, publisher = {INRIA}, address = {Paris, France}, year = {2002}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2002, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Biomechanical modeling of individual facial emotion expressions}, booktitle = {Proc. of Visualization, Imaging, and Image Processing (VIIP)}, address = {Malaga, Spain}, pages = {7 -- 11}, year = {2002}, language = {en} } @inproceedings{GladilinZachowDeuflhardetal.2002, author = {Gladilin, Evgeny and Zachow, Stefan and Deuflhard, Peter and Hege, Hans-Christian}, title = {Biomechanisches Modell zur Absch{\"a}tzung der individuellen Gesichtsmimik}, booktitle = {Proc. of Workshop Bildverarbeitung f{\"u}r die Medizin (BVM)}, editor = {Meiler, M. and Saupe, D. and Krugel, F. and Handels, H.
and Lehmann, T.}, address = {Leipzig, Germany}, pages = {25 -- 28}, year = {2002}, language = {de} } @incollection{ZachowWeiserHegeetal.2005, author = {Zachow, Stefan and Weiser, Martin and Hege, Hans-Christian and Deuflhard, Peter}, title = {Soft Tissue Prediction in Computer Assisted Maxillofacial Surgery Planning}, booktitle = {Biomechanics Applied to Computer Assisted Surgery}, editor = {Payan, Y.}, publisher = {Research Signpost}, pages = {277 -- 298}, year = {2005}, language = {en} } @incollection{ZachowWeiserDeuflhard2008, author = {Zachow, Stefan and Weiser, Martin and Deuflhard, Peter}, title = {Modellgest{\"u}tzte Operationsplanung in der Kopfchirurgie}, booktitle = {Modellgest{\"u}tzte Therapie}, editor = {Niederlag, Wolfgang and Lemke, Heinz and Meixensberger, J{\"u}rgen and Baumann, Michael}, publisher = {Health Academy}, pages = {140 -- 156}, year = {2008}, language = {de} } @article{ZeilhoferZachowFairleyetal.2000, author = {Zeilhofer, Hans-Florian and Zachow, Stefan and Fairley, Jeffrey and Sader, Robert and Deuflhard, Peter}, title = {Treatment Planning and Simulation in Craniofacial Surgery with Virtual Reality Techniques}, volume = {28 (Suppl. 1)}, journal = {Journal of Cranio-Maxillofacial Surgery}, pages = {82}, year = {2000}, language = {en} } @article{TaylorPoepplauKoenigetal.2011, author = {Taylor, William R. and P{\"o}pplau, Berry M. and K{\"o}nig, Christian and Ehrig, Rainald and Zachow, Stefan and Duda, Georg and Heller, Markus O.}, title = {The medial-lateral force distribution in the ovine stifle joint during walking}, volume = {29}, journal = {Journal of Orthopaedic Research}, number = {4}, doi = {10.1002/jor.21254}, pages = {567 -- 571}, year = {2011}, language = {en} } @article{LamasRodriguezHerasArgueelloetal.2013, author = {Lamas-Rodr{\´i}guez, Juli{\´a}n and Heras, Dora Blanco and Arg{\"u}ello, Francisco and Kainm{\"u}ller, Dagmar and Zachow, Stefan and B{\´o}o, Montserrat}, title = {GPU-accelerated level-set segmentation}, journal = {Journal of Real-Time Image Processing}, publisher = {Springer Berlin Heidelberg}, issn = {1861-8200}, doi = {10.1007/s11554-013-0378-6}, pages = {1 -- 15}, year = {2013}, language = {en} } @article{HoffmannSchultzSchellhornetal.2014, author = {Hoffmann, Ren{\´e} and Schultz, Julia A. and Schellhorn, Rico and Rybacki, Erik and Keupp, Helmut and Gerden, S. R. and Lemanis, Robert and Zachow, Stefan}, title = {Non-invasive imaging methods applied to neo- and paleontological cephalopod research}, volume = {11}, journal = {Biogeosciences}, number = {10}, doi = {10.5194/bg-11-2721-2014}, pages = {2721 -- 2739}, year = {2014}, abstract = {Several non-invasive methods are common practice in natural sciences today. Here we present how they can be applied and contribute to current topics in cephalopod (paleo-) biology. Different methods will be compared in terms of time necessary to acquire the data, amount of data, accuracy/resolution, minimum/maximum size of objects that can be studied, the degree of post-processing needed and availability. The main application of the methods is seen in morphometry and volumetry of cephalopod shells. In particular we present a method for precise buoyancy calculation. Therefore, cephalopod shells were scanned together with different reference bodies, an approach developed in medical sciences. It is necessary to know the volume of the reference bodies, which should have similar absorption properties like the object of interest. Exact volumes can be obtained from surface scanning.
Depending on the dimensions of the study object different computed tomography techniques were applied.}, language = {en} } @inproceedings{RammVictoriaMorilloTodtetal.2013, author = {Ramm, Heiko and Victoria Morillo, Oscar Salvador and Todt, Ingo and Schirmacher, Hartmut and Ernst, Arneborg and Zachow, Stefan and Lamecker, Hans}, title = {Visual Support for Positioning Hearing Implants}, booktitle = {Proceedings of the 12th annual meeting of the CURAC society}, editor = {Freysinger, Wolfgang}, pages = {116 -- 120}, year = {2013}, language = {en} } @article{KainmuellerLameckerHelleretal.2013, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Heller, Markus O. and Weber, Britta and Hege, Hans-Christian and Zachow, Stefan}, title = {Omnidirectional Displacements for Deformable Surfaces}, volume = {17}, journal = {Medical Image Analysis}, number = {4}, publisher = {Elsevier}, doi = {10.1016/j.media.2012.11.006}, pages = {429 -- 441}, year = {2013}, language = {en} } @article{DunlopApanaskevichLehmannetal.2016, author = {Dunlop, Jason and Apanaskevich, Dmitry and Lehmann, Jens and Hoffmann, Rene and Fusseis, Florian and Ehlke, Moritz and Zachow, Stefan and Xiao, Xianghui}, title = {Microtomography of the Baltic amber tick Ixodes succineus reveals affinities with the modern Asian disease vector Ixodes ovatus}, volume = {16}, journal = {BMC Evolutionary Biology}, number = {1}, doi = {10.1186/s12862-016-0777-y}, year = {2016}, abstract = {Background: Fossil ticks are extremely rare, whereby Ixodes succineus Weidner, 1964 from Eocene (ca. 44-49 Ma) Baltic amber is one of the oldest examples of a living hard tick genus (Ixodida: Ixodidae). Previous work suggested it was most closely related to the modern and widespread European sheep tick Ixodes ricinus (Linnaeus, 1758). Results: Restudy using phase contrast synchrotron x-ray tomography yielded images of exceptional quality. These confirm the fossil's referral to Ixodes Latreille, 1795, but the characters resolved here suggest instead affinities with the Asian subgenus Partipalpiger Hoogstraal et al., 1973 and its single living (and medically significant) species Ixodes ovatus Neumann, 1899. We redescribe the amber fossil here as Ixodes (Partipalpiger) succineus. Conclusions: Our data suggest that Ixodes ricinus is unlikely to be directly derived from Weidner's amber species, but instead reveals that the Partipalpiger lineage was originally more widely distributed across the northern hemisphere. The closeness of Ixodes (P.) succineus to a living vector of a wide range of pathogens offers the potential to correlate its spatial and temporal position (northern Europe, nearly 50 million years ago) with the estimated origination dates of various tick-borne diseases.}, language = {en} } @article{SchenklMuggenthalerHubigetal.2017, author = {Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Erdmann, Bodo and Weiser, Martin and Zachow, Stefan and Heinrich, Andreas and G{\"u}ttler, Felix Victor and Teichgr{\"a}ber, Ulf and Mall, Gita}, title = {Automatic CT-based finite element model generation for temperature-based death time estimation: feasibility study and sensitivity analysis}, volume = {131}, journal = {International Journal of Legal Medicine}, number = {3}, doi = {10.1007/s00414-016-1523-0}, pages = {699 -- 712}, year = {2017}, abstract = {Temperature based death time estimation is based either on simple phenomenological models of corpse cooling or on detailed physical heat transfer models.
The latter are much more complex, but allow a higher accuracy of death time estimation as in principle all relevant cooling mechanisms can be taken into account. Here, a complete work flow for finite element based cooling simulation models is presented. The following steps are demonstrated on CT-phantoms: • CT-scan • Segmentation of the CT images for thermodynamically relevant features of individual geometries • Conversion of the segmentation result into a Finite Element (FE) simulation model • Computation of the model cooling curve • Calculation of the cooling time. For the first time in FE-based cooling time estimation the steps from the CT image over segmentation to FE model generation are semi-automatically performed. The cooling time calculation results are compared to cooling measurements performed on the phantoms under controlled conditions. In this context, the method is validated using different CT phantoms. Some of the CT phantoms' thermodynamic material parameters had to be experimentally determined via independent experiments. Moreover the impact of geometry and material parameter uncertainties on the estimated cooling time is investigated by a sensitivity analysis.}, language = {en} } @inproceedings{JoachimskyAmbellanZachow2017, author = {Joachimsky, Robert and Ambellan, Felix and Zachow, Stefan}, title = {Computerassistierte Auswahl und Platzierung von interpositionalen Spacern zur Behandlung fr{\"u}her Gonarthrose}, volume = {16}, booktitle = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65321}, pages = {106 -- 111}, year = {2017}, abstract = {Degenerative Gelenkerkrankungen, wie die Osteoarthrose, sind ein h{\"a}ufiges Krankheitsbild unter {\"a}lteren Erwachsenen. Hierbei verringert sich u.a. der Gelenkspalt aufgrund degenerierten Knorpels oder gesch{\"a}digter Menisci. Ein in den Gelenkspalt eingebrachter interpositionaler Spacer soll die mit der Osteoarthrose einhergehende verringerte Gelenkkontaktfl{\"a}che erh{\"o}hen und so der teilweise oder vollst{\"a}ndige Gelenkersatz hinausgez{\"o}gert oder vermieden werden. In dieser Arbeit pr{\"a}sentieren wir eine Planungssoftware f{\"u}r die Auswahl und Positionierung eines interpositionalen Spacers am Patientenmodell. Auf einer MRT-basierten Bildsegmentierung aufbauend erfolgt eine geometrische Rekonstruktion der 3D-Anatomie des Kniegelenks. Anhand dieser wird der Gelenkspalt bestimmt, sowie ein Spacer ausgew{\"a}hlt und algorithmisch vorpositioniert. Die Positionierung des Spacers ist durch den Benutzer jederzeit interaktiv anpassbar. F{\"u}r jede Positionierung eines Spacers wird ein Fitness-Wert zur Knieanatomie des jeweiligen Patienten berechnet und den Nutzern R{\"u}ckmeldung hinsichtlich Passgenauigkeit gegeben.
Die Software unterst{\"u}tzt somit als Entscheidungshilfe die behandelnden {\"A}rzte bei der patientenspezifischen Spacerauswahl.}, language = {de} } @inproceedings{AmbellanTackWilsonetal.2017, author = {Ambellan, Felix and Tack, Alexander and Wilson, Dave and Anglin, Carolyn and Lamecker, Hans and Zachow, Stefan}, title = {Evaluating two methods for Geometry Reconstruction from Sparse Surgical Navigation Data}, volume = {16}, booktitle = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65339}, pages = {24 -- 30}, year = {2017}, abstract = {In this study we investigate methods for fitting a Statistical Shape Model (SSM) to intraoperatively acquired point cloud data from a surgical navigation system. We validate the fitted models against the pre-operatively acquired Magnetic Resonance Imaging (MRI) data from the same patients. We consider a cohort of 10 patients who underwent navigated total knee arthroplasty. As part of the surgical protocol the patients' distal femurs were partially digitized. All patients had an MRI scan two months pre-operatively. The MRI data were manually segmented and the reconstructed bone surfaces used as ground truth against which the fit was compared. Two methods were used to fit the SSM to the data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM). For both approaches, the difference between model fit and ground truth surface averaged less than 1.7 mm and excellent correspondence with the distal femoral morphology can be demonstrated.}, language = {en} } @misc{AmbellanTackWilsonetal.2017, author = {Ambellan, Felix and Tack, Alexander and Wilson, Dave and Anglin, Carolyn and Lamecker, Hans and Zachow, Stefan}, title = {Evaluating two methods for Geometry Reconstruction from Sparse Surgical Navigation Data}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66052}, year = {2017}, abstract = {In this study we investigate methods for fitting a Statistical Shape Model (SSM) to intraoperatively acquired point cloud data from a surgical navigation system. We validate the fitted models against the pre-operatively acquired Magnetic Resonance Imaging (MRI) data from the same patients. We consider a cohort of 10 patients who underwent navigated total knee arthroplasty. As part of the surgical protocol the patients' distal femurs were partially digitized. All patients had an MRI scan two months pre-operatively. The MRI data were manually segmented and the reconstructed bone surfaces used as ground truth against which the fit was compared. Two methods were used to fit the SSM to the data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM). 
For both approaches, the difference between model fit and ground truth surface averaged less than 1.7 mm and excellent correspondence with the distal femoral morphology can be demonstrated.}, language = {en} } @inproceedings{GreweleRouxPilzetal.2018, author = {Grewe, Carl Martin and le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, booktitle = {IEEE International Conference on Automatic Face and Gesture Recognition}, doi = {10.1109/FG.2018.00049}, pages = {286 -- 293}, year = {2018}, language = {en} } @misc{SahuDillMukhopadyayetal.2017, author = {Sahu, Manish and Dill, Sabrina and Mukhopadyay, Anirban and Zachow, Stefan}, title = {Surgical Tool Presence Detection for Cataract Procedures}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69110}, year = {2017}, abstract = {This article outlines the submission to the CATARACTS challenge for automatic tool presence detection [1]. Our approach for this multi-label classification problem comprises labelset-based sampling, a CNN architecture and temporal smoothing as described in [3], which we call ZIB-Res-TS.}, language = {en} } @article{TackMukhopadhyayZachow2018, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, volume = {26}, journal = {Osteoarthritis and Cartilage}, number = {5}, doi = {10.1016/j.joca.2018.02.907}, pages = {680 -- 688}, year = {2018}, abstract = {Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t groups of OA, JSN, and WOMAC pain.
It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @misc{TackMukhopadhyayZachow2018, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative (Supplementary Material)}, doi = {10.12752/4.TMZ.1.0}, year = {2018}, abstract = {Abstract: Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t groups of OA, JSN, and WOMAC pain. It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @inproceedings{KraemerMaggioniTycowiczetal.2018, author = {Kr{\"a}mer, Martin and Maggioni, Marta and Tycowicz, Christoph von and Brisson, Nick and Zachow, Stefan and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {Ultra-short echo-time (UTE) imaging of the knee with curved surface reconstruction-based extraction of the patellar tendon}, booktitle = {ISMRM (International Society for Magnetic Resonance in Medicine), 26th Annual Meeting 2018, Paris, France}, year = {2018}, abstract = {Due to very short T2 relaxation times, imaging of tendons is typically performed using ultra-short echo-time (UTE) acquisition techniques. In this work, we combined an echo-train shifted multi-echo 3D UTE imaging sequence with a 3D curved surface reconstruction to virtually extract the patellar tendon from an acquired 3D UTE dataset. 
Based on the analysis of the acquired multi-echo data, a T2* relaxation time parameter map was calculated and interpolated to the curved surface of the patellar tendon.}, language = {en} } @inproceedings{AmbellanTackEhlkeetal.2018, author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, booktitle = {Medical Imaging with Deep Learning}, year = {2018}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The method is evaluated on data of the MICCAI grand challenge "Segmentation of Knee Images 2010". For the first time an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy. In conclusion, combining anatomical knowledge using SSMs with localized classification via CNNs results in a state-of-the-art segmentation method.}, language = {en} } @article{BrueningHildebrandtHepptetal.2020, author = {Br{\"u}ning, Jan and Hildebrandt, Thomas and Heppt, Werner and Schmidt, Nora and Lamecker, Hans and Szengel, Angelika and Amiridze, Natalja and Ramm, Heiko and Bindernagel, Matthias and Zachow, Stefan and Goubergrits, Leonid}, title = {Characterization of the Airflow within an Average Geometry of the Healthy Human Nasal Cavity}, volume = {3755}, journal = {Scientific Reports}, number = {10}, doi = {10.1038/s41598-020-60755-3}, year = {2020}, abstract = {This study's objective was the generation of a standardized geometry of the healthy nasal cavity. An average geometry of the healthy nasal cavity was generated using a statistical shape model based on 25 symptom-free subjects. Airflow within the average geometry and the individual geometries was calculated using fluid simulations. Integral measures of the nasal resistance, wall shear stresses (WSS) and velocities were calculated as well as cross-sectional areas (CSA). Furthermore, individual WSS and static pressure distributions were mapped onto the average geometry. The average geometry featured an overall more regular shape that resulted in less resistance, reduced wall shear stresses and velocities compared to the median of the 25 geometries. Spatial distributions of WSS and pressure of the average geometry agreed well compared to the average distributions of all individual geometries. The minimal CSA of the average geometry was larger than the median of all individual geometries (83.4 vs. 74.7 mm²). The airflow observed within the average geometry of the healthy nasal cavity did not equal the average airflow of the individual geometries. While differences observed for integral measures were notable, the calculated values for the average geometry lay within the distributions of the individual parameters.
Spatially resolved parameters differed less prominently.}, language = {en} } @inproceedings{SahuStroemsdoerferMukhopadhyayetal.2020, author = {Sahu, Manish and Str{\"o}msd{\"o}rfer, Ronja and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Endo-Sim2Real: Consistency learning-based domain adaptation for instrument segmentation}, volume = {12263}, booktitle = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part III}, publisher = {Springer Nature}, doi = {https://doi.org/10.1007/978-3-030-59716-0_75}, year = {2020}, abstract = {Surgical tool segmentation in endoscopic videos is an important component of computer assisted interventions systems. Recent success of image-based solutions using fully-supervised deep learning approaches can be attributed to the collection of big labeled datasets. However, the annotation of a big dataset of real videos can be prohibitively expensive and time consuming. Computer simulations could alleviate the manual labeling problem, however, models trained on simulated data do not generalize to real data. This work proposes a consistency-based framework for joint learning of simulated and real (unlabeled) endoscopic data to bridge this performance generalization issue. Empirical results on two data sets (15 videos of the Cholec80 and EndoVis'15 dataset) highlight the effectiveness of the proposed Endo-Sim2Real method for instrument segmentation. We compare the segmentation of the proposed approach with state-of-the-art solutions and show that our method improves segmentation both in terms of quality and quantity.}, language = {en} } @article{SahuSzengelMukhopadhyayetal.2020, author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Surgical phase recognition by learning phase transitions}, volume = {6}, journal = {Current Directions in Biomedical Engineering (CDBME)}, number = {1}, publisher = {De Gruyter}, doi = {https://doi.org/10.1515/cdbme-2020-0037}, pages = {20200037}, year = {2020}, abstract = {Automatic recognition of surgical phases is an important component for developing an intra-operative context-aware system. Prior work in this area focuses on recognizing short-term tool usage patterns within surgical phases. However, the difference between intra- and inter-phase tool usage patterns has not been investigated for automatic phase recognition. We developed a Recurrent Neural Network (RNN), in particular a state-preserving Long Short Term Memory (LSTM) architecture to utilize the long-term evolution of tool usage within complete surgical procedures. For fully automatic tool presence detection from surgical video frames, a Convolutional Neural Network (CNN) based architecture namely ZIBNet is employed. Our proposed approach outperformed EndoNet by 8.1\% on overall precision for phase detection tasks and 12.5\% on meanAP for tool recognition tasks.}, language = {en} } @misc{SahuSzengelMukhopadhyayetal.2020, author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Analyzing laparoscopic cholecystectomy with deep learning: automatic detection of surgical tools and phases}, journal = {28th International Congress of the European Association for Endoscopic Surgery (EAES)}, year = {2020}, abstract = {Motivation: The ever-rising volume of patients, high maintenance cost of operating rooms and time consuming analysis of surgical skills are fundamental problems that hamper the practical training of the next generation of surgeons. 
Hospitals prefer to keep surgeons busy in real operations over training young surgeons for obvious economic reasons. One fundamental need in surgical training is the reduction of the time needed by the senior surgeon to review the endoscopic procedures performed by the young surgeon while minimizing the subjective bias in evaluation. The unprecedented performance of deep learning ushers in the new age of data-driven automatic analysis of surgical skills. Method: Deep learning is capable of efficiently analyzing thousands of hours of laparoscopic video footage to provide an objective assessment of surgical skills. However, the traditional end-to-end setting of deep learning (video in, skill assessment out) is not explainable. Our strategy is to utilize the surgical process modeling framework to divide the surgical process into understandable components. This provides the opportunity to employ deep learning for superior yet automatic detection and evaluation of several aspects of laparoscopic cholecystectomy such as surgical tool and phase detection. We employ ZIBNet for the detection of surgical tool presence. ZIBNet employs pre-processing based on tool usage imbalance, a transfer learned 50-layer residual network (ResNet-50) and temporal smoothing. To encode the temporal evolution of tool usage (over the entire video sequence) that relates to the surgical phases, Long Short Term Memory (LSTM) units are employed with long-term dependency. Dataset: We used the CHOLEC 80 dataset, which consists of 80 videos of laparoscopic cholecystectomy performed by 13 surgeons, divided equally for training and testing. In these videos, up to three different tools (among 7 types of tools) can be present in a frame. Results: The mean average precision of the detection of all tools is 93.5 ranging between 86.8 and 99.3, a significant improvement (p < 0.01) over the previous state-of-the-art. We observed that less frequent tools like Scissors, Irrigator, Specimen Bag etc. are more related to phase transitions. The overall precision (recall) of the detection of all surgical phases is 79.6 (81.3). Conclusion: While this is not the end goal for surgical skill analysis, the development of such a technological platform is essential toward a data-driven objective understanding of surgical skills. In future, we plan to investigate surgeon-in-the-loop analysis and feedback for surgical skill analysis.}, language = {en} } @article{PimentelSzengelEhlkeetal.2020, author = {Pimentel, Pedro and Szengel, Angelika and Ehlke, Moritz and Lamecker, Hans and Zachow, Stefan and Estacio, Laura and Doenitz, Christian and Ramm, Heiko}, title = {Automated Virtual Reconstruction of Large Skull Defects using Statistical Shape Models and Generative Adversarial Networks}, volume = {12439}, journal = {Towards the Automatization of Cranial Implant Design in Cranioplasty}, editor = {Li, Jianning and Egger, Jan}, edition = {1}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-030-64327-0_3}, pages = {16 -- 27}, year = {2020}, abstract = {We present an automated method for extrapolating missing regions in label data of the skull in an anatomically plausible manner. The ultimate goal is to design patient-specific cranial implants for correcting large, arbitrarily shaped defects of the skull that can, for example, result from trauma of the head.
Our approach utilizes a 3D statistical shape model (SSM) of the skull and a 2D generative adversarial network (GAN) that is trained in an unsupervised fashion from samples of healthy patients alone. By fitting the SSM to given input labels containing the skull defect, a first approximation of the healthy state of the patient is obtained. The GAN is then applied to further correct and smooth the output of the SSM in an anatomically plausible manner. Finally, the defect region is extracted using morphological operations and subtraction between the extrapolated healthy state of the patient and the defective input labels. The method is trained and evaluated based on data from the MICCAI 2020 AutoImplant challenge. It produces state-of-the-art results on regularly shaped cut-outs that were present in the training and testing data of the challenge. Furthermore, due to the unsupervised nature of the approach, the method generalizes well to previously unseen defects of varying shapes that were only present in the hidden test dataset.}, language = {en} } @misc{GreweZachow2017, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Face to Face-Interface}, journal = {+ultra. Knowledge \& Gestaltung}, editor = {Doll, Nikola and Bredekamp, Horst and Sch{\"a}ffner, Wolfgang}, publisher = {Seemann Henschel}, pages = {320 -- 321}, year = {2017}, language = {en} } @misc{GreweLeRouxPilzetal.2018, author = {Grewe, Carl Martin and Le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-67696}, year = {2018}, abstract = {3D Morphable Models (MM) are a popular tool for analysis and synthesis of facial expressions. They represent plausible variations in facial shape and appearance within a low-dimensional parameter space. Fitted to a face scan, the model's parameters compactly encode its expression patterns. This expression code can be used, for instance, as a feature in automatic facial expression recognition. For accurate classification, an MM that can adequately represent the various characteristic facets and variants of each expression is necessary. Currently available MMs are limited in the diversity of expression patterns. We present a novel high-quality Facial Expression Morphable Model built from a large-scale face database as a tool for expression analysis and synthesis. Establishment of accurate dense correspondence, up to finest skin features, enables a detailed statistical analysis of facial expressions. Various characteristic shape patterns are identified for each expression. The results of our analysis give rise to a new facial expression code.
We demonstrate the advantages of such a code for the automatic recognition of expressions, and compare the accuracy of our classifier to state-of-the-art.}, language = {en} } @article{OeltzeJaffraMeuschkeNeugebaueretal.2019, author = {Oeltze-Jaffra, Steffen and Meuschke, Monique and Neugebauer, Mathias and Saalfeld, Sylvia and Lawonn, Kai and Janiga, Gabor and Hege, Hans-Christian and Zachow, Stefan and Preim, Bernhard}, title = {Generation and Visual Exploration of Medical Flow Data: Survey, Research Trends, and Future Challenges}, volume = {38}, journal = {Computer Graphics Forum}, number = {1}, publisher = {Wiley}, doi = {10.1111/cgf.13394}, pages = {87 -- 125}, year = {2019}, abstract = {Simulations and measurements of blood and air flow inside the human circulatory and respiratory system play an increasingly important role in personalized medicine for prevention, diagnosis, and treatment of diseases. This survey focuses on three main application areas. (1) Computational Fluid Dynamics (CFD) simulations of blood flow in cerebral aneurysms assist in predicting the outcome of this pathologic process and of therapeutic interventions. (2) CFD simulations of nasal airflow allow for investigating the effects of obstructions and deformities and provide therapy decision support. (3) 4D Phase-Contrast (4D PC) Magnetic Resonance Imaging (MRI) of aortic hemodynamics supports the diagnosis of various vascular and valve pathologies as well as their treatment. An investigation of the complex and often dynamic simulation and measurement data requires the coupling of sophisticated visualization, interaction, and data analysis techniques. In this paper, we survey the large body of work that has been conducted within this realm. We extend previous surveys by incorporating nasal airflow, addressing the joint investigation of blood flow and vessel wall properties, and providing a more fine-granular taxonomy of the existing techniques. From the survey, we extract major research trends and identify open problems and future challenges. The survey is intended for researchers interested in medical flow but also more general, in the combined visualization of physiology and anatomy, the extraction of features from flow field data and feature-based visualization, the visual comparison of different simulation results, and the interactive visual analysis of the flow field and derived characteristics.}, language = {en} } @misc{TackMukhopadhyayZachow2018, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, volume = {26}, number = {5}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-68038}, pages = {680 -- 688}, year = {2018}, abstract = {Abstract: Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. 
Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t groups of OA, JSN, and WOMAC pain. It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @article{MoldenhauerWeiserZachow2017, author = {Moldenhauer, Marian and Weiser, Martin and Zachow, Stefan}, title = {Adaptive Algorithms for Optimal Hip Implant Positioning}, volume = {17}, journal = {PAMM}, number = {1}, doi = {10.1002/pamm.201710071}, pages = {203 -- 204}, year = {2017}, abstract = {In an aging society where the number of joint replacements rises, it is important to also increase the longevity of implants. In particular hip implants have a lifetime of at most 15 years. This derives primarily from pain due to implant migration, wear, inflammation, and dislocation, which is affected by the positioning of the implant during the surgery. Current joint replacement practice uses 2D software tools and relies on the experience of surgeons. Especially the 2D tools fail to take the patients' natural range of motion as well as stress distribution in the 3D joint induced by different daily motions into account. Optimizing the hip joint implant position for all possible parametrized motions under the constraint of a contact problem is prohibitively expensive as there are too many motions and every position change demands a recalculation of the contact problem. For the reduction of the computational effort, we use adaptive refinement on the parameter domain coupled with the interpolation method of Kriging. A coarse initial grid is to be locally refined using goal-oriented error estimation, reducing locally high variances. This approach will be combined with multi-grid optimization such that numerical errors are reduced.}, language = {en} } @article{WeiserErdmannSchenkletal.2018, author = {Weiser, Martin and Erdmann, Bodo and Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Mall, Gita and Zachow, Stefan}, title = {Uncertainty in Temperature-Based Determination of Time of Death}, volume = {54}, journal = {Heat and Mass Transfer}, number = {9}, publisher = {Springer}, doi = {10.1007/s00231-018-2324-4}, pages = {2815 -- 2826}, year = {2018}, abstract = {Temperature-based estimation of time of death (ToD) can be performed either with the help of simple phenomenological models of corpse cooling or with detailed mechanistic (thermodynamic) heat transfer models.
The latter are much more complex, but allow a higher accuracy of ToD estimation as in principle all relevant cooling mechanisms can be taken into account. The potentially higher accuracy depends on the accuracy of tissue and environmental parameters as well as on the geometric resolution. We investigate the impact of parameter variations and geometry representation on the estimated ToD based on a highly detailed 3D corpse model that has been segmented and geometrically reconstructed from a computed tomography (CT) data set, differentiating various organs and tissue types.}, language = {en} } @inproceedings{NeumannHellwichZachow2019, author = {Neumann, Mario and Hellwich, Olaf and Zachow, Stefan}, title = {Localization and Classification of Teeth in Cone Beam CT using Convolutional Neural Networks}, booktitle = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, isbn = {978-3-00-063717-9}, pages = {182 -- 188}, year = {2019}, abstract = {In dentistry, software-based medical image analysis and visualization provide efficient and accurate diagnostic and therapy planning capabilities. We present an approach for the automatic recognition of tooth types and positions in digital volume tomography (DVT). By using deep learning techniques in combination with dimensionality reduction through non-planar reformatting of the jaw anatomy, DVT data can be efficiently processed and teeth reliably recognized and classified, even in the presence of imaging artefacts, missing or dislocated teeth. We evaluated our approach, which is based on 2D Convolutional Neural Networks (CNNs), on 118 manually annotated cases of clinical DVT datasets. Our proposed method correctly classifies teeth with an accuracy of 94\% within a limit of 2 mm distance to ground truth labels.}, language = {en} } @inproceedings{JoachimskyMaIckingetal.2019, author = {Joachimsky, Robert and Ma, Lihong and Icking, Christian and Zachow, Stefan}, title = {A Collision-Aware Articulated Statistical Shape Model of the Human Spine}, booktitle = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, pages = {58 -- 64}, year = {2019}, abstract = {Statistical Shape Models (SSMs) are a proven means for model-based 3D anatomy reconstruction from medical image data. In orthopaedics and biomechanics, SSMs are increasingly employed to individualize measurement data or to create individualized anatomical models to which implants can be adapted or on which functional tests can be performed. For modeling and analysis of articulated structures, so-called articulated SSMs (aSSMs) have been developed. However, a missing feature of aSSMs is the consideration of collisions in the course of individual fitting and articulation. The aim of our work was to develop aSSMs that handle collisions between components correctly. That way it becomes possible to adjust shape and articulation in view of a physically and geometrically plausible individualization.
To be able to apply collision-aware aSSMs in simulation and optimisation, our approach is based on an efficient collision detection method employing Graphics Processing Units (GPUs).}, language = {en} } @article{KraemerMaggioniBrissonetal.2019, author = {Kr{\"a}mer, Martin and Maggioni, Marta and Brisson, Nicholas and Zachow, Stefan and Teichgr{\"a}ber, Ulf and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {T1 and T2* mapping of the human quadriceps and patellar tendons using ultra-short echo-time (UTE) imaging and bivariate relaxation parameter-based volumetric visualization}, volume = {63}, journal = {Magnetic Resonance Imaging}, number = {11}, doi = {10.1016/j.mri.2019.07.015}, pages = {29 -- 36}, year = {2019}, abstract = {Quantification of magnetic resonance (MR)-based relaxation parameters of tendons and ligaments is challenging due to their very short transverse relaxation times, requiring application of ultra-short echo-time (UTE) imaging sequences. We quantify both T1 and T2⁎ in the quadriceps and patellar tendons of healthy volunteers at a field strength of 3 T and visualize the results based on 3D segmentation by using bivariate histogram analysis. We applied a 3D ultra-short echo-time imaging sequence with either variable repetition times (VTR) or variable flip angles (VFA) for T1 quantification in combination with multi-echo acquisition for extracting T2⁎. The values of both relaxation parameters were subsequently binned for bivariate histogram analysis and corresponding cluster identification, which were subsequently visualized. Based on manually-drawn regions of interest in the tendons on the relaxation parameter maps, T1 and T2⁎ boundaries were selected in the bivariate histogram to segment the quadriceps and patellar tendons and visualize the relaxation times by 3D volumetric rendering. Segmentation of bone marrow, fat, muscle and tendons was successfully performed based on the bivariate histogram analysis. Based on the segmentation results mean T2⁎ relaxation times, over the entire tendon volumes averaged over all subjects, were 1.8 ms ± 0.1 ms and 1.4 ms ± 0.2 ms for the patellar and quadriceps tendons, respectively. The mean T1 value of the patellar tendon, averaged over all subjects, was 527 ms ± 42 ms and 476 ms ± 40 ms for the VFA and VTR acquisitions, respectively. The quadriceps tendon had higher mean T1 values of 662 ms ± 97 ms (VFA method) and 637 ms ± 40 ms (VTR method) compared to the patellar tendon. 3D volumetric visualization of the relaxation times revealed that T1 values are not constant over the volume of both tendons, but vary locally. This work provided additional data to build upon the scarce literature available on relaxation times in the quadriceps and patellar tendons. We were able to segment both tendons and to visualize the relaxation parameter distributions over the entire tendon volumes.}, language = {en} } @misc{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {An as-invariant-as-possible GL+(3)-based Statistical Shape Model}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74566}, year = {2019}, abstract = {We describe a novel nonlinear statistical shape model based on differential coordinates viewed as elements of GL+(3). We adopt an as-invariant-as-possible framework comprising a bi-invariant Lie group mean and a tangent principal component analysis based on a unique GL+(3)-left-invariant, O(3)-right-invariant metric.
Contrary to earlier work that equips the coordinates with a specifically constructed group structure, our method employs the inherent geometric structure of the group-valued data and therefore features an improved statistical power in identifying shape differences. We demonstrate this in experiments on two anatomical datasets including comparison to the standard Euclidean as well as recent state-of-the-art nonlinear approaches to statistical shape modeling.}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {An as-invariant-as-possible GL+(3)-based Statistical Shape Model}, volume = {11846}, booktitle = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, publisher = {Springer}, doi = {10.1007/978-3-030-33226-6_23}, pages = {219 -- 228}, year = {2019}, abstract = {We describe a novel nonlinear statistical shape model based on differential coordinates viewed as elements of GL+(3). We adopt an as-invariant-as-possible framework comprising a bi-invariant Lie group mean and a tangent principal component analysis based on a unique GL+(3)-left-invariant, O(3)-right-invariant metric. Contrary to earlier work that equips the coordinates with a specifically constructed group structure, our method employs the inherent geometric structure of the group-valued data and therefore features an improved statistical power in identifying shape differences. We demonstrate this in experiments on two anatomical datasets including comparison to the standard Euclidean as well as recent state-of-the-art nonlinear approaches to statistical shape modeling.}, language = {en} } @misc{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74497}, year = {2019}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach especially in presence of sparse training data. To provide insight into the model's ability of capturing natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz2019, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, volume = {11767}, booktitle = {Proc.
Medical Image Computing and Computer Assisted Intervention (MICCAI), Part IV}, publisher = {Springer}, doi = {10.1007/978-3-030-32251-9_3}, pages = {21 -- 29}, year = {2019}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach especially in presence of sparse training data. To provide insight into the model's ability of capturing natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @inproceedings{EstacioEhlkeTacketal.2021, author = {Estacio, Laura and Ehlke, Moritz and Tack, Alexander and Castro-Gutierrez, Eveling and Lamecker, Hans and Mora, Rensso and Zachow, Stefan}, title = {Unsupervised Detection of Disturbances in 2D Radiographs}, booktitle = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, doi = {10.1109/ISBI48211.2021.9434091}, pages = {367 -- 370}, year = {2021}, abstract = {We present a method based on a generative model for detection of disturbances such as prosthesis, screws, zippers, and metals in 2D radiographs. The generative model is trained in an unsupervised fashion using clinical radiographs as well as simulated data, none of which contain disturbances. Our approach employs a latent space consistency loss which has the benefit of identifying similarities, and is enforced to reconstruct X-rays without disturbances. In order to detect images with disturbances, an anomaly score is computed also employing the Frechet distance between the input X-ray and the reconstructed one using our generative model. Validation was performed using clinical pelvis radiographs. We achieved an AUC of 0.77 and 0.83 with clinical and synthetic data, respectively. The results demonstrated a good accuracy of our method for detecting outliers as well as the advantage of utilizing synthetic data.}, language = {en} } @article{SahuMukhopadhyayZachow2021, author = {Sahu, Manish and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Simulation-to-Real domain adaptation with teacher-student learning for endoscopic instrument segmentation}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer Nature}, arxiv = {http://arxiv.org/abs/arXiv:2103.01593}, doi = {10.1007/s11548-021-02383-4}, pages = {849 -- 859}, year = {2021}, abstract = {Purpose Segmentation of surgical instruments in endoscopic video streams is essential for automated surgical scene understanding and process modeling. However, relying on fully supervised deep learning for this task is challenging because manual annotation occupies valuable time of the clinical experts. 
Methods We introduce a teacher-student learning approach that learns jointly from annotated simulation data and unlabeled real data to tackle the challenges in simulation-to-real unsupervised domain adaptation for endoscopic image segmentation. Results Empirical results on three datasets highlight the effectiveness of the proposed framework over current approaches for the endoscopic instrument segmentation task. Additionally, we provide analysis of major factors affecting the performance on all datasets to highlight the strengths and failure modes of our approach. Conclusions We show that our proposed approach can successfully exploit the unlabeled real endoscopic video frames and improve generalization performance over pure simulation-based training and the previous state-of-the-art. This takes us one step closer to effective segmentation of surgical instruments in the annotation-scarce setting.}, language = {en} } @article{TackPreimZachow2021, author = {Tack, Alexander and Preim, Bernhard and Zachow, Stefan}, title = {Fully automated Assessment of Knee Alignment from Full-Leg X-Rays employing a "YOLOv4 And Resnet Landmark regression Algorithm" (YARLA): Data from the Osteoarthritis Initiative}, volume = {205}, journal = {Computer Methods and Programs in Biomedicine}, number = {106080}, doi = {https://doi.org/10.1016/j.cmpb.2021.106080}, year = {2021}, abstract = {We present a method for the quantification of knee alignment from full-leg X-Rays. A state-of-the-art object detector, YOLOv4, was trained to locate regions of interest (ROIs) in full-leg X-Ray images for the hip joint, the knee, and the ankle. Residual neural networks (ResNets) were trained to regress landmark coordinates for each ROI. Based on the detected landmarks, the knee alignment, i.e., the hip-knee-ankle (HKA) angle, was computed. The accuracy of landmark detection was evaluated by a comparison to manually placed landmarks for 360 legs in 180 X-Rays. The accuracy of HKA angle computations was assessed on the basis of 2,943 X-Rays. Results of YARLA were compared to the results of two independent image reading studies (Cooke; Duryea), both publicly accessible via the Osteoarthritis Initiative. The agreement was evaluated using Spearman's Rho and weighted kappa, as well as regarding the correspondence of the class assignment (varus/neutral/valgus). The average difference between YARLA and manually placed landmarks was less than 2.0 ± 1.5 mm for all structures (hip, knee, ankle). The average mismatch between HKA angle determinations of Cooke and Duryea was 0.09 ± 0.63°; YARLA resulted in a mismatch of 0.10 ± 0.74° compared to Cooke and of 0.18 ± 0.64° compared to Duryea. Cooke and Duryea agreed almost perfectly with respect to a weighted kappa value of 0.86, and showed an excellent reliability as measured by a Spearman's Rho value of 0.99. Similar values were achieved by YARLA, i.e., a weighted kappa value of 0.83 and 0.87 and a Spearman's Rho value of 0.98 and 0.99 to Cooke and Duryea, respectively. Cooke and Duryea agreed in 92\% of all class assignments and YARLA did so in 90\% against Cooke and 92\% against Duryea. In conclusion, YARLA achieved results comparable to those of human experts and thus provides a basis for an automated assessment of knee alignment in full-leg X-Rays.}, language = {en} } @article{SekuboyinaBayatHusseinietal.2020, author = {Sekuboyina, Anjany and Bayat, Amirhossein and Husseini, Malek E.
and L{\"o}ffler, Maximilian and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\´a}s P{\´e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Wei, Qingyue and Brown, Kevin and Wolf, Matthias and Kirszenberg, Alexandre and Puybareau, {\´E}lodie and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.}, title = {VerSe: A Vertebrae Labelling and Segmentation Benchmark for Multi-detector CT Images}, journal = {arXiv}, arxiv = {http://arxiv.org/abs/2001.09193}, year = {2020}, language = {en} } @article{AmbellanZachowvonTycowicz2021, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {Rigid Motion Invariant Statistical Shape Modeling based on Discrete Fundamental Forms}, volume = {73}, journal = {Medical Image Analysis}, arxiv = {http://arxiv.org/abs/2111.06850}, doi = {10.1016/j.media.2021.102178}, year = {2021}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. Additionally, as planar configurations form a submanifold in shape space, our representation allows for effective estimation of quasi-isometric surfaces flattenings. We evaluate the performance of our model w.r.t. shape-based classification of hippocampus and femur malformations due to Alzheimer's disease and osteoarthritis, respectively. In particular, we achieve state-of-the-art accuracies outperforming the standard Euclidean as well as a recent nonlinear approach especially in presence of sparse training data. To provide insight into the model's ability of capturing biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @article{TrepczynskiKneifelHeylandetal.2025, author = {Trepczynski, Adam and Kneifel, Paul and Heyland, Mark and Leskovar, Marko and Moewis, Philippe and Damm, Philipp and Taylor, William R. and Zachow, Stefan and Duda, Georg N.}, title = {Impact of the external knee flexion moment on patello-femoral loading derived from in vivo loads and kinematics}, volume = {12/2024}, journal = {Frontiers in Bioengineering and Biotechnology}, publisher = {Frontiers Media SA}, organization = {Charit{\´e} - Universit{\"a}tsmedizin Berlin}, issn = {2296-4185}, doi = {10.3389/fbioe.2024.1473951}, year = {2025}, abstract = {Introduction: Anterior knee pain and other patello-femoral (PF) complications frequently limit the success of total knee arthroplasty as the final treatment of end stage osteoarthritis. 
However, knowledge about the in-vivo loading conditions at the PF joint remains limited, as no direct measurements are available. We hypothesised that the external knee flexion moment (EFM) is highly predictive of the PF contact forces during activities with substantial flexion of the loaded knee. Materials and methods: Six patients (65-80 years, 67-101 kg) with total knee arthroplasty (TKA) performed two activities of daily living: sit-stand-sit and squat. Tibio-femoral (TF) contact forces were measured in vivo using instrumented tibial components, while synchronously internal TF and PF kinematics were captured with mobile fluoroscopy. The measurements were used to compute PF contact forces using patient-specific musculoskeletal models. The relationship between the EFM and the PF contact force was quantified using linear regression. Results: Mean peak TF contact forces of 1.97-3.24 times body weight (BW) were found, while peak PF forces reached 1.75 to 3.29 times body weight (BW). The peak EFM ranged from 3.2 to 5.9 \%BW times body height, and was a good predictor of the PF contact force (R2 = 0.95 and 0.88 for sit-stand-sit and squat, respectively). Discussion: The novel combination of in vivo TF contact forces and internal patellar kinematics enabled a reliable assessment of PF contact forces. The results of the regression analysis suggest that PF forces can be estimated based solely on the EFM from quantitative gait analysis. Our study also demonstrates the relevance of PF contact forces, which reach magnitudes similar to TF forces during activities of daily living.}, language = {en} } @article{KomnikFunkenZachowetal.2024, author = {Komnik, Igor and Funken, Johannes and Zachow, Stefan and Schmidt-Wiethoff, R{\"u}diger and Ellermann, Andree and Potthast, Wolfgang}, title = {Surgical planning in HTO - Alternative approaches to the Fujisawa gold-standard}, journal = {Technology and Health Care}, doi = {10.1177/09287329241299568}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-98227}, year = {2024}, abstract = {BACKGROUND: Presurgical planning of the correction angle plays a decisive role in a high tibial osteotomy, affecting the loading situation in the knee affected by osteoarthritis. The planning approach by Fujisawa et al. aims to adjust the weight-bearing line to achieve an optimal knee joint load distribution. While this method is accessible, it may not fully consider the complexity of individual dynamic knee-loading profiles. This review aims to disclose existing alternative HTO planning methods that do not follow Fujisawa's standard. METHODS: PubMed, Web of Science and CENTRAL databases were screened, focusing on HTO research in combination with alternative planning approaches. RESULTS: Eight out of 828 studies were included, with seven simulation studies based on finite element analysis and multi-body dynamics. The planning approaches incorporated gradual degrees of realignment parameters (weight-bearing line shift, medial proximal tibial angle, hip-knee-ankle, knee joint line orientation), simulating their effect on knee kinematics, contact force/stress, von Mises and shear stress. Two studies proposed implementing individual correction magnitudes derived from preoperatively predicted knee adduction moments. CONCLUSION: Most planning methods depend on static alignment assessments, neglecting an adequate loading-dependent profile.
They are confined to their conceptual phases, making the associated planning methods unviable for current clinical use.}, language = {en} } @misc{StallingSeebassZachow1998, author = {Stalling, Detlev and Seebass, Martin and Zachow, Stefan}, title = {Mehrschichtige Oberfl{\"a}chenmodelle zur computergest{\"u}tzten Planung in der Chirurgie}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5661}, number = {TR-98-05}, year = {1998}, abstract = {Polygonale Sch{\"a}delmodelle bilden ein wichtiges Hilfsmittel f{\"u}r computergest{\"u}tzte Planungen im Bereich der plastischen Chirurgie. Wir beschreiben, wie derartige Modelle automatisch aus hochaufgel{\"o}sten CT-Datens{\"a}tzen erzeugt werden k{\"o}nnen. Durch einen lokal steuerbaren Simplifizierungsalgorithmus werden die Modelle so weit vereinfacht, daß auch auf kleineren Graphikcomputern interaktives Arbeiten m{\"o}glich wird. Die Verwendung eines speziellen Transparenzmodells erm{\"o}glicht den ungehinderten Blick auf die bei der Planung relevanten Knochenstrukturen und l{\"a}ßt den Benutzer zugleich die Kopfumrisse des Patienten erkennen.}, language = {de} } @article{DeuflhardWeiserZachow2006, author = {Deuflhard, Peter and Weiser, Martin and Zachow, Stefan}, title = {Mathematics in Facial Surgery}, volume = {53}, journal = {AMS Notices}, number = {9}, pages = {1012 -- 1016}, year = {2006}, language = {en} } @article{ZinserZachowSailer2013, author = {Zinser, Max and Zachow, Stefan and Sailer, Hermann}, title = {Bimaxillary "rotation advancement" procedures in patients with obstructive sleep apnea: A 3-dimensional airway analysis of morphological changes}, volume = {42}, journal = {International Journal of Oral \& Maxillofacial Surgery}, number = {5}, doi = {10.1016/j.ijom.2012.08.002}, pages = {569 -- 578}, year = {2013}, language = {en} } @misc{DeuflhardZachow2012, author = {Deuflhard, Peter and Zachow, Stefan}, title = {Mathematische Therapie- und Operationsplanung}, publisher = {Berliner Wirtschaftsgespr{\"a}che e.V.}, address = {Berlin}, pages = {89 -- 90}, year = {2012}, language = {en} } @article{GallowayKahntRammetal.2013, author = {Galloway, Francis and Kahnt, Max and Ramm, Heiko and Worsley, Peter and Zachow, Stefan and Nair, Prasanth and Taylor, Mark}, title = {A large scale finite element study of a cementless osseointegrated tibial tray}, volume = {46}, journal = {Journal of Biomechanics}, number = {11}, doi = {10.1016/j.jbiomech.2013.04.021}, pages = {1900 -- 1906}, year = {2013}, language = {en} } @misc{GreweLameckerZachow2013, author = {Grewe, Carl Martin and Lamecker, Hans and Zachow, Stefan}, title = {Landmark-based Statistical Shape Analysis}, journal = {Auxology - Studying Human Growth and Development}, editor = {Hermanussen, Michael}, publisher = {Schweizerbart Verlag, Stuttgart}, pages = {199 -- 201}, year = {2013}, language = {en} } @article{EhlkeRammLameckeretal.2013, author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan}, title = {Fast Generation of Virtual X-ray Images for Reconstruction of 3D Anatomy}, volume = {19}, journal = {IEEE Transactions on Visualization and Computer Graphics}, number = {12}, doi = {10.1109/TVCG.2013.159}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-35928}, pages = {2673 -- 2682}, year = {2013}, language = {en} } @inproceedings{HoffmannZachow2011, author = {Hoffmann, Ren{\´e} and Zachow, Stefan}, title = {Non-invasive approach to shed new light on the buoyancy business of chambered cephalopods (Mollusca)}, booktitle = {Proc.
of the Intl. Assoc. for Mathematical Geosciences, Salzburg}, doi = {10.5242/iamg.2011.0163}, pages = {1 -- 11}, year = {2011}, language = {en} } @misc{GreweLameckerZachow2011, author = {Grewe, Carl Martin and Lamecker, Hans and Zachow, Stefan}, title = {Digital morphometry: The Potential of Statistical Shape Models}, journal = {Anthropologischer Anzeiger. Journal of Biological and Clinical Anthropology}, pages = {506 -- 506}, year = {2011}, language = {en} } @inproceedings{LameckerKainmuellerSeimetal.2010, author = {Lamecker, Hans and Kainm{\"u}ller, Dagmar and Seim, Heiko and Zachow, Stefan}, title = {Automatische 3D Rekonstruktion des Unterkiefers und der Mandibul{\"a}rnerven auf Basis dentaler Bildgebung}, volume = {55 (Suppl. 1)}, booktitle = {Proc. BMT, Biomed Tech}, publisher = {Walter de Gruyter-Verlag}, pages = {35 -- 36}, year = {2010}, language = {en} } @article{DworzakLameckervonBergetal.2009, author = {Dworzak, Jalda and Lamecker, Hans and von Berg, Jens and Klinder, Tobias and Lorenz, Cristian and Kainm{\"u}ller, Dagmar and Seim, Heiko and Hege, Hans-Christian and Zachow, Stefan}, title = {3D Reconstruction of the Human Rib Cage from 2D Projection Images using a Statistical Shape Model}, volume = {5}, journal = {Int. J. Comput. Assist. Radiol. Surg.}, number = {2}, publisher = {Springer}, issn = {1861-6410}, doi = {10.1007/s11548-009-0390-2}, pages = {111 -- 124}, year = {2009}, language = {en} } @article{RybakKussLameckeretal.2010, author = {Rybak, J{\"u}rgen and Kuß, Anja and Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian and Lienhard, Matthias and Singer, Jochen and Neubert, Kerstin and Menzel, Randolf}, title = {The Digital Bee Brain: Integrating and Managing Neurons in a Common 3D Reference System}, volume = {4}, journal = {Front. Syst. Neurosci.}, number = {30}, doi = {10.3389/fnsys.2010.00030}, year = {2010}, language = {en} }