@article{Zachow, author = {Zachow, Stefan}, title = {Computational Planning in Facial Surgery}, series = {Facial Plastic Surgery}, volume = {31}, journal = {Facial Plastic Surgery}, number = {5}, doi = {10.1055/s-0035-1564717}, pages = {446 -- 462}, abstract = {This article reflects the research of the last two decades in computational planning for cranio-maxillofacial surgery. Model-guided and computer-assisted surgery planning has tremendously developed due to ever-increasing computational capabilities. Simulators for education, planning, and training of surgery are often compared with flight simulators, where maneuvers are also trained to reduce a possible risk of failure. Meanwhile, digital patient models can be derived from medical image data with astonishing accuracy and thus can serve for model surgery to derive a surgical template model that represents the envisaged result. Computerized surgical planning approaches, however, are often still explorative, meaning that a surgeon tries to find a therapeutic concept based on his or her expertise using computational tools that mimic real procedures. Future perspectives of an improved computerized planning may be that surgical objectives will be generated algorithmically by employing mathematical modeling, simulation, and optimization techniques. Planning systems thus act as intelligent decision support systems. However, surgeons can still use the existing tools to vary the proposed approach, but they mainly focus on how to transfer objectives into reality. Such a development may result in a paradigm shift for future surgery planning.}, language = {en} } @misc{LamasRodriguezEhlkeHoffmannetal., author = {Lamas-Rodr{\´i}guez, Juli{\´a}n and Ehlke, Moritz and Hoffmann, Ren{\´e} and Zachow, Stefan}, title = {GPU-accelerated denoising of large tomographic data sets with low SNR}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-56339}, abstract = {Enhancements in tomographic imaging techniques facilitate non-destructive methods for visualizing fossil structures. However, to penetrate dense materials such as sediments or pyrites, image acquisition is typically performed with high beam energy and very sensitive image intensifiers, leading to artifacts and noise in the acquired data. The analysis of delicate fossil structures requires the images to be captured in maximum resolution, resulting in large data sets of several gigabytes (GB) in size. Since the structural information of interest is often almost in the same spatial range as artifacts and noise, image processing and segmentation algorithms have to cope with a very low signal-to-noise ratio (SNR). Within this report we present a study on the performance of a collection of denoising algorithms applied to a very noisy fossil dataset. The study shows that a non-local means (NLM) filter, when properly configured, is able to remove a considerable amount of noise while preserving most of the structural information of interest. Based on the results of this study, we developed a software tool within ZIBAmira that denoises large tomographic datasets using an adaptive, GPU-accelerated NLM filter. With the help of our implementation, a user can interactively configure the filter's parameters and thus its effectiveness with respect to the data of interest, while the filtering response is instantly visualized for a preselected region of interest (ROI).
Our implementation efficiently denoises even large fossil datasets in a reasonable amount of time.}, language = {en} } @article{ZahnGrotjohannRammetal., author = {Zahn, Robert and Grotjohann, Sarah and Ramm, Heiko and Zachow, Stefan and Putzier, Michael and Perka, Carsten and Tohtz, Stephan}, title = {Pelvic tilt compensates for increased acetabular anteversion}, series = {International Orthopaedics}, volume = {40}, journal = {International Orthopaedics}, number = {8}, doi = {10.1007/s00264-015-2949-6}, pages = {1571 -- 1575}, abstract = {Pelvic tilt determines the functional orientation of the acetabulum. In this study, we investigated the interaction of pelvic tilt and functional acetabular anteversion (AA) in the supine position.}, language = {en} } @incollection{LameckerZachow, author = {Lamecker, Hans and Zachow, Stefan}, title = {Statistical Shape Modeling of Musculoskeletal Structures and Its Applications}, series = {Computational Radiology for Orthopaedic Interventions}, volume = {23}, booktitle = {Computational Radiology for Orthopaedic Interventions}, publisher = {Springer}, isbn = {978-3-319-23481-6}, doi = {10.1007/978-3-319-23482-3}, pages = {1 -- 23}, abstract = {Statistical shape models (SSM) describe the shape variability contained in a given population. They are able to describe large populations of complex shapes with few degrees of freedom. This makes them a useful tool for a variety of tasks that arise in computer-aided medicine. In this chapter we explain the basic methodology of SSMs and present a variety of examples where SSMs have been successfully applied.}, language = {en} } @article{ZachowHeppt, author = {Zachow, Stefan and Heppt, Werner}, title = {The Facial Profile}, series = {Facial Plastic Surgery}, volume = {31}, journal = {Facial Plastic Surgery}, number = {5}, doi = {10.1055/s-0035-1566132}, pages = {419 -- 420}, abstract = {Facial appearance in our societies is often associated with notions of attractiveness, juvenileness, beauty, success, and so forth. Hence, the role of facial plastic surgery is highly interrelated with a patient's desire to feature many of these positively connoted attributes, which, of course, are subject to different cultural perceptions or social trends. To judge somebody's facial appearance, appropriate quantitative measures as well as methods to obtain and compare individual facial features are required. This special issue on the facial profile is intended to provide an overview of how facial characteristics are surgically managed in an interdisciplinary way based on experience, instrumentation, and modern technology to obtain an aesthetic facial appearance with harmonious facial proportions. The facial profile will be discussed within the context of facial aesthetics. The latest concepts for capturing facial morphology at high speed and in impressive detail are presented for quantitative analysis of even subtle changes, aging effects, or facial expressions.
In addition, the perception of facial profiles is evaluated based on eye tracking technology.}, language = {en} } @inproceedings{RammVictoriaMorilloTodtetal.2013, author = {Ramm, Heiko and Victoria Morillo, Oscar Salvador and Todt, Ingo and Schirmacher, Hartmut and Ernst, Arneborg and Zachow, Stefan and Lamecker, Hans}, title = {Visual Support for Positioning Hearing Implants}, series = {Proceedings of the 12th annual meeting of the CURAC society}, booktitle = {Proceedings of the 12th annual meeting of the CURAC society}, editor = {Freysinger, Wolfgang}, pages = {116 -- 120}, year = {2013}, language = {en} } @article{KainmuellerLameckerHelleretal.2013, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Heller, Markus O. and Weber, Britta and Hege, Hans-Christian and Zachow, Stefan}, title = {Omnidirectional Displacements for Deformable Surfaces}, series = {Medical Image Analysis}, volume = {17}, journal = {Medical Image Analysis}, number = {4}, publisher = {Elsevier}, doi = {10.1016/j.media.2012.11.006}, pages = {429 -- 441}, year = {2013}, language = {en} } @article{LamasRodriguezHerasArgueelloetal.2013, author = {Lamas-Rodr{\´i}guez, Juli{\´a}n and Heras, Dora Blanco and Arg{\"u}ello, Francisco and Kainm{\"u}ller, Dagmar and Zachow, Stefan and B{\´o}o, Montserrat}, title = {GPU-accelerated level-set segmentation}, series = {Journal of Real-Time Image Processing}, journal = {Journal of Real-Time Image Processing}, publisher = {Springer Berlin Heidelberg}, issn = {1861-8200}, doi = {10.1007/s11554-013-0378-6}, pages = {1 -- 15}, year = {2013}, language = {en} } @article{HoffmannSchultzSchellhornetal., author = {Hoffmann, Ren{\´e} and Schultz, Julia A. and Schellhorn, Rico and Rybacki, Erik and Keupp, Helmut and Gerden, S. R. and Lemanis, Robert and Zachow, Stefan}, title = {Non-invasive imaging methods applied to neo- and paleontological cephalopod research}, series = {Biogeosciences}, volume = {11}, journal = {Biogeosciences}, number = {10}, doi = {10.5194/bg-11-2721-2014}, pages = {2721 -- 2739}, abstract = {Several non-invasive methods are common practice in natural sciences today. Here we present how they can be applied and contribute to current topics in cephalopod (paleo-) biology. Different methods will be compared in terms of the time necessary to acquire the data, the amount of data, accuracy/resolution, minimum/maximum size of objects that can be studied, the degree of post-processing needed, and availability. The main application of the methods is seen in morphometry and volumetry of cephalopod shells. In particular, we present a method for precise buoyancy calculation. To this end, cephalopod shells were scanned together with different reference bodies, an approach developed in the medical sciences. It is necessary to know the volume of the reference bodies, which should have absorption properties similar to those of the object of interest. Exact volumes can be obtained from surface scanning.
Depending on the dimensions of the study object, different computed tomography techniques were applied.}, language = {en} } @inproceedings{vonBergDworzakKlinderetal.2011, author = {von Berg, Jens and Dworzak, Jalda and Klinder, Tobias and Manke, Dirk and Lamecker, Hans and Zachow, Stefan and Lorenz, Cristian}, title = {Temporal Subtraction of Chest Radiographs Compensating Pose Differences}, series = {SPIE Medical Imaging}, booktitle = {SPIE Medical Imaging}, year = {2011}, language = {en} } @inproceedings{BindernagelKainmuellerSeimetal.2011, author = {Bindernagel, Matthias and Kainm{\"u}ller, Dagmar and Seim, Heiko and Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian}, title = {An Articulated Statistical Shape Model of the Human Knee}, series = {Bildverarbeitung f{\"u}r die Medizin 2011}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2011}, publisher = {Springer}, doi = {10.1007/978-3-642-19335-4_14}, pages = {59 -- 63}, year = {2011}, language = {en} } @inproceedings{KahntGallowaySeimetal.2011, author = {Kahnt, Max and Galloway, Francis and Seim, Heiko and Lamecker, Hans and Taylor, Mark and Zachow, Stefan}, title = {Robust and Intuitive Meshing of Bone-Implant Compounds}, series = {CURAC}, booktitle = {CURAC}, address = {Magdeburg}, pages = {71 -- 74}, year = {2011}, language = {en} } @misc{SKGBSetal.2011, author = {Saevarsson, S. K. and Sharma, G. B. and Montgomery, S. and Ho, K. C. T. and Ramm, Heiko and Lieck, Robert and Zachow, Stefan and Anglin, C.}, title = {Kinematic Comparison Between Gender Specific and Traditional Femoral Implants}, series = {Proceedings of the 11th Alberta Biomedical Engineering (BME) Conference (Poster)}, journal = {Proceedings of the 11th Alberta Biomedical Engineering (BME) Conference (Poster)}, pages = {80}, year = {2011}, language = {en} } @incollection{DeuflhardDoesselLouisetal.2008, author = {Deuflhard, Peter and D{\"o}ssel, Olaf and Louis, Alfred and Zachow, Stefan}, title = {Mehr Mathematik wagen in der Medizin}, series = {acatech diskutiert, Produktionsfaktor Mathematik - Wie Mathematik Technik und Wirtschaft bewegt}, booktitle = {acatech diskutiert, Produktionsfaktor Mathematik - Wie Mathematik Technik und Wirtschaft bewegt}, publisher = {Springer}, doi = {10.1007/978-3-540-89435-3}, pages = {435 -- 459}, year = {2008}, language = {en} } @inproceedings{KainmuellerLameckerZachowetal.2008, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian}, title = {Coupling Deformable Models for Multi-object Segmentation}, series = {Proc. Int. Symp. on Computational Models for Biomedical Simulation (ISBMS)}, booktitle = {Proc. Int. Symp. on Computational Models for Biomedical Simulation (ISBMS)}, doi = {10.1007/978-3-540-70521-5_8}, pages = {69 -- 78}, year = {2008}, language = {en} } @inproceedings{KainmuellerLameckerZachowetal.2008, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Zachow, Stefan and Heller, Markus O. and Hege, Hans-Christian}, title = {Multi-Object Segmentation with Coupled Deformable Models}, series = {Proc. Medical Image Understanding and Analysis}, booktitle = {Proc. Medical Image Understanding and Analysis}, pages = {34 -- 38}, year = {2008}, language = {en} } @inproceedings{SeimKainmuellerKussetal.2008, author = {Seim, Heiko and Kainm{\"u}ller, Dagmar and Kuss, Anja and Lamecker, Hans and Zachow, Stefan and Menzel, Randolf and Rybak, Juergen}, title = {Model-based autosegmentation of the central brain of the honeybee, Apis mellifera, using active statistical shape models}, series = {Proc.
1st INCF Congress of Neuroinformatics: Databasing and Modeling the Brain}, booktitle = {Proc. 1st INCF Congress of Neuroinformatics: Databasing and Modeling the Brain}, doi = {10.3389/conf.neuro.11.2008.01.064}, year = {2008}, language = {en} } @inproceedings{DworzakLameckervonBergetal.2008, author = {Dworzak, Jalda and Lamecker, Hans and von Berg, Jens and Klinder, Tobias and Lorenz, Cristian and Kainm{\"u}ller, Dagmar and Seim, Heiko and Hege, Hans-Christian and Zachow, Stefan}, title = {Towards model-based 3-D reconstruction of the human rib cage from radiographs}, series = {Proc. 7. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer-Roboterassistierte Chirurgie (CURAC)}, booktitle = {Proc. 7. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer-Roboterassistierte Chirurgie (CURAC)}, pages = {193 -- 196}, year = {2008}, language = {en} } @inproceedings{SeimKainmuellerHelleretal.2008, author = {Seim, Heiko and Kainm{\"u}ller, Dagmar and Heller, Markus O. and Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian}, title = {Automatic Segmentation of the Pelvic Bones from CT Data Based on a Statistical Shape Model}, series = {Eurographics Workshop on Visual Computing for Biomedicine (VCBM)}, booktitle = {Eurographics Workshop on Visual Computing for Biomedicine (VCBM)}, address = {Delft, Netherlands}, pages = {93 -- 100}, year = {2008}, language = {en} } @incollection{DeuflhardDoesselLouisetal.2010, author = {Deuflhard, Peter and D{\"o}ssel, Olaf and Louis, Alfred and Zachow, Stefan}, title = {More Mathematics into Medicine!}, series = {Production Factor Mathematics}, booktitle = {Production Factor Mathematics}, publisher = {Springer}, pages = {357 -- 378}, year = {2010}, language = {en} } @inproceedings{SeimKainmuellerLameckeretal.2010, author = {Seim, Heiko and Kainm{\"u}ller, Dagmar and Lamecker, Hans and Bindernagel, Matthias and Malinowski, Jana and Zachow, Stefan}, title = {Model-based Auto-Segmentation of Knee Bones and Cartilage in MRI Data}, series = {Proc. MICCAI Workshop Medical Image Analysis for the Clinic}, booktitle = {Proc. MICCAI Workshop Medical Image Analysis for the Clinic}, editor = {v. Ginneken, B.}, pages = {215 -- 223}, year = {2010}, language = {en} } @misc{EhlkeRammLameckeretal.2012, author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Zachow, Stefan}, title = {Efficient projection and deformation of volumetric shape and intensity models for accurate simulation of X-ray images}, series = {Eurographics Workshop on Visual Computing for Biomedicine (NVIDIA best poster award)}, journal = {Eurographics Workshop on Visual Computing for Biomedicine (NVIDIA best poster award)}, year = {2012}, language = {en} } @article{RammKahntZachow2012, author = {Ramm, Heiko and Kahnt, Max and Zachow, Stefan}, title = {Patientenspezifische Simulationsmodelle f{\"u}r die funktionelle Analyse von k{\"u}nstlichem Gelenkersatz}, series = {Computer Aided Medical Engineering (CaMe)}, volume = {3}, journal = {Computer Aided Medical Engineering (CaMe)}, number = {2}, pages = {30 -- 36}, year = {2012}, language = {de} } @inproceedings{KahntRammLameckeretal.2012, author = {Kahnt, Max and Ramm, Heiko and Lamecker, Hans and Zachow, Stefan}, title = {Feature-Preserving, Multi-Material Mesh Generation using Hierarchical Oracles}, series = {Proc. MICCAI Workshop on Mesh Processing in Medical Image Analysis (MeshMed)}, volume = {7599}, booktitle = {Proc. MICCAI Workshop on Mesh Processing in Medical Image Analysis (MeshMed)}, editor = {Levine, Joshua A. and Paulsen, Rasmus R. 
and Zhang, Yongjie}, pages = {101 -- 111}, year = {2012}, language = {en} } @incollection{RammZachow2012, author = {Ramm, Heiko and Zachow, Stefan}, title = {Computergest{\"u}tzte Planung f{\"u}r die individuelle Implantatversorgung}, series = {Health Academy}, volume = {16}, booktitle = {Health Academy}, editor = {Niederlag, Wolfgang and Lemke, Heinz and Peitgen, Heinz-Otto and Lehrach, Hans}, pages = {145 -- 158}, year = {2012}, language = {de} } @article{KainmuellerLameckerSeimetal.2009, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Seim, Heiko and Zachow, Stefan}, title = {Multi-object segmentation of head bones}, series = {MIDAS Journal}, journal = {MIDAS Journal}, year = {2009}, language = {en} } @inproceedings{SeimKainmuellerLameckeretal.2009, author = {Seim, Heiko and Kainm{\"u}ller, Dagmar and Lamecker, Hans and Zachow, Stefan}, title = {A System for Unsupervised Extraction of Orthopaedic Parameters from CT Data}, series = {GI Workshop Softwareassistenten - Computerunterst{\"u}tzung f{\"u}r die medizinische Diagnose und Therapieplanung}, booktitle = {GI Workshop Softwareassistenten - Computerunterst{\"u}tzung f{\"u}r die medizinische Diagnose und Therapieplanung}, address = {L{\"u}beck, Germany}, pages = {1328 -- 1337}, year = {2009}, language = {en} } @inproceedings{KainmuellerLameckerSeimetal.2009, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Seim, Heiko and Zinser, Max and Zachow, Stefan}, title = {Automatic Extraction of Mandibular Nerve and Bone from Cone-Beam CT Data}, series = {Proceedings of Medical Image Computing and Computer Assisted Intervention (MICCAI)}, booktitle = {Proceedings of Medical Image Computing and Computer Assisted Intervention (MICCAI)}, editor = {Yang, Guang-Zhong and Hawkes, David J. and Rueckert, Daniel and Noble, J. Alison and Taylor, Chris J.}, address = {London, UK}, pages = {76 -- 83}, year = {2009}, language = {en} } @inproceedings{KainmuellerLameckerZachowetal.2009, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian}, title = {An Articulated Statistical Shape Model for Accurate Hip Joint Segmentation}, series = {EMBC 2009. Int. Conf. of the IEEE Eng. in Med. and Biol. Society (EMBC)}, booktitle = {EMBC 2009. Int. Conf. of the IEEE Eng. in Med. and Biol. Society (EMBC)}, address = {Minneapolis, USA}, pages = {6345 -- 6351}, year = {2009}, language = {en} } @inproceedings{SeimKainmuellerHelleretal.2009, author = {Seim, Heiko and Kainm{\"u}ller, Dagmar and Heller, Markus O. and Zachow, Stefan and Hege, Hans-Christian}, title = {Automatic Extraction of Anatomical Landmarks from Medical Image Data: An Evaluation of Different Methods}, series = {Proc. of IEEE Int. Symposium on Biomedical Imaging (ISBI)}, booktitle = {Proc. of IEEE Int.
Symposium on Biomedical Imaging (ISBI)}, address = {Boston, MA, USA}, pages = {538 -- 541}, year = {2009}, language = {en} } @article{KainmuellerLameckerZachow2009, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Zachow, Stefan}, title = {Multi-object Segmentation with Coupled Deformable Models}, series = {Annals of the British Machine Vision Association (BMVA)}, volume = {5}, journal = {Annals of the British Machine Vision Association (BMVA)}, pages = {1 -- 10}, year = {2009}, language = {en} } @article{ZachowMuiggHildebrandtetal.2009, author = {Zachow, Stefan and Muigg, Philipp and Hildebrandt, Thomas and Doleisch, Helmut and Hege, Hans-Christian}, title = {Visual Exploration of Nasal Airflow}, series = {IEEE Transactions on Visualization and Computer Graphics}, volume = {15}, journal = {IEEE Transactions on Visualization and Computer Graphics}, number = {8}, doi = {10.1109/TVCG.2009.198}, pages = {1407 -- 1414}, year = {2009}, language = {en} } @article{ZachowDeuflhard2008, author = {Zachow, Stefan and Deuflhard, Peter}, title = {Computergest{\"u}tzte Planung in der kraniofazialen Chirurgie}, series = {Face 01/08, Int. Mag. of Orofacial Esthetics}, journal = {Face 01/08, Int. Mag. of Orofacial Esthetics}, publisher = {Oemus Journale Leipzig}, pages = {43 -- 49}, year = {2008}, language = {en} } @inproceedings{ZilskeLameckerZachow2008, author = {Zilske, Michael and Lamecker, Hans and Zachow, Stefan}, title = {Adaptive Remeshing of Non-Manifold Surfaces}, series = {Eurographics 2008 Annex to the Conf. Proc.}, booktitle = {Eurographics 2008 Annex to the Conf. Proc.}, pages = {207 -- 211}, year = {2008}, language = {en} } @inproceedings{SeimLameckerZachow2008, author = {Seim, Heiko and Lamecker, Hans and Zachow, Stefan}, title = {Segmentation of Bony Structures with Ligament Attachment Sites}, series = {Bildverarbeitung f{\"u}r die Medizin 2008}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2008}, publisher = {Springer}, doi = {10.1007/978-3-540-78640-5_42}, pages = {207 -- 211}, year = {2008}, language = {en} } @inproceedings{NeugebauerJanigaZachowetal.2008, author = {Neugebauer, Mathias and Janiga, Gabor and Zachow, Stefan and Krischek, {\"O}zlem and Preim, Bernhard}, title = {Generierung qualitativ hochwertiger Modelle f{\"u}r die Simulation von Blutfluss in zerebralen Aneurysmen}, series = {Proc. of Simulation and Visualization 2008}, booktitle = {Proc. of Simulation and Visualization 2008}, editor = {Hauser, Helwig}, pages = {221 -- 235}, year = {2008}, language = {en} } @inproceedings{DornheimBornZachowetal.2008, author = {Dornheim, Jana and Born, Silvia and Zachow, Stefan and Gessat, Michael and Wellein, Daniela and Strauß, Gero and Preim, Bernhard and Bartz, Dirk}, title = {Bildanalyse, Visualisierung und Modellerstellung f{\"u}r die Implantatplanung im Mittelohr}, series = {Proc. of Simulation and Visualization 2008}, booktitle = {Proc. of Simulation and Visualization 2008}, editor = {Hauser, Helwig}, pages = {139 -- 154}, year = {2008}, language = {en} } @article{SteinmannBartschZachowetal.2008, author = {Steinmann, Alexander and Bartsch, Peter and Zachow, Stefan and Hildebrandt, Thomas}, title = {Breathing Easily: Simulation of airflow in human noses can become a useful rhinosurgery planning tool}, series = {ANSYS Advantage}, volume = {Vol. II, No. 
1}, journal = {ANSYS Advantage}, pages = {30 -- 31}, year = {2008}, language = {en} } @article{WeiserZachowDeuflhard2010, author = {Weiser, Martin and Zachow, Stefan and Deuflhard, Peter}, title = {Craniofacial Surgery Planning Based on Virtual Patient Models}, series = {it - Information Technology}, volume = {52}, journal = {it - Information Technology}, number = {5}, publisher = {Oldenbourg Verlagsgruppe}, doi = {10.1524/itit.2010.0600}, pages = {258 -- 263}, year = {2010}, language = {en} } @incollection{ZachowHahnLange2010, author = {Zachow, Stefan and Hahn, Horst and Lange, Thomas}, title = {Computerassistierte Chirurgieplanung}, series = {Computerassistierte Chirurgie}, booktitle = {Computerassistierte Chirurgie}, editor = {Schlag, Peter and Eulenstein, Sebastian and Lange, Thomas}, publisher = {Elsevier}, pages = {119 -- 149}, year = {2010}, language = {en} } @inproceedings{KainmuellerLameckerSeimetal.2010, author = {Kainm{\"u}ller, Dagmar and Lamecker, Hans and Seim, Heiko and Zachow, Stefan and Hege, Hans-Christian}, title = {Improving Deformable Surface Meshes through Omni-directional Displacements and MRFs}, series = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI)}, volume = {6361}, booktitle = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI)}, editor = {Jiang, Tianzi and Navab, Nassir and Pluim, Josien P. W. and Viergever, Max A.}, publisher = {Springer}, doi = {10.1007/978-3-642-15705-9_28}, pages = {227 -- 234}, year = {2010}, language = {en} } @inproceedings{ZachowKubiackMalinowskietal.2010, author = {Zachow, Stefan and Kubiack, Kim and Malinowski, Jana and Lamecker, Hans and Essig, Harald and Gellrich, Nils-Claudius}, title = {Modellgest{\"u}tzte chirurgische Rekonstruktion komplexer Mittelgesichtsfrakturen}, series = {Proc. BMT, Biomed Tech 2010}, volume = {55 (Suppl 1)}, booktitle = {Proc. BMT, Biomed Tech 2010}, publisher = {Walter de Gruyter-Verlag}, pages = {107 -- 108}, year = {2010}, language = {de} } @inproceedings{LameckerKainmuellerSeimetal.2010, author = {Lamecker, Hans and Kainm{\"u}ller, Dagmar and Seim, Heiko and Zachow, Stefan}, title = {Automatische 3D Rekonstruktion des Unterkiefers und der Mandibul{\"a}rnerven auf Basis dentaler Bildgebung}, series = {Proc. BMT, Biomed Tech}, volume = {55 (Suppl. 1)}, booktitle = {Proc. BMT, Biomed Tech}, publisher = {Walter de Gruyter-Verlag}, pages = {35 -- 36}, year = {2010}, language = {en} } @article{DworzakLameckervonBergetal.2010, author = {Dworzak, Jalda and Lamecker, Hans and von Berg, Jens and Klinder, Tobias and Lorenz, Cristian and Kainm{\"u}ller, Dagmar and Seim, Heiko and Hege, Hans-Christian and Zachow, Stefan}, title = {3D Reconstruction of the Human Rib Cage from 2D Projection Images using a Statistical Shape Model}, series = {Int. J. Comput. Assist. Radiol. Surg.}, volume = {5}, journal = {Int. J. Comput. Assist. Radiol.
Surg.}, number = {2}, publisher = {Springer}, issn = {1861-6410}, doi = {10.1007/s11548-009-0390-2}, pages = {111 -- 124}, year = {2010}, language = {en} } @misc{KamerNoserLameckeretal.2006, author = {Kamer, Lukas and Noser, Hansrudi and Lamecker, Hans and Zachow, Stefan and Wittmers, Antonia and Kaup, Thomas and Schramm, Alexander and Hammer, Beat}, title = {Three-dimensional statistical shape analysis - A useful tool for developing a new type of orbital implant?}, publisher = {AO Development Institute, New Products Brochure 2/06}, pages = {20 -- 21}, year = {2006}, language = {en} } @article{HepptHildebrandtSteinmannetal.2007, author = {Heppt, Werner and Hildebrandt, Thomas and Steinmann, Alexander and Zachow, Stefan}, title = {Aesthetic and Function in Rhinoplasty}, series = {Springer Journal}, volume = {264 (Suppl 1), RL 126}, journal = {Springer Journal}, pages = {307}, year = {2007}, language = {en} } @inproceedings{ZachowZilskeHege2007, author = {Zachow, Stefan and Zilske, Michael and Hege, Hans-Christian}, title = {3D Reconstruction of Individual Anatomy from Medical Image Data: Segmentation and Geometry Processing}, series = {25. ANSYS Conference \& CADFEM Users' Meeting}, booktitle = {25. ANSYS Conference \& CADFEM Users' Meeting}, address = {Dresden}, year = {2007}, language = {en} } @article{GessatZachowBurgertetal.2007, author = {Gessat, Michael and Zachow, Stefan and Burgert, Oliver and Lemke, Heinz}, title = {Geometric Meshes in Medical Applications - Steps towards a specification of Geometric Models in DICOM}, series = {Int. J. of Computer Assisted Radiology and Surgery (CARS)}, journal = {Int. J. of Computer Assisted Radiology and Surgery (CARS)}, doi = {10.1007/s11548-007-0112-6}, pages = {440 -- 442}, year = {2007}, language = {en} } @article{HildebrandtZachowSteinmannetal.2007, author = {Hildebrandt, Thomas and Zachow, Stefan and Steinmann, Alexander and Heppt, Werner}, title = {Innovation in der Funktionell-{\"A}sthetischen Nasenchirurgie: Rhino-CFD}, series = {Face, Int. Mag. of Orofacial Esthetics}, journal = {Face, Int. Mag. of Orofacial Esthetics}, publisher = {Oemus Journale Leipzig}, pages = {20 -- 23}, year = {2007}, language = {en} } @article{ZachowSteinmannHildebrandtetal.2007, author = {Zachow, Stefan and Steinmann, Alexander and Hildebrandt, Thomas and Heppt, Werner}, title = {Understanding nasal airflow via CFD simulation and visualization}, series = {Proc. Computer Aided Surgery around the Head}, journal = {Proc. Computer Aided Surgery around the Head}, pages = {173 -- 176}, year = {2007}, language = {en} } @article{LameckerKamerWittmersetal.2007, author = {Lamecker, Hans and Kamer, Lukas and Wittmers, Antonia and Zachow, Stefan and Kaup, Thomas and Schramm, Alexander and Noser, Hansrudi and Hammer, Beat}, title = {A method for the three-dimensional statistical shape analysis of the bony orbit}, series = {Proc. Computer Aided Surgery around the Head}, journal = {Proc. Computer Aided Surgery around the Head}, pages = {94 -- 97}, year = {2007}, language = {en} } @inproceedings{ZachowLameckerElsholtzetal.2005, author = {Zachow, Stefan and Lamecker, Hans and Elsholtz, Barbara and Stiller, Michael}, title = {Reconstruction of mandibular dysplasia using a statistical 3D shape model}, series = {Proc. Computer Assisted Radiology and Surgery (CARS)}, booktitle = {Proc. 
Computer Assisted Radiology and Surgery (CARS)}, address = {Berlin, Germany}, doi = {10.1016/j.ics.2005.03.339}, pages = {1238 -- 1243}, year = {2005}, language = {en} } @inproceedings{NkenkeHaeuslerNeukametal.2005, author = {Nkenke, Emeka and H{\"a}usler, Gerd and Neukam, Friedrich and Zachow, Stefan}, title = {Streak artifact correction of CT data by optical 3D imaging in the simulation of orthognathic surgery}, series = {Computer Assisted Radiology and Surgery (CARS)}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, address = {Berlin, Germany}, doi = {10.1016/j.ics.2005.03.278}, year = {2005}, language = {en} } @inproceedings{NkenkeZachowHaeusler2005, author = {Nkenke, Emeka and Zachow, Stefan and H{\"a}usler, Gerd}, title = {Fusion von optischen 3D- und CT-Daten des Gebisses zur Metallartefaktkorrektur vor computerassistierter Planung MKG-chirurgischer Eingriffe}, series = {Symposium der Arbeitsgemeinschaft f{\"u}r Kieferchirurgie}, booktitle = {Symposium der Arbeitsgemeinschaft f{\"u}r Kieferchirurgie}, address = {Bad Homburg v.d.H.}, year = {2005}, language = {en} } @article{LameckerZachowHaberletal.2005, author = {Lamecker, Hans and Zachow, Stefan and Haberl, Hannes and Stiller, Michael}, title = {Medical applications for statistical shape models}, series = {Computer Aided Surgery around the Head, Fortschritt-Berichte VDI - Biotechnik/Medizintechnik}, volume = {17 (258)}, journal = {Computer Aided Surgery around the Head, Fortschritt-Berichte VDI - Biotechnik/Medizintechnik}, pages = {61}, year = {2005}, language = {en} } @article{LameckerZachowWittmersetal.2006, author = {Lamecker, Hans and Zachow, Stefan and Wittmers, Antonia and Weber, Britta and Hege, Hans-Christian and Elsholtz, Barbara and Stiller, Michael}, title = {Automatic segmentation of mandibles in low-dose CT-data}, series = {Int. J. Computer Assisted Radiology and Surgery}, volume = {1(1)}, journal = {Int. J. Computer Assisted Radiology and Surgery}, pages = {393 -- 395}, year = {2006}, language = {en} } @article{LameckerZachowHegeetal.2006, author = {Lamecker, Hans and Zachow, Stefan and Hege, Hans-Christian and Z{\"o}ckler, Maja}, title = {Surgical treatment of craniosynostosis based on a statistical 3D-shape model}, series = {Int. J. Computer Assisted Radiology and Surgery}, volume = {1(1)}, journal = {Int. J. Computer Assisted Radiology and Surgery}, doi = {10.1007/s11548-006-0024-x}, pages = {253 -- 254}, year = {2006}, language = {en} } @article{ZachowHegeDeuflhard2006, author = {Zachow, Stefan and Hege, Hans-Christian and Deuflhard, Peter}, title = {Computer assisted planning in cranio-maxillofacial surgery}, series = {Journal of Computing and Information Technology}, volume = {14(1)}, journal = {Journal of Computing and Information Technology}, pages = {53 -- 64}, year = {2006}, language = {en} } @article{ZachowLameckerElsholtzetal.2006, author = {Zachow, Stefan and Lamecker, Hans and Elsholtz, Barbara and Stiller, Michael}, title = {Is the course of the mandibular nerve deducible from the shape of the mandible?}, series = {Int. J. of Computer Assisted Radiology and Surgery}, journal = {Int. J. of Computer Assisted Radiology and Surgery}, publisher = {Springer}, pages = {415 -- 417}, year = {2006}, language = {en} } @article{ZachowSteinmannHildebrandtetal.2006, author = {Zachow, Stefan and Steinmann, Alexander and Hildebrandt, Thomas and Weber, Rainer and Heppt, Werner}, title = {CFD simulation of nasal airflow: Towards treatment planning for functional rhinosurgery}, series = {Int. J.
of Computer Assisted Radiology and Surgery}, journal = {Int. J. of Computer Assisted Radiology and Surgery}, publisher = {Springer}, pages = {165 -- 167}, year = {2006}, language = {en} } @inproceedings{EhlkeFrenzelRammetal., author = {Ehlke, Moritz and Frenzel, Thomas and Ramm, Heiko and Shandiz, Mohsen Akbari and Anglin, Carolyn and Zachow, Stefan}, title = {Towards Robust Measurement Of Pelvic Parameters From AP Radiographs Using Articulated 3D Models}, series = {Computer Assisted Radiology and Surgery (CARS)}, booktitle = {Computer Assisted Radiology and Surgery (CARS)}, abstract = {Patient-specific parameters such as the orientation of the acetabulum or pelvic tilt are useful for custom planning for total hip arthroplasty (THA) and for evaluating the outcome of surgical interventions. The gold standard for obtaining pelvic parameters is three-dimensional (3D) computed tomography (CT) imaging. However, this adds time and cost, exposes the patient to a substantial radiation dose, and does not allow for imaging under load (e.g. while the patient is standing). If pelvic parameters could be reliably derived from the standard anteroposterior (AP) radiograph, preoperative planning would be more widespread, and research analyses could be applied to retrospective data, after a postoperative issue is discovered. The goal of this work is to enable robust measurement of two surgical parameters of interest: the tilt of the anterior pelvic plane (APP) and the orientation of the natural acetabulum. We present a computer-aided reconstruction method to determine the APP and natural acetabular orientation from a single, preoperative X-ray. It can easily be extended to obtain other important preoperative and postoperative parameters solely based on a single AP radiograph.}, language = {en} } @misc{EhlkeFrenzelRammetal., author = {Ehlke, Moritz and Frenzel, Thomas and Ramm, Heiko and Shandiz, Mohsen Akbari and Anglin, Carolyn and Zachow, Stefan}, title = {Towards Robust Measurement of Pelvic Parameters from AP Radiographs using Articulated 3D Models}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53707}, abstract = {Patient-specific parameters such as the orientation of the acetabulum or pelvic tilt are useful for custom planning for total hip arthroplasty (THA) and for evaluating the outcome of surgical interventions. The gold standard for obtaining pelvic parameters is three-dimensional (3D) computed tomography (CT) imaging. However, this adds time and cost, exposes the patient to a substantial radiation dose, and does not allow for imaging under load (e.g. while the patient is standing). If pelvic parameters could be reliably derived from the standard anteroposterior (AP) radiograph, preoperative planning would be more widespread, and research analyses could be applied to retrospective data, after a postoperative issue is discovered. The goal of this work is to enable robust measurement of two surgical parameters of interest: the tilt of the anterior pelvic plane (APP) and the orientation of the natural acetabulum. We present a computer-aided reconstruction method to determine the APP and natural acetabular orientation from a single, preoperative X-ray.
It can easily be extended to obtain other important preoperative and postoperative parameters solely based on a single AP radiograph.}, language = {en} } @article{LemanisZachowFusseisetal., author = {Lemanis, Robert and Zachow, Stefan and Fusseis, Florian and Hoffmann, Ren{\´e}}, title = {A new approach using high-resolution computed tomography to test the buoyant properties of chambered cephalopod shells}, series = {Paleobiology}, volume = {41}, journal = {Paleobiology}, number = {2}, publisher = {Cambridge University Press}, address = {Cambridge}, doi = {10.1017/pab.2014.17}, pages = {313 -- 329}, abstract = {The chambered shell of modern cephalopods functions as a buoyancy apparatus, allowing the animal to enter the water column without expending a large amount of energy to overcome its own weight. Indeed, the chambered shell is largely considered a key adaptation that allowed the earliest cephalopods to leave the ocean floor and enter the water column. It has been argued by some, however, that the iconic chambered shell of Paleozoic and Mesozoic ammonoids did not provide a sufficiently buoyant force to compensate for the weight of the entire animal, thus restricting ammonoids to a largely benthic lifestyle reminiscent of some octopods. Here we develop a technique using high-resolution computed tomography to quantify the buoyant properties of chambered shells without reducing the shell to ideal spirals or eliminating inherent biological variability by using mathematical models that characterize past work in this area. This technique has been tested on Nautilus pompilius and is now extended to the extant deep-sea squid Spirula spirula and the Jurassic ammonite Cadoceras sp. hatchling. Cadoceras is found to have possessed near-neutral to positive buoyancy if hatched when the shell possessed between three and five chambers. However, we show that the animal could also overcome degrees of negative buoyancy through swimming, similar to the paralarvae of modern squids. These calculations challenge past inferences of benthic life habits based solely on calculations of negative buoyancy. The calculated buoyancy of Cadoceras supports the possibility of planktonic dispersal of ammonite hatchlings. This information is essential to understanding ammonoid ecology as well as biotic interactions and has implications for the interpretation of geochemical data gained from the isotopic analysis of the shell.}, language = {en} } @inproceedings{TackZachow, author = {Tack, Alexander and Zachow, Stefan}, title = {Accurate Automated Volumetry of Cartilage of the Knee using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}, booktitle = {IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}, doi = {10.1109/ISBI.2019.8759201}, pages = {40 -- 43}, abstract = {Volumetry of cartilage of the knee is needed for knee osteoarthritis (KOA) assessment. It is typically performed manually in a tedious and subjective process. We developed a method for an automated, segmentation-based quantification of cartilage volume by employing 3D Convolutional Neural Networks (CNNs). CNNs were trained in a supervised manner using magnetic resonance imaging data and cartilage volumetry readings performed by clinical experts for 1378 subjects provided by the Osteoarthritis Initiative. 
It was shown that 3D CNNs are able to achieve volume measures comparable to the magnitude of variation between expert readings and the real in vivo situation. In the future, accurate automated cartilage volumetry might support both the diagnosis of KOA and the longitudinal analysis of KOA progression.}, language = {en} } @inproceedings{NeumannHellwichZachow, author = {Neumann, Mario and Hellwich, Olaf and Zachow, Stefan}, title = {Localization and Classification of Teeth in Cone Beam CT using Convolutional Neural Networks}, series = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, booktitle = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, isbn = {978-3-00-063717-9}, pages = {182 -- 188}, abstract = {In dentistry, software-based medical image analysis and visualization provide efficient and accurate diagnostic and therapy planning capabilities. We present an approach for the automatic recognition of tooth types and positions in digital volume tomography (DVT). By using deep learning techniques in combination with dimensionality reduction through non-planar reformatting of the jaw anatomy, DVT data can be efficiently processed and teeth reliably recognized and classified, even in the presence of imaging artefacts as well as missing or dislocated teeth. We evaluated our approach, which is based on 2D Convolutional Neural Networks (CNNs), on 118 manually annotated cases of clinical DVT datasets. Our proposed method correctly classifies teeth with an accuracy of 94\% within a limit of 2 mm distance to ground truth labels.}, language = {en} } @inproceedings{JoachimskyMaIckingetal., author = {Joachimsky, Robert and Ma, Lihong and Icking, Christian and Zachow, Stefan}, title = {A Collision-Aware Articulated Statistical Shape Model of the Human Spine}, series = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, booktitle = {Proc. of the 18th annual conference on Computer- and Robot-assisted Surgery (CURAC)}, pages = {58 -- 64}, abstract = {Statistical Shape Models (SSMs) are a proven means for model-based 3D anatomy reconstruction from medical image data. In orthopaedics and biomechanics, SSMs are increasingly employed to individualize measurement data or to create individualized anatomical models to which implants can be adapted or on which functional tests can be performed. For modeling and analysis of articulated structures, so-called articulated SSMs (aSSMs) have been developed. However, a missing feature of aSSMs is the consideration of collisions in the course of individual fitting and articulation. The aim of our work was to develop aSSMs that handle collisions between components correctly. That way it becomes possible to adjust shape and articulation in view of a physically and geometrically plausible individualization.
To be able to apply collision-aware aSSMs in simulation and optimisation, our approach is based on an efficient collision detection method employing Graphics Processing Units (GPUs).}, language = {en} } @article{KraemerMaggioniBrissonetal., author = {Kr{\"a}mer, Martin and Maggioni, Marta and Brisson, Nicholas and Zachow, Stefan and Teichgr{\"a}ber, Ulf and Duda, Georg and Reichenbach, J{\"u}rgen}, title = {T1 and T2* mapping of the human quadriceps and patellar tendons using ultra-short echo-time (UTE) imaging and bivariate relaxation parameter-based volumetric visualization}, series = {Magnetic Resonance Imaging}, volume = {63}, journal = {Magnetic Resonance Imaging}, number = {11}, doi = {10.1016/j.mri.2019.07.015}, pages = {29 -- 36}, abstract = {Quantification of magnetic resonance (MR)-based relaxation parameters of tendons and ligaments is challenging due to their very short transverse relaxation times, requiring application of ultra-short echo-time (UTE) imaging sequences. We quantify both T1 and T2⁎ in the quadriceps and patellar tendons of healthy volunteers at a field strength of 3 T and visualize the results based on 3D segmentation by using bivariate histogram analysis. We applied a 3D ultra-short echo-time imaging sequence with either variable repetition times (VTR) or variable flip angles (VFA) for T1 quantification in combination with multi-echo acquisition for extracting T2⁎. The values of both relaxation parameters were subsequently binned for bivariate histogram analysis and corresponding cluster identification, which were subsequently visualized. Based on manually-drawn regions of interest in the tendons on the relaxation parameter maps, T1 and T2⁎ boundaries were selected in the bivariate histogram to segment the quadriceps and patellar tendons and visualize the relaxation times by 3D volumetric rendering. Segmentation of bone marrow, fat, muscle and tendons was successfully performed based on the bivariate histogram analysis. Based on the segmentation results, mean T2⁎ relaxation times, over the entire tendon volumes averaged over all subjects, were 1.8 ms ± 0.1 ms and 1.4 ms ± 0.2 ms for the patellar and quadriceps tendons, respectively. The mean T1 value of the patellar tendon, averaged over all subjects, was 527 ms ± 42 ms and 476 ms ± 40 ms for the VFA and VTR acquisitions, respectively. The quadriceps tendon had higher mean T1 values of 662 ms ± 97 ms (VFA method) and 637 ms ± 40 ms (VTR method) compared to the patellar tendon. 3D volumetric visualization of the relaxation times revealed that T1 values are not constant over the volume of both tendons, but vary locally. This work provided additional data to build upon the scarce literature available on relaxation times in the quadriceps and patellar tendons. We were able to segment both tendons and to visualize the relaxation parameter distributions over the entire tendon volumes.}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {An as-invariant-as-possible GL+(3)-based Statistical Shape Model}, series = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, volume = {11846}, booktitle = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, publisher = {Springer}, doi = {10.1007/978-3-030-33226-6_23}, pages = {219 -- 228}, abstract = {We describe a novel nonlinear statistical shape model based on differential coordinates viewed as elements of GL+(3).
We adopt an as-invariant-as-possible framework comprising a bi-invariant Lie group mean and a tangent principal component analysis based on a unique GL+(3)-left-invariant, O(3)-right-invariant metric. Contrary to earlier work that equips the coordinates with a specifically constructed group structure, our method employs the inherent geometric structure of the group-valued data and therefore features an improved statistical power in identifying shape differences. We demonstrate this in experiments on two anatomical datasets, including a comparison to the standard Euclidean as well as recent state-of-the-art nonlinear approaches to statistical shape modeling.}, language = {en} } @misc{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74497}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient, allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach, especially in the presence of sparse training data. To provide insight into the model's ability to capture natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @misc{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {An as-invariant-as-possible GL+(3)-based Statistical Shape Model}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74566}, abstract = {We describe a novel nonlinear statistical shape model based on differential coordinates viewed as elements of GL+(3). We adopt an as-invariant-as-possible framework comprising a bi-invariant Lie group mean and a tangent principal component analysis based on a unique GL+(3)-left-invariant, O(3)-right-invariant metric. Contrary to earlier work that equips the coordinates with a specifically constructed group structure, our method employs the inherent geometric structure of the group-valued data and therefore features an improved statistical power in identifying shape differences. We demonstrate this in experiments on two anatomical datasets, including a comparison to the standard Euclidean as well as recent state-of-the-art nonlinear approaches to statistical shape modeling.}, language = {en} } @inproceedings{SahuStroemsdoerferMukhopadhyayetal., author = {Sahu, Manish and Str{\"o}msd{\"o}rfer, Ronja and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Endo-Sim2Real: Consistency learning-based domain adaptation for instrument segmentation}, series = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part III}, volume = {12263}, booktitle = {Proc.
Medical Image Computing and Computer Assisted Intervention (MICCAI), Part III}, publisher = {Springer Nature}, doi = {10.1007/978-3-030-59716-0_75}, abstract = {Surgical tool segmentation in endoscopic videos is an important component of computer-assisted intervention systems. The recent success of image-based solutions using fully-supervised deep learning approaches can be attributed to the collection of big labeled datasets. However, the annotation of a big dataset of real videos can be prohibitively expensive and time-consuming. Computer simulations could alleviate the manual labeling problem; however, models trained on simulated data do not generalize to real data. This work proposes a consistency-based framework for joint learning of simulated and real (unlabeled) endoscopic data to bridge this performance generalization issue. Empirical results on two datasets (15 videos of Cholec80 and the EndoVis'15 dataset) highlight the effectiveness of the proposed Endo-Sim2Real method for instrument segmentation. We compare the segmentation of the proposed approach with state-of-the-art solutions and show that our method improves segmentation in terms of both quality and quantity.}, language = {en} } @article{SahuSzengelMukhopadhyayetal.2020, author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Surgical phase recognition by learning phase transitions}, series = {Current Directions in Biomedical Engineering (CDBME)}, volume = {6}, journal = {Current Directions in Biomedical Engineering (CDBME)}, number = {1}, publisher = {De Gruyter}, doi = {10.1515/cdbme-2020-0037}, pages = {20200037}, year = {2020}, abstract = {Automatic recognition of surgical phases is an important component for developing an intra-operative context-aware system. Prior work in this area focuses on recognizing short-term tool usage patterns within surgical phases. However, the difference between intra- and inter-phase tool usage patterns has not been investigated for automatic phase recognition. We developed a Recurrent Neural Network (RNN), in particular a state-preserving Long Short Term Memory (LSTM) architecture, to utilize the long-term evolution of tool usage within complete surgical procedures. For fully automatic tool presence detection from surgical video frames, a Convolutional Neural Network (CNN) based architecture, namely ZIBNet, is employed. Our proposed approach outperformed EndoNet by 8.1\% on overall precision for phase detection tasks and 12.5\% on meanAP for tool recognition tasks.}, language = {en} } @misc{SahuSzengelMukhopadhyayetal., author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Analyzing laparoscopic cholecystectomy with deep learning: automatic detection of surgical tools and phases}, series = {28th International Congress of the European Association for Endoscopic Surgery (EAES)}, journal = {28th International Congress of the European Association for Endoscopic Surgery (EAES)}, abstract = {Motivation: The ever-rising volume of patients, the high maintenance cost of operating rooms, and the time-consuming analysis of surgical skills are fundamental problems that hamper the practical training of the next generation of surgeons. Hospitals prefer to keep surgeons busy in real operations rather than training young surgeons, for obvious economic reasons.
One fundamental need in surgical training is the reduction of the time needed by the senior surgeon to review the endoscopic procedures performed by the young surgeon while minimizing the subjective bias in evaluation. The unprecedented performance of deep learning ushers in the new age of data-driven automatic analysis of surgical skills. Method: Deep learning is capable of efficiently analyzing thousands of hours of laparoscopic video footage to provide an objective assessment of surgical skills. However, the traditional end-to-end setting of deep learning (video in, skill assessment out) is not explainable. Our strategy is to utilize the surgical process modeling framework to divide the surgical process into understandable components. This provides the opportunity to employ deep learning for superior yet automatic detection and evaluation of several aspects of laparoscopic cholecystectomy such as surgical tool and phase detection. We employ ZIBNet for the detection of surgical tool presence. ZIBNet employs pre-processing based on tool usage imbalance, a transfer-learned 50-layer residual network (ResNet-50), and temporal smoothing. To encode the temporal evolution of tool usage (over the entire video sequence) that relates to the surgical phases, Long Short Term Memory (LSTM) units are employed with long-term dependency. Dataset: We used the Cholec80 dataset, which consists of 80 videos of laparoscopic cholecystectomy performed by 13 surgeons, divided equally for training and testing. In these videos, up to three different tools (among 7 types of tools) can be present in a frame. Results: The mean average precision of the detection of all tools is 93.5, ranging between 86.8 and 99.3, a significant improvement (p < 0.01) over the previous state-of-the-art. We observed that less frequent tools such as scissors, irrigator, and specimen bag are more related to phase transitions. The overall precision (recall) of the detection of all surgical phases is 79.6 (81.3). Conclusion: While this is not the end goal for surgical skill analysis, the development of such a technological platform is essential toward a data-driven objective understanding of surgical skills. In the future, we plan to investigate surgeon-in-the-loop analysis and feedback for surgical skill analysis.}, language = {en} } @article{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {Rigid Motion Invariant Statistical Shape Modeling based on Discrete Fundamental Forms}, series = {Medical Image Analysis}, volume = {73}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2021.102178}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient, allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies providing increased statistical power. Additionally, as planar configurations form a submanifold in shape space, our representation allows for effective estimation of quasi-isometric surface flattenings. We evaluate the performance of our model w.r.t.
shape-based classification of hippocampus and femur malformations due to Alzheimer's disease and osteoarthritis, respectively. In particular, we achieve state-of-the-art accuracies, outperforming the standard Euclidean as well as a recent nonlinear approach, especially in the presence of sparse training data. To provide insight into the model's ability to capture biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @article{SahuMukhopadhyayZachow, author = {Sahu, Manish and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Simulation-to-Real domain adaptation with teacher-student learning for endoscopic instrument segmentation}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer Nature}, doi = {10.1007/s11548-021-02383-4}, pages = {849 -- 859}, abstract = {Purpose: Segmentation of surgical instruments in endoscopic video streams is essential for automated surgical scene understanding and process modeling. However, relying on fully supervised deep learning for this task is challenging because manual annotation occupies valuable time of clinical experts. Methods: We introduce a teacher-student learning approach that learns jointly from annotated simulation data and unlabeled real data to tackle the challenges in simulation-to-real unsupervised domain adaptation for endoscopic image segmentation. Results: Empirical results on three datasets highlight the effectiveness of the proposed framework over current approaches for the endoscopic instrument segmentation task. Additionally, we provide an analysis of major factors affecting the performance on all datasets to highlight the strengths and failure modes of our approach. Conclusions: We show that our proposed approach can successfully exploit the unlabeled real endoscopic video frames and improve generalization performance over pure simulation-based training and the previous state-of-the-art. This takes us one step closer to effective segmentation of surgical instruments in the annotation-scarce setting.}, language = {en} } @inproceedings{EstacioEhlkeTacketal., author = {Estacio, Laura and Ehlke, Moritz and Tack, Alexander and Castro-Gutierrez, Eveling and Lamecker, Hans and Mora, Rensso and Zachow, Stefan}, title = {Unsupervised Detection of Disturbances in 2D Radiographs}, series = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, booktitle = {2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI)}, doi = {10.1109/ISBI48211.2021.9434091}, pages = {367 -- 370}, abstract = {We present a method based on a generative model for the detection of disturbances such as prostheses, screws, zippers, and metals in 2D radiographs. The generative model is trained in an unsupervised fashion using clinical radiographs as well as simulated data, none of which contain disturbances. Our approach employs a latent space consistency loss which has the benefit of identifying similarities, and is enforced to reconstruct X-rays without disturbances. In order to detect images with disturbances, an anomaly score is computed employing the Fr{\´e}chet distance between the input X-ray and the one reconstructed by our generative model. Validation was performed using clinical pelvis radiographs. We achieved an AUC of 0.77 and 0.83 with clinical and synthetic data, respectively.
The results demonstrated the good accuracy of our method for detecting outliers as well as the advantage of utilizing synthetic data.}, language = {en} } @misc{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {Geodesic B-Score for Improved Assessment of Knee Osteoarthritis}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81930}, abstract = {Three-dimensional medical imaging enables detailed understanding of osteoarthritis structural status. However, there remains a vast need for automatic, thus, reader-independent measures that provide reliable assessment of subject-specific clinical outcomes. To this end, we derive a consistent generalization of the recently proposed B-score to Riemannian shape spaces. We further present an algorithmic treatment yielding simple, yet efficient computations allowing for analysis of large shape populations with several thousand samples. Our intrinsic formulation exhibits improved discrimination ability over its Euclidean counterpart, which we demonstrate for predictive validity on assessing risks of total knee replacement. This result highlights the potential of the geodesic B-score to enable improved personalized assessment and stratification for interventions.}, language = {en} } @article{TackPreimZachow, author = {Tack, Alexander and Preim, Bernhard and Zachow, Stefan}, title = {Fully automated Assessment of Knee Alignment from Full-Leg X-Rays employing a "YOLOv4 And Resnet Landmark regression Algorithm" (YARLA): Data from the Osteoarthritis Initiative}, series = {Computer Methods and Programs in Biomedicine}, volume = {205}, journal = {Computer Methods and Programs in Biomedicine}, number = {106080}, doi = {10.1016/j.cmpb.2021.106080}, abstract = {We present a method for the quantification of knee alignment from full-leg X-Rays. A state-of-the-art object detector, YOLOv4, was trained to locate regions of interest (ROIs) in full-leg X-Ray images for the hip joint, the knee, and the ankle. Residual neural networks (ResNets) were trained to regress landmark coordinates for each ROI. Based on the detected landmarks, the knee alignment, i.e., the hip-knee-ankle (HKA) angle, was computed. The accuracy of landmark detection was evaluated by a comparison to manually placed landmarks for 360 legs in 180 X-Rays. The accuracy of HKA angle computations was assessed on the basis of 2,943 X-Rays. Results of YARLA were compared to the results of two independent image reading studies (Cooke; Duryea), both publicly accessible via the Osteoarthritis Initiative. The agreement was evaluated using Spearman's Rho and weighted kappa, as well as regarding the correspondence of the class assignment (varus/neutral/valgus). The average difference between YARLA and manually placed landmarks was less than 2.0 ± 1.5 mm for all structures (hip, knee, ankle). The average mismatch between HKA angle determinations of Cooke and Duryea was 0.09 ± 0.63°; YARLA resulted in a mismatch of 0.10 ± 0.74° compared to Cooke and of 0.18 ± 0.64° compared to Duryea. Cooke and Duryea agreed almost perfectly with respect to a weighted kappa value of 0.86, and showed excellent reliability as measured by a Spearman's Rho value of 0.99. Similar values were achieved by YARLA, i.e., a weighted kappa value of 0.83 and 0.87 and a Spearman's Rho value of 0.98 and 0.99 relative to Cooke and Duryea, respectively.
Cooke and Duryea agreed in 92\% of all class assignments and YARLA did so in 90\% against Cooke and 92\% against Duryea. In conclusion, YARLA achieved results comparable to those of human experts and thus provides a basis for an automated assessment of knee alignment in full-leg X-Rays.}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {Geodesic B-Score for Improved Assessment of Knee Osteoarthritis}, series = {Proc. Information Processing in Medical Imaging (IPMI)}, booktitle = {Proc. Information Processing in Medical Imaging (IPMI)}, doi = {10.1007/978-3-030-78191-0_14}, pages = {177 -- 188}, abstract = {Three-dimensional medical imaging enables detailed understanding of osteoarthritis structural status. However, there remains a vast need for automatic, thus, reader-independent measures that provide reliable assessment of subject-specific clinical outcomes. To this end, we derive a consistent generalization of the recently proposed B-score to Riemannian shape spaces. We further present an algorithmic treatment yielding simple, yet efficient computations allowing for analysis of large shape populations with several thousand samples. Our intrinsic formulation exhibits improved discrimination ability over its Euclidean counterpart, which we demonstrate for predictive validity on assessing risks of total knee replacement. This result highlights the potential of the geodesic B-score to enable improved personalized assessment and stratification for interventions.}, language = {en} } @article{HembusAmbellanZachowetal.2021, author = {Hembus, Jessica and Ambellan, Felix and Zachow, Stefan and Bader, Rainer}, title = {Establishment of a rolling-sliding test bench to analyze abrasive wear propagation of different bearing materials for knee implants}, series = {Applied Sciences}, volume = {11}, journal = {Applied Sciences}, number = {4}, doi = {10.3390/app11041886}, pages = {15}, year = {2021}, abstract = {Currently, new materials for knee implants need to be extensively and expensively tested in a knee wear simulator in a realized design. However, using a rolling-sliding test bench, these materials can be examined under the same test conditions but with simplified geometries. In the present study, the test bench was optimized, and forces were adapted to the physiological contact pressure in the knee joint using the available geometric parameters. Various polymers made of polyethylene and polyurethane, articulating against test wheels made of cobalt-chromium and aluminum titanate, were tested in the test bench using adapted forces based on ISO 14243-1. Polyurethane materials showed distinctly higher wear rates than polyethylene materials and showed inadequate wear resistance for use as knee implant material. Thus, the rolling-sliding test bench is an adaptable test setup for evaluating newly developed bearing materials for knee implants. It combines the advantages of screening and simulator tests and allows testing of various bearing materials under physiological load and tribological conditions of the human knee joint.
The wear behavior of different material compositions and the influence of surface geometry and quality can be initially investigated without the need to produce complex implant prototypes of total knee endoprostheses or interpositional spacers.}, language = {en} } @article{PimentelSzengelEhlkeetal., author = {Pimentel, Pedro and Szengel, Angelika and Ehlke, Moritz and Lamecker, Hans and Zachow, Stefan and Estacio, Laura and Doenitz, Christian and Ramm, Heiko}, title = {Automated Virtual Reconstruction of Large Skull Defects using Statistical Shape Models and Generative Adversarial Networks}, series = {Towards the Automatization of Cranial Implant Design in Cranioplasty}, volume = {12439}, journal = {Towards the Automatization of Cranial Implant Design in Cranioplasty}, editor = {Li, Jianning and Egger, Jan}, edition = {1}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-030-64327-0_3}, pages = {16 -- 27}, abstract = {We present an automated method for extrapolating missing regions in label data of the skull in an anatomically plausible manner. The ultimate goal is to design patient-specific cranial implants for correcting large, arbitrarily shaped defects of the skull that can, for example, result from trauma of the head. Our approach utilizes a 3D statistical shape model (SSM) of the skull and a 2D generative adversarial network (GAN) that is trained in an unsupervised fashion from samples of healthy patients alone. By fitting the SSM to given input labels containing the skull defect, a first approximation of the healthy state of the patient is obtained. The GAN is then applied to further correct and smooth the output of the SSM in an anatomically plausible manner. Finally, the defect region is extracted using morphological operations and subtraction between the extrapolated healthy state of the patient and the defective input labels. The method is trained and evaluated based on data from the MICCAI 2020 AutoImplant challenge. It produces state-of-the-art results on regularly shaped cut-outs that were present in the training and testing data of the challenge. Furthermore, due to the unsupervised nature of the approach, the method generalizes well to previously unseen defects of varying shapes that were only present in the hidden test dataset.}, language = {en} } @article{OeltzeJaffraMeuschkeNeugebaueretal., author = {Oeltze-Jaffra, Steffen and Meuschke, Monique and Neugebauer, Mathias and Saalfeld, Sylvia and Lawonn, Kai and Janiga, Gabor and Hege, Hans-Christian and Zachow, Stefan and Preim, Bernhard}, title = {Generation and Visual Exploration of Medical Flow Data: Survey, Research Trends, and Future Challenges}, series = {Computer Graphics Forum}, volume = {38}, journal = {Computer Graphics Forum}, number = {1}, publisher = {Wiley}, doi = {10.1111/cgf.13394}, pages = {87 -- 125}, abstract = {Simulations and measurements of blood and air flow inside the human circulatory and respiratory system play an increasingly important role in personalized medicine for prevention, diagnosis, and treatment of diseases. This survey focuses on three main application areas. (1) Computational Fluid Dynamics (CFD) simulations of blood flow in cerebral aneurysms assist in predicting the outcome of this pathologic process and of therapeutic interventions. (2) CFD simulations of nasal airflow allow for investigating the effects of obstructions and deformities and provide therapy decision support.
(3) 4D Phase-Contrast (4D PC) Magnetic Resonance Imaging (MRI) of aortic hemodynamics supports the diagnosis of various vascular and valve pathologies as well as their treatment. An investigation of the complex and often dynamic simulation and measurement data requires the coupling of sophisticated visualization, interaction, and data analysis techniques. In this paper, we survey the large body of work that has been conducted within this realm. We extend previous surveys by incorporating nasal airflow, addressing the joint investigation of blood flow and vessel wall properties, and providing a more fine-granular taxonomy of the existing techniques. From the survey, we extract major research trends and identify open problems and future challenges. The survey is intended for researchers interested in medical flow, but also, more generally, in the combined visualization of physiology and anatomy, the extraction of features from flow field data and feature-based visualization, the visual comparison of different simulation results, and the interactive visual analysis of the flow field and derived characteristics.}, language = {en} } @misc{AmbellanLameckervonTycowiczetal., author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, issn = {1438-0064}, doi = {10.1007/978-3-030-19385-0_5}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72699}, abstract = {In our chapter we describe how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions, yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape, the appearance of objects will also be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are, for instance, the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance leads to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will briefly describe clinical applications where such models have been successfully employed.
Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. SSMs consisting of several thousand objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @inproceedings{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, series = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part IV}, volume = {11767}, booktitle = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part IV}, publisher = {Springer}, doi = {10.1007/978-3-030-32251-9_3}, pages = {21 -- 29}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient, allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies, providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach, especially in the presence of sparse training data. To provide insight into the model's ability to capture natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @article{HildebrandtBrueningSchmidtetal., author = {Hildebrandt, Thomas and Bruening, Jan Joris and Schmidt, Nora Laura and Lamecker, Hans and Heppt, Werner and Zachow, Stefan and Goubergrits, Leonid}, title = {The Healthy Nasal Cavity - Characteristics of Morphology and Related Airflow Based on a Statistical Shape Model Viewed from a Surgeon's Perspective}, series = {Facial Plastic Surgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677721}, pages = {9 -- 13}, abstract = {Functional surgery on the nasal framework requires referential criteria to objectively assess nasal breathing for indication and follow-up. This motivated us to generate a mean geometry of the nasal cavity based on a statistical shape model. In this study, the authors could demonstrate that the introduced nasal cavity's mean geometry features characteristics of the inner shape and airflow, which are commonly observed in symptom-free subjects. Therefore, the mean geometry might serve as a reference-like model when one considers qualitative aspects. However, to facilitate quantitative considerations and statistical inference, further research is necessary.
Additionally, the authors were able to obtain details about the importance of the isthmus nasi and the inferior turbinate for the intranasal airstream.}, language = {en} } @article{HildebrandtBrueningLameckeretal., author = {Hildebrandt, Thomas and Bruening, Jan Joris and Lamecker, Hans and Zachow, Stefan and Heppt, Werner and Schmidt, Nora and Goubergrits, Leonid}, title = {Digital Analysis of Nasal Airflow Facilitating Decision Support in Rhinosurgery}, series = {Facial Plastic Surgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677720}, pages = {1 -- 8}, abstract = {Successful functional surgery on the nasal framework requires reliable and comprehensive diagnosis. In this regard, the authors introduce a new methodology: Digital Analysis of Nasal Airflow (diANA). It is based on computational fluid dynamics, a statistical shape model of the healthy nasal cavity and rhinologic expertise. diANA necessitates an anonymized tomographic dataset of the paranasal sinuses including the complete nasal cavity and, when available, clinical information. The principle of diANA is to compare the morphology and the respective airflow of an individual nose with those of a reference. This enables morphometric aberrations and consecutive flow field anomalies to be localized and quantified within a patient's nasal cavity. Finally, an elaborated expert opinion with instructive visualizations is provided. Using diANA might support surgeons in decision-making, help avoid unnecessary surgery, and provide more precision and target orientation for indicated operations.}, language = {en} } @incollection{AmbellanLameckervonTycowiczetal., author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, series = {Biomedical Visualisation}, volume = {3}, booktitle = {Biomedical Visualisation}, number = {1156}, editor = {Rea, Paul M.}, edition = {1}, publisher = {Springer Nature Switzerland AG}, isbn = {978-3-030-19384-3}, doi = {10.1007/978-3-030-19385-0_5}, pages = {67 -- 84}, abstract = {In our chapter we describe how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models out of many such reconstructions, yielding a new kind of anatomy that not only allows quantitative analysis of anatomical variation but also visual exploration and educational visualization. Future digital anatomy atlases will not only show a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved e.g. by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape, the appearance of objects will also be integrated into the concept.
Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are, for instance, the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance leads to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will briefly describe clinical applications where such models have been successfully employed. Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. SSMs consisting of several thousand objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus being the foundation for advanced diagnostic disease scoring.}, language = {en} } @inproceedings{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {Medical Imaging with Deep Learning}, booktitle = {Medical Imaging with Deep Learning}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The method is evaluated on data of the MICCAI grand challenge "Segmentation of Knee Images 2010". For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy. In conclusion, combining anatomical knowledge using SSMs with localized classification via CNNs results in a state-of-the-art segmentation method.}, language = {en} } @misc{SahuDillMukhopadyayetal., author = {Sahu, Manish and Dill, Sabrina and Mukhopadyay, Anirban and Zachow, Stefan}, title = {Surgical Tool Presence Detection for Cataract Procedures}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69110}, abstract = {This article outlines the submission to the CATARACTS challenge for automatic tool presence detection [1].
Our approach for this multi-label classification problem comprises labelset-based sampling, a CNN architecture, and temporal smoothing as described in [3], which we call ZIB-Res-TS.}, language = {en} } @article{AlHajjSahuLamardetal., author = {Al Hajj, Hassan and Sahu, Manish and Lamard, Mathieu and Conze, Pierre-Henri and Roychowdhury, Soumali and Hu, Xiaowei and Marsalkaite, Gabija and Zisimopoulos, Odysseas and Dedmari, Muneer Ahmad and Zhao, Fenqiang and Prellberg, Jonas and Galdran, Adrian and Araujo, Teresa and Vo, Duc My and Panda, Chandan and Dahiya, Navdeep and Kondo, Satoshi and Bian, Zhengbing and Bialopetravicius, Jonas and Qiu, Chenghui and Dill, Sabrina and Mukhopadyay, Anirban and Costa, Pedro and Aresta, Guilherme and Ramamurthy, Senthil and Lee, Sang-Woong and Campilho, Aurelio and Zachow, Stefan and Xia, Shunren and Conjeti, Sailesh and Armaitis, Jogundas and Heng, Pheng-Ann and Vahdat, Arash and Cochener, Beatrice and Quellec, Gwenole}, title = {CATARACTS: Challenge on Automatic Tool Annotation for cataRACT Surgery}, series = {Medical Image Analysis}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, publisher = {Elsevier}, doi = {10.1016/j.media.2018.11.008}, pages = {24 -- 41}, abstract = {Surgical tool detection is attracting increasing attention from the medical image analysis community. The goal generally is not to precisely locate tools in images, but rather to indicate which tools are being used by the surgeon at each instant. The main motivation for annotating tool usage is to design efficient solutions for surgical workflow analysis, with potential applications in report generation, surgical training and even real-time decision support. Most existing tool annotation algorithms focus on laparoscopic surgeries. However, with 19 million interventions per year, the most common surgical procedure in the world is cataract surgery. The CATARACTS challenge was organized in 2017 to evaluate tool annotation algorithms in the specific context of cataract surgery. It relies on more than nine hours of videos, from 50 cataract surgeries, in which the presence of 21 surgical tools was manually annotated by two experts. With 14 participating teams, this challenge can be considered a success. As might be expected, the submitted solutions are based on deep learning. This paper thoroughly evaluates these solutions: in particular, the quality of their annotations is compared to that of human interpretations. Next, lessons learnt from the differential analysis of these solutions are discussed. We expect that they will guide the design of efficient surgery monitoring tools in the near future.}, language = {en} } @misc{TackZachow, author = {Tack, Alexander and Zachow, Stefan}, title = {Accurate Automated Volumetry of Cartilage of the Knee using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-71439}, abstract = {Volumetry of the cartilage of the knee, as needed for the assessment of knee osteoarthritis (KOA), is typically performed in a tedious and subjective process. We present an automated segmentation-based method for the quantification of cartilage volume by employing 3D Convolutional Neural Networks (CNNs). CNNs were trained in a supervised manner using magnetic resonance imaging data as well as cartilage volumetry readings given by clinical experts for 1378 subjects.
It was shown that 3D CNNs can be employed for cartilage volumetry with an accuracy similar to expert volumetry readings. In the future, accurate automated cartilage volumetry might support both the diagnosis of KOA and the assessment of KOA progression via longitudinal analysis.}, language = {en} } @misc{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72704}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging (MRI) that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data from the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets from the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers is achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We make the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining localized classification via CNNs with statistical anatomical knowledge via SSMs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{BrueningHildebrandtHepptetal., author = {Br{\"u}ning, Jan and Hildebrandt, Thomas and Heppt, Werner and Schmidt, Nora and Lamecker, Hans and Szengel, Angelika and Amiridze, Natalja and Ramm, Heiko and Bindernagel, Matthias and Zachow, Stefan and Goubergrits, Leonid}, title = {Characterization of the Airflow within an Average Geometry of the Healthy Human Nasal Cavity}, series = {Scientific Reports}, volume = {10}, journal = {Scientific Reports}, number = {3755}, doi = {10.1038/s41598-020-60755-3}, abstract = {This study's objective was the generation of a standardized geometry of the healthy nasal cavity. An average geometry of the healthy nasal cavity was generated using a statistical shape model based on 25 symptom-free subjects. Airflow within the average geometry and the individual geometries was calculated using fluid simulations. Integral measures of the nasal resistance, wall shear stresses (WSS), and velocities were calculated, as well as cross-sectional areas (CSA). Furthermore, individual WSS and static pressure distributions were mapped onto the average geometry. The average geometry featured an overall more regular shape that resulted in less resistance, reduced wall shear stresses and velocities compared to the median of the 25 geometries.
Spatial distributions of WSS and pressure of the average geometry agreed well with the average distributions of all individual geometries. The minimal CSA of the average geometry was larger than the median of all individual geometries (83.4 vs. 74.7 mm²). The airflow observed within the average geometry of the healthy nasal cavity did not equal the average airflow of the individual geometries. While differences observed for integral measures were notable, the calculated values for the average geometry lay within the distributions of the individual parameters. Spatially resolved parameters differed less prominently.}, language = {en} } @article{TackMukhopadhyayZachow, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {Osteoarthritis and Cartilage}, volume = {26}, journal = {Osteoarthritis and Cartilage}, number = {5}, doi = {10.1016/j.joca.2018.02.907}, pages = {680 -- 688}, abstract = {Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci, an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t. groups of OA, JSN, and WOMAC pain. It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @misc{TackMukhopadhyayZachow, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative (Supplementary Material)}, doi = {10.12752/4.TMZ.1.0}, abstract = {Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations.
Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci, an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t. groups of OA, JSN, and WOMAC pain. It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @misc{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative (Supplementary Material)}, series = {Medical Image Analysis}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, doi = {10.12752/4.ATEZ.1.0}, pages = {109 -- 118}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data of the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets of the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We made the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation.
In conclusion, combining statistical anatomical knowledge via SSMs with localized classification via CNNs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{HoffmannLemanisWulffetal., author = {Hoffmann, Rene and Lemanis, Robert and Wulff, Lena and Zachow, Stefan and Lukeneder, Alexander and Klug, Christian and Keupp, Helmut}, title = {Traumatic events in the life of the deep-sea cephalopod mollusc, the coleoid Spirula spirula}, series = {Deep Sea Research Part I: Oceanographic Research Papers}, volume = {142}, journal = {Deep Sea Research Part I: Oceanographic Research Papers}, number = {12}, doi = {10.1016/j.dsr.2018.10.007}, pages = {127 -- 144}, abstract = {Here, we report on different types of shell pathologies of the enigmatic deep-sea (mesopelagic) cephalopod Spirula spirula. For the first time, we apply non-invasive imaging methods to document trauma-induced changes in shell shapes, reconstruct the different causes and effects of these pathologies, unravel the etiology, and attempt to quantify the efficiency of the buoyancy apparatus. We have analysed 2D and 3D shell parameters from eleven shells collected as beach findings from the Canary Islands (Gran Canaria and Fuerteventura), Western Australia, and the Maldives. All shells were scanned with a nanotom-m computer tomograph. Seven shells were likely injured by predator attacks (fishes, cephalopods, or crustaceans), one specimen was infested by an endoparasite (potentially Digenea), one shell shows signs of inflammation, and one shell shows large fluctuations of chamber volumes without any signs of pathology. These fluctuations are potential indicators of a stressed environment. Pathological shells represent the most deviant morphologies of a single species and can therefore be regarded as morphological end-members. The changes in the shell volume / chamber volume ratio were assessed in order to evaluate the functional tolerance of the buoyancy apparatus, showing that these changes had little effect.}, language = {en} } @article{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {Medical Image Analysis}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, doi = {10.1016/j.media.2018.11.009}, pages = {109 -- 118}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data of the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets of the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e.
507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We made the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining statistical anatomical knowledge via SSMs with localized classification via CNNs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{LiPimentelSzengeletal., author = {Li, Jianning and Pimentel, Pedro and Szengel, Angelika and Ehlke, Moritz and Lamecker, Hans and Zachow, Stefan and Estacio, Laura and Doenitz, Christian and Ramm, Heiko and Shi, Haochen and Chen, Xiaojun and Matzkin, Franco and Newcombe, Virginia and Ferrante, Enzo and Jin, Yuan and Ellis, David G. and Aizenberg, Michele R. and Kodym, Oldrich and Spanel, Michal and Herout, Adam and Mainprize, James G. and Fishman, Zachary and Hardisty, Michael R. and Bayat, Amirhossein and Shit, Suprosanna and Wang, Bomin and Liu, Zhi and Eder, Matthias and Pepe, Antonio and Gsaxner, Christina and Alves, Victor and Zefferer, Ulrike and von Campe, Cord and Pistracher, Karin and Sch{\"a}fer, Ute and Schmalstieg, Dieter and Menze, Bjoern H. and Glocker, Ben and Egger, Jan}, title = {AutoImplant 2020 - First MICCAI Challenge on Automatic Cranial Implant Design}, series = {IEEE Transactions on Medical Imaging}, volume = {40}, journal = {IEEE Transactions on Medical Imaging}, number = {9}, issn = {0278-0062}, doi = {10.1109/TMI.2021.3077047}, pages = {2329 -- 2342}, abstract = {The aim of this paper is to provide a comprehensive overview of the MICCAI 2020 AutoImplant Challenge. The approaches and publications submitted and accepted within the challenge will be summarized and reported, highlighting common algorithmic trends and algorithmic diversity. Furthermore, the evaluation results will be presented, compared, and discussed with regard to the challenge aim: seeking low-cost, fast, and fully automated solutions for cranial implant design. Based on feedback from collaborating neurosurgeons, this paper concludes by stating open issues and post-challenge requirements for intra-operative use.}, language = {en} } @article{PichtLeCalveTomaselloetal., author = {Picht, Thomas and Le Calve, Maxime and Tomasello, Rosario and Fekonja, Lucius and Gholami, Mohammad Fardin and Bruhn, Matthias and Zwick, Carola and Rabe, J{\"u}rgen P. and M{\"u}ller-Birn, Claudia and Vajkoczy, Peter and Sauer, Igor M. and Zachow, Stefan and Nyakatura, John A. and Ribault, Patricia and Pulverm{\"u}ller, Friedemann}, title = {A note on neurosurgical resection and why we need to rethink cutting}, series = {Neurosurgery}, volume = {89}, journal = {Neurosurgery}, number = {5}, doi = {10.1093/neuros/nyab326}, pages = {289 -- 291}, language = {en} }