@inproceedings{AmbellanZachowvonTycowicz, author = {Ambellan, Felix and Zachow, Stefan and von Tycowicz, Christoph}, title = {A Surface-Theoretic Approach for Statistical Shape Modeling}, series = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part IV}, volume = {11767}, booktitle = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part IV}, publisher = {Springer}, doi = {10.1007/978-3-030-32251-9_3}, pages = {21 -- 29}, abstract = {We present a novel approach for nonlinear statistical shape modeling that is invariant under Euclidean motion and thus alignment-free. By analyzing metric distortion and curvature of shapes as elements of Lie groups in a consistent Riemannian setting, we construct a framework that reliably handles large deformations. Due to the explicit character of Lie group operations, our non-Euclidean method is very efficient, allowing for fast and numerically robust processing. This facilitates Riemannian analysis of large shape populations accessible through longitudinal and multi-site imaging studies, providing increased statistical power. We evaluate the performance of our model w.r.t. shape-based classification of pathological malformations of the human knee and show that it outperforms the standard Euclidean as well as a recent nonlinear approach, especially in the presence of sparse training data. To provide insight into the model's ability to capture natural biological shape variability, we carry out an analysis of specificity and generalization ability.}, language = {en} } @article{HildebrandtBrueningSchmidtetal., author = {Hildebrandt, Thomas and Bruening, Jan Joris and Schmidt, Nora Laura and Lamecker, Hans and Heppt, Werner and Zachow, Stefan and Goubergrits, Leonid}, title = {The Healthy Nasal Cavity - Characteristics of Morphology and Related Airflow Based on a Statistical Shape Model Viewed from a Surgeon's Perspective}, series = {Facial Plastic Surgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677721}, pages = {9 -- 13}, abstract = {Functional surgery on the nasal framework requires referential criteria to objectively assess nasal breathing for indication and follow-up. This motivated us to generate a mean geometry of the nasal cavity based on a statistical shape model. In this study, the authors demonstrate that the introduced mean geometry of the nasal cavity features characteristics of the inner shape and airflow which are commonly observed in symptom-free subjects. Therefore, the mean geometry might serve as a reference-like model when one considers qualitative aspects. However, to facilitate quantitative considerations and statistical inference, further research is necessary. Additionally, the authors were able to obtain details about the importance of the isthmus nasi and the inferior turbinate for the intranasal airstream.}, language = {en} } @article{HildebrandtBrueningLameckeretal., author = {Hildebrandt, Thomas and Bruening, Jan Joris and Lamecker, Hans and Zachow, Stefan and Heppt, Werner and Schmidt, Nora and Goubergrits, Leonid}, title = {Digital Analysis of Nasal Airflow Facilitating Decision Support in Rhinosurgery}, series = {Facial Plastic Surgery}, volume = {35}, journal = {Facial Plastic Surgery}, number = {1}, doi = {10.1055/s-0039-1677720}, pages = {1 -- 8}, abstract = {Successful functional surgery on the nasal framework requires reliable and comprehensive diagnosis. 
In this regard, the authors introduce a new methodology: Digital Analysis of Nasal Airflow (diANA). It is based on computational fluid dynamics, a statistical shape model of the healthy nasal cavity, and rhinologic expertise. diANA requires an anonymized tomographic dataset of the paranasal sinuses including the complete nasal cavity and, when available, clinical information. The principle of diANA is to compare the morphology and the respective airflow of an individual nose with those of a reference. This enables morphometric aberrations and the resulting flow field anomalies to be localized and quantified within a patient's nasal cavity. Finally, a detailed expert opinion with instructive visualizations is provided. Using diANA might support surgeons in decision-making, help avoid unnecessary surgery, and provide more precision and target orientation for indicated operations.}, language = {en} } @incollection{AmbellanLameckervonTycowiczetal., author = {Ambellan, Felix and Lamecker, Hans and von Tycowicz, Christoph and Zachow, Stefan}, title = {Statistical Shape Models - Understanding and Mastering Variation in Anatomy}, series = {Biomedical Visualisation}, volume = {3}, booktitle = {Biomedical Visualisation}, number = {1156}, editor = {Rea, Paul M.}, edition = {1}, publisher = {Springer Nature Switzerland AG}, isbn = {978-3-030-19384-3}, doi = {10.1007/978-3-030-19385-0_5}, pages = {67 -- 84}, abstract = {In this chapter we describe how to reconstruct three-dimensional anatomy from medical image data and how to build Statistical 3D Shape Models from many such reconstructions, yielding a new kind of anatomy that allows not only quantitative analysis of anatomical variation but also visual exploration and educational visualization. Future digital anatomy atlases will show not only a static (average) anatomy but also its normal or pathological variation in three or even four dimensions, hence illustrating growth and/or disease progression. Statistical Shape Models (SSMs) are geometric models that describe a collection of semantically similar objects in a very compact way. SSMs represent an average shape of many three-dimensional objects as well as their variation in shape. The creation of SSMs requires a correspondence mapping, which can be achieved, e.g., by parameterization with a respective sampling. If a corresponding parameterization over all shapes can be established, variation between individual shape characteristics can be mathematically investigated. We will explain what Statistical Shape Models are and how they are constructed. Extensions of Statistical Shape Models will be motivated for articulated coupled structures. In addition to shape, the appearance of objects will also be integrated into the concept. Appearance is a visual feature independent of shape that depends on observers or imaging techniques. Typical appearances are, for instance, the color and intensity of a visual surface of an object under particular lighting conditions, or measurements of material properties with computed tomography (CT) or magnetic resonance imaging (MRI). A combination of (articulated) statistical shape models with statistical models of appearance leads to articulated Statistical Shape and Appearance Models (a-SSAMs). After giving various examples of SSMs for human organs, skeletal structures, faces, and bodies, we will briefly describe clinical applications where such models have been successfully employed. 
Statistical Shape Models are the foundation for the analysis of anatomical cohort data, where characteristic shapes are correlated to demographic or epidemiologic data. SSMs consisting of several thousand objects offer, in combination with statistical methods or machine learning techniques, the possibility to identify characteristic clusters, thus forming the foundation for advanced diagnostic disease scoring.}, language = {en} } @article{AlHajjSahuLamardetal., author = {Al Hajj, Hassan and Sahu, Manish and Lamard, Mathieu and Conze, Pierre-Henri and Roychowdhury, Soumali and Hu, Xiaowei and Marsalkaite, Gabija and Zisimopoulos, Odysseas and Dedmari, Muneer Ahmad and Zhao, Fenqiang and Prellberg, Jonas and Galdran, Adrian and Araujo, Teresa and Vo, Duc My and Panda, Chandan and Dahiya, Navdeep and Kondo, Satoshi and Bian, Zhengbing and Bialopetravicius, Jonas and Qiu, Chenghui and Dill, Sabrina and Mukhopadyay, Anirban and Costa, Pedro and Aresta, Guilherme and Ramamurthy, Senthil and Lee, Sang-Woong and Campilho, Aurelio and Zachow, Stefan and Xia, Shunren and Conjeti, Sailesh and Armaitis, Jogundas and Heng, Pheng-Ann and Vahdat, Arash and Cochener, Beatrice and Quellec, Gwenole}, title = {CATARACTS: Challenge on Automatic Tool Annotation for cataRACT Surgery}, series = {Medical Image Analysis}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, publisher = {Elsevier}, doi = {10.1016/j.media.2018.11.008}, pages = {24 -- 41}, abstract = {Surgical tool detection is attracting increasing attention from the medical image analysis community. The goal is generally not to precisely locate tools in images, but rather to indicate which tools are being used by the surgeon at each instant. The main motivation for annotating tool usage is to design efficient solutions for surgical workflow analysis, with potential applications in report generation, surgical training, and even real-time decision support. Most existing tool annotation algorithms focus on laparoscopic surgeries. However, with 19 million interventions per year, the most common surgical procedure in the world is cataract surgery. The CATARACTS challenge was organized in 2017 to evaluate tool annotation algorithms in the specific context of cataract surgery. It relies on more than nine hours of videos, from 50 cataract surgeries, in which the presence of 21 surgical tools was manually annotated by two experts. With 14 participating teams, this challenge can be considered a success. As might be expected, the submitted solutions are based on deep learning. This paper thoroughly evaluates these solutions: in particular, the quality of their annotations is compared to that of human interpretations. Next, lessons learnt from the differential analysis of these solutions are discussed. We expect that they will guide the design of efficient surgery monitoring tools in the near future.}, language = {en} } @misc{TackZachow, author = {Tack, Alexander and Zachow, Stefan}, title = {Accurate Automated Volumetry of Cartilage of the Knee using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-71439}, abstract = {Volumetry of the cartilage of the knee, as needed for the assessment of knee osteoarthritis (KOA), is typically performed in a tedious and subjective process. We present an automated segmentation-based method for the quantification of cartilage volume by employing 3D Convolutional Neural Networks (CNNs). 
CNNs were trained in a supervised manner using magnetic resonance imaging data as well as cartilage volumetry readings given by clinical experts for 1378 subjects. It was shown that 3D CNNs can be employed for cartilage volumetry with an accuracy similar to that of expert volumetry readings. In the future, accurate automated cartilage volumetry might support both the diagnosis of KOA and the assessment of KOA progression via longitudinal analysis.}, language = {en} } @misc{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72704}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging (MRI) that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data from the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets from the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers is achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We make the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining localized classification via CNNs with statistical anatomical knowledge via SSMs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @misc{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative (Supplementary Material)}, series = {Medical Image Analysis}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, doi = {10.12752/4.ATEZ.1.0}, pages = {109 -- 118}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data of the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets of the SKI10 challenge. 
For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We made the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining statistical anatomical knowledge via SSMs with the localized classification via CNNs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{AmbellanTackEhlkeetal., author = {Ambellan, Felix and Tack, Alexander and Ehlke, Moritz and Zachow, Stefan}, title = {Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge and Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {Medical Image Analysis}, volume = {52}, journal = {Medical Image Analysis}, number = {2}, doi = {10.1016/j.media.2018.11.009}, pages = {109 -- 118}, abstract = {We present a method for the automated segmentation of knee bones and cartilage from magnetic resonance imaging that combines a priori knowledge of anatomical shape with Convolutional Neural Networks (CNNs). The proposed approach incorporates 3D Statistical Shape Models (SSMs) as well as 2D and 3D CNNs to achieve a robust and accurate segmentation of even highly pathological knee structures. The shape models and neural networks employed are trained using data of the Osteoarthritis Initiative (OAI) and the MICCAI grand challenge "Segmentation of Knee Images 2010" (SKI10), respectively. We evaluate our method on 40 validation and 50 submission datasets of the SKI10 challenge. For the first time, an accuracy equivalent to the inter-observer variability of human readers has been achieved in this challenge. Moreover, the quality of the proposed method is thoroughly assessed using various measures for data from the OAI, i.e. 507 manual segmentations of bone and cartilage, and 88 additional manual segmentations of cartilage. Our method yields sub-voxel accuracy for both OAI datasets. We made the 507 manual segmentations as well as our experimental setup publicly available to further aid research in the field of medical image segmentation. In conclusion, combining statistical anatomical knowledge via SSMs with the localized classification via CNNs results in a state-of-the-art segmentation method for knee bones and cartilage from MRI data.}, language = {en} } @article{AkbariShandizBoulosSavarssonetal.2017, author = {Akbari Shandiz, Mohsen and Boulos, Paul and S{\ae}varsson, Stefan and Ramm, Heiko and Fu, Chun Kit and Miller, Stephen and Zachow, Stefan and Anglin, Carolyn}, title = {Changes in Knee Shape and Geometry Resulting from Total Knee Arthroplasty}, series = {Journal of Engineering in Medicine}, volume = {232}, journal = {Journal of Engineering in Medicine}, number = {1}, doi = {10.1177/0954411917743274}, pages = {67 -- 79}, year = {2017}, abstract = {Changes in knee shape and geometry resulting from total knee arthroplasty can affect patients in numerous important ways: pain, function, stability, range of motion, and kinematics. 
Quantitative data concerning these changes have not been previously available, to our knowledge, yet are essential to understand individual experiences of total knee arthroplasty and thereby improve outcomes for all patients. The limiting factor has been the challenge of accurately measuring these changes. Our study objective was to develop a conceptual framework and analysis method to investigate changes in knee shape and geometry, and prospectively apply it to a sample total knee arthroplasty population. Using clinically available computed tomography and radiography imaging systems, the three-dimensional knee shape and geometry of nine patients (eight varus and one valgus) were compared before and after total knee arthroplasty. All patients had largely good outcomes after their total knee arthroplasty. Knee shape changed both visually and numerically. On average, the distal condyles were slightly higher medially and lower laterally (range: +4.5 mm to -4.4 mm), the posterior condyles extended farther out medially but not laterally (range: +1.8 to -6.4 mm), patellofemoral distance increased throughout flexion by 1.8-3.5 mm, and patellar thickness alone increased by 2.9 mm (range: 0.7-5.2 mm). External femoral rotation differed pre- and postoperatively. Joint line distance, taking cartilage into account, changed by +0.7 to -1.5 mm on average throughout flexion. Important differences in shape and geometry were seen between pre-total knee arthroplasty and post-total knee arthroplasty knees. While this is qualitatively known, this is the first study to report it quantitatively, an important precursor to identifying the reasons for the poor outcomes of some patients. Using the developed protocol and visualization techniques to compare patients with good versus poor clinical outcomes could lead to changes in implant design, implant selection, component positioning, and surgical technique. Recommendations based on this sample population are provided. Intraoperative and postoperative feedback could ultimately improve patient satisfaction.}, language = {en} }