@misc{Orgiu2012,
  type = {Master Thesis},
  author = {Orgiu, Sara},
  title = {Automatic liver segmentation in contrast-enhanced CT data using 3D free-form deformation based on optimal graph searching},
  year = {2012},
  language = {en}
}

@misc{Renard2011,
  type = {Master Thesis},
  author = {Renard, Maximilien},
  title = {Improvement of Image Segmentation Based on Statistical Shape and Intensity Models},
  year = {2011},
  language = {en}
}

@inproceedings{MukhopadhyayMorilloZachowetal.2016,
  author = {Mukhopadhyay, Anirban and Morillo, Oscar and Zachow, Stefan and Lamecker, Hans},
  title = {Robust and Accurate Appearance Models Based on Joint Dictionary Learning: Data from the Osteoarthritis Initiative},
  booktitle = {Patch-Based Techniques in Medical Imaging (Patch-MI 2016)},
  series = {Lecture Notes in Computer Science},
  volume = {9993},
  doi = {10.1007/978-3-319-47118-1_4},
  pages = {25--33},
  year = {2016},
  abstract = {Deformable model-based approaches to 3D image segmentation have been shown to be highly successful. Such methodology requires an appearance model that drives the deformation of a geometric model towards the image data. Appearance models are usually either created heuristically or through supervised learning. Heuristic methods have been shown to work effectively in many applications but are hard to transfer from one application (imaging modality/anatomical structure) to another. In contrast, supervised learning approaches can learn patterns from a collection of annotated training data. In this work, we show that the supervised joint dictionary learning technique is capable of overcoming the traditional drawbacks of heuristic approaches. Our evaluation on two different applications (liver/CT and knee/MR) reveals that our approach generates appearance models which can be used effectively and efficiently in a deformable model-based segmentation framework.},
  language = {en}
}

@article{WilsonAnglinAmbellanetal.2017,
  author = {Wilson, David and Anglin, Carolyn and Ambellan, Felix and Grewe, Carl Martin and Tack, Alexander and Lamecker, Hans and Dunbar, Michael and Zachow, Stefan},
  title = {Validation of three-dimensional models of the distal femur created from surgical navigation point cloud data for intraoperative and postoperative analysis of total knee arthroplasty},
  journal = {International Journal of Computer Assisted Radiology and Surgery},
  volume = {12},
  number = {12},
  publisher = {Springer},
  doi = {10.1007/s11548-017-1630-5},
  pages = {2097--2105},
  year = {2017},
  abstract = {Purpose: Despite the success of total knee arthroplasty, there continues to be a significant proportion of patients who are dissatisfied. One explanation may be a shape mismatch between pre- and post-operative distal femurs. The purpose of this study was to investigate a method to match a statistical shape model (SSM) to intra-operatively acquired point cloud data from a surgical navigation system, and to validate it against the pre-operative magnetic resonance imaging (MRI) data from the same patients. Methods: A total of 10 patients who underwent navigated total knee arthroplasty also had an MRI scan less than 2 months pre-operatively. The standard surgical protocol was followed, which included partial digitization of the distal femur. Two different methods were employed to fit the SSM to the digitized point cloud data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM).
The available MRI data were manually segmented, and the reconstructed three-dimensional surfaces were used as ground truth against which the statistical shape model fit was compared. Results: For both approaches, the difference between the SSM-generated femur and the surface generated from MRI segmentation averaged less than 1.7 mm, with maximum errors occurring in less clinically important areas. Conclusion: The results demonstrated good correspondence with the distal femoral morphology, even in cases of sparse data sets. Application of this technique will allow measurement of the mismatch between pre- and post-operative femurs retrospectively on any case performed using the surgical navigation system, and it could be integrated into the surgical navigation unit to provide real-time feedback.},
  language = {en}
}

@inproceedings{MukhopadhyayOksuzBevilacquaetal.2015a,
  author = {Mukhopadhyay, Anirban and Oksuz, Ilkay and Bevilacqua, Marco and Dharmakumar, Rohan and Tsaftaris, Sotirios},
  title = {Data-Driven Feature Learning for Myocardial Segmentation of CP-BOLD MRI},
  booktitle = {Functional Imaging and Modeling of the Heart},
  volume = {9126},
  publisher = {Springer},
  doi = {10.1007/978-3-319-20309-6_22},
  pages = {189--197},
  year = {2015},
  abstract = {Cardiac Phase-resolved Blood Oxygen-Level-Dependent (CP-BOLD) MR is capable of diagnosing ongoing ischemia by detecting changes in myocardial intensity patterns at rest, without any contrast or stress agents. Visualizing and detecting these changes requires significant post-processing, including myocardial segmentation for isolating the myocardium. However, changes in myocardial intensity pattern and myocardial shape due to the heart's motion challenge automated standard CINE MR myocardial segmentation techniques, resulting in a significant drop in segmentation accuracy. We hypothesize that the main reason behind this phenomenon is the lack of discernible features. In this paper, a multi-scale discriminative dictionary learning approach is proposed for supervised learning and sparse representation of the myocardium, to improve myocardial feature selection. The technique is validated on a challenging dataset of CP-BOLD MR and standard CINE MR acquired in baseline and ischemic conditions across 10 canine subjects. The proposed method significantly outperforms standard cardiac segmentation techniques, including segmentation via registration, level sets, and supervised methods for myocardial segmentation.},
  language = {en}
}

@inproceedings{MukhopadhyayOksuzBevilacquaetal.2015b,
  author = {Mukhopadhyay, Anirban and Oksuz, Ilkay and Bevilacqua, Marco and Dharmakumar, Rohan and Tsaftaris, Sotirios},
  title = {Unsupervised myocardial segmentation for cardiac MRI},
  booktitle = {Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015},
  series = {Lecture Notes in Computer Science},
  volume = {9351},
  doi = {10.1007/978-3-319-24574-4_2},
  pages = {12--20},
  year = {2015},
  abstract = {Though unsupervised segmentation was a de facto standard for cardiac MRI segmentation early on, the recent cardiac MRI segmentation literature has favored fully supervised techniques such as dictionary learning and atlas-based techniques. However, the benefits of unsupervised techniques, e.g., no need for a large amount of training data and better potential for handling variability in anatomy and image contrast, are more evident with emerging cardiac MR modalities. For example, CP-BOLD is a new MRI technique that has been shown to detect ischemia without any contrast, at stress but also at rest.
Although CP-BOLD looks similar to standard CINE, changes in myocardial intensity patterns and shape across cardiac phases, due to the heart's motion, the BOLD effect, and artifacts, affect the underlying mechanisms of fully supervised segmentation techniques, resulting in a significant drop in segmentation accuracy. In this paper, we present a fully unsupervised technique for segmenting the myocardium from the background in both standard CINE MR and CP-BOLD MR. We combine appearance with motion information (obtained via optical flow) in a dictionary learning framework to sparsely represent important features in a low-dimensional space and separate the myocardium from the background accordingly. Our fully automated method learns background-only models, and a one-class classifier provides the myocardial segmentation. The advantages of the proposed technique are demonstrated on a dataset containing CP-BOLD MR and standard CINE MR image sequences acquired in baseline and ischemic conditions across 10 canine subjects, where our method outperforms state-of-the-art supervised segmentation techniques in CP-BOLD MR and performs on par for standard CINE MR.},
  language = {en}
}

@inproceedings{OksuzMukhopadhyayBevilacquaetal.2015,
  author = {Oksuz, Ilkay and Mukhopadhyay, Anirban and Bevilacqua, Marco and Dharmakumar, Rohan and Tsaftaris, Sotirios},
  title = {Dictionary Learning Based Image Descriptor for Myocardial Registration of CP-BOLD MR},
  booktitle = {Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015},
  series = {Lecture Notes in Computer Science},
  volume = {9350},
  publisher = {Springer},
  doi = {10.1007/978-3-319-24571-3_25},
  pages = {205--213},
  year = {2015},
  abstract = {Cardiac Phase-resolved Blood Oxygen-Level-Dependent (CP-BOLD) MRI is a new contrast-agent- and stress-free imaging technique for the assessment of myocardial ischemia at rest. Precise registration among the cardiac phases in this cine-type acquisition is essential for automating the analysis of images acquired with this technique, since it can potentially lead to better specificity of ischemia detection. However, inconsistency in myocardial intensity patterns and changes in myocardial shape due to the heart's motion lead to low registration performance for state-of-the-art methods. This low accuracy can be explained by the lack of distinguishable features in CP-BOLD and by inappropriate metric definitions in current intensity-based registration frameworks. In this paper, sparse representations, defined by a discriminative dictionary learning approach for source and target images, are used to improve myocardial registration. The method combines appearance with Gabor and HOG features in a dictionary learning framework to sparsely represent features in a low-dimensional space. The sum of squared differences of these distinctive sparse representations is used to define a similarity term in the registration framework.
The proposed descriptor is validated on a challenging dataset of CP-BOLD MR and standard CINE MR acquired in baseline and ischemic conditions across 10 canine subjects.},
  language = {en}
}

@inproceedings{MukhopadhyayKumarBhandarkar2016,
  author = {Mukhopadhyay, Anirban and Kumar, Arun and Bhandarkar, Suchendra},
  title = {Joint Geometric Graph Embedding for Partial Shape Matching in Images},
  booktitle = {IEEE Winter Conference on Applications of Computer Vision (WACV)},
  publisher = {IEEE},
  pages = {1--9},
  year = {2016},
  abstract = {A novel multi-criteria optimization framework for matching partially visible shapes in multiple images using joint geometric graph embedding is proposed. The proposed framework matches partial shapes in images that exhibit extreme variations in scale, orientation, viewpoint, and illumination, as well as instances of occlusion; conditions that render the use of global contour-based descriptors or local pixel-level features for shape matching impractical. The proposed technique is based on optimization of the embedding distances of geometric features obtained from the eigenspectrum of the joint image graph, coupled with regularization over values of the mean pixel intensity or the histogram of oriented gradients. It is shown to successfully obtain correspondences denoting partial shape similarities as well as correspondences between feature points in the images. A new benchmark dataset is proposed which contains disparate image pairs with extremely challenging variations in viewing conditions compared to an existing dataset [18]. The proposed technique is shown to significantly outperform several state-of-the-art partial shape matching techniques on both datasets.},
  language = {en}
}

@article{LiPimentelSzengeletal.2021,
  author = {Li, Jianning and Pimentel, Pedro and Szengel, Angelika and Ehlke, Moritz and Lamecker, Hans and Zachow, Stefan and Estacio, Laura and Doenitz, Christian and Ramm, Heiko and Shi, Haochen and Chen, Xiaojun and Matzkin, Franco and Newcombe, Virginia and Ferrante, Enzo and Jin, Yuan and Ellis, David G. and Aizenberg, Michele R. and Kodym, Oldrich and Spanel, Michal and Herout, Adam and Mainprize, James G. and Fishman, Zachary and Hardisty, Michael R. and Bayat, Amirhossein and Shit, Suprosanna and Wang, Bomin and Liu, Zhi and Eder, Matthias and Pepe, Antonio and Gsaxner, Christina and Alves, Victor and Zefferer, Ulrike and von Campe, Cord and Pistracher, Karin and Sch{\"a}fer, Ute and Schmalstieg, Dieter and Menze, Bjoern H. and Glocker, Ben and Egger, Jan},
  title = {AutoImplant 2020 -- First MICCAI Challenge on Automatic Cranial Implant Design},
  journal = {IEEE Transactions on Medical Imaging},
  volume = {40},
  number = {9},
  issn = {0278-0062},
  doi = {10.1109/TMI.2021.3077047},
  pages = {2329--2342},
  year = {2021},
  abstract = {The aim of this paper is to provide a comprehensive overview of the MICCAI 2020 AutoImplant Challenge. The approaches and publications submitted and accepted within the challenge are summarized and reported, highlighting common algorithmic trends and algorithmic diversity. Furthermore, the evaluation results are presented, compared, and discussed with regard to the challenge aim: seeking low-cost, fast, and fully automated solutions for cranial implant design. Based on feedback from collaborating neurosurgeons, the paper concludes by stating open issues and post-challenge requirements for intra-operative use.},
  language = {en}
}