@misc{MukhopadhyayKumarBhandarkar, author = {Mukhopadhyay, Anirban and Kumar, Arun and Bhandarkar, Suchendra}, title = {Joint Geometric Graph Embedding for Partial Shape Matching in Images}, series = {IEEE Winter Conference on Applications of Computer Vision (WACV)}, publisher = {IEEE}, pages = {1 -- 9}, abstract = {A novel multi-criteria optimization framework for matching of partially visible shapes in multiple images using joint geometric graph embedding is proposed. The proposed framework achieves matching of partial shapes in images that exhibit extreme variations in scale, orientation, viewpoint and illumination and also instances of occlusion; conditions which render impractical the use of global contour-based descriptors or local pixel-level features for shape matching. The proposed technique is based on optimization of the embedding distances of geometric features obtained from the eigenspectrum of the joint image graph, coupled with regularization over values of the mean pixel intensity or histogram of oriented gradients. It is shown to successfully obtain the correspondences denoting partial shape similarities as well as correspondences between feature points in the images. A new benchmark dataset is proposed which contains disparate image pairs with extremely challenging variations in viewing conditions when compared to an existing dataset [18]. The proposed technique is shown to significantly outperform several state-of-the-art partial shape matching techniques on both datasets.}, language = {en} } @inproceedings{MukhopadhyayMorilloZachowetal., author = {Mukhopadhyay, Anirban and Morillo, Oscar and Zachow, Stefan and Lamecker, Hans}, title = {Robust and Accurate Appearance Models Based on Joint Dictionary Learning: Data from the Osteoarthritis Initiative}, series = {Lecture Notes in Computer Science, Patch-Based Techniques in Medical Imaging. Patch-MI 2016}, volume = {9993}, booktitle = {Lecture Notes in Computer Science, Patch-Based Techniques in Medical Imaging. Patch-MI 2016}, doi = {10.1007/978-3-319-47118-1_4}, pages = {25 -- 33}, abstract = {Deformable model-based approaches to 3D image segmentation have been shown to be highly successful. Such methodology requires an appearance model that drives the deformation of a geometric model to the image data. Appearance models are usually either created heuristically or through supervised learning. Heuristic methods have been shown to work effectively in many applications but are hard to transfer from one application (imaging modality/anatomical structure) to another. In contrast, supervised learning approaches can learn patterns from a collection of annotated training data. In this work, we show that the supervised joint dictionary learning technique is capable of overcoming the traditional drawbacks of the heuristic approaches.
Our evaluation based on two different applications (liver/CT and knee/MR) reveals that our approach generates appearance models, which can be used effectively and efficiently in a deformable model-based segmentation framework.}, language = {en} } @inproceedings{MukhopadhyayOksuzBevilacquaetal., author = {Mukhopadhyay, Anirban and Oksuz, Ilkay and Bevilacqua, Marco and Dharmakumar, Rohan and Tsaftaris, Sotirios}, title = {Data-Driven Feature Learning for Myocardial Segmentation of CP-BOLD MRI}, series = {Functional Imaging and Modeling of the Heart}, volume = {9126}, booktitle = {Functional Imaging and Modeling of the Heart}, publisher = {Springer}, doi = {10.1007/978-3-319-20309-6_22}, pages = {189 -- 197}, abstract = {Cardiac Phase-resolved Blood Oxygen-Level-Dependent (CP-BOLD) MR is capable of diagnosing ongoing ischemia by detecting changes in myocardial intensity patterns at rest without any contrast or stress agents. Visualizing and detecting these changes require significant post-processing, including myocardial segmentation for isolating the myocardium. However, changes in myocardial intensity pattern and myocardial shape due to the heart's motion challenge automated standard CINE MR myocardial segmentation techniques, resulting in a significant drop in segmentation accuracy. We hypothesize that the main reason behind this phenomenon is the lack of discernible features. In this paper, a multi-scale discriminative dictionary learning approach is proposed for supervised learning and sparse representation of the myocardium, to improve the myocardial feature selection. The technique is validated on a challenging dataset of CP-BOLD MR and standard CINE MR acquired in baseline and ischemic condition across 10 canine subjects. The proposed method significantly outperforms standard cardiac segmentation techniques, including segmentation via registration, level sets and supervised methods for myocardial segmentation.}, language = {en} } @inproceedings{MukhopadhyayOksuzBevilacquaetal.Unsupervised, author = {Mukhopadhyay, Anirban and Oksuz, Ilkay and Bevilacqua, Marco and Dharmakumar, Rohan and Tsaftaris, Sotirios}, title = {Unsupervised myocardial segmentation for cardiac MRI}, series = {Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015}, volume = {LNCS 9351}, booktitle = {Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015}, doi = {10.1007/978-3-319-24574-4_2}, pages = {12 -- 20}, abstract = {Though unsupervised segmentation was a de-facto standard for cardiac MRI segmentation early on, recently the cardiac MRI segmentation literature has favored fully supervised techniques such as Dictionary Learning and Atlas-based techniques. However, the benefits of unsupervised techniques, e.g., no need for a large amount of training data and better potential for handling variability in anatomy and image contrast, are more evident with emerging cardiac MR modalities. For example, CP-BOLD is a new MRI technique that has been shown to detect ischemia without any contrast at stress but also at rest conditions. Although CP-BOLD looks similar to standard CINE, changes in myocardial intensity patterns and shape across cardiac phases, due to the heart's motion, the BOLD effect and artifacts, affect the underlying mechanisms of fully supervised segmentation techniques, resulting in a significant drop in segmentation accuracy. In this paper, we present a fully unsupervised technique for segmenting myocardium from the background in both standard CINE MR and CP-BOLD MR.
We combine appearance with motion information (obtained via Optical Flow) in a dictionary learning framework to sparsely represent important features in a low-dimensional space and separate myocardium from background accordingly. Our fully automated method learns background-only models and a one-class classifier provides the myocardial segmentation. The advantages of the proposed technique are demonstrated on a dataset containing CP-BOLD MR and standard CINE MR image sequences acquired in baseline and ischemic condition across 10 canine subjects, where our method outperforms state-of-the-art supervised segmentation techniques in CP-BOLD MR and performs on par for standard CINE MR.}, language = {en} } @inproceedings{OksuzMukhopadhyayBevilacquaetal., author = {Oksuz, Ilkay and Mukhopadhyay, Anirban and Bevilacqua, Marco and Dharmakumar, Rohan and Tsaftaris, Sotirios}, title = {Dictionary Learning Based Image Descriptor for Myocardial Registration of CP-BOLD MR}, series = {Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015}, volume = {9350}, booktitle = {Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015}, publisher = {Springer}, doi = {10.1007/978-3-319-24571-3_25}, pages = {205 -- 213}, abstract = {Cardiac Phase-resolved Blood Oxygen-Level-Dependent (CP-BOLD) MRI is a new contrast agent- and stress-free imaging technique for the assessment of myocardial ischemia at rest. The precise registration among the cardiac phases in this cine-type acquisition is essential for automating the analysis of images of this technique, since it can potentially lead to better specificity of ischemia detection. However, inconsistency in myocardial intensity patterns and the changes in myocardial shape due to the heart's motion lead to low registration performance for state-of-the-art methods. This low accuracy can be explained by the lack of distinguishable features in CP-BOLD and inappropriate metric definitions in current intensity-based registration frameworks. In this paper, the sparse representations, which are defined by a discriminative dictionary learning approach for source and target images, are used to improve myocardial registration. This method combines appearance with Gabor and HOG features in a dictionary learning framework to sparsely represent features in a low-dimensional space. The sum of squared differences of these distinctive sparse representations is used to define a similarity term in the registration framework. The proposed descriptor is validated on a challenging dataset of CP-BOLD MR and standard CINE MR acquired in baseline and ischemic condition across 10 canines.}, language = {en} } @article{SahuMukhopadhyaySzengeletal., author = {Sahu, Manish and Mukhopadhyay, Anirban and Szengel, Angelika and Zachow, Stefan}, title = {Addressing multi-label imbalance problem of Surgical Tool Detection using CNN}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {12}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {6}, publisher = {Springer}, doi = {10.1007/s11548-017-1565-x}, pages = {1013 -- 1020}, abstract = {Purpose: A fully automated surgical tool detection framework is proposed for endoscopic video streams. State-of-the-art surgical tool detection methods rely on supervised one-vs-all or multi-class classification techniques, completely ignoring the co-occurrence relationship of the tools and the associated class imbalance.
Methods: In this paper, we formulate tool detection as a multi-label classification task where tool co-occurrences are treated as separate classes. In addition, imbalance in tool co-occurrences is analyzed and stratification techniques are employed to address the imbalance during Convolutional Neural Network (CNN) training. Moreover, temporal smoothing is introduced as an online post-processing step to enhance run-time prediction. Results: Quantitative analysis is performed on the M2CAI16 tool detection dataset to highlight the importance of stratification, temporal smoothing and the overall framework for tool detection. Conclusion: The analysis of tool imbalance, backed by the empirical results, indicates the need and superiority of the proposed framework over state-of-the-art techniques.}, language = {en} } @article{SahuMukhopadhyayZachow, author = {Sahu, Manish and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Simulation-to-Real domain adaptation with teacher-student learning for endoscopic instrument segmentation}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer Nature}, doi = {10.1007/s11548-021-02383-4}, pages = {849 -- 859}, abstract = {Purpose: Segmentation of surgical instruments in endoscopic video streams is essential for automated surgical scene understanding and process modeling. However, relying on fully supervised deep learning for this task is challenging because manual annotation occupies valuable time of the clinical experts. Methods: We introduce a teacher-student learning approach that learns jointly from annotated simulation data and unlabeled real data to tackle the challenges in simulation-to-real unsupervised domain adaptation for endoscopic image segmentation. Results: Empirical results on three datasets highlight the effectiveness of the proposed framework over current approaches for the endoscopic instrument segmentation task. Additionally, we provide an analysis of the major factors affecting the performance on all datasets to highlight the strengths and failure modes of our approach. Conclusions: We show that our proposed approach can successfully exploit the unlabeled real endoscopic video frames and improve generalization performance over pure simulation-based training and the previous state-of-the-art. This takes us one step closer to effective segmentation of surgical instruments in the annotation-scarce setting.}, language = {en} } @article{TackMukhopadhyayZachow, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, series = {Osteoarthritis and Cartilage}, volume = {26}, journal = {Osteoarthritis and Cartilage}, number = {5}, doi = {10.1016/j.joca.2018.02.907}, pages = {680 -- 688}, abstract = {Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain.
Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci, an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t. groups of OA, JSN, and WOMAC pain. It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @misc{TackMukhopadhyayZachowSupplement, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative (Supplementary Material)}, doi = {10.12752/4.TMZ.1.0}, abstract = {Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci, an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t. groups of OA, JSN, and WOMAC pain.
It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @misc{TackMukhopadhyayZachowPreprint, author = {Tack, Alexander and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Knee Menisci Segmentation using Convolutional Neural Networks: Data from the Osteoarthritis Initiative}, volume = {26}, number = {5}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-68038}, pages = {680 -- 688}, abstract = {Objective: To present a novel method for automated segmentation of knee menisci from MRIs. To evaluate quantitative meniscal biomarkers for osteoarthritis (OA) estimated thereof. Method: A segmentation method employing convolutional neural networks in combination with statistical shape models was developed. Accuracy was evaluated on 88 manual segmentations. Meniscal volume, tibial coverage, and meniscal extrusion were computed and tested for differences between groups of OA, joint space narrowing (JSN), and WOMAC pain. Correlation between computed meniscal extrusion and MOAKS experts' readings was evaluated for 600 subjects. Suitability of biomarkers for predicting incident radiographic OA from baseline to 24 months was tested on a group of 552 patients (184 incident OA, 386 controls) by performing conditional logistic regression. Results: Segmentation accuracy measured as Dice Similarity Coefficient was 83.8\% for medial menisci (MM) and 88.9\% for lateral menisci (LM) at baseline, and 83.1\% and 88.3\% at 12-month follow-up. Medial tibial coverage was significantly lower for arthritic cases compared to non-arthritic ones. Medial meniscal extrusion was significantly higher for arthritic knees. A moderate correlation between automatically computed medial meniscal extrusion and experts' readings was found (ρ=0.44). Mean medial meniscal extrusion was significantly greater for incident OA cases compared to controls (1.16±0.93 mm vs. 0.83±0.92 mm; p<0.05). Conclusion: Especially for medial menisci, an excellent segmentation accuracy was achieved. Our meniscal biomarkers were validated by comparison to experts' readings as well as analysis of differences w.r.t. groups of OA, JSN, and WOMAC pain. It was confirmed that medial meniscal extrusion is a predictor for incident OA.}, language = {en} } @misc{TycowiczAmbellanMukhopadhyayetal., author = {Tycowicz, Christoph von and Ambellan, Felix and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {A Riemannian Statistical Shape Model using Differential Coordinates}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61175}, abstract = {We propose a novel Riemannian framework for statistical analysis of shapes that is able to account for the nonlinearity in shape variation. By adopting a physical perspective, we introduce a differential representation that puts the local geometric variability into focus. We model these differential coordinates as elements of a Lie group, thereby endowing our shape space with a non-Euclidean structure. A key advantage of our framework is that statistics in a manifold shape space become numerically tractable, improving performance by several orders of magnitude over state-of-the-art. We show that our Riemannian model is well suited for the identification of intra-population variability as well as inter-population differences. In particular, we demonstrate the superiority of the proposed model in experiments on specificity and generalization ability.
We further derive a statistical shape descriptor that outperforms the standard Euclidean approach in terms of shape-based classification of morphological disorders.}, language = {en} } @article{vonTycowiczAmbellanMukhopadhyayetal., author = {von Tycowicz, Christoph and Ambellan, Felix and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {An Efficient Riemannian Statistical Shape Model using Differential Coordinates}, series = {Medical Image Analysis}, volume = {43}, journal = {Medical Image Analysis}, number = {1}, doi = {10.1016/j.media.2017.09.004}, pages = {1 -- 9}, abstract = {We propose a novel Riemannian framework for statistical analysis of shapes that is able to account for the nonlinearity in shape variation. By adopting a physical perspective, we introduce a differential representation that puts the local geometric variability into focus. We model these differential coordinates as elements of a Lie group, thereby endowing our shape space with a non-Euclidean structure. A key advantage of our framework is that statistics in a manifold shape space becomes numerically tractable, improving performance by several orders of magnitude over state-of-the-art. We show that our Riemannian model is well suited for the identification of intra-population variability as well as inter-population differences. In particular, we demonstrate the superiority of the proposed model in experiments on specificity and generalization ability. We further derive a statistical shape descriptor that outperforms the standard Euclidean approach in terms of shape-based classification of morphological disorders.}, language = {en} }