@misc{TackShestakovLuedkeetal., author = {Tack, Alexander and Shestakov, Alexey and L{\"u}dke, David and Zachow, Stefan}, title = {A deep multi-task learning method for detection of meniscal tears in MRI data from the Osteoarthritis Initiative database}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-84415}, abstract = {We present a novel and computationally efficient method for the detection of meniscal tears in Magnetic Resonance Imaging (MRI) data. Our method is based on a Convolutional Neural Network (CNN) that operates on a complete 3D MRI scan. Our approach detects the presence of meniscal tears in three anatomical sub-regions (anterior horn, meniscal body, posterior horn) for both the Medial Meniscus (MM) and the Lateral Meniscus (LM) individually. For optimal performance of our method, we investigate how to preprocess the MRI data or how to train the CNN such that only relevant information within a Region of Interest (RoI) of the data volume is taken into account for meniscal tear detection. We propose meniscal tear detection combined with a bounding box regressor in a multi-task deep learning framework to let the CNN implicitly consider the corresponding RoIs of the menisci. We evaluate the accuracy of our CNN-based meniscal tear detection approach on 2,399 Double Echo Steady-State (DESS) MRI scans from the Osteoarthritis Initiative database. In addition, to show that our method is capable of generalizing to other MRI sequences, we also adapt our model to Intermediate-Weighted Turbo Spin-Echo (IW TSE) MRI scans. To judge the quality of our approaches, Receiver Operating Characteristic (ROC) curves and Area Under the Curve (AUC) values are evaluated for both MRI sequences. For the detection of tears in DESS MRI, our method reaches AUC values of 0.94, 0.93, 0.93 (anterior horn, body, posterior horn) in MM and 0.96, 0.94, 0.91 in LM. For the detection of tears in IW TSE MRI data, our method yields AUC values of 0.84, 0.88, 0.86 in MM and 0.95, 0.91, 0.90 in LM. In conclusion, the presented method achieves high accuracy for detecting meniscal tears in both DESS and IW TSE MRI data. Furthermore, our method can be easily trained and applied to other MRI sequences.}, language = {en} } @article{TackShestakovLuedkeetal., author = {Tack, Alexander and Shestakov, Alexey and L{\"u}dke, David and Zachow, Stefan}, title = {A deep multi-task learning method for detection of meniscal tears in MRI data from the Osteoarthritis Initiative database}, series = {Frontiers in Bioengineering and Biotechnology, section Biomechanics}, journal = {Frontiers in Bioengineering and Biotechnology, section Biomechanics}, doi = {10.3389/fbioe.2021.747217}, pages = {28 -- 41}, abstract = {We present a novel and computationally efficient method for the detection of meniscal tears in Magnetic Resonance Imaging (MRI) data. Our method is based on a Convolutional Neural Network (CNN) that operates on a complete 3D MRI scan. Our approach detects the presence of meniscal tears in three anatomical sub-regions (anterior horn, meniscal body, posterior horn) for both the Medial Meniscus (MM) and the Lateral Meniscus (LM) individually. For optimal performance of our method, we investigate how to preprocess the MRI data or how to train the CNN such that only relevant information within a Region of Interest (RoI) of the data volume is taken into account for meniscal tear detection. 
We propose meniscal tear detection combined with a bounding box regressor in a multi-task deep learning framework to let the CNN implicitly consider the corresponding RoIs of the menisci. We evaluate the accuracy of our CNN-based meniscal tear detection approach on 2,399 Double Echo Steady-State (DESS) MRI scans from the Osteoarthritis Initiative database. In addition, to show that our method is capable of generalizing to other MRI sequences, we also adapt our model to Intermediate-Weighted Turbo Spin-Echo (IW TSE) MRI scans. To judge the quality of our approaches, Receiver Operating Characteristic (ROC) curves and Area Under the Curve (AUC) values are evaluated for both MRI sequences. For the detection of tears in DESS MRI, our method reaches AUC values of 0.94, 0.93, 0.93 (anterior horn, body, posterior horn) in MM and 0.96, 0.94, 0.91 in LM. For the detection of tears in IW TSE MRI data, our method yields AUC values of 0.84, 0.88, 0.86 in MM and 0.95, 0.91, 0.90 in LM. In conclusion, the presented method achieves high accuracy for detecting meniscal tears in both DESS and IW TSE MRI data. Furthermore, our method can be easily trained and applied to other MRI sequences.}, language = {en} } @article{TackAmbellanZachow, author = {Tack, Alexander and Ambellan, Felix and Zachow, Stefan}, title = {Towards novel osteoarthritis biomarkers: Multi-criteria evaluation of 46,996 segmented knee MRI data from the Osteoarthritis Initiative}, series = {PLOS ONE}, volume = {16}, journal = {PLOS ONE}, number = {10}, doi = {10.1371/journal.pone.0258855}, abstract = {Convolutional neural networks (CNNs) are the state-of-the-art for automated assessment of knee osteoarthritis (KOA) from medical image data. However, these methods lack interpretability, mainly focus on image texture, and cannot completely grasp the analyzed anatomies' shapes. In this study, we assess the informative value of quantitative features derived from segmentations in order to determine their potential as an alternative or extension to CNN-based approaches regarding multiple aspects of KOA. Six anatomical structures around the knee (femoral and tibial bones, femoral and tibial cartilages, and both menisci) are segmented in 46,996 MRI scans. Based on these segmentations, quantitative features are computed, i.e., measurements such as cartilage volume, meniscal extrusion, and tibial coverage, as well as geometric features based on a statistical shape encoding of the anatomies. The quality of these features is assessed by investigating their association with the Kellgren-Lawrence grade (KLG), joint space narrowing (JSN), incident KOA, and total knee replacement (TKR). Using gold standard labels from the Osteoarthritis Initiative database, the balanced accuracy (BA), the area under the Receiver Operating Characteristic curve (AUC), and weighted kappa statistics are evaluated. Features based on shape encodings of femur, tibia, and menisci plus the performed measurements showed the most potential as KOA biomarkers. Differentiation between non-arthritic and severely arthritic knees yielded BAs of up to 99\%, while 84\% was achieved for the diagnosis of early KOA. Weighted kappa values of 0.73, 0.72, and 0.78 were achieved for classification of the grade of medial JSN, lateral JSN, and KLG, respectively. The AUC was 0.61 and 0.76 for prediction of incident KOA and TKR within one year, respectively. Quantitative features from automated segmentations provide novel biomarkers for KLG and JSN classification and show potential for incident KOA and TKR prediction.
The validity of these features should be further evaluated, especially as extensions of CNN-based approaches. To foster such developments, we make all segmentations publicly available together with this publication.}, language = {en} } @article{SekuboyinaHusseiniBayatetal., author = {Sekuboyina, Anjany and Husseini, Malek E. and Bayat, Amirhossein and L{\"o}ffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\'a}s P{\'e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, {\'E}lodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H. and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J. and Mumme, Raymond P. and Court, Laurence E. and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huynh, L{\^e} Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C. and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.}, title = {VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images}, series = {Medical Image Analysis}, volume = {73}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2021.102166}, abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared, and 4505 vertebrae have been individually annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration.
The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.}, language = {en} } @misc{GreweZachow, author = {Grewe, C. Martin and Zachow, Stefan}, title = {Release of the FexMM for the Open Virtual Mirror Framework}, doi = {10.12752/8532}, abstract = {THIS MODEL IS FOR NON-COMMERCIAL RESEARCH PURPOSES. ONLY MEMBERS OF UNIVERSITIES OR NON-COMMERCIAL RESEARCH INSTITUTES ARE ELIGIBLE TO APPLY. 1. Download, fill, and sign the form available from: https://media.githubusercontent.com/media/mgrewe/ovmf/main/data/fexmm_license_agreement.pdf 2. Send the signed form to: fexmm@zib.de NOTE: Use an official email address of your institution for the request.}, language = {en} } @article{GreweLiuKahletal., author = {Grewe, Carl Martin and Liu, Tuo and Kahl, Christoph and Hildebrandt, Andrea and Zachow, Stefan}, title = {Statistical Learning of Facial Expressions Improves Realism of Animated Avatar Faces}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, publisher = {Frontiers}, doi = {10.3389/frvir.2021.619811}, pages = {1 -- 13}, language = {en} } @inproceedings{SahuStroemsdoerferMukhopadhyayetal., author = {Sahu, Manish and Str{\"o}msd{\"o}rfer, Ronja and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Endo-Sim2Real: Consistency learning-based domain adaptation for instrument segmentation}, series = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part III}, volume = {12263}, booktitle = {Proc. Medical Image Computing and Computer Assisted Intervention (MICCAI), Part III}, publisher = {Springer Nature}, doi = {10.1007/978-3-030-59716-0_75}, abstract = {Surgical tool segmentation in endoscopic videos is an important component of computer-assisted intervention systems. The recent success of image-based solutions using fully supervised deep learning approaches can be attributed to the collection of large labeled datasets. However, the annotation of a large dataset of real videos can be prohibitively expensive and time-consuming. Computer simulations could alleviate the manual labeling problem; however, models trained on simulated data do not generalize to real data. This work proposes a consistency-based framework for joint learning of simulated and real (unlabeled) endoscopic data to bridge this performance generalization issue. Empirical results on two datasets (15 videos of the Cholec80 dataset and the EndoVis'15 dataset) highlight the effectiveness of the proposed Endo-Sim2Real method for instrument segmentation. We compare the segmentation of the proposed approach with state-of-the-art solutions and show that our method improves segmentation in terms of both quality and quantity.}, language = {en} } @article{SahuSzengelMukhopadhyayetal.2020, author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Surgical phase recognition by learning phase transitions}, series = {Current Directions in Biomedical Engineering (CDBME)}, volume = {6}, journal = {Current Directions in Biomedical Engineering (CDBME)}, number = {1}, publisher = {De Gruyter}, doi = {10.1515/cdbme-2020-0037}, pages = {20200037}, year = {2020}, abstract = {Automatic recognition of surgical phases is an important component for developing an intra-operative context-aware system.
Prior work in this area focuses on recognizing short-term tool usage patterns within surgical phases. However, the difference between intra- and inter-phase tool usage patterns has not been investigated for automatic phase recognition. We developed a Recurrent Neural Network (RNN), in particular a state-preserving Long Short-Term Memory (LSTM) architecture, to utilize the long-term evolution of tool usage within complete surgical procedures. For fully automatic tool presence detection from surgical video frames, a Convolutional Neural Network (CNN)-based architecture, namely ZIBNet, is employed. Our proposed approach outperformed EndoNet by 8.1\% on overall precision for phase detection tasks and 12.5\% on meanAP for tool recognition tasks.}, language = {en} } @misc{SahuSzengelMukhopadhyayetal., author = {Sahu, Manish and Szengel, Angelika and Mukhopadhyay, Anirban and Zachow, Stefan}, title = {Analyzing laparoscopic cholecystectomy with deep learning: automatic detection of surgical tools and phases}, series = {28th International Congress of the European Association for Endoscopic Surgery (EAES)}, journal = {28th International Congress of the European Association for Endoscopic Surgery (EAES)}, abstract = {Motivation: The ever-rising volume of patients, the high maintenance cost of operating rooms, and the time-consuming analysis of surgical skills are fundamental problems that hamper the practical training of the next generation of surgeons. For obvious economic reasons, hospitals prefer to keep surgeons busy in real operations rather than training young surgeons. One fundamental need in surgical training is the reduction of the time needed by the senior surgeon to review the endoscopic procedures performed by the young surgeon while minimizing the subjective bias in evaluation. The unprecedented performance of deep learning ushers in a new age of data-driven automatic analysis of surgical skills. Method: Deep learning is capable of efficiently analyzing thousands of hours of laparoscopic video footage to provide an objective assessment of surgical skills. However, the traditional end-to-end setting of deep learning (video in, skill assessment out) is not explainable. Our strategy is to utilize the surgical process modeling framework to divide the surgical process into understandable components. This provides the opportunity to employ deep learning for superior yet automatic detection and evaluation of several aspects of laparoscopic cholecystectomy such as surgical tool and phase detection. We employ ZIBNet for the detection of surgical tool presence. ZIBNet employs pre-processing based on tool usage imbalance, a transfer-learned 50-layer residual network (ResNet-50), and temporal smoothing. To encode the temporal evolution of tool usage (over the entire video sequence) that relates to the surgical phases, Long Short-Term Memory (LSTM) units are employed with long-term dependency. Dataset: We used the Cholec80 dataset, which consists of 80 videos of laparoscopic cholecystectomy performed by 13 surgeons, divided equally for training and testing. In these videos, up to three different tools (among 7 types of tools) can be present in a frame. Results: The mean average precision of the detection of all tools is 93.5, ranging between 86.8 and 99.3, a significant improvement (p < 0.01) over the previous state-of-the-art. We observed that less frequent tools such as Scissors, Irrigator, and Specimen Bag are more related to phase transitions.
The overall precision (recall) of the detection of all surgical phases is 79.6 (81.3). Conclusion: While this is not the end goal for surgical skill analysis, the development of such a technological platform is essential toward a data-driven objective understanding of surgical skills. In the future, we plan to investigate surgeon-in-the-loop analysis and feedback for surgical skill analysis.}, language = {en} } @article{PimentelSzengelEhlkeetal., author = {Pimentel, Pedro and Szengel, Angelika and Ehlke, Moritz and Lamecker, Hans and Zachow, Stefan and Estacio, Laura and Doenitz, Christian and Ramm, Heiko}, title = {Automated Virtual Reconstruction of Large Skull Defects using Statistical Shape Models and Generative Adversarial Networks}, series = {Towards the Automatization of Cranial Implant Design in Cranioplasty}, volume = {12439}, journal = {Towards the Automatization of Cranial Implant Design in Cranioplasty}, editor = {Li, Jianning and Egger, Jan}, edition = {1}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-030-64327-0_3}, pages = {16 -- 27}, abstract = {We present an automated method for extrapolating missing regions in label data of the skull in an anatomically plausible manner. The ultimate goal is to design patient-specific cranial implants for correcting large, arbitrarily shaped defects of the skull that can, for example, result from trauma of the head. Our approach utilizes a 3D statistical shape model (SSM) of the skull and a 2D generative adversarial network (GAN) that is trained in an unsupervised fashion from samples of healthy patients alone. By fitting the SSM to given input labels containing the skull defect, a first approximation of the healthy state of the patient is obtained. The GAN is then applied to further correct and smooth the output of the SSM in an anatomically plausible manner. Finally, the defect region is extracted using morphological operations and subtraction between the extrapolated healthy state of the patient and the defective input labels. The method is trained and evaluated based on data from the MICCAI 2020 AutoImplant challenge. It produces state-of-the-art results on regularly shaped cut-outs that were present in the training and testing data of the challenge. Furthermore, due to the unsupervised nature of the approach, the method generalizes well to previously unseen defects of varying shapes that were only present in the hidden test dataset.}, language = {en} }