@misc{Dill2018,
  type     = {Master Thesis},
  author   = {Dill, Sabrina},
  title    = {Joint Feature Learning and Classification - Deep Learning for Surgical Phase Detection},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81745},
  year     = {2018},
  abstract = {In this thesis we investigate the task of automatically detecting phases of surgical workflow in endoscopic video data. For this, we employ deep learning approaches that rely solely on frame-wise visual information, instead of using additional signals or handcrafted features. While previous work has mainly focused on tool presence and temporal information for this task, we reason that additional global information about the context of a frame might benefit the phase detection task. We propose novel deep learning architectures: a convolutional neural network (CNN) based model for the tool detection task only, called Clf-Net, as well as a model which performs joint (context) feature learning and tool classification to incorporate information about the context, which we name Context-Clf-Net. For the phase detection task, lower-dimensional feature vectors are extracted and used as input to recurrent neural networks in order to enforce temporal constraints. We compare the performance of an online model, which only considers previous frames up to the current time step, to that of an offline model that has access to past and future information. Experimental results indicate that the tool detection task benefits strongly from the introduction of context information, as we outperform both the Clf-Net results and state-of-the-art methods. Regarding the phase detection task, our results do not surpass state-of-the-art methods. Furthermore, no improvement from using features learned by the Context-Clf-Net is observed in the phase detection task for either the online or the offline version.},
  language = {en}
}

@misc{Dill,
  type     = {Master Thesis},
  author   = {Dill, Sabrina Patricia},
  title    = {Joint Feature Learning and Classification - Deep Learning for Surgical Phase Detection},
  pages    = {88},
  language = {en}
}

@misc{SahuDillMukhopadyayetal.,
  author   = {Sahu, Manish and Dill, Sabrina and Mukhopadyay, Anirban and Zachow, Stefan},
  title    = {Surgical Tool Presence Detection for Cataract Procedures},
  issn     = {1438-0064},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69110},
  abstract = {This article outlines the submission to the CATARACTS challenge for automatic tool presence detection [1]. Our approach for this multi-label classification problem comprises labelset-based sampling, a CNN architecture and temporal smoothing as described in [3], which we call ZIB-Res-TS.},
  language = {en}
}

@article{AlHajjSahuLamardetal.,
  author    = {Al Hajj, Hassan and Sahu, Manish and Lamard, Mathieu and Conze, Pierre-Henri and Roychowdhury, Soumali and Hu, Xiaowei and Marsalkaite, Gabija and Zisimopoulos, Odysseas and Dedmari, Muneer Ahmad and Zhao, Fenqiang and Prellberg, Jonas and Galdran, Adrian and Araujo, Teresa and Vo, Duc My and Panda, Chandan and Dahiya, Navdeep and Kondo, Satoshi and Bian, Zhengbing and Bialopetravicius, Jonas and Qiu, Chenghui and Dill, Sabrina and Mukhopadyay, Anirban and Costa, Pedro and Aresta, Guilherme and Ramamurthy, Senthil and Lee, Sang-Woong and Campilho, Aurelio and Zachow, Stefan and Xia, Shunren and Conjeti, Sailesh and Armaitis, Jogundas and Heng, Pheng-Ann and Vahdat, Arash and Cochener, Beatrice and Quellec, Gwenole},
  title     = {CATARACTS: Challenge on Automatic Tool Annotation for cataRACT Surgery},
  journal   = {Medical Image Analysis},
  series    = {Medical Image Analysis},
  volume    = {52},
  number    = {2},
  publisher = {Elsevier},
  doi       = {10.1016/j.media.2018.11.008},
  pages     = {24--41},
  abstract  = {Surgical tool detection is attracting increasing attention from the medical image analysis community. The goal generally is not to precisely locate tools in images, but rather to indicate which tools are being used by the surgeon at each instant. The main motivation for annotating tool usage is to design efficient solutions for surgical workflow analysis, with potential applications in report generation, surgical training and even real-time decision support. Most existing tool annotation algorithms focus on laparoscopic surgeries. However, with 19 million interventions per year, the most common surgical procedure in the world is cataract surgery. The CATARACTS challenge was organized in 2017 to evaluate tool annotation algorithms in the specific context of cataract surgery. It relies on more than nine hours of videos, from 50 cataract surgeries, in which the presence of 21 surgical tools was manually annotated by two experts. With 14 participating teams, this challenge can be considered a success. As might be expected, the submitted solutions are based on deep learning. This paper thoroughly evaluates these solutions: in particular, the quality of their annotations is compared to that of human interpretations. Next, lessons learnt from the differential analysis of these solutions are discussed. We expect that they will guide the design of efficient surgery monitoring tools in the near future.},
  language  = {en}
}