@article{RueckertRueckertPalm,
  author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph},
  title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art},
  journal = {Computers in Biology and Medicine},
  volume = {169},
  year = {2024},
  publisher = {Elsevier},
  address = {Amsterdam},
  doi = {10.1016/j.compbiomed.2024.107929},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-69830},
  pages = {24},
  abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images and videos. In particular, the determination of the position and type of instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify and characterize datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images and videos. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame semantic and instance segmentation approaches, as well as those that incorporate temporal information. The publications analyzed were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in a total of 741 articles published between 01/2015 and 07/2023, of which 123 were included using systematic selection criteria. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments.},
  subject = {Deep Learning},
  language = {en}
}

@inproceedings{RueckertRiederFeussneretal.,
  author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Feussner, Hubertus and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph},
  title = {Smoke Classification in Laparoscopic Cholecystectomy Videos Incorporating Spatio-temporal Information},
  booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen},
  editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas},
  publisher = {Springer},
  address = {Wiesbaden},
  year = {2024},
  doi = {10.1007/978-3-658-44037-4_78},
  pages = {298--303},
  abstract = {Heavy smoke development represents an important challenge for operating physicians during laparoscopic procedures and can potentially affect the success of an intervention due to reduced visibility and orientation. Reliable and accurate recognition of smoke is therefore a prerequisite for the use of downstream systems such as automated smoke evacuation systems.
Current approaches distinguish between frames with and without smoke but often ignore the temporal context inherent in endoscopic video data. In this work, we therefore present a method that utilizes the pixel-wise displacement between randomly sampled images and their preceding frames, determined using an optical flow algorithm, and provides the transformed magnitude of this displacement as an additional input to the network. Further, we incorporate the temporal context at evaluation time by applying an exponential moving average to the estimated class probabilities of the model output to obtain more stable and robust results over time. We evaluate our method on two convolution-based architectures and one state-of-the-art transformer architecture and show improvements in the classification results over a baseline approach, regardless of the network used.},
  language = {en}
}

@unpublished{MendelRueckertWilhelmetal.,
  author = {Mendel, Robert and R{\"u}ckert, Tobias and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph},
  title = {Motion-Corrected Moving Average: Including Post-Hoc Temporal Information for Improved Video Segmentation},
  year = {2024},
  doi = {10.48550/arXiv.2403.03120},
  pages = {9},
  abstract = {Real-time computational speed and a high degree of precision are requirements for computer-assisted interventions. Applying a segmentation network to a medical video processing task can introduce significant inter-frame prediction noise. Existing approaches can reduce inconsistencies by including temporal information but often impose requirements on the architecture or dataset. This paper proposes a method to include temporal information in any segmentation model and, thus, a technique to improve video segmentation performance without alterations during training or additional labeling. With Motion-Corrected Moving Average, we refine the exponential moving average between the current and previous predictions. Using optical flow to estimate the movement between consecutive frames, we can shift the prior term in the moving-average calculation to align with the geometry of the current frame. The optical flow calculation does not require the output of the model and can therefore be performed in parallel, leading to no significant runtime penalty for our approach. We evaluate our approach on two publicly available segmentation datasets and two proprietary endoscopic datasets and show improvements over a baseline approach.},
  subject = {Deep Learning},
  language = {en}
}

@unpublished{RueckertRueckertPalmPreprint,
  author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph},
  title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art},
  year = {2023},
  doi = {10.48550/arXiv.2304.13014},
  pages = {25},
  abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images. In particular, the determination of the position and type of the instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning.
In this review, we identify datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame segmentation approaches and those involving temporal information. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments. The publications considered were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in 408 articles published between 2015 and 2022, of which 109 were included using systematic selection criteria.},
  language = {en}
}

@unpublished{RueckertRauberMaerkletal.,
  author = {R{\"u}ckert, Tobias and Rauber, David and Maerkl, Raphaela and Klausmann, Leonard and Yildiran, Suemeyye R. and Gutbrod, Max and Nunes, Danilo Weber and Moreno, Alvaro Fernandez and Luengo, Imanol and Stoyanov, Danail and Toussaint, Nicolas and Cho, Enki and Kim, Hyeon Bae and Choo, Oh Sung and Kim, Ka Young and Kim, Seong Tae and Arantes, Gon{\c{c}}alo and Song, Kehan and Zhu, Jianjun and Xiong, Junchen and Lin, Tingyi and Kikuchi, Shunsuke and Matsuzaki, Hiroki and Kouno, Atsushi and Manesco, Jo{\~a}o Renato Ribeiro and Papa, Jo{\~a}o Paulo and Choi, Tae-Min and Jeong, Tae Kyeong and Park, Juyoun and Alabi, Oluwatosin and Wei, Meng and Vercauteren, Tom and Wu, Runzhi and Xu, Mengya and Wang, An and Bai, Long and Ren, Hongliang and Yamlahi, Amine and Hennighausen, Jakob and Maier-Hein, Lena and Kondo, Satoshi and Kasai, Satoshi and Hirasawa, Kousuke and Yang, Shu and Wang, Yihui and Chen, Hao and Rodr{\'i}guez, Santiago and Aparicio, Nicol{\'a}s and Manrique, Leonardo and Lyons, Juan Camilo and Hosie, Olivia and Ayobi, Nicol{\'a}s and Arbel{\'a}ez, Pablo and Li, Yiping and Khalil, Yasmina Al and Nasirihaghighi, Sahar and Speidel, Stefanie and R{\"u}ckert, Daniel and Feussner, Hubertus and Wilhelm, Dirk and Palm, Christoph},
  title = {Comparative validation of surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation in endoscopy: Results of the PhaKIR 2024 challenge},
  pages = {36},
  abstract = {Reliable recognition and localization of surgical instruments in endoscopic video recordings are foundational for a wide range of applications in computer- and robot-assisted minimally invasive surgery (RAMIS), including surgical training, skill assessment, and autonomous assistance. However, robust performance under real-world conditions remains a significant challenge. Incorporating surgical context - such as the current procedural phase - has emerged as a promising strategy to improve robustness and interpretability. To address these challenges, we organized the Surgical Procedure Phase, Keypoint, and Instrument Recognition (PhaKIR) sub-challenge as part of the Endoscopic Vision (EndoVis) challenge at MICCAI 2024.
We introduced a novel, multi-center dataset comprising thirteen full-length laparoscopic cholecystectomy videos collected from three distinct medical institutions, with unified annotations for three interrelated tasks: surgical phase recognition, instrument keypoint estimation, and instrument instance segmentation. Unlike existing datasets, ours enables joint investigation of instrument localization and procedural context within the same data while supporting the integration of temporal information across entire procedures. We report results and findings in accordance with the BIAS guidelines for biomedical image analysis challenges. The PhaKIR sub-challenge advances the field by providing a unique benchmark for developing temporally aware, context-driven methods in RAMIS and offers a high-quality resource to support future research in surgical scene understanding.},
  language = {en}
}

@misc{RueckertRueckertPalmCorrigendum,
  author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph},
  title = {Corrigendum to "Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art" [Comput. Biol. Med. 169 (2024) 107929]},
  journal = {Computers in Biology and Medicine},
  publisher = {Elsevier},
  year = {2024},
  doi = {10.1016/j.compbiomed.2024.108027},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70337},
  pages = {1},
  abstract = {The authors regret that the SAR-RARP50 dataset is missing from the description of publicly available datasets presented in Chapter 4.},
  language = {en}
}