@misc{ScharfenbergMottokArtmannetal., author = {Scharfenberg, Georg and Mottok, J{\"u}rgen and Artmann, Christina and Hobelsberger, Martin and Paric, Ivan and Großmann, Benjamin and Pohlt, Clemens and Wackerbarth, Alena and Pausch, Uli and Heidrich, Christiane and Fadanelli, Martin and Elsner, Michael and P{\"o}cher, Daniel and Pittroff, Lenz and Beer, Stefan and Br{\"u}ckl, Oliver and Haslbeck, Matthias and Sterner, Michael and Thema, Martin and Muggenthaler, Nicole and Lenck, Thorsten and G{\"o}tz, Philipp and Eckert, Fabian and Deubzer, Michael and Stingl, Armin and Simsek, Erol and Kr{\"a}mer, Stefan and Großmann, Benjamin and Schlegl, Thomas and Niedersteiner, Sascha and Berlehner, Thomas and Joblin, Mitchell and Mauerer, Wolfgang and Apel, Sven and Siegmund, Janet and Riehle, Dirk and Weber, Joachim and Palm, Christoph and Zobel, Martin and Al-Falouji, Ghassan and Prestel, Dietmar and Scharfenberg, Georg and Mandl, Roland and Deinzer, Arnulf and Halang, W. and Margraf-Stiksrud, Jutta and Sick, Bernhard and Deinzer, Renate and Scherzinger, Stefanie and Klettke, Meike and St{\"o}rl, Uta and Wiech, Katharina and Kubata, Christoph and Sindersberger, Dirk and Monkman, Gareth J. and Dollinger, Markus and Dembianny, Sven and K{\"o}lbl, Andreas and Welker, Franz and Meier, Matthias and Thumann, Philipp and Swidergal, Krzysztof and Wagner, Marcus and Haug, Sonja and Vernim, Matthias and Seidenst{\"u}cker, Barbara and Weber, Karsten and Arsan, Christian and Schone, Reinhold and M{\"u}nder, Johannes and Schroll-Decker, Irmgard and Dillinger, Andrea Elisabeth and Fuchshofer, Rudolf and Monkman, Gareth J. and Shamonin (Chamonine), Mikhail and Geith, Markus A. and Koch, Fabian and {\"U}hlin, Christian and Schratzenstaller, Thomas and Saßmannshausen, Sean Patrick and Auchter, Eberhard and Kriz, Willy and Springer, Othmar and Thumann, Maria and Kusterle, Wolfgang and Obermeier, Andreas and Udalzow, Anton and Schmailzl, Anton and Hierl, Stefan and Langer, Christoph and Schreiner, Rupert}, title = {Forschungsbericht / Ostbayerische Technische Hochschule Regensburg}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, isbn = {978-3-00-048589-3}, doi = {10.35096/othr/pub-1386}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-13867}, language = {de} } @misc{MauererRexhepajMonkmanetal., author = {Mauerer, Wolfgang and Rexhepaj, Tanja and Monkman, Gareth J. 
and Sindersberger, Dirk and Diermeier, Andreas and Neidhart, Thomas and Wolfrum, Dominik and Sterner, Michael and Heberl, Michael and Nusko, Robert and Maier, Georg and Nagl, Klaus and Reuter, Monika and Hofrichter, Andreas and Lex, Thomas and Lesch, Florian and Kieninger, B{\"a}rbel and Szalo, Alexander Eduard and Zehner, Alexander and Palm, Christoph and Joblin, Mitchell and Apel, Sven and Ramsauer, Ralf and Lohmann, Daniel and Westner, Markus and Strasser, Artur and Munndi, Maximilian and Ebner, Lena and Elsner, Michael and Weiß, Nils and Segerer, Matthias and Hackenberg, Rudolf and Steger, Sebastian and Schmailzl, Anton and Dostalek, Michael and Armbruster, Dominik and Koch, Fabian and Hierl, Stefan and Thumann, Philipp and Swidergal, Krzysztof and Wagner, Marcus and Briem, Ulrich and Diermeier, Andreas and Spreitzer, Stefan and Beiderbeck, Sabrina and Hook, Christian and Zobel, Martin and Weber, Tim and Groß, Simon and Penzkofer, Rainer and Dendorfer, Sebastian and Schillitz, Ingo and Bauer, Thomas and Rudolph, Clarissa and Schmidt, Katja and Liebetruth, Thomas and Hamer, Markus and Haug, Sonja and Vernim, Matthias and Weber, Karsten and Saßmannshausen, Sean Patrick and Books, Sebastian and Neuleitner, Nikolaus and Rechenauer, Christian and Steffens, Oliver and Kusterle, Wolfgang and G{\"o}mmel, Roland and Wellnitz, Felix and Stierstorfer, Johannes and Stadler, Dominik and Hofmann, Matthias J. and Motschmann, Hubert and Shamonin (Chamonine), Mikhail and Bleicher, Veronika and Fischer, Sebastian and Hackenberg, Rudolf and Horn, Anton and Kawasch, Raphael and Petzenhauser, Michael and Probst, Tobias and Udalzow, Anton and Dams, Florian and Schreiner, Rupert and Langer, Christoph and Prommesberger, Christian and Ławrowski, Robert Damian}, title = {Forschungsbericht 2016}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, doi = {10.35096/othr/pub-1384}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-13840}, language = {de} } @misc{WeberDendorferSuessetal., author = {Weber, Karsten and Dendorfer, Sebastian and S{\"u}ß, Franz and Kubowitsch, Simone and Schratzenstaller, Thomas and Haug, Sonja and Mohr, Christa and Kiesl, Hans and Drechsler, J{\"o}rg and Westner, Markus and Kobus, J{\"o}rn and Schubert, Martin J. W. and Zenger, Stefan and Pietsch, Alexander and Weiß, Josef and Hinterseer, Sebastian and Schieck, Roland and Scherzinger, Stefanie and Klettke, Meike and Ringlstetter, Andreas and St{\"o}rl, Uta and Bissyand{\´e}, Tegawend{\´e} F. and Seeburger, Achim and Schindler, Timo and Ramsauer, Ralf and Kiszka, Jan and K{\"o}lbl, Andreas and Lohmann, Daniel and Mauerer, Wolfgang and Maier, Johannes and Scorna, Ulrike and Palm, Christoph and Soska, Alexander and Mottok, J{\"u}rgen and Ellermeier, Andreas and V{\"o}gele, Daniel and Hierl, Stefan and Briem, Ulrich and Buschmann, Knut and Ehrlich, Ingo and Pongratz, Christian and Pielmeier, Benjamin and Tyroller, Quirin and Monkman, Gareth J. 
and Gut, Franz and Roth, Carina and Hausler, Peter and Bierl, Rudolf and Prommesberger, Christian and Ławrowski, Robert Damian and Langer, Christoph and Schreiner, Rupert and Huang, Yifeng and She, Juncong and Ottl, Andreas and Rieger, Walter and Kraml, Agnes and Poxleitner, Thomas and Hofer, Simon and Heisterkamp, Benjamin and Lerch, Maximilian and Sammer, Nike and Golde, Olivia and Wellnitz, Felix and Schmid, Sandra and Muntschick, Claudia and Kusterle, Wolfgang and Paric, Ivan and Br{\"u}ckl, Oliver and Haslbeck, Matthias and Schmidt, Ottfried and Schwanzer, Peter and Rabl, Hans-Peter and Sterner, Michael and Bauer, Franz and Steinmann, Sven and Eckert, Fabian and Hofrichter, Andreas}, title = {Forschungsbericht 2017}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, isbn = {978-3-9818209-3-5}, doi = {10.35096/othr/pub-1383}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-13835}, subject = {Forschung}, language = {de} } @misc{LautenschlaegerLeisDendorferetal., author = {Lautenschl{\"a}ger, Toni and Leis, Alexander and Dendorfer, Sebastian and Palm, Christoph and Schreiner, Rupert and Langer, Christoph and Prommesberger, Christian and Ławrowski, Robert Damian and Dams, Florian and Bornmann, Benjamin and Navitski, Aliaksandr and Serbun, Pavel and M{\"u}ller, G{\"u}nter and Liebetruth, Thomas and Kohlert, Dieter and Pernsteiner, Jochen and Schreier, Franz and Heerklotz, Sabrina and Heerklotz, Allwin and Boos, Alexander and Herwald, Dominik and Monkman, Gareth J. and Treiber, Daniel and Mayer, Matthias and H{\"o}rner, Eva and Bentz, Alexander and Shamonin (Chamonine), Mikhail and Johansen, S{\o}ren Peter and Reichel, Marco and Stoll, Andrea and Briem, Ulrich and Dullien, Silvia and Renkawitz, Tobias and Weber, Tim and Dendorfer, Sebastian and Grifka, Joachim and Penzkofer, Rainer and Barnsteiner, K. and Jovanovik, M. and Wernecke, P. and V{\"o}gele, A. and Bachmann, T. and Pl{\"o}tz, Martin and Schliekmann, Claus and Wels, Harald and Helmberger, Paul and Kaspar, M. and H{\"o}nicka, M. 
and Schrammel, Siegfried and Enser, Markus and Schmidmeier, Monika and Schroll-Decker, Irmgard and Haug, Sonja and Gelfert, Verena and Vernim, Matthias}, title = {Forschungsbericht 2012}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, doi = {10.35096/othr/pub-783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-7834}, pages = {64}, language = {de} } @misc{AppelhansKampmannMottoketal., author = {Appelhans, Marie-Luise and Kampmann, Matthias and Mottok, J{\"u}rgen and Riederer, Michael and Nagl, Klaus and Steffens, Oliver and D{\"u}nnweber, Jan and Wildgruber, Markus and Roth, Julius and Stadler, Timo and Palm, Christoph and Weiß, Martin Georg and Rochholz, Sandra and Bierl, Rudolf and Gschossmann, Andreas and Haug, Sonja and Schmidbauer, Simon and Koch, Anna and Westner, Markus and Bary, Benedikt von and Ellermeier, Andreas and V{\"o}gele, Daniel and Maiwald, Frederik and Hierl, Stefan and Schlamp, Matthias and Ehrlich, Ingo and Siegl, Marco and H{\"u}ntelmann, Sven and Wildfeuer, Matthias and Br{\"u}ckl, Oliver and Sterner, Michael and Hofrichter, Andreas and Eckert, Fabian and Bauer, Franz and Dawoud, Belal and Rabl, Hans-Peter and Gamisch, Bernd and Schmidt, Ottfried and Heberl, Michael and Thema, Martin and Mayer, Ulrike and Eller, Johannes and Sippenauer, Thomas and Adelt, Christian and Haslbeck, Matthias and Vogl, Bettina and Mauerer, Wolfgang and Ramsauer, Ralf and Lohmann, Daniel and Sax, Irmengard and Gabor, Thomas and Feld, Sebastian and Linnhoff-Popien, Claudia and Ławrowski, Robert Damian and Langer, Christoph and Schreiner, Rupert and Sellmair, Josef}, title = {Forschung 2019}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, isbn = {978-3-9818209-7-3}, doi = {10.35096/othr/pub-789}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-7890}, pages = {72}, abstract = {Bericht mit Forschungsprojekten aus verschiedenen Bereichen der OTH Regensburg mit dem Schwerpunktthema "K{\"u}nstliche Intelligenz" und einem Gespr{\"a}ch zur "Medizin der Zukunft"}, subject = {Forschung}, language = {de} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS). 
An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg. In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the AI algorithm (AI-EoE-EREFS) performance improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores. The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with a lower or medium experience level.}, language = {en} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and De Souza Jr., Luis Antonio and Papa, Jo{\~a}o Paulo and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy, for example, bleeding and perforation. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, time and also training.}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik A. H. and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and Rueckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F.
and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {9}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @article{MaierDesernoHandelsetal., author = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, title = {IJCARS: BVM 2021 special issue}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer}, doi = {10.1007/s11548-021-02534-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-21666}, pages = {2067 -- 2068}, abstract = {The German workshop on medical image computing (BVM) has been held in different locations in Germany for more than 20 years. In terms of content, BVM focused on the computer-aided analysis of medical image data with a wide range of applications, e.g. in the area of imaging, diagnostics, operation planning, computer-aided intervention and visualization. During this time, there have been remarkable methodological developments and upheavals, on which the BVM community has worked intensively. The area of machine learning should be emphasized, which has led to significant improvements, especially for tasks of classification and segmentation, but increasingly also in image formation and registration. As a result, work in connection with deep learning now dominates the BVM. 
These developments have also contributed to the establishment of medical image processing at the interface between computer science and medicine as one of the key technologies for the digitization of the health system. In addition to the presentation of current research results, a central aspect of the BVM is the promotion of young scientists from the diverse BVM community, covering not only Germany but also Austria, Switzerland, the Netherlands, and other European neighbors. The conference serves primarily doctoral students and postdocs, but also students with excellent bachelor's and master's theses, as a platform to present their work, to enter into professional discourse with the community, and to establish networks with specialist colleagues. Despite the many conferences and congresses that are also relevant for medical image processing, the BVM has therefore lost none of its importance and attractiveness and has retained its permanent place in the annual conference rhythm. Building on this foundation, there are some innovations and changes this year. The BVM 2021 was organized for the first time at the Ostbayerische Technische Hochschule Regensburg (OTH Regensburg, a technical university of applied sciences). After Aachen, Berlin, Erlangen, Freiburg, Hamburg, Heidelberg, Leipzig, L{\"u}beck, and Munich, Regensburg is not just a new venue. OTH Regensburg is the first representative of the universities of applied sciences (HAW) to organize the conference, which differs from universities, university hospitals, or research centers like Fraunhofer or Helmholtz. This also reflects the further development of the research landscape in Germany, where HAWs increasingly contribute to applied research in addition to their focus on teaching. This development is also reflected in the contributions submitted to the BVM in recent years. At BVM 2021, which was held in a virtual format for the first time due to the COVID-19 pandemic, an attractive and high-quality program was offered. Fortunately, the number of submissions increased significantly. Out of 97 submissions, 26 presentations, 51 posters and 5 software demonstrations were accepted via an anonymized reviewing process with three reviews each. The three best works have been awarded BVM prizes, selected by a separate committee. Based on these high-quality submissions, we are able to present another special issue in the International Journal of Computer Assisted Radiology and Surgery (IJCARS). Out of the 97 submissions, the ones with the highest scores have been invited to submit an extended version of their paper to be presented in IJCARS. As a result, we are now able to present this special issue with seven excellent articles. Many submissions focus on machine learning in a medical context.}, subject = {Bildgebendes Verfahren}, language = {en} } @article{MendelRauberdeSouzaJretal., author = {Mendel, Robert and Rauber, David and de Souza Jr., Luis Antonio and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Error-Correcting Mean-Teacher: Corrections instead of consistency-targets applied to semi-supervised medical image segmentation}, series = {Computers in Biology and Medicine}, volume = {154}, journal = {Computers in Biology and Medicine}, number = {March}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2023.106585}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-57790}, pages = {13}, abstract = {Semantic segmentation is an essential task in medical imaging research.
Many powerful deep-learning-based approaches can be employed for this problem, but they are dependent on the availability of an expansive labeled dataset. In this work, we augment such supervised segmentation models to be suitable for learning from unlabeled data. Our semi-supervised approach, termed Error-Correcting Mean-Teacher, uses an exponential moving average model like the original Mean Teacher but introduces our new paradigm of error correction. The original segmentation network is augmented to handle this secondary correction task. Both tasks build upon the core feature extraction layers of the model. For the correction task, features detected in the input image are fused with features detected in the predicted segmentation and further processed with task-specific decoder layers. The combination of image and segmentation features allows the model to correct present mistakes in the given input pair. The correction task is trained jointly on the labeled data. On unlabeled data, the exponential moving average of the original network corrects the student's prediction. The combined outputs of the students' prediction with the teachers' correction form the basis for the semi-supervised update. We evaluate our method with the 2017 and 2018 Robotic Scene Segmentation data, the ISIC 2017 and the BraTS 2020 Challenges, a proprietary Endoscopic Submucosal Dissection dataset, Cityscapes, and Pascal VOC 2012. Additionally, we analyze the impact of the individual components and examine the behavior when the amount of labeled data varies, with experiments performed on two distinct segmentation architectures. Our method shows improvements in terms of the mean Intersection over Union over the supervised baseline and competing methods. Code is available at https://github.com/CloneRob/ECMT.}, language = {en} } @article{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, series = {Computers in Biology and Medicine}, volume = {169}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.compbiomed.2024.107929}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-69830}, pages = {24}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images and videos. In particular, the determination of the position and type of instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify and characterize datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images and videos. 
The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame semantic and instance segmentation approaches, as well as those that incorporate temporal information. The publications analyzed were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in a total of 741 articles published between 01/2015 and 07/2023, of which 123 were included using systematic selection criteria. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments.}, subject = {Deep Learning}, language = {en} } @article{WeihererEigenbergerEggeretal., author = {Weiherer, Maximilian and Eigenberger, Andreas and Egger, Bernhard and Br{\´e}bant, Vanessa and Prantl, Lukas and Palm, Christoph}, title = {Learning the shape of female breasts: an open-access 3D statistical shape model of the female breast built from 110 breast scans}, series = {The Visual Computer}, volume = {39}, journal = {The Visual Computer}, number = {4}, publisher = {Springer Nature}, doi = {10.1007/s00371-022-02431-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-30506}, pages = {1597 -- 1616}, abstract = {We present the Regensburg Breast Shape Model (RBSM)—a 3D statistical shape model of the female breast built from 110 breast scans acquired in a standing position, and the first publicly available. Together with the model, a fully automated, pairwise surface registration pipeline used to establish dense correspondence among 3D breast scans is introduced. Our method is computationally efficient and requires only four landmarks to guide the registration process. A major challenge when modeling female breasts from surface-only 3D breast scans is the non-separability of breast and thorax. In order to weaken the strong coupling between breast and surrounding areas, we propose to minimize the variance outside the breast region as much as possible. To achieve this goal, a novel concept called breast probability masks (BPMs) is introduced. A BPM assigns probabilities to each point of a 3D breast scan, telling how likely it is that a particular point belongs to the breast area. During registration, we use BPMs to align the template to the target as accurately as possible inside the breast region and only roughly outside. This simple yet effective strategy significantly reduces the unwanted variance outside the breast region, leading to better statistical shape models in which breast shapes are quite well decoupled from the thorax. The RBSM is thus able to produce a variety of different breast shapes as independently as possible from the shape of the thorax. Our systematic experimental evaluation reveals a generalization ability of 0.17 mm and a specificity of 2.8 mm. To underline the expressiveness of the proposed model, we finally demonstrate in two showcase applications how the RBSM can be used for surgical outcome simulation and the prediction of a missing breast from the remaining one. 
Our model is available at https://www.rbsm.re-mic.de/.}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and De Souza Jr., Luis Antonio and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Computer-aided diagnosis using deep learning in the evaluation of early oesophageal adenocarcinoma}, series = {Gut}, volume = {68}, journal = {Gut}, number = {7}, publisher = {British Society of Gastroenterology}, doi = {10.1136/gutjnl-2018-317573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-68}, pages = {1143 -- 1145}, abstract = {Computer-aided diagnosis using deep learning (CAD-DL) may be an instrument to improve endoscopic assessment of Barrett's oesophagus (BE) and early oesophageal adenocarcinoma (EAC). Based on still images from two databases, the diagnosis of EAC by CAD-DL reached sensitivities/specificities of 97\%/88\% (Augsburg data) and 92\%/100\% (Medical Image Computing and Computer-Assisted Intervention [MICCAI] data) for white light (WL) images and 94\%/80\% for narrow band images (NBI) (Augsburg data), respectively. Tumour margins delineated by experts into images were detected satisfactorily with a Dice coefficient (D) of 0.72. This could be a first step towards CAD-DL for BE assessment. If developed further, it could become a useful adjunctive tool for patient management.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @article{DeSouzaJrMendelStrasseretal., author = {De Souza Jr., Luis Antonio and Mendel, Robert and Strasser, Sophia and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Convolutional Neural Networks for the evaluation of cancer in Barrett's esophagus: Explainable AI to lighten up the black-box}, series = {Computers in Biology and Medicine}, volume = {135}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2021.104578}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-20126}, pages = {1 -- 14}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their level of accountability and transparency must be provided in such evaluations. The reliability related to machine learning predictions must be explained and interpreted, especially if diagnosis support is addressed. For this task, the black-box nature of deep learning techniques must be lightened up to transfer its promising results into clinical practice. Hence, we aim to investigate the use of explainable artificial intelligence techniques to quantitatively highlight discriminative regions during the classification of early cancerous tissues in Barrett's esophagus-diagnosed patients. Four Convolutional Neural Network models (AlexNet, SqueezeNet, ResNet50, and VGG16) were analyzed using five different interpretation techniques (saliency, guided backpropagation, integrated gradients, input × gradients, and DeepLIFT) to compare their agreement with experts' previous annotations of cancerous tissue. We could show that saliency attributes match best with the manual experts' delineations. Moreover, there is moderate to high correlation between the sensitivity of a model and the human-and-computer agreement. The results also highlighted that the higher the model's sensitivity, the stronger the correlation of human and computational segmentation agreement.
We observed a relevant relation between computational learning and experts' insights, demonstrating how human knowledge may influence the correct computational learning.}, subject = {Deep Learning}, language = {en} } @article{HartmannNieberlePalmetal., author = {Hartmann, Robin and Nieberle, Felix and Palm, Christoph and Br{\´e}bant, Vanessa and Prantl, Lukas and Kuehle, Reinald and Reichert, Torsten E. and Taxis, Juergen and Ettl, Tobias}, title = {Utility of Smartphone-based Three-dimensional Surface Imaging for Digital Facial Anthropometry}, series = {JPRAS Open}, volume = {39}, journal = {JPRAS Open}, publisher = {Elsevier}, doi = {10.1016/j.jpra.2024.01.014}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70348}, pages = {330 -- 343}, abstract = {Background The utilization of three-dimensional (3D) surface imaging for facial anthropometry is a significant asset for patients undergoing maxillofacial surgery. Notably, there have been recent advancements in smartphone technology that enable 3D surface imaging. In this study, anthropometric assessments of the face were performed using a smartphone and a sophisticated 3D surface imaging system. Methods 30 healthy volunteers (15 females and 15 males) were included in the study. An iPhone 14 Pro (Apple Inc., USA) using the application 3D Scanner App (Laan Consulting Corp., USA) and the Vectra M5 (Canfield Scientific, USA) were employed to create 3D surface models. For each participant, 19 anthropometric measurements were conducted on the 3D surface models. Subsequently, the anthropometric measurements generated by the two approaches were compared. The statistical techniques employed included the paired t-test, paired Wilcoxon signed-rank test, Bland-Altman analysis, and calculation of the intraclass correlation coefficient (ICC). Results All measurements showed excellent agreement between smartphone-based and Vectra M5-based measurements (ICC between 0.85 and 0.97). Statistical analysis revealed no statistically significant differences in the central tendencies for 17 of the 19 linear measurements. Despite the excellent agreement found, Bland-Altman analysis revealed that the 95\% limits of agreement between the two methods exceeded ±3 mm for the majority of measurements. Conclusion Digital facial anthropometry using smartphones can serve as a valuable supplementary tool for surgeons, enhancing their communication with patients. However, the proposed data suggest that digital facial anthropometry using smartphones may not yet be suitable for certain diagnostic purposes that require high accuracy.}, language = {en} } @article{TobiasDanielPalm, author = {Rueckert, Tobias and Rueckert, Daniel and Palm, Christoph}, title = {Corrigendum to "Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art" [Comput. Biol. Med.
169 (2024) 107929]}, series = {Computers in Biology and Medicine}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2024.108027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70337}, pages = {1}, abstract = {The authors regret that the SAR-RARP50 dataset is missing from the description of publicly available datasets presented in Chapter 4.}, language = {en} } @unpublished{AllanKondoBodenstedtetal., author = {Allan, Max and Kondo, Satoshi and Bodenstedt, Sebastian and Leger, Stefan and Kadkhodamohammadi, Rahim and Luengo, Imanol and Fuentes, Felix and Flouty, Evangello and Mohammed, Ahmed and Pedersen, Marius and Kori, Avinash and Alex, Varghese and Krishnamurthi, Ganapathy and Rauber, David and Mendel, Robert and Palm, Christoph and Bano, Sophia and Saibro, Guinther and Shih, Chi-Sheng and Chiang, Hsun-An and Zhuang, Juntang and Yang, Junlin and Iglovikov, Vladimir and Dobrenkii, Anton and Reddiboina, Madhu and Reddy, Anubhav and Liu, Xingtong and Gao, Cong and Unberath, Mathias and Kim, Myeonghyeon and Kim, Chanho and Kim, Chaewon and Kim, Hyejin and Lee, Gyeongmin and Ullah, Ihsan and Luna, Miguel and Park, Sang Hyun and Azizian, Mahdi and Stoyanov, Danail and Maier-Hein, Lena and Speidel, Stefanie}, title = {2018 Robotic Scene Segmentation Challenge}, doi = {10.48550/arXiv.2001.11190}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-50049}, pages = {11}, abstract = {In 2015 we began a sub-challenge at the EndoVis workshop at MICCAI in Munich using endoscope images of ex vivo tissue with automatically generated annotations from robot forward kinematics and instrument CAD models. However, the limited background variation and simple motion rendered the dataset uninformative in learning about which techniques would be suitable for segmentation in real surgery. In 2017, at the same workshop in Quebec, we introduced the robotic instrument segmentation dataset with 10 teams participating in the challenge to perform binary, articulating parts and type segmentation of da Vinci instruments. This challenge included realistic instrument motion and more complex porcine tissue as background and was widely addressed with modifications of U-Nets and other popular CNN architectures [1]. In 2018 we added to the complexity by introducing a set of anatomical objects and medical devices to the segmented classes. To avoid over-complicating the challenge, we continued with porcine data, which is dramatically simpler than human tissue due to the lack of fatty tissue occluding many organs.}, subject = {Minimal-invasive Chirurgie}, language = {en} }