@misc{ScharfenbergMottokArtmannetal., author = {Scharfenberg, Georg and Mottok, J{\"u}rgen and Artmann, Christina and Hobelsberger, Martin and Paric, Ivan and Großmann, Benjamin and Pohlt, Clemens and Wackerbarth, Alena and Pausch, Uli and Heidrich, Christiane and Fadanelli, Martin and Elsner, Michael and P{\"o}cher, Daniel and Pittroff, Lenz and Beer, Stefan and Br{\"u}ckl, Oliver and Haslbeck, Matthias and Sterner, Michael and Thema, Martin and Muggenthaler, Nicole and Lenck, Thorsten and G{\"o}tz, Philipp and Eckert, Fabian and Deubzer, Michael and Stingl, Armin and Simsek, Erol and Kr{\"a}mer, Stefan and Großmann, Benjamin and Schlegl, Thomas and Niedersteiner, Sascha and Berlehner, Thomas and Joblin, Mitchell and Mauerer, Wolfgang and Apel, Sven and Siegmund, Janet and Riehle, Dirk and Weber, Joachim and Palm, Christoph and Zobel, Martin and Al-Falouji, Ghassan and Prestel, Dietmar and Scharfenberg, Georg and Mandl, Roland and Deinzer, Arnulf and Halang, W. and Margraf-Stiksrud, Jutta and Sick, Bernhard and Deinzer, Renate and Scherzinger, Stefanie and Klettke, Meike and St{\"o}rl, Uta and Wiech, Katharina and Kubata, Christoph and Sindersberger, Dirk and Monkman, Gareth J. and Dollinger, Markus and Dembianny, Sven and K{\"o}lbl, Andreas and Welker, Franz and Meier, Matthias and Thumann, Philipp and Swidergal, Krzysztof and Wagner, Marcus and Haug, Sonja and Vernim, Matthias and Seidenst{\"u}cker, Barbara and Weber, Karsten and Arsan, Christian and Schone, Reinhold and M{\"u}nder, Johannes and Schroll-Decker, Irmgard and Dillinger, Andrea Elisabeth and Fuchshofer, Rudolf and Monkman, Gareth J. and Shamonin (Chamonine), Mikhail and Geith, Markus A. and Koch, Fabian and {\"U}hlin, Christian and Schratzenstaller, Thomas and Saßmannshausen, Sean Patrick and Auchter, Eberhard and Kriz, Willy and Springer, Othmar and Thumann, Maria and Kusterle, Wolfgang and Obermeier, Andreas and Udalzow, Anton and Schmailzl, Anton and Hierl, Stefan and Langer, Christoph and Schreiner, Rupert}, title = {Forschungsbericht / Ostbayerische Technische Hochschule Regensburg}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, isbn = {978-3-00-048589-3}, doi = {10.35096/othr/pub-1386}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-13867}, language = {de} } @misc{MauererRexhepajMonkmanetal., author = {Mauerer, Wolfgang and Rexhepaj, Tanja and Monkman, Gareth J. 
and Sindersberger, Dirk and Diermeier, Andreas and Neidhart, Thomas and Wolfrum, Dominik and Sterner, Michael and Heberl, Michael and Nusko, Robert and Maier, Georg and Nagl, Klaus and Reuter, Monika and Hofrichter, Andreas and Lex, Thomas and Lesch, Florian and Kieninger, B{\"a}rbel and Szalo, Alexander Eduard and Zehner, Alexander and Palm, Christoph and Joblin, Mitchell and Apel, Sven and Ramsauer, Ralf and Lohmann, Daniel and Westner, Markus and Strasser, Artur and Munndi, Maximilian and Ebner, Lena and Elsner, Michael and Weiß, Nils and Segerer, Matthias and Hackenberg, Rudolf and Steger, Sebastian and Schmailzl, Anton and Dostalek, Michael and Armbruster, Dominik and Koch, Fabian and Hierl, Stefan and Thumann, Philipp and Swidergal, Krzysztof and Wagner, Marcus and Briem, Ulrich and Diermeier, Andreas and Spreitzer, Stefan and Beiderbeck, Sabrina and Hook, Christian and Zobel, Martin and Weber, Tim and Groß, Simon and Penzkofer, Rainer and Dendorfer, Sebastian and Schillitz, Ingo and Bauer, Thomas and Rudolph, Clarissa and Schmidt, Katja and Liebetruth, Thomas and Hamer, Markus and Haug, Sonja and Vernim, Matthias and Weber, Karsten and Saßmannshausen, Sean Patrick and Books, Sebastian and Neuleitner, Nikolaus and Rechenauer, Christian and Steffens, Oliver and Kusterle, Wolfgang and G{\"o}mmel, Roland and Wellnitz, Felix and Stierstorfer, Johannes and Stadler, Dominik and Hofmann, Matthias J. and Motschmann, Hubert and Shamonin (Chamonine), Mikhail and Bleicher, Veronika and Fischer, Sebastian and Hackenberg, Rudolf and Horn, Anton and Kawasch, Raphael and Petzenhauser, Michael and Probst, Tobias and Udalzow, Anton and Dams, Florian and Schreiner, Rupert and Langer, Christoph and Prommesberger, Christian and Ławrowski, Robert Damian}, title = {Forschungsbericht 2016}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, doi = {10.35096/othr/pub-1384}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-13840}, language = {de} } @misc{WeberDendorferSuessetal., author = {Weber, Karsten and Dendorfer, Sebastian and S{\"u}ß, Franz and Kubowitsch, Simone and Schratzenstaller, Thomas and Haug, Sonja and Mohr, Christa and Kiesl, Hans and Drechsler, J{\"o}rg and Westner, Markus and Kobus, J{\"o}rn and Schubert, Martin J. W. and Zenger, Stefan and Pietsch, Alexander and Weiß, Josef and Hinterseer, Sebastian and Schieck, Roland and Scherzinger, Stefanie and Klettke, Meike and Ringlstetter, Andreas and St{\"o}rl, Uta and Bissyand{\'e}, Tegawend{\'e} F. and Seeburger, Achim and Schindler, Timo and Ramsauer, Ralf and Kiszka, Jan and K{\"o}lbl, Andreas and Lohmann, Daniel and Mauerer, Wolfgang and Maier, Johannes and Scorna, Ulrike and Palm, Christoph and Soska, Alexander and Mottok, J{\"u}rgen and Ellermeier, Andreas and V{\"o}gele, Daniel and Hierl, Stefan and Briem, Ulrich and Buschmann, Knut and Ehrlich, Ingo and Pongratz, Christian and Pielmeier, Benjamin and Tyroller, Quirin and Monkman, Gareth J.
and Gut, Franz and Roth, Carina and Hausler, Peter and Bierl, Rudolf and Prommesberger, Christian and Ławrowski, Robert Damian and Langer, Christoph and Schreiner, Rupert and Huang, Yifeng and She, Juncong and Ottl, Andreas and Rieger, Walter and Kraml, Agnes and Poxleitner, Thomas and Hofer, Simon and Heisterkamp, Benjamin and Lerch, Maximilian and Sammer, Nike and Golde, Olivia and Wellnitz, Felix and Schmid, Sandra and Muntschick, Claudia and Kusterle, Wolfgang and Paric, Ivan and Br{\"u}ckl, Oliver and Haslbeck, Matthias and Schmidt, Ottfried and Schwanzer, Peter and Rabl, Hans-Peter and Sterner, Michael and Bauer, Franz and Steinmann, Sven and Eckert, Fabian and Hofrichter, Andreas}, title = {Forschungsbericht 2017}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, isbn = {978-3-9818209-3-5}, doi = {10.35096/othr/pub-1383}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-13835}, subject = {Forschung}, language = {de} } @misc{LautenschlaegerLeisDendorferetal., author = {Lautenschl{\"a}ger, Toni and Leis, Alexander and Dendorfer, Sebastian and Palm, Christoph and Schreiner, Rupert and Langer, Christoph and Prommesberger, Christian and Ławrowski, Robert Damian and Dams, Florian and Bornmann, Benjamin and Navitski, Aliaksandr and Serbun, Pavel and M{\"u}ller, G{\"u}nter and Liebetruth, Thomas and Kohlert, Dieter and Pernsteiner, Jochen and Schreier, Franz and Heerklotz, Sabrina and Heerklotz, Allwin and Boos, Alexander and Herwald, Dominik and Monkman, Gareth J. and Treiber, Daniel and Mayer, Matthias and H{\"o}rner, Eva and Bentz, Alexander and Shamonin (Chamonine), Mikhail and Johansen, S{\o}ren Peter and Reichel, Marco and Stoll, Andrea and Briem, Ulrich and Dullien, Silvia and Renkawitz, Tobias and Weber, Tim and Dendorfer, Sebastian and Grifka, Joachim and Penzkofer, Rainer and Barnsteiner, K. and Jovanovik, M. and Wernecke, P. and V{\"o}gele, A. and Bachmann, T. and Pl{\"o}tz, Martin and Schliekmann, Claus and Wels, Harald and Helmberger, Paul and Kaspar, M. and H{\"o}nicka, M. and Schrammel, Siegfried and Enser, Markus and Schmidmeier, Monika and Schroll-Decker, Irmgard and Haug, Sonja and Gelfert, Verena and Vernim, Matthias}, title = {Forschungsbericht 2012}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, doi = {10.35096/othr/pub-783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-7834}, pages = {64}, language = {de} } @article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task. Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. 
METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. Our pipeline offers greater flexibility and a lower entrance barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @article{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Stallmach, Andreas and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection of duodenal villous atrophy on endoscopic images using a deep learning algorithm}, series = {Gastrointestinal Endoscopy}, journal = {Gastrointestinal Endoscopy}, publisher = {Elsevier}, doi = {10.1016/j.gie.2023.01.006}, abstract = {Background and aims Celiac disease with its endoscopic manifestation of villous atrophy is underdiagnosed worldwide. The application of artificial intelligence (AI) for the macroscopic detection of villous atrophy at routine esophagogastroduodenoscopy may improve diagnostic performance. Methods A dataset of 858 endoscopic images of 182 patients with villous atrophy and 846 images from 323 patients with normal duodenal mucosa was collected and used to train a ResNet 18 deep learning model to detect villous atrophy. An external data set was used to test the algorithm, in addition to six fellows and four board certified gastroenterologists. Fellows could consult the AI algorithm's result during the test. From their consultation distribution, a stratification of test images into "easy" and "difficult" was performed and used for classified performance measurement. Results External validation of the AI algorithm yielded values of 90 \%, 76 \%, and 84 \% for sensitivity, specificity, and accuracy, respectively. Fellows scored values of 63 \%, 72 \% and 67 \%, while the corresponding values in experts were 72 \%, 69 \% and 71 \%, respectively. AI consultation significantly improved all trainee performance statistics. While fellows and experts showed significantly lower performance for "difficult" images, the performance of the AI algorithm was stable. Conclusion In this study, an AI algorithm outperformed endoscopy fellows and experts in the detection of villous atrophy on endoscopic still images. AI decision support significantly improved the performance of non-expert endoscopists. 
The stable performance on "difficult" images suggests a further positive add-on effect in challenging cases.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Verwendung k{\"u}nstlicher Intelligenz bei der Detektion der Papilla duodeni major}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1772000}, pages = {e593-e540}, abstract = {Einleitung Die Endoskopische Retrograde Cholangiopankreatikographie (ERCP) ist der Goldstandard in der Diagnostik und Therapie von Erkrankungen des pankreatobili{\"a}ren Trakts. Jedoch ist sie technisch sehr anspruchsvoll und weist eine vergleichsweise hohe Komplikationsrate auf. Ziele In der vorliegenden Machbarkeitsstudie soll gepr{\"u}ft werden, ob mithilfe eines Deep-learning-Algorithmus die Papille und das Ostium zuverl{\"a}ssig detektiert werden k{\"o}nnen und der Algorithmus somit f{\"u}r Endoskopiker mit geringer Erfahrung ein geeignetes Hilfsmittel, insbesondere f{\"u}r die Ausbildungssituation, darstellen k{\"o}nnte. Methodik Wir betrachteten insgesamt 606 Bilddatens{\"a}tze von 65 Patienten. In diesen wurde sowohl die Papilla duodeni major als auch das Ostium segmentiert. Anschließend wurde ein neuronales Netz mittels eines Deep-learning-Algorithmus trainiert. Außerdem erfolgte eine 5-fache Kreuzvalidierung. Ergebnisse Bei einer 5-fachen Kreuzvalidierung auf den 606 gelabelten Daten konnte f{\"u}r die Klasse Papille ein F1-Wert von 0,7908, eine Sensitivit{\"a}t von 0,7943 und eine Spezifit{\"a}t von 0,9785 erreicht werden, f{\"u}r die Klasse Ostium ein F1-Wert von 0,5538, eine Sensitivit{\"a}t von 0,5094 und eine Spezifit{\"a}t von 0,9970 (vgl. [Tab. 1]). Unabh{\"a}ngig von der Klasse zeigte sich gemittelt (Klasse Papille und Klasse Ostium) ein F1-Wert von 0,6673, eine Sensitivit{\"a}t von 0,6519 und eine Spezifit{\"a}t von 0,9877 (vgl. [Tab. 2]). Schlussfolgerung In der vorliegenden Machbarkeitsstudie konnte das neuronale Netz die Papilla duodeni major mit einer hohen Sensitivit{\"a}t und sehr hohen Spezifit{\"a}t identifizieren. Bei der Detektion des Ostiums war die Sensitivit{\"a}t deutlich geringer. Zuk{\"u}nftig soll das neuronale Netz mit mehr Daten trainiert werden. Außerdem ist geplant, den Algorithmus auch auf Videos anzuwenden.
Somit k{\"o}nnte langfristig ein geeignetes Hilfsmittel f{\"u}r die ERCP etabliert werden.}, language = {de} } @misc{ScheppachRauberStallhoferetal., author = {Scheppach, Markus and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD), which can easily be missed if pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). A separate dataset was used for external validation, as well as determination of detection performance of experts, trainees and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\% and 84\% for sensitivity, specificity and accuracy. External validation showed 90\%, 76\% and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support and experts in endoscopy. AI support in trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit. 
Stable performance of the algorithm in "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @misc{AppelhansKampmannMottoketal., author = {Appelhans, Marie-Luise and Kampmann, Matthias and Mottok, J{\"u}rgen and Riederer, Michael and Nagl, Klaus and Steffens, Oliver and D{\"u}nnweber, Jan and Wildgruber, Markus and Roth, Julius and Stadler, Timo and Palm, Christoph and Weiß, Martin Georg and Rochholz, Sandra and Bierl, Rudolf and Gschossmann, Andreas and Haug, Sonja and Schmidbauer, Simon and Koch, Anna and Westner, Markus and Bary, Benedikt von and Ellermeier, Andreas and V{\"o}gele, Daniel and Maiwald, Frederik and Hierl, Stefan and Schlamp, Matthias and Ehrlich, Ingo and Siegl, Marco and H{\"u}ntelmann, Sven and Wildfeuer, Matthias and Br{\"u}ckl, Oliver and Sterner, Michael and Hofrichter, Andreas and Eckert, Fabian and Bauer, Franz and Dawoud, Belal and Rabl, Hans-Peter and Gamisch, Bernd and Schmidt, Ottfried and Heberl, Michael and Thema, Martin and Mayer, Ulrike and Eller, Johannes and Sippenauer, Thomas and Adelt, Christian and Haslbeck, Matthias and Vogl, Bettina and Mauerer, Wolfgang and Ramsauer, Ralf and Lohmann, Daniel and Sax, Irmengard and Gabor, Thomas and Feld, Sebastian and Linnhoff-Popien, Claudia and Ławrowski, Robert Damian and Langer, Christoph and Schreiner, Rupert and Sellmair, Josef}, title = {Forschung 2019}, editor = {Baier, Wolfgang}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, isbn = {978-3-9818209-7-3}, doi = {10.35096/othr/pub-789}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-7890}, pages = {72}, abstract = {Bericht mit Forschungsprojekten aus verschiedenen Bereichen der OTH Regensburg mit dem Schwerpunktthema "K{\"u}nstliche Intelligenz" und einem Gespr{\"a}ch zur "Medizin der Zukunft"}, subject = {Forschung}, language = {de} } @misc{RoserMeinikheimMendeletal., author = {Roser, D. A. and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, A. and Scheppach, Markus W. and Nagl, S. and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, D. and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, T. and Fernandez-Esparrach, G. and Parsa, N. and Byrne, M. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not achieve the level of the stand-alone performance of AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed ninety-six standardized endoscopy videos. 
Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized in two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, irrespective of comparable outcomes. These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @article{MeinikheimMendelPalmetal.2024, author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus Wolfgang and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\'a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Effect of AI on performance of endoscopists to detect Barrett neoplasia: A Randomized Tandem Trial}, series = {Endoscopy}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, issn = {0013-726X}, doi = {10.1055/a-2296-5696}, year = {2024}, abstract = {Background and study aims To evaluate the effect of an AI-based clinical decision support system (AI) on the performance and diagnostic confidence of endoscopists during the assessment of Barrett's esophagus (BE). Patients and Methods Ninety-six standardized endoscopy videos were assessed by 22 endoscopists from 12 different centers with varying degrees of BE experience. The assessment was randomized into two video sets: Group A (review first without AI and second with AI) and Group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a standalone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.6\%, respectively. Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively.
With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.7\% (95\% CI, 65.2\% - 74.2\%) to 78.0\% (95\% CI, 74.0\% - 82.0\%); specificity 67.3\% (95\% CI, 62.5\% - 72.2\%) to 72.7\% (95\% CI, 68.2\% - 77.3\%)). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from the additional AI. BE experts and nonexperts remained below the standalone performance of AI, suggesting that there may be other factors influencing endoscopists to follow or discard AI advice.}, language = {en} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS). An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg. In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the AI algorithm (AI-EoE-EREFS) performance improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores. The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with a lower or medium experience level.}, language = {en} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and De Souza Jr., Luis Antonio and Papa, Jo{\~a}o Paulo and Byrne, Michael F.
and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy, for example, bleeding and perforation. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, time and also training.}, language = {en} } @misc{RoemmeleMendelRauberetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Rauber, David and R{\"u}ckert, Tobias and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Endoscopic Diagnosis of Eosinophilic Esophagitis Using a deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724274}, abstract = {Aims Eosinophilic esophagitis (EoE) is easily missed during endoscopy, either because physicians are not familiar with its endoscopic features or the morphologic changes are too subtle. In this preliminary paper, we present the first attempt to detect EoE in endoscopic white light (WL) images using a deep learning network (EoE-AI). Methods 401 WL images of eosinophilic esophagitis and 871 WL images of normal esophageal mucosa were evaluated. All images were assessed for the Endoscopic Reference score (EREFS) (edema, rings, exudates, furrows, strictures). Images with strictures were excluded. EoE was defined as the presence of at least 15 eosinophils per high power field on biopsy. A convolutional neural network based on the ResNet architecture with several five-fold cross-validation runs was used. Adding auxiliary EREFS-classification branches to the neural network allowed the inclusion of the scores as optimization criteria during training. EoE-AI was evaluated for sensitivity, specificity, and F1-score. In addition, two human endoscopists evaluated the images. Results EoE-AI showed a mean sensitivity, specificity, and F1 of 0.759, 0.976, and 0.834 respectively, averaged over the five distinct cross-validation runs. With the EREFS-augmented architecture, a mean sensitivity, specificity, and F1-score of 0.848, 0.945, and 0.861 could be demonstrated respectively. In comparison, the two human endoscopists had an average sensitivity, specificity, and F1-score of 0.718, 0.958, and 0.793. Conclusions To the best of our knowledge, this is the first application of deep learning to endoscopic images of EoE which were also assessed after augmentation with the EREFS-score. The next step is the evaluation of EoE-AI using an external dataset. We then plan to assess the EoE-AI tool on endoscopic videos, and also in real-time. 
This preliminary work is encouraging regarding the ability of AI to enhance physician detection of EoE, and potentially to do a true "optical biopsy", but more work is needed.}, language = {en} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Prinz, Friederike and Schlottmann, Jakob and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Einfluss von K{\"u}nstlicher Intelligenz auf die Performance von niedergelassenen Gastroenterolog:innen bei der Beurteilung von Barrett-{\"O}sophagus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {61}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {8}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0043-1771711}, abstract = {Einleitung Die Differenzierung zwischen nicht dysplastischem Barrett-{\"O}sophagus (NDBE) und mit Barrett-{\"O}sophagus assoziierten Neoplasien (BERN) w{\"a}hrend der endoskopischen Inspektion erfordert viel Expertise. Die fr{\"u}he Diagnosestellung ist wichtig f{\"u}r die weitere Prognose des Barrett-Karzinoms. In Deutschland werden Patient:innen mit einem Barrett-{\"O}sophagus (BE) in der Regel im niedergelassenen Sektor {\"u}berwacht. Ziele Ziel ist es, den Einfluss eines auf K{\"u}nstlicher Intelligenz (KI) basierenden klinischen Entscheidungsunterst{\"u}tzungssystems (CDSS) auf die Performance von niedergelassenen Gastroenterolog:innen (NG) bei der Evaluation von Barrett-{\"O}sophagus (BE) zu untersuchen. Methodik Es erfolgte die prospektive Sammlung von 96 unver{\"a}nderten hochaufl{\"o}senden Videos mit F{\"a}llen von Patient:innen mit histologisch best{\"a}tigtem NDBE und BERN. Alle eingeschlossenen F{\"a}lle enthielten mindestens zwei der folgenden Darstellungsmethoden: HD-Weißlichtendoskopie, Narrow Band Imaging oder Texture and Color Enhancement Imaging. Sechs NG von sechs unterschiedlichen Praxen wurden als Proband:innen eingeschlossen. Es erfolgte eine permutierte Block-Randomisierung der Videof{\"a}lle in entweder Gruppe A oder Gruppe B. Gruppe A implizierte eine Evaluation des Falls durch Proband:innen zun{\"a}chst ohne KI und anschließend mit KI als CDSS. In Gruppe B erfolgte die Evaluation in umgekehrter Reihenfolge. Anschließend erfolgte eine zuf{\"a}llige Wiedergabe der so entstandenen Subgruppen im Rahmen des Tests. Ergebnis In diesem Test konnte ein von uns entwickeltes KI-System (Barrett-Ampel) eine Sensitivit{\"a}t von 92,2\%, eine Spezifit{\"a}t von 68,9\% und eine Accuracy von 81,3\% erreichen. Mit der Hilfe von KI verbesserte sich die Sensitivit{\"a}t der NG von 64,1\% auf 71,2\% (p<0,001) und die Accuracy von 66,3\% auf 70,8\% (p=0,006) signifikant. Eine signifikante Verbesserung dieser Parameter zeigte sich ebenfalls, wenn die Proband:innen die F{\"a}lle zun{\"a}chst ohne KI evaluierten (Gruppe A). Wurde der Fall jedoch als Erstes mit der Hilfe von KI evaluiert (Gruppe B), blieb die Performance nahezu konstant. Schlussfolgerung Es konnte ein performantes KI-System zur Evaluation von BE entwickelt werden. NG verbessern sich bei der Evaluation von BE durch den Einsatz von KI.}, language = {de} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W.
and Schnoy, Elisabeth and Nagl, Sandra and R{\"o}mmele, Christoph and Prinz, Friederike and Schlottmann, Jakob and Golger, Daniela and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {AI-assisted detection and characterization of early Barrett's neoplasia: Results of an Interim analysis}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765437}, pages = {S169}, abstract = {Aims Evaluation of the add-on effect an artificial intelligence (AI) based clinical decision support system has on the performance of endoscopists with different degrees of expertise in the field of Barrett's esophagus (BE) and Barrett's esophagus-related neoplasia (BERN). Methods The support system is based on a multi-task deep learning model trained to solve a segmentation and several classification tasks. The training approach represents an extension of the ECMT semi-supervised learning algorithm. The complete system evaluates a decision tree between estimated motion, classification, segmentation, and temporal constraints, to decide when and how the prediction is highlighted to the observer. In our current study, ninety-six video cases of patients with BE and BERN were prospectively collected and assessed by Barrett's specialists and non-specialists. All video cases were evaluated twice - with and without AI assistance. The order of appearance, either with or without AI support, was assigned randomly. Participants were asked to detect and characterize regions of dysplasia or early neoplasia within the video sequences. Results Standalone sensitivity, specificity, and accuracy of the AI system were 92.16\%, 68.89\%, and 81.25\%, respectively. Mean sensitivity, specificity, and accuracy of expert endoscopists without AI support were 83.33\%, 58.20\%, and 71.48\%, respectively. Gastroenterologists without Barrett's expertise but with AI support had a comparable performance with a mean sensitivity, specificity, and accuracy of 76.63\%, 65.35\%, and 71.36\%, respectively. Conclusions Non-Barrett's experts with AI support had a similar performance as experts in a video-based study.}, language = {en} } @article{MaierDesernoHandelsetal., author = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, title = {IJCARS: BVM 2021 special issue}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer}, doi = {10.1007/s11548-021-02534-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-21666}, pages = {2067 -- 2068}, abstract = {The German workshop on medical image computing (BVM) has been held in different locations in Germany for more than 20 years. In terms of content, BVM focused on the computer-aided analysis of medical image data with a wide range of applications, e.g. in the area of imaging, diagnostics, operation planning, computer-aided intervention and visualization. During this time, there have been remarkable methodological developments and upheavals, on which the BVM community has worked intensively. The area of machine learning should be emphasized, which has led to significant improvements, especially for tasks of classification and segmentation, but increasingly also in image formation and registration. As a result, work in connection with deep learning now dominates the BVM.
These developments have also contributed to the establishment of medical image processing at the interface between computer science and medicine as one of the key technologies for the digitization of the health system. In addition to the presentation of current research results, a central aspect of the BVM is primarily the promotion of young scientists from the diverse BVM community, covering not only Germany but also Austria, Switzerland, the Netherlands and other European neighbors. The conference serves primarily doctoral students and postdocs, but also students with excellent bachelor and master theses as a platform to present their work, to enter into professional discourse with the community, and to establish networks with specialist colleagues. Despite the many conferences and congresses that are also relevant for medical image processing, the BVM has therefore lost none of its importance and attractiveness and has retained its permanent place in the annual conference rhythm. Building on this foundation, there are some innovations and changes this year. The BVM 2021 was organized for the first time at the Ostbayerische Technische Hochschule Regensburg (OTH Regensburg, a technical university of applied sciences). After Aachen, Berlin, Erlangen, Freiburg, Hamburg, Heidelberg, Leipzig, L{\"u}beck, and Munich, Regensburg is not just a new venue. OTH Regensburg is the first representative of the universities of applied sciences (HAW) to organize the conference, which differs from universities, university hospitals, or research centers like Fraunhofer or Helmholtz. This also reflects the further development of the research landscape in Germany, where HAWs increasingly contribute to applied research in addition to their focus on teaching. This development is also reflected in the contributions submitted to the BVM in recent years. At BVM 2021, which was held in a virtual format for the first time due to the Corona pandemic, an attractive and high-quality program was offered. Fortunately, the number of submissions increased significantly. Out of 97 submissions, 26 presentations, 51 posters and 5 software demonstrations were accepted via an anonymized reviewing process with three reviews each. The three best works have been awarded BVM prizes, selected by a separate committee. Based on these high-quality submissions, we are able to present another special issue in the International Journal of Computer Assisted Radiology and Surgery (IJCARS). Out of the 97 submissions, the ones with the highest scores have been invited to submit an extended version of their paper to be presented in IJCARS. As a result, we are now able to present this special issue with seven excellent articles. Many submissions focus on machine learning in a medical context.}, subject = {Bildgebendes Verfahren}, language = {en} } @misc{KreftingZaunsederSaeringetal., author = {Krefting, Dagmar and Zaunseder, Sebastian and S{\"a}ring, Dennis and Wittenberg, Thomas and Palm, Christoph and Schiecke, Karin and Krenkel, Lars and Hennemuth, Anja and Schnell, Susanne and Spicher, Nicolai}, title = {Blutdruck, H{\"a}modynamik und Gef{\"a}ßzustand: Innovative Erfassung und Bewertung - Schwerpunkt bildbasierte Verfahren}, series = {66. Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e. V. (GMDS), 12. Jahreskongress der Technologie- und Methodenplattform f{\"u}r die vernetzte medizinische Forschung e. V. (TMF), 26. - 30.09.2021, online}, journal = {66.
Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e. V. (GMDS), 12. Jahreskongress der Technologie- und Methodenplattform f{\"u}r die vernetzte medizinische Forschung e. V. (TMF), 26. - 30.09.2021, online}, doi = {10.3205/21gmds016}, url = {http://nbn-resolving.de/urn:nbn:de:0183-21gmds0167}, abstract = {Einleitung: Blutdruck gilt als sogenannter Vitalparameter als einer der grundlegenden Indikatoren f{\"u}r den Gesundheitszustand einer Person. Sowohl zu niedriger als auch zu hoher Blutdruck kann lebensbedrohend sein, letzterer ist dar{\"u}ber hinaus ein Risikofaktor insbesondere f{\"u}r Herz-Kreislauferkrankungen, die trotz wichtiger Fortschritte in der Behandlung immer noch die h{\"a}ufigste Todesursache in Deutschland darstellen. Die H{\"a}modynamik, also die raumzeitliche Dynamik des Blutflusses, und der Gef{\"a}ßzustand sind eng verbunden mit dem Blutdruck und ebenfalls von hoher klinischer Relevanz, u.a. zur Identifikation von Durchblutungsst{\"o}rungen und ung{\"u}nstigen Druckverteilungen der Gef{\"a}ßwand. Innovationen sowohl in der Messtechnik als auch in der Datenanalyse bieten heute neue M{\"o}glichkeiten der Erfassung und Bewertung von Blutdruck, H{\"a}modynamik und Gef{\"a}ßzustand [1], [2], [3], [4]. Methodik: In einer gemeinsamen Workshopserie der AG Medizinische Bild- und Signalverarbeitung der GMDS und des Fachausschusses Biosignale der DGBMT werden wir neue Ans{\"a}tze und L{\"o}sungen f{\"u}r Mess- und Analyseverfahren zu Blutdruck und -fluss sowie zum Gef{\"a}ßzustand vorstellen und diskutieren. Dabei stehen im ersten Workshop auf der GMDS Jahrestagung bildbasierte Verfahren im Zentrum, w{\"a}hrend der zweite Workshop auf der DGBMT Jahrestagung den Fokus auf biosignalbasierte Verfahren legt. Es werden aktuelle Forschungsergebnisse vorgestellt und diskutiert. Es sind jeweils mehrere Vortr{\"a}ge geplant mit ausreichend Zeit zur Diskussion. Folgende Vortr{\"a}ge sind geplant (Arbeitstitel): Sebastian Zaunseder: Videobasierte Erfassung des Blutdrucks; Anja Hennemuth: A Visualization Toolkit for the Analysis of Aortic Anatomy and Pressure Distribution; Lars Krenkel: Numerische Analyse der Rupturwahrscheinlichkeit zerebraler Aneurysmata; Susanne Schnell: Messung des Blutflusses und h{\"a}modynamischer Parameter mit 4D flow MRI: M{\"o}glichkeiten und Herausforderungen. Ergebnisse: Ziel des Workshops ist die Identifikation von innovativen Ans{\"a}tzen und neuen Methoden zur qualitativen und quantitativen Bestimmung von h{\"a}modynamischen Parametern sowie deren kritische Bewertung durch die Community f{\"u}r die Eignung in der klinischen Entscheidungsunterst{\"u}tzung. Diskussion: Der Workshop leistet inhaltlich einen Beitrag zu zentralen Aspekten f{\"u}r die Herz-Kreislauf-Medizin. Er bringt dabei Expertise aus verschiedenen Bereichen zusammen und schl{\"a}gt die Br{\"u}cke zwischen Kardiologie, Medizininformatik und Medizintechnik. Schlussfolgerung: Innovative Technologien aus Medizintechnik und Informatik erm{\"o}glichen zunehmend einfache und raumzeitlich aufgel{\"o}ste Erfassung und Bewertung wichtiger Informationen zur Unterst{\"u}tzung von Diagnose und Therapieverfolgung. [1] Zaunseder S, Trumpp A, Wedekind D, Malberg H. Cardiovascular assessment by imaging photoplethysmography - a review. Biomed Tech (Berl). 2018 Oct 25;63(5):617-34. [2] Huellebrand M, Messroghli D, Tautz L, Kuehne T, Hennemuth A. An extensible software platform for interdisciplinary cardiovascular imaging research. Comput Methods Programs Biomed.
2020 Feb;184:105277. [3] Schmitter S, Adriany G, Waks M, Moeller S, Aristova M, Vali A, et al. Bilateral Multiband 4D Flow MRI of the Carotid Arteries at 7T. Magn Reson Med. 2020 Oct;84(4):1947-60. [4] Birkenmaier C, Krenkel L. Flow in Artificial Lungs. In: New Results in Numerical and Experimental Fluid Mechanics XIII. Contributions to the 22nd STAB/DGLR Symposium. Springer; 2021.}, subject = {Blutdruck}, language = {de} } @article{MendelRauberdeSouzaJretal., author = {Mendel, Robert and Rauber, David and de Souza Jr., Luis Antonio and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Error-Correcting Mean-Teacher: Corrections instead of consistency-targets applied to semi-supervised medical image segmentation}, series = {Computers in Biology and Medicine}, volume = {154}, journal = {Computers in Biology and Medicine}, number = {March}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2023.106585}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-57790}, pages = {13}, abstract = {Semantic segmentation is an essential task in medical imaging research. Many powerful deep-learning-based approaches can be employed for this problem, but they are dependent on the availability of an expansive labeled dataset. In this work, we augment such supervised segmentation models to be suitable for learning from unlabeled data. Our semi-supervised approach, termed Error-Correcting Mean-Teacher, uses an exponential moving average model like the original Mean Teacher but introduces our new paradigm of error correction. The original segmentation network is augmented to handle this secondary correction task. Both tasks build upon the core feature extraction layers of the model. For the correction task, features detected in the input image are fused with features detected in the predicted segmentation and further processed with task-specific decoder layers. The combination of image and segmentation features allows the model to correct present mistakes in the given input pair. The correction task is trained jointly on the labeled data. On unlabeled data, the exponential moving average of the original network corrects the student's prediction. The combined outputs of the student's prediction with the teacher's correction form the basis for the semi-supervised update. We evaluate our method with the 2017 and 2018 Robotic Scene Segmentation data, the ISIC 2017 and the BraTS 2020 Challenges, a proprietary Endoscopic Submucosal Dissection dataset, Cityscapes, and Pascal VOC 2012. Additionally, we analyze the impact of the individual components and examine the behavior when the amount of labeled data varies, with experiments performed on two distinct segmentation architectures. Our method shows improvements in terms of the mean Intersection over Union over the supervised baseline and competing methods.
Code is available at https://github.com/CloneRob/ECMT.}, language = {en} } @article{MaierWeihererHuberetal., author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Imitating human soft tissue on basis of a dual-material 3D print using a support-filled metamaterial to provide bimanual haptic for a hand surgery training system}, series = {Quantitative Imaging in Medicine and Surgery}, volume = {9}, journal = {Quantitative Imaging in Medicine and Surgery}, number = {1}, publisher = {AME Publishing Company}, doi = {10.21037/qims.2018.09.17}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-979}, pages = {30 -- 42}, abstract = {Background: Currently, it is common practice to use three-dimensional (3D) printers not only for rapid prototyping in the industry, but also in the medical area to create medical applications for training inexperienced surgeons. In a clinical training simulator for minimally invasive bone drilling to fix hand fractures with Kirschner-wires (K-wires), a 3D-printed hand phantom must not only be geometrically but also haptically correct. Due to a limited view during an operation, surgeons need to perfectly localize underlying risk structures only by feeling of specific bony protrusions of the human hand. Methods: The goal of this experiment is to imitate human soft tissue with its haptic and elasticity for a realistic hand phantom fabrication, using only a dual-material 3D printer and support-material-filled metamaterial between skin and bone. We present our workflow to generate lattice structures between hard bone and soft skin with iterative cube edge (CE) or cube face (CF) unit cells. Cuboid and finger shaped sample prints with and without inner hard bone in different lattice thickness are constructed and 3D printed. Results: The most elastic available rubber-like material is too firm to imitate soft tissue. By reducing the amount of rubber in the inner volume through support material (SUP), objects become significantly softer. Without metamaterial, after disintegration, the SUP can be shifted through the volume and thus the body loses its original shape. Although the CE design increases the elasticity, it cannot restore the fabric form. In contrast to CE, the CF design increases not only the elasticity but also guarantees a local limitation of the SUP. Therefore, the body retains its shape and internal bones remain in its intended place. Various unit cell sizes, lattice thickening and skin thickness regulate the rubber material and SUP ratio. Test prints with higher SUP and lower rubber material percentage appear softer and vice versa. This was confirmed by an expert surgeon evaluation. Subjects adjudged pure rubber-like material as too firm and samples only filled with SUP or lattice structure in CE design as not suitable for imitating tissue. 3D-printed finger samples in CF design were rated as realistic compared to the haptic of human tissue with a good palpable bone structure. Conclusions: We developed a new dual-material 3D print technique to imitate soft tissue of the human hand with its haptic properties. Blowy SUP is trapped within a lattice structure to soften rubber-like 3D print material, which makes it possible to reproduce a realistic replica of human hand soft tissue.}, subject = {Handchirurgie}, language = {en} }
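The entries above repeatedly quote the same evaluation figures: sensitivity, specificity, accuracy, F1 score, and, for the segmentation papers, Intersection over Union (IoU) and Dice score; the Error-Correcting Mean-Teacher entry additionally relies on an exponential moving average (EMA) teacher. As a reading aid, the following Python sketch spells out the standard definitions behind these figures. It is illustrative only, is not code from any of the cited studies, and every function name and value in it is hypothetical. (BibTeX ignores text outside of entries, so this note does not affect processing of the file.)

# Illustrative sketch (not from any cited study; all names hypothetical):
# the standard definitions behind the metrics quoted in the abstracts above.

def confusion_counts(y_true, y_pred):
    """Count (TP, FP, TN, FN) for binary labels, 1 = positive finding."""
    tp = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
    fp = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
    tn = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 0)
    fn = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)
    return tp, fp, tn, fn

def sensitivity(tp, fn):
    # True-positive rate (recall): share of diseased cases that are found.
    return tp / (tp + fn)

def specificity(tn, fp):
    # True-negative rate: share of healthy cases correctly ruled out.
    return tn / (tn + fp)

def accuracy(tp, fp, tn, fn):
    return (tp + tn) / (tp + fp + tn + fn)

def f1_score(tp, fp, fn):
    # Harmonic mean of precision and recall.
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

def iou(mask_a, mask_b):
    # Intersection over Union for segmentation masks given as pixel sets.
    return len(mask_a & mask_b) / len(mask_a | mask_b)

def dice(mask_a, mask_b):
    # Dice score; per image it equals 2*IoU / (1 + IoU).
    return 2 * len(mask_a & mask_b) / (len(mask_a) + len(mask_b))

def ema_update(teacher, student, alpha=0.99):
    # Exponential moving average as used by Mean-Teacher-style methods:
    # the teacher's weights trail the student's weights during training.
    return [alpha * t + (1.0 - alpha) * s for t, s in zip(teacher, student)]

if __name__ == "__main__":
    y_true = [1, 1, 0, 0, 1, 0, 1, 0]   # hypothetical ground truth
    y_pred = [1, 0, 0, 1, 1, 0, 1, 0]   # hypothetical predictions
    tp, fp, tn, fn = confusion_counts(y_true, y_pred)
    print(f"sensitivity={sensitivity(tp, fn):.2f}",
          f"specificity={specificity(tn, fp):.2f}",
          f"accuracy={accuracy(tp, fp, tn, fn):.2f}",
          f"F1={f1_score(tp, fp, fn):.2f}")

For example, the Gut entry reports a mean IoU of 63% alongside a mean Dice score of 76%: the per-image identity Dice = 2*IoU/(1+IoU) gives roughly 77%, which matches the quoted value up to rounding, since the identity holds exactly only per image and approximately for dataset averages.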