@inproceedings{FranzDreherPrinzenetal., author = {Franz, Daniela and Dreher, Maria and Prinzen, Martin and Teßmann, Matthias and Palm, Christoph and Katzky, Uwe and Perret, Jerome and Hofer, Mathias and Wittenberg, Thomas}, title = {CT-basiertes virtuelles Fr{\"a}sen am Felsenbein}, series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-56537-7}, doi = {10.1007/978-3-662-56537-7_51}, pages = {176 -- 181}, abstract = {Im Rahmen der Entwicklung eines haptisch-visuellen Trainingssystems f{\"u}r das Fr{\"a}sen am Felsenbein werden ein Haptikarm und ein autostereoskopischer 3D-Monitor genutzt, um Chirurgen die virtuelle Manipulation von kn{\"o}chernen Strukturen im Kontext eines sog. Serious Game zu erm{\"o}glichen. Unter anderem sollen Assistenz{\"a}rzte im Rahmen ihrer Ausbildung das Fr{\"a}sen am Felsenbein f{\"u}r das chirurgische Einsetzen eines Cochlea-Implantats {\"u}ben k{\"o}nnen. Die Visualisierung des virtuellen Fr{\"a}sens muss daf{\"u}r in Echtzeit und m{\"o}glichst realistisch modelliert, implementiert und evaluiert werden. Wir verwenden verschiedene Raycasting-Methoden mit linearer und Nearest-Neighbor-Interpolation und vergleichen die visuelle Qualit{\"a}t und die Bildwiederholfrequenzen der Methoden. Alle verglichenen Verfahren sind echtzeitf{\"a}hig, unterscheiden sich aber in ihrer visuellen Qualit{\"a}t.}, subject = {Felsenbein}, language = {de} } @inproceedings{MaierHuberKatzkyetal., author = {Maier, Johannes and Huber, Michaela and Katzky, Uwe and Perret, Jerome and Wittenberg, Thomas and Palm, Christoph}, title = {Force-Feedback-assisted Bone Drilling Simulation Based on CT Data}, series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-56537-7_78}, pages = {291 -- 296}, abstract = {In order to fix a fracture using minimally invasive surgery approaches, surgeons are drilling complex and tiny bones with a two-dimensional X-ray as the single imaging modality in the operating room. Our novel haptic force-feedback and visually assisted training system will potentially help hand surgeons to learn the drilling procedure in a realistic visual environment. Within the simulation, the collision detection as well as the interaction between virtual drill, bone voxels and surfaces are important. In this work, the chai3d collision detection and force calculation algorithms are combined with a physics engine to simulate the bone drilling process. The chosen Bullet-Physics-Engine provides a stable simulation of rigid bodies, if the collision model of the drill and the tool holder is generated as a compound shape. Three haptic points are added to the K-wire tip for removing single voxels from the bone.
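As an illustration of the voxel-removal step just described, the following is a minimal, hedged sketch in Python with NumPy; the volume layout, coordinate convention and removal radius are illustrative assumptions and not the implementation used by the authors.

import numpy as np

def remove_voxels(bone_volume, tip_voxel, radius=2):
    # bone_volume: 3D array whose nonzero entries mark bone material
    # tip_voxel: (z, y, x) position of one haptic interaction point, in voxel coordinates
    z, y, x = tip_voxel
    zs, ys, xs = np.ogrid[:bone_volume.shape[0], :bone_volume.shape[1], :bone_volume.shape[2]]
    dist2 = (zs - z) ** 2 + (ys - y) ** 2 + (xs - x) ** 2
    inside = dist2 <= radius ** 2
    removed = int(np.count_nonzero(bone_volume[inside]))
    bone_volume[inside] = 0  # carve the material out of the volume
    return removed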
For the drilling process, three modes are proposed to emulate the different phases of drilling by restricting the movement of the haptic device.}, subject = {Handchirurgie}, language = {en} } @inproceedings{EixelbergerWittenbergPerretetal., author = {Eixelberger, Thomas and Wittenberg, Thomas and Perret, Jerome and Katzky, Uwe and Simon, Martina and Schmitt-R{\"u}th, Stephanie and Hofer, Mathias and Sorge, M. and Jacob, R. and Engel, Felix B. and Gostian, A. and Palm, Christoph and Franz, Daniela}, title = {A haptic model for virtual petrosal bone milling}, series = {17. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2018), Tagungsband, 2018, Leipzig, 13.-15. September}, volume = {17}, booktitle = {17. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2018), Tagungsband, 2018, Leipzig, 13.-15. September}, pages = {214 -- 219}, abstract = {Virtual training of bone milling requires real-time and realistic haptics of the interaction between the "virtual mill" and a "virtual bone". We propose an exponential abrasion model between the virtual bone and the mill bit and combine it with a coarse representation of the virtual bone and the mill shaft for collision detection using the Bullet Physics Engine. We compare our exponential abrasion model to a widely used linear abrasion model and evaluate it quantitatively and qualitatively. The evaluation results show that we can provide virtual milling in real-time, with an abrasion behavior similar to that proposed in the literature and with a realistic feeling as rated by five different surgeons.}, subject = {Osteosynthese}, language = {en} } @inproceedings{PalmSchanze, author = {Palm, Christoph and Schanze, Thomas}, title = {Biomedical Image and Signal Computing (BISC 2013)}, series = {58. Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e.V. (GMDS 2013), L{\"u}beck, 01.-05.09.2013}, booktitle = {58. Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e.V. (GMDS 2013), L{\"u}beck, 01.-05.09.2013}, number = {DocAbstr. 324}, publisher = {German Medical Science GMS Publishing House}, address = {D{\"u}sseldorf}, doi = {10.3205/13gmds257}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Computer-aided diagnosis using deep learning in the evaluation of early oesophageal adenocarcinoma}, series = {Gut}, volume = {68}, journal = {Gut}, number = {7}, publisher = {British Society of Gastroenterology}, doi = {10.1136/gutjnl-2018-317573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-68}, pages = {1143 -- 1145}, abstract = {Computer-aided diagnosis using deep learning (CAD-DL) may be an instrument to improve endoscopic assessment of Barrett's oesophagus (BE) and early oesophageal adenocarcinoma (EAC). Based on still images from two databases, the diagnosis of EAC by CAD-DL reached sensitivities/specificities of 97\%/88\% (Augsburg data) and 92\%/100\% (Medical Image Computing and Computer-Assisted Intervention [MICCAI] data) for white light (WL) images and 94\%/80\% for narrow band images (NBI) (Augsburg data), respectively. Tumour margins delineated by experts in the images were detected satisfactorily with a Dice coefficient (D) of 0.72.
This could be a first step towards CAD-DL for BE assessment. If developed further, it could become a useful adjunctive tool for patient management.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @phdthesis{Maier, author = {Maier, Johannes}, title = {Entwicklung eines Haptisch und Visuell unterst{\"u}tzten Trainingssystems (HaptiVisT) f{\"u}r komplexe Knochenbohrungen in der minimalinvasiven Handchirurgie}, publisher = {Shaker}, address = {D{\"u}ren}, isbn = {978-3-8440-7547-2}, pages = {236}, abstract = {Eine g{\"a}ngige Operationsmethode, um Frakturen der menschlichen Hand nach einem Unfall zu korrigieren, ist eine Osteosynthese mit sogenannten Kirschnerdr{\"a}hten (K-Dr{\"a}hten) zur Stabilisierung von Knochenfragmenten. Die Einf{\"u}hrung dieser langen, d{\"u}nnen und scharfen Dr{\"a}hte durch manuelles Bohren ist eine komplexe minimalinvasive Operation, bei der ein Chirurg nahezu ohne visuelle Orientierung und nur durch eine kleine {\"O}ffnung der Haut des Patienten arbeitet. Als Orientierungshilfe f{\"u}r die optimale Lage der K-Dr{\"a}hte bleibt dem Chirurgen lediglich eine zweidimensionale (2D)-R{\"o}ntgendarstellung und das Ertasten von kn{\"o}chernen Vorspr{\"u}ngen auf der menschlichen Hand, um Verletzungen an Risikostrukturen (Nerven, Gef{\"a}ße usw.), die im Weichteilgewebe der Hand eingebettet sind, zu vermeiden. F{\"u}r eine sichere und fehlerfreie Durchf{\"u}hrung einer K-Draht-Osteosynthese ist deswegen eine gr{\"u}ndliche theoretische und praktische Ausbildung junger Chirurgen notwendig. Da traditionelle Trainingsmethoden zeitaufwendig, kostspielig, ethisch nicht korrekt und unzureichend realistisch sind, wird in dieser Arbeit ein innovativer, auf virtueller Realit{\"a}t (VR) basierender, Haptisch und Visuell unterst{\"u}tzter Trainingssimulator (HaptiVisT) f{\"u}r die Platzierung von K-Dr{\"a}hten entwickelt, der vor allem Handchirurgen mit {\"U}bungs- und Perfektionierungsbedarf dabei unterst{\"u}tzt, das Bohrverfahren in einer realistischen aber virtuellen Umgebung zu erlernen. Beim HaptiVisT-Prototypenaufbau werden reale Patientendaten segmentierter Volumendaten aus einer Computertomographie (CT) und einer Magnetresonanztomographie (MRT) im virtuellen, dreidimensionalen (3D) Raum auf einem 3D-Monitor visualisiert und f{\"u}r eine intuitive bimanuelle Haptik sowohl mit einem Kraftfeedback-Ger{\"a}t f{\"u}r den Bohrprozess und einer 3D-gedruckten und optisch getrackten Phantomhand kombiniert. Die vorliegende Arbeit beschreibt zun{\"a}chst alle verwendeten Hardwareger{\"a}te, die C++-Softwareumgebung, aufgebaut auf Multithreading (gleichzeitige Ausf{\"u}hrung mehrerer Anweisungsfolgen in einem Prozess), und die auf Oberfl{\"a}chen- und Volumenrendering basierte Visualisierung. Die Kollisionsdetektion zwischen Bohrer und Knochen im virtuellen Raum wird in zwei separate Ereignisse unterteilt: Kollisionen zwischen Objekten als Gesamtes (Simulation der Kollision {\"u}ber die gesamte Objektoberfl{\"a}che) und Kollisionen zwischen einer K-Draht-Spitze und dem Knochenvolumen f{\"u}r die Entfernung kleiner Volumenelemente (Voxel). Das Herzst{\"u}ck des Trainingssystems bildet eine echtzeitf{\"a}hige Bohrsimulation, die den gesamten Bohrprozess in eine endliche Anzahl logischer Unterprozesse gliedert und diese Zust{\"a}nde in einen endlichen Zustandsautomaten (FSM, engl.: Finite State Machine) zusammenfasst. 
Das Kraftfeedback w{\"a}hrend einer Bohrung wird mit sogenannten „Virtual Fixtures" (abstrakten sensorischen Informationen) berechnet und {\"u}ber einen Haptikarm auf den Benutzer {\"u}bertragen. Damit die Simulation der Realit{\"a}t entspricht, wird unter Zuhilfenahme eines experimentellen Aufbaus die reale Bohrgeschwindigkeit durch kortikale Knochen ermittelt. Anschließend werden ein Levelkonzept und alle im System verf{\"u}gbaren Bohrunterst{\"u}tzungswerkzeuge, wie haptische Korridore als Goldstandard oder eine R{\"o}ntgenbildsimulation, vorgestellt. Mit ihnen ist es m{\"o}glich, ausgesuchte Operationsf{\"a}lle in Level unterschiedlicher Schwierigkeit zu unterteilen und den Operationsvorgang qualitativ zu bewerten. Der 3D-Druck einer Phantomhand (realit{\"a}tsnahe Nachbildung einer Patientenhand) mit realistischen haptischen Eigenschaften zum Ertasten von Knochenvorspr{\"u}ngen wird {\"u}ber einen metamaterialbasierten Ansatz (Neuanordnung des Grundmaterials durch eine k{\"u}nstlich angelegte, sich wiederholende Struktur) realisiert, da das aktuell am Markt verf{\"u}gbare 3D-Druckmaterial f{\"u}r den Druck menschlichen Weichteilgewebes zu hart ist. Die Echtzeitverfolgung der Phantomhand beruht auf einem mit einer Stereokamera optisch getrackten Marker in Form eines Dodekaeders (K{\"o}rper mit zw{\"o}lf Fl{\"a}chen). Abschließend wird das HaptiVisT-Gesamtsystem in drei und der 3D-Druck einer Phantomhand in zwei Expertenevaluationen ausf{\"u}hrlich untersucht und ausgewertet. Das HaptiVisT-System versteht sich als notwendiges Komplement f{\"u}r den ersten und weltweit einzigen funktionsf{\"a}higen, kompakten Prototypen f{\"u}r virtuelle K-DrahtOsteosynthesen mit haptischen Kraftfeedback, der in Zukunft Chirurgen in Aus- und Weiterbildung an Kliniken oder Trainingszentren ein risikofreies, zeit- und ortsunabh{\"a}ngiges Training erm{\"o}glicht. Die Kernelemente dieser Arbeit sind: • Stereoskopische 3D-Darstellung von realen Patientendaten. • Bimanuelle Haptik aus haptischen Kraftfeedback des Bohrens verbunden mit einer optisch getrackten, haptisch korrekten und 3D-gedruckten Phantomhand. • Zuverl{\"a}ssige Kollisionsdetektion zwischen virtuellen Objekten als Grundlage f{\"u}r das Kraftfeedback und die Abtragung von Knochen. • Echtzeitf{\"a}hige Bohrsimulation durch Reduzierung des Bohrprozesses auf logische Bohr-Teilprozesse kombiniert mit einer Virtual Fixtures basierten Kraftberechnung. Haptisch korrekte Phantome sind vor allem im medizinischen Training von hoher Relevanz und die Berechnung des Kraftfeedbacks beruht erstmals auf der performanten und stabilen Simulation von Bohr-Teilprozessen unter Verwendung von Virtual Fixtures. Der Prototyp wird von Experten durchgehend positiv bewertet und bietet nach deren Einsch{\"a}tzung einen hohen Mehrwert f{\"u}r das chirurgische Training. Zuk{\"u}nftige Arbeiten k{\"o}nnten den Lerneffekt durch das HaptiVisT-Trainingssystem in stichhaltigen Evaluationen mit jungen Medizinstudenten unter Vorhandensein einer Kontrollgruppe statistisch validieren. Bei Best{\"a}tigung dieses Lerneffekts ist eine Ausgr{\"u}ndung als eigenst{\"a}ndiges Unternehmen und Weiterentwicklung des Prototyps mit Ausweitung auf weitere chirurgische Bereiche wie Knie- oder H{\"u}ftchirurgie denkbar. 
Unter Zuhilfenahme von automatischer Segmentierung k{\"o}nnten in Zukunft akut zu behandelnde Br{\"u}che abgebildet, vorab einer tats{\"a}chlichen Operation ge{\"u}bt und anschließend komplikationslos unter reduzierter Operationszeit durchgef{\"u}hrt werden.}, language = {de} } @article{WoehlMaierGehmertetal., author = {W{\"o}hl, Rebecca and Maier, Johannes and Gehmert, Sebastian and Palm, Christoph and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Huber, Michaela}, title = {3D Analysis of Osteosyntheses Material using semi-automated CT Segmentation}, series = {BMC Musculoskeletal Disorders}, volume = {19}, journal = {BMC Musculoskeletal Disorders}, publisher = {Springer Nature}, doi = {10.1186/s12891-018-1975-0}, pages = {1 -- 8}, abstract = {Background Scaphoidectomy and midcarpal fusion can be performed using traditional fixation methods like K-wires, staples, screws or different dorsal (non)locking arthrodesis systems. The aim of this study is to test the Aptus four corner locking plate and to compare the clinical findings to the data revealed by CT scans and semi-automated segmentation. Methods This is a retrospective review of eleven patients suffering from scapholunate advanced collapse (SLAC) or scaphoid non-union advanced collapse (SNAC) wrist, who received a four corner fusion between August 2011 and July 2014. The clinical evaluation consisted of measuring the range of motion (ROM), strength and pain on a visual analogue scale (VAS). Additionally, the Disabilities of the Arm, Shoulder and Hand (QuickDASH) and the Mayo Wrist Score were assessed. A computerized tomography (CT) of the wrist was obtained six weeks postoperatively. After semi-automated segmentation of the CT scans, the models were post-processed and surveyed. Results During the six-month follow-up the mean range of motion (ROM) of the operated wrist was 60°, consisting of 30° extension and 30° flexion. While pain levels decreased significantly, 54\% of grip strength and 89\% of pinch strength were preserved compared to the contralateral healthy wrist. Union could be detected in all CT scans of the wrist. While X-ray pictures obtained postoperatively revealed no pathology, two user-related technical complications were found through the 3D analysis, which correlated to the clinical outcome. Conclusion Due to semi-automated segmentation and 3D analysis it has been shown that the plate design lives up to the manufacturer's promises. Overall, this case series confirmed that the plate can compete with the coexisting techniques concerning clinical outcome, union and complication rate.}, subject = {Handchirurgie}, language = {en} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Rauber, David and R{\"u}ckert, Tobias and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Real-time detection and delineation of tissue during third-space endoscopy using artificial intelligence (AI)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765128}, pages = {S53 -- S54}, abstract = {Aims AI has shown great potential in assisting endoscopists in diagnostics; however, its role in therapeutic endoscopy remains unclear. Endoscopic submucosal dissection (ESD) is a technically demanding intervention with a slow learning curve and relevant risks like bleeding and perforation.
Therefore, we aimed to develop an algorithm for the real-time detection and delineation of relevant structures during third-space endoscopy. Methods 5470 still images from 59 full length videos (47 ESD, 12 POEM) were annotated. 179681 additional unlabeled images were added to the training dataset. Consequently, a DeepLabv3+ neural network architecture was trained with the ECMT semi-supervised algorithm (under review elsewhere). Evaluation of vessel detection was performed on a dataset of 101 standardized video clips from 15 separate third-space endoscopy videos with 200 predefined blood vessels. Results Internal validation yielded an overall mean Dice score of 85\% (68\% for blood vessels, 86\% for submucosal layer, 88\% for muscle layer). On the video test data, the overall vessel detection rate (VDR) was 94\% (96\% for ESD, 74\% for POEM). The median overall vessel detection time (VDT) was 0.32 sec (0.3 sec for ESD, 0.62 sec for POEM). Conclusions Evaluation of the developed algorithm on a video test dataset showed high VDR and quick VDT, especially for ESD. Further research will focus on a possible clinical benefit of the AI application for VDR and VDT during third-space endoscopy.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{FranzKatzkyNeumannetal., author = {Franz, Daniela and Katzky, Uwe and Neumann, Sabine and Perret, Jerome and Hofer, Mathias and Huber, Michaela and Schmitt-R{\"u}th, Stephanie and Haug, Sonja and Weber, Karsten and Prinzen, Martin and Palm, Christoph and Wittenberg, Thomas}, title = {Haptisches Lernen f{\"u}r Cochlea Implantationen}, series = {15. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2016), Tagungsband, 2016, Bern, 29.09. - 01.10.}, booktitle = {15. Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC2016), Tagungsband, 2016, Bern, 29.09. - 01.10.}, pages = {21 -- 26}, abstract = {Die Implantation eines Cochlea Implantates ben{\"o}tigt einen chirurgischen Zugang im Felsenbein und durch die Paukenh{\"o}hle des Patienten. Der Chirurg hat eine eingeschr{\"a}nkte Sicht im Operationsgebiet, die weiterhin viele Risikostrukturen enth{\"a}lt. Um eine Cochlea Implantation sicher und fehlerfrei durchzuf{\"u}hren, ist eine umfangreiche theoretische und praktische (teilweise berufsbegleitende) Fortbildung sowie langj{\"a}hrige Erfahrung notwendig. Unter Nutzung von realen klinischen CT/MRT Daten von Innen- und Mittelohr und der interaktiven Segmentierung der darin abgebildeten Strukturen (Nerven, Cochlea, Geh{\"o}rkn{\"o}chelchen,...) wird im HaptiVisT Projekt ein haptisch-visuelles Trainingssystem f{\"u}r die Implantation von Innen- und Mittelohr-Implantaten realisiert, das als sog. „Serious Game" mit immersiver Didaktik gestaltet wird. Die Evaluierung des Demonstrators hinsichtlich Zweckm{\"a}ßigkeit erfolgt prozessbegleitend und ergebnisorientiert, um m{\"o}gliche technische oder didaktische Fehler vor Fertigstellung des Systems aufzudecken. Drei zeitlich versetzte Evaluationen fokussieren dabei chirurgisch-fachliche, didaktische sowie haptisch-ergonomische Akzeptanzkriterien.}, subject = {Cochlea-Implantat}, language = {de} } @inproceedings{SouzaJrHookPapaetal., author = {Souza Jr., Luis Antonio de and Hook, Christian and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Barrett's Esophagus Analysis Using SURF Features}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. 
Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_34}, pages = {141 -- 146}, abstract = {The development of adenocarcinoma in Barrett's esophagus is difficult to detect by endoscopic surveillance of patients with signs of dysplasia. Computer-assisted diagnosis of endoscopic images (CAD) could therefore be most helpful in the demarcation and classification of neoplastic lesions. In this study we tested the feasibility of a CAD method based on Speeded-Up Robust Features (SURF). A given database containing 100 images from 39 patients served as benchmark for feature-based classification models. Half of the images had previously been diagnosed by five clinical experts as being "cancerous", the other half as "non-cancerous". Cancerous image regions had been visibly delineated (masked) by the clinicians. SURF features acquired from full images as well as from masked areas were utilized for the supervised training and testing of an SVM classifier. The predictive accuracy of the developed CAD system is illustrated by sensitivity and specificity values. Based on full image matching, 0.78 (sensitivity) and 0.82 (specificity) were achieved, while the masked region approach generated results of 0.90 and 0.95, respectively.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{MendelEbigboProbstetal., author = {Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph}, title = {Barrett's Esophagus Analysis Using Convolutional Neural Networks}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_23}, pages = {80 -- 85}, abstract = {We propose an automatic approach for early detection of adenocarcinoma in the esophagus. High-definition endoscopic images (50 cancer, 50 Barrett) are partitioned into a dataset containing approximately equal amounts of patches showing cancerous and non-cancerous regions. A deep convolutional neural network is adapted to the data using a transfer learning approach. The final classification of an image is determined by at least one patch for which the probability of being a cancer patch exceeds a given threshold. The model was evaluated with leave-one-patient-out cross-validation. With sensitivity and specificity of 0.94 and 0.88, respectively, our findings improve recently published results on the same image database considerably. Furthermore, the visualization of the class probabilities of each individual patch indicates that our approach might be extensible to the segmentation domain.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @article{SouzaJrPassosSantanaetal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Santana, Marcos Cleison S.
and Mendel, Robert and Rauber, David and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Layer-selective deep representation to improve esophageal cancer classification}, series = {Medical \& Biological Engineering \& Computing}, volume = {62}, journal = {Medical \& Biological Engineering \& Computing}, publisher = {Springer Nature}, address = {Heidelberg}, doi = {10.1007/s11517-024-03142-8}, pages = {3355 -- 3372}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis.For this task, the deep learning techniques' black-box nature must somehow be lightened up to clarify its promising results. Hence, we aim to investigate the impact of the ResNet-50 deep convolutional design for Barrett's esophagus and adenocarcinoma classification. For such a task, and aiming at proposing a two-step learning technique, the output of each convolutional layer that composes the ResNet-50 architecture was trained and classified for further definition of layers that would provide more impact in the architecture. We showed that local information and high-dimensional features are essential to improve the classification for our task. Besides, we observed a significant improvement when the most discriminative layers expressed more impact in the training and classification of ResNet-50 for Barrett's esophagus and adenocarcinoma classification, demonstrating that both human knowledge and computational processing may influence the correct learning of such a problem.}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Byrne, Michael F. and Messmann, Helmut and Palm, Christoph}, title = {Multimodal imaging for detection and segmentation of Barrett's esophagus-related neoplasia using artificial intelligence}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {10}, edition = {E-Video}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1704-7885}, pages = {1}, abstract = {The early diagnosis of cancer in Barrett's esophagus is crucial for improving the prognosis. However, identifying Barrett's esophagus-related neoplasia (BERN) is challenging, even for experts [1]. Four-quadrant biopsies may improve the detection of neoplasia, but they can be associated with sampling errors. The application of artificial intelligence (AI) to the assessment of Barrett's esophagus could improve the diagnosis of BERN, and this has been demonstrated in both preclinical and clinical studies [2] [3]. In this video demonstration, we show the accurate detection and delineation of BERN in two patients ([Video 1]). In part 1, the AI system detects a mucosal cancer about 20 mm in size and accurately delineates the lesion in both white-light and narrow-band imaging. In part 2, a small island of BERN with high-grade dysplasia is detected and delineated in white-light, narrow-band, and texture and color enhancement imaging. The video shows the results using a transparent overlay of the mucosal cancer in real time as well as a full segmentation preview. 
Additionally, the optical flow allows for the assessment of endoscope movement, something which is inversely related to the reliability of the AI prediction. We demonstrate that multimodal imaging can be applied to the AI-assisted detection and segmentation of even small focal lesions in real time.}, language = {en} } @article{KolevKirchgessnerHoubenetal., author = {Kolev, Kalin and Kirchgeßner, Norbert and Houben, Sebastian and Csisz{\´a}r, Agnes and Rubner, Wolfgang and Palm, Christoph and Eiben, Bj{\"o}rn and Merkel, Rudolf and Cremers, Daniel}, title = {A variational approach to vesicle membrane reconstruction from fluorescence imaging}, series = {Pattern Recognition}, volume = {44}, journal = {Pattern Recognition}, number = {12}, publisher = {Elsevier}, doi = {10.1016/j.patcog.2011.04.019}, pages = {2944 -- 2958}, abstract = {Biological applications like vesicle membrane analysis involve the precise segmentation of 3D structures in noisy volumetric data, obtained by techniques like magnetic resonance imaging (MRI) or laser scanning microscopy (LSM). Dealing with such data is a challenging task and requires robust and accurate segmentation methods. In this article, we propose a novel energy model for 3D segmentation fusing various cues like regional intensity subdivision, edge alignment and orientation information. The uniqueness of the approach consists in the definition of a new anisotropic regularizer, which accounts for the unbalanced slicing of the measured volume data, and the generalization of an efficient numerical scheme for solving the arising minimization problem, based on linearization and fixed-point iteration. We show how the proposed energy model can be optimized globally by making use of recent continuous convex relaxation techniques. The accuracy and robustness of the presented approach are demonstrated by evaluating it on multiple real data sets and comparing it to alternative segmentation methods based on level sets. Although the proposed model is designed with focus on the particular application at hand, it is general enough to be applied to a variety of different segmentation tasks.}, subject = {Dreidimensionale Bildverarbeitung}, language = {en} } @inproceedings{MetzlerAachPalmetal., author = {Metzler, V. and Aach, T. and Palm, Christoph and Lehmann, Thomas M.}, title = {Texture Classification of Graylevel Images by Multiscale Cross-Co-Occurrence Matrices}, series = {Proceedings 15th International Conference on Pattern Recognition (ICPR-2000)}, booktitle = {Proceedings 15th International Conference on Pattern Recognition (ICPR-2000)}, doi = {10.1109/ICPR.2000.906133}, pages = {549 -- 552}, abstract = {Local gray level dependencies of natural images can be modelled by means of co-occurrence matrices containing joint probabilities of gray-level pairs. Texture, however, is a resolution-dependent phenomenon and hence, classification depends on the chosen scale. Since there is no optimal scale for all textures we employ a multiscale approach that acquires textural features at several scales. Thus linear and nonlinear scale-spaces are analyzed by multiscale co-occurrence matrices that describe the statistical behavior of a texture in scale-space. Classification is then performed on the basis of texture features taken from the individual scale with the highest discriminatory power. By considering cross-scale occurrences of gray level pairs, the impact of filters on the feature is described and used for classification of natural textures. 
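To make the co-occurrence idea concrete, here is a minimal, hedged sketch in Python with scikit-image of texture features computed at several scales of a Gaussian pyramid; the distances, angles, number of scales and selected properties are illustrative assumptions rather than the parameters of the original method.

import numpy as np
from skimage.feature import graycomatrix, graycoprops
from skimage.transform import pyramid_gaussian
from skimage.util import img_as_ubyte

def multiscale_glcm_features(gray_image, num_scales=4):
    # compute co-occurrence statistics on each pyramid level and concatenate them
    features = []
    pyramid = pyramid_gaussian(gray_image, max_layer=num_scales - 1, downscale=2)
    for layer in pyramid:
        img = img_as_ubyte(np.clip(layer, 0.0, 1.0))
        glcm = graycomatrix(img, distances=[1], angles=[0.0, np.pi / 2], levels=256, symmetric=True, normed=True)
        for prop in ("contrast", "homogeneity", "energy", "correlation"):
            features.extend(graycoprops(glcm, prop).ravel())
    return np.asarray(features)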
This novel method was found to improve classification rates of the common co-occurrence matrix approach on standard textures significantly.}, language = {en} } @misc{OPUS4-100, title = {Bildverarbeitung f{\"u}r die Medizin 2019}, editor = {Handels, Heinz and Deserno, Thomas M. and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-658-25325-7}, issn = {1431-472X}, doi = {10.1007/978-3-658-25326-4}, abstract = {In den letzten Jahren hat sich der Workshop "Bildverarbeitung f{\"u}r die Medizin" durch erfolgreiche Veranstaltungen etabliert. Ziel ist auch 2019 wieder die Darstellung aktueller Forschungsergebnisse und die Vertiefung der Gespr{\"a}che zwischen Wissenschaftlern, Industrie und Anwendern. Die Beitr{\"a}ge dieses Bandes - einige davon in englischer Sprache - umfassen alle Bereiche der medizinischen Bildverarbeitung, insbesondere Bildgebung und -akquisition, Maschinelles Lernen, Bildsegmentierung und Bildanalyse, Visualisierung und Animation, Zeitreihenanalyse, Computerunterst{\"u}tzte Diagnose, Biomechanische Modellierung, Validierung und Qualit{\"a}tssicherung, Bildverarbeitung in der Telemedizin u.v.m.}, subject = {Bildgebendes Verfahren}, language = {de} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Optical Flow als Methode zur Qualit{\"a}tssicherung KI-unterst{\"u}tzter Untersuchungen von Barrett-{\"O}sophagus und Barrett-{\"O}sophagus assoziierten Neoplasien}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1754997}, abstract = {Einleitung {\"U}berm{\"a}ßige Bewegung im Bild kann die Performance von auf k{\"u}nstlicher Intelligenz (KI) basierenden klinischen Entscheidungsunterst{\"u}tzungssystemen (CDSS) reduzieren. Optical Flow (OF) ist eine Methode zur Lokalisierung und Quantifizierung von Bewegungen zwischen aufeinanderfolgenden Bildern. Ziel Ziel ist es, die Mensch-Computer-Interaktion (HCI) zu verbessern und Endoskopiker die unser KI-System „Barrett-Ampel" zur Unterst{\"u}tzung bei der Beurteilung von Barrett-{\"O}sophagus (BE) verwenden, ein Echtzeit-Feedback zur aktuellen Datenqualit{\"a}t anzubieten. Methodik Dazu wurden unver{\"a}nderte Videos in „Weißlicht" (WL), „Narrow Band Imaging" (NBI) und „Texture and Color Enhancement Imaging" (TXI) von acht endoskopischen Untersuchungen von histologisch gesichertem BE und mit Barrett-{\"O}sophagus assoziierten Neoplasien (BERN) durch unseren KI-Algorithmus analysiert. Der zur Bewertung der Bildqualit{\"a}t verwendete OF beinhaltete die mittlere Magnitude und die Entropie des Histogramms der Winkel. Frames wurden automatisch extrahiert, wenn die vordefinierten Schwellenwerte von 3,0 f{\"u}r die mittlere Magnitude und 9,0 f{\"u}r die Entropie des Histogramms der Winkel {\"u}berschritten wurden. Experten sahen sich zun{\"a}chst die Videos ohne KI-Unterst{\"u}tzung an und bewerteten, ob St{\"o}rfaktoren die Sicherheit mit der eine Diagnose im vorliegenden Fall gestellt werden kann negativ beeinflussen. Anschließend {\"u}berpr{\"u}ften sie die extrahierten Frames. 
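The quality gate described above can be sketched as follows in Python with OpenCV: mean flow magnitude and the entropy of the flow-angle histogram are compared against the predefined thresholds of 3.0 and 9.0; the Farneback flow parameters and the histogram bin count are assumptions and not the authors' exact configuration.

import cv2
import numpy as np

def flow_quality(prev_gray, curr_gray, mag_thresh=3.0, ent_thresh=9.0, bins=1024):
    # dense optical flow between two consecutive grayscale frames
    flow = cv2.calcOpticalFlowFarneback(prev_gray, curr_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    mean_mag = float(np.mean(magnitude))
    hist, _ = np.histogram(angle, bins=bins, range=(0.0, 2.0 * np.pi))
    p = hist / max(int(hist.sum()), 1)
    entropy = float(-np.sum(p[p > 0] * np.log2(p[p > 0])))
    # a frame is flagged for extraction if either threshold is exceeded
    return mean_mag, entropy, (mean_mag > mag_thresh or entropy > ent_thresh)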
Ergebnis Gleichm{\"a}ßige Bewegung in eine Richtung, wie etwa beim Vorschieben des Endoskops, spiegelte sich, bei insignifikant ver{\"a}nderter Entropie, in einer Erh{\"o}hung der Magnitude wider. Chaotische Bewegung, zum Beispiel w{\"a}hrend dem Sp{\"u}len, war mit erh{\"o}hter Entropie assoziiert. Insgesamt war eine unruhige endoskopische Darstellung, Fl{\"u}ssigkeit sowie {\"u}berm{\"a}ßige {\"O}sophagusmotilit{\"a}t mit erh{\"o}htem OF assoziiert und korrelierte mit der Meinung der Experten {\"u}ber die Qualit{\"a}t der Videos. Der OF und die subjektive Wahrnehmung der Experten {\"u}ber die Verwertbarkeit der vorliegenden Bildsequenzen korrelierten direkt proportional. Wenn die vordefinierten Schwellenwerte des OF {\"u}berschritten wurden, war die damit verbundene Bildqualit{\"a}t in 94\% der F{\"a}lle f{\"u}r eine definitive Interpretation auch f{\"u}r Experten unzureichend. Schlussfolgerung OF hat das Potenzial Endoskopiker ein Echtzeit-Feedback {\"u}ber die Qualit{\"a}t des Dateninputs zu bieten und so nicht nur die HCI zu verbessern, sondern auch die optimale Performance von KI-Algorithmen zu erm{\"o}glichen.}, language = {de} } @misc{MeinikheimMendelScheppachetal., author = {Meinikheim, Michael and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Prinz, Friederike and Schwamberger, Tanja and Schlottmann, Jakob and G{\"o}lder, Stefan Karl and Walter, Benjamin and Steinbr{\"u}ck, Ingo and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Einsatz von k{\"u}nstlicher Intelligenz (KI) als Entscheidungsunterst{\"u}tzungssystem f{\"u}r nicht-Experten bei der Beurteilung von Barrett-{\"O}sophagus assoziierten Neoplasien (BERN)}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {4}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0042-1745653}, pages = {251}, abstract = {Einleitung Die sichere Detektion und Charakterisierung von Barrett-{\"O}sophagus assoziierten Neoplasien (BERN) stellt selbst f{\"u}r erfahrene Endoskopiker eine Herausforderung dar. Ziel Ziel dieser Studie ist es, den Add-on Effekt eines k{\"u}nstlichen Intelligenz (KI) Systems (Barrett-Ampel) als Entscheidungsunterst{\"u}zungssystem f{\"u}r Endoskopiker ohne Expertise bei der Untersuchung von BERN zu evaluieren. Material und Methodik Zw{\"o}lf Videos in „Weißlicht" (WL), „narrow-band imaging" (NBI) und „texture and color enhanced imaging" (TXI) von histologisch best{\"a}tigten Barrett-Metaplasien oder BERN wurden von Experten und Untersuchern ohne Barrett-Expertise evaluiert. Die Probanden wurden dazu aufgefordert in den Videos auftauchende BERN zu identifizieren und gegebenenfalls die optimale Biopsiestelle zu markieren. Unser KI-System wurde demselben Test unterzogen, wobei dieses BERN in Echtzeit segmentierte und farblich von umliegendem Epithel differenzierte. Anschließend wurden den Probanden die Videos mit zus{\"a}tzlicher KI-Unterst{\"u}tzung gezeigt. Basierend auf dieser neuen Information, wurden die Probanden zu einer Reevaluation ihrer initialen Beurteilung aufgefordert. Ergebnisse Die „Barrett-Ampel" identifizierte unabh{\"a}ngig von den verwendeten Darstellungsmodi (WL, NBI, TXI) alle BERN. Zwei entz{\"u}ndlich ver{\"a}nderte L{\"a}sionen wurden fehlinterpretiert (Genauigkeit=75\%). W{\"a}hrend Experten vergleichbare Ergebnisse erzielten (Genauigkeit=70,8\%), hatten Endoskopiker ohne Expertise bei der Beurteilung von Barrett-Metaplasien eine Genauigkeit von lediglich 58,3\%. 
Wurden die nicht-Experten allerdings von unserem KI-System unterst{\"u}tzt, erreichten diese eine Genauigkeit von 75\%. Zusammenfassung Unser KI-System hat das Potential als Entscheidungsunterst{\"u}tzungssystem bei der Differenzierung zwischen Barrett-Metaplasie und BERN zu fungieren und so Endoskopiker ohne entsprechende Expertise zu assistieren. Eine Limitation dieser Studie ist die niedrige Anzahl an eingeschlossenen Videos. Um die Ergebnisse dieser Studie zu best{\"a}tigen, m{\"u}ssen randomisierte kontrollierte klinische Studien durchgef{\"u}hrt werden.}, language = {de} } @misc{MeinikheimMendelScheppachetal., author = {Meinikheim, Michael and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Prinz, Friederike and Schwamberger, Tanja and Schlottmann, Jakob and G{\"o}lder, Stefan Karl and Walter, Benjamin and Steinbr{\"u}ck, Ingo and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {INFLUENCE OF AN ARTIFICIAL INTELLIGENCE (AI) BASED DECISION SUPPORT SYSTEM (DSS) ON THE DIAGNOSTIC PERFORMANCE OF NON-EXPERTS IN BARRETT´S ESOPHAGUS RELATED NEOPLASIA (BERN)}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {S 01}, publisher = {Thieme}, doi = {10.1055/s-00000012}, pages = {S39}, abstract = {Aims Barrett´s esophagus related neoplasia (BERN) is difficult to detect and characterize during endoscopy, even for expert endoscopists. We aimed to assess the add-on effect of an Artificial Intelligence (AI) algorithm (Barrett-Ampel) as a decision support system (DSS) for non-expert endoscopists in the evaluation of Barrett's esophagus (BE) and BERN. Methods Twelve videos with multimodal imaging white light (WL), narrow-band imaging (NBI), texture and color enhanced imaging (TXI) of histologically confirmed BE and BERN were assessed by expert and non-expert endoscopists. For each video, endoscopists were asked to identify the area of BERN and decide on the biopsy spot. Videos were assessed by the AI algorithm and regions of BERN were highlighted in real-time by a transparent overlay. Finally, endoscopists were shown the AI videos and asked to either confirm or change their initial decision based on the AI support. Results Barrett-Ampel correctly identified all areas of BERN, irrespective of the imaging modality (WL, NBI, TXI), but misinterpreted two inflammatory lesions (Accuracy=75\%). Expert endoscopists had a similar performance (Accuracy=70,8\%), while non-experts had an accuracy of 58.3\%. When AI was implemented as a DSS, non-expert endoscopists improved their diagnostic accuracy to 75\%. Conclusions AI may have the potential to support non-expert endoscopists in the assessment of videos of BE and BERN. Limitations of this study include the low number of videos used. Randomized clinical trials in a real-life setting should be performed to confirm these results.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{ScheppachMendelProbstetal., author = {Scheppach, Markus W. 
and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {ARTIFICIAL INTELLIGENCE (AI) - ASSISTED VESSEL AND TISSUE RECOGNITION IN THIRD-SPACE ENDOSCOPY}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {S01}, publisher = {Thieme}, doi = {10.1055/s-0042-1745037}, pages = {S175}, abstract = {Aims Third-space endoscopy procedures such as endoscopic submucosal dissection (ESD) and peroral endoscopic myotomy (POEM) are complex interventions with elevated risk of operator-dependent adverse events, such as intra-procedural bleeding and perforation. We aimed to design an artificial intelligence clinical decision support solution (AI-CDSS, "Smart ESD") for the detection and delineation of vessels, tissue structures, and instruments during third-space endoscopy procedures. Methods Twelve full-length third-space endoscopy videos were extracted from the Augsburg University Hospital database. 1686 frames were annotated for the following categories: Submucosal layer, blood vessels, electrosurgical knife and endoscopic instrument. A DeepLabv3+neural network with a 101-layer ResNet backbone was trained and validated internally. Finally, the ability of the AI system to detect visible vessels during ESD and POEM was determined on 24 separate video clips of 7 to 46 seconds duration and showing 33 predefined vessels. These video clips were also assessed by an expert in third-space endoscopy. Results Smart ESD showed a vessel detection rate (VDR) of 93.94\%, while an average of 1.87 false positive signals were recorded per minute. VDR of the expert endoscopist was 90.1\% with no false positive findings. On the internal validation data set using still images, the AI system demonstrated an Intersection over Union (IoU), mean Dice score and pixel accuracy of 63.47\%, 76.18\% and 86.61\%, respectively. Conclusions This is the first AI-CDSS aiming to mitigate operator-dependent limitations during third-space endoscopy. Further clinical trials are underway to better understand the role of AI in such procedures.}, language = {en} } @inproceedings{WeberNunesRauberPalm, author = {Weber Nunes, Danilo and Rauber, David and Palm, Christoph}, title = {Self-supervised 3D Vision Transformer Pre-training for Robust Brain Tumor Classification}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_69}, pages = {298 -- 303}, abstract = {Brain tumors pose significant challenges in neurology, making precise classification crucial for prognosis and treatment planning. This work investigates the effectiveness of a self-supervised learning approach-masked autoencoding (MAE)-to pre-train a vision transformer (ViT) model for brain tumor classification. Our method uses non-domain specific data, leveraging the ADNI and OASIS-3 MRI datasets, which primarily focus on degenerative diseases, for pretraining. The model is subsequently fine-tuned and evaluated on the BraTS glioma and meningioma datasets, representing a novel use of these datasets for tumor classification. 
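As a hedged sketch of the masked-autoencoding step referred to above, the following PyTorch snippet randomly masks patch tokens before the encoder; the masking ratio and tensor shapes are illustrative assumptions and do not reproduce the authors' training setup.

import torch

def random_masking(patch_tokens, mask_ratio=0.75):
    # patch_tokens: tensor of shape (batch, num_patches, embed_dim)
    batch, num_patches, embed_dim = patch_tokens.shape
    num_keep = int(num_patches * (1.0 - mask_ratio))
    noise = torch.rand(batch, num_patches, device=patch_tokens.device)
    ids_shuffle = torch.argsort(noise, dim=1)  # random permutation per sample
    ids_keep = ids_shuffle[:, :num_keep]       # indices of the visible patches
    visible = torch.gather(patch_tokens, 1, ids_keep.unsqueeze(-1).expand(-1, -1, embed_dim))
    mask = torch.ones(batch, num_patches, device=patch_tokens.device)
    mask.scatter_(1, ids_keep, 0.0)            # 0 marks kept patches, 1 marks masked ones
    return visible, mask, ids_shuffle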
The pre-trained MAE ViT model achieves an average F1 score of 0.91 in a 5-fold cross-validation setting, outperforming the nnU-Net encoder trained from scratch, particularly under limited data conditions. These findings highlight the potential of self-supervised MAE in enhancing brain tumor classification accuracy, even with restricted labeled data.}, language = {en} } @inproceedings{WeiherervonRiedheimBrebantetal., author = {Weiherer, Maximilian and von Riedheim, Antonia and Br{\´e}bant, Vanessa and Egger, Bernhard and Palm, Christoph}, title = {iRBSM: A Deep Implicit 3D Breast Shape Model}, series = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025: Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-47422-5_11}, pages = {38 -- 43}, abstract = {We present the first deep implicit 3D shape model of the female breast, building upon and improving the recently proposed Regensburg Breast Shape Model (RBSM). Compared to its PCA-based predecessor, our model employs implicit neural representations; hence, it can be trained on raw 3D breast scans and eliminates the need for computationally demanding non-rigid registration, a task that is particularly difficult for feature-less breast shapes. The resulting model, dubbed iRBSM, captures detailed surface geometry including fine structures such as nipples and belly buttons, is highly expressive, and outperforms the RBSM on different surface reconstruction tasks. Finally, leveraging the iRBSM, we present a prototype application to 3D reconstruct breast shapes from just a single image. Model and code publicly available at https://rbsm.re-mic.de/implicit.}, language = {en} } @misc{OPUS4-6079, title = {Bildverarbeitung f{\"u}r die Medizin 2023}, editor = {Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-41656-0}, issn = {1431-472X}, doi = {10.1007/978-3-658-41657-7}, pages = {317}, abstract = {Seit mehr als 25 Jahren ist der Workshop "Bildverarbeitung f{\"u}r die Medizin" als erfolgreiche Veranstaltung etabliert. Ziel ist auch 2023 wieder die Darstellung aktueller Forschungsergebnisse und die Vertiefung der Gespr{\"a}che zwischen Wissenschaftlern, Industrie und Anwendern. Die Beitr{\"a}ge dieses Bandes - viele davon in englischer Sprache - umfassen alle Bereiche der medizinischen Bildverarbeitung, insbesondere die Bildgebung und -akquisition, Segmentierung und Analyse, Visualisierung und Animation, computerunterst{\"u}tzte Diagnose sowie bildgest{\"u}tzte Therapieplanung und Therapie. 
Hierbei kommen Methoden des maschinellen Lernens, der biomechanischen Modellierung sowie der Validierung und Qualit{\"a}tssicherung zum Einsatz.}, subject = {Bildverarbeitung}, language = {de} } @inproceedings{GutbrodGeislerRauberetal., author = {Gutbrod, Max and Geisler, Benedikt and Rauber, David and Palm, Christoph}, title = {Data Augmentation for Images of Chronic Foot Wounds}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_71}, pages = {261 -- 266}, abstract = {Training data for Neural Networks is often scarce in the medical domain, which often results in models that struggle to generalize and consequently show poor performance on unseen datasets. Generally, adding augmentation methods to the training pipeline considerably enhances a model's performance. Using the dataset of the Foot Ulcer Segmentation Challenge, we analyze two additional augmentation methods in the domain of chronic foot wounds - local warping of wound edges along with projection and blurring of shapes inside wounds. Our experiments show that improvements in the Dice similarity coefficient and Normalized Surface Distance metrics depend on a sensible selection of those augmentation methods.}, language = {en} } @misc{SchroederSemmelmannSiegmundetal., author = {Schroeder, Josef A. and Semmelmann, Matthias and Siegmund, Heiko and Grafe, Claudia and Evert, Matthias and Palm, Christoph}, title = {Improved interactive computer-assisted approach for evaluation of ultrastructural cilia abnormalities}, series = {Ultrastructural Pathology}, volume = {41}, journal = {Ultrastructural Pathology}, number = {1}, doi = {10.1080/01913123.2016.1270978}, pages = {112 -- 113}, subject = {Zilie}, language = {en} } @misc{OPUS4-106, title = {Bildverarbeitung f{\"u}r die Medizin 2018}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-56537-7}, subject = {Bildanalyse}, language = {de} } @misc{OPUS4-7967, title = {Bildverarbeitung f{\"u}r die Medizin 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas M.}, publisher = {Springer Fachmedien Wiesbaden}, address = {Wiesbaden}, isbn = {978-3-658-47421-8}, issn = {1431-472X}, doi = {10.1007/978-3-658-47422-5}, pages = {XXIII, 354}, abstract = {Die Konferenz "BVM - Bildverarbeitung f{\"u}r die Medizin" ist seit vielen Jahren als die nationale Plattform f{\"u}r den Austausch von Ideen und die Diskussion der neuesten Forschungsergebnisse im Bereich der Medizinischen Bildverarbeitung und der K{\"u}nstlichen Intelligenz (KI) etabliert. Auch 2025 werden wir aktuelle Forschungsergebnisse vorstellen und Gespr{\"a}che zwischen (jungen) Wissenschaftler*innen, Industrie und Anwender*innen vertiefen.
Die Beitr{\"a}ge dieses Bandes - die meisten davon in englischer Sprache - umfassen alle Bereiche der medizinischen Bildverarbeitung, insbesondere die Bildgebung und -akquisition, Segmentierung und Analyse, Registrierung, Visualisierung und Animation, computerunterst{\"u}tzte Diagnose sowie bildgest{\"u}tzte Therapieplanung und Therapie. Hierbei kommen Methoden des maschinellen Lernens, der biomechanischen Modellierung sowie der Validierung und Qualit{\"a}tssicherung zum Einsatz. Das Kapitel "Leveraging multiple total body segmentators and anatomy-informed post-processing for segmenting bones in Lung CTs" ist unter einer Creative Commons Attribution 4.0 International License {\"u}ber link.springer.com frei verf{\"u}gbar (Open Access). Die Herausgebenden Prof. Palm forscht im Bereich KI f{\"u}r die Medizin mit einem Schwerpunkt in der Analyse endoskopischer Bilddaten zur computerunterst{\"u}tzten Diagnose und Therapie. Prof. Breininger entwickelt robuste Ans{\"a}tze des maschinellen Lernens in verschiedenen interdisziplin{\"a}ren Bereichen, mit einem Schwerpunkt auf medizinischen Bilddaten. Prof. Deserno forscht in Biosignal- und Bilderzeugung und -verarbeitung, insbesondere in der videobasierten Vitaldatenmessung. Prof. Handels entwickelt problemoptimierte, lernf{\"a}hige Bildverarbeitungsmethoden und integriert diese in hybride Bildverarbeitungssysteme zur Unterst{\"u}tzung der medizinischen Diagnostik und Therapie. Prof. Maier entwickelt Anwendungen in der medizinischen Bildverarbeitung zur Diagnoseunterst{\"u}tzung bis hin zur Schichtbildberechnung durch k{\"u}nstliche Intelligenz. Prof. Maier-Hein forscht im Bereich maschinelles Lernen und entwickelt Open-Source-L{\"o}sungen wie das Medical Imaging Interaction Toolkit (MITK), Kaapana oder das nnU-Net. Prof. em. Tolxdorff ist Experte f{\"u}r maschinelles Lernen, biomedizinisches Datenmanagement, Datenvisualisierung und -analyse sowie Medizinproduktentwicklung in klinischen Workflows.}, subject = {Bildverarbeitung}, language = {de} } @article{HartwigBerletCzempieletal., author = {Hartwig, Regine and Berlet, Maximilian and Czempiel, Tobias and Fuchtmann, Jonas and R{\"u}ckert, Tobias and Feussner, Hubertus and Wilhelm, Dirk}, title = {Bildbasierte Unterst{\"u}tzungsmethoden f{\"u}r die zuk{\"u}nftige Anwendung in der Chirurgie}, series = {Die Chirurgie}, volume = {93}, journal = {Die Chirurgie}, publisher = {Springer}, doi = {10.1007/s00104-022-01668-x}, pages = {956 -- 965}, abstract = {Hintergrund: Die Entwicklung assistiver Technologien wird in den kommenden Jahren nicht nur in der Chirurgie von zunehmender Bedeutung sein. Die Wahrnehmung der Istsituation stellt hierbei die Grundlage jeder autonomen Handlung dar. Hierf{\"u}r k{\"o}nnen unterschiedliche Sensorsysteme genutzt werden, wobei videobasierte Systeme ein besonderes Potenzial aufweisen. Methode: Anhand von Literaturangaben und auf Basis eigener Forschungsarbeiten werden zentrale Aspekte bildbasierter Unterst{\"u}tzungssysteme f{\"u}r die Chirurgie dargestellt. Hierbei wird deren Potenzial, aber auch die Limitationen der Methoden erl{\"a}utert. Ergebnisse: Eine etablierte Anwendung stellt die Phasendetektion chirurgischer Eingriffe dar, f{\"u}r die Operationsvideos mittels neuronaler Netzwerke analysiert werden. Durch eine zeitlich gest{\"u}tzte und transformative Analyse konnten die Ergebnisse der Pr{\"a}diktion j{\"u}ngst deutlich verbessert werden. Aber auch robotische Kameraf{\"u}hrungssysteme nutzen Bilddaten, um das Laparoskop zuk{\"u}nftig autonom zu navigieren. 
Um die Zuverl{\"a}ssigkeit an die hohen Anforderungen in der Chirurgie anzugleichen, m{\"u}ssen diese jedoch durch zus{\"a}tzliche Informationen erg{\"a}nzt werden. Ein vergleichbarer multimodaler Ansatz wurde bereits f{\"u}r die Navigation und Lokalisation bei laparoskopischen Eingriffen umgesetzt. Hierzu werden Videodaten mittels verschiedener Methoden analysiert und diese Ergebnisse mit anderen Sensormodalit{\"a}ten fusioniert. Diskussion: Bildbasierte Unterst{\"u}tzungsmethoden sind bereits f{\"u}r diverse Aufgaben verf{\"u}gbar und stellen einen wichtigen Aspekt f{\"u}r die Chirurgie der Zukunft dar. Um hier jedoch zuverl{\"a}ssig und f{\"u}r autonome Funktionen eingesetzt werden zu k{\"o}nnen, m{\"u}ssen sie zuk{\"u}nftig in multimodale Ans{\"a}tze eingebettet werden, um die erforderliche Sicherheit bieten zu k{\"o}nnen.}, language = {de} } @inproceedings{RueckertRiederFeussneretal., author = {R{\"u}ckert, Tobias and Rieder, Maximilian and Feussner, Hubertus and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Smoke Classification in Laparoscopic Cholecystectomy Videos Incorporating Spatio-temporal Information}, series = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2024: Proceedings, German Workshop on Medical Image Computing, March 10-12, 2024, Erlangen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springeer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-44037-4_78}, pages = {298 -- 303}, abstract = {Heavy smoke development represents an important challenge for operating physicians during laparoscopic procedures and can potentially affect the success of an intervention due to reduced visibility and orientation. Reliable and accurate recognition of smoke is therefore a prerequisite for the use of downstream systems such as automated smoke evacuation systems. Current approaches distinguish between non-smoked and smoked frames but often ignore the temporal context inherent in endoscopic video data. In this work, we therefore present a method that utilizes the pixel-wise displacement from randomly sampled images to the preceding frames determined using the optical flow algorithm by providing the transformed magnitude of the displacement as an additional input to the network. Further, we incorporate the temporal context at evaluation time by applying an exponential moving average on the estimated class probabilities of the model output to obtain more stable and robust results over time. We evaluate our method on two convolutional-based and one state-of-the-art transformer architecture and show improvements in the classification results over a baseline approach, regardless of the network used.}, language = {en} } @unpublished{MendelRueckertWilhelmetal., author = {Mendel, Robert and R{\"u}ckert, Tobias and Wilhelm, Dirk and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Motion-Corrected Moving Average: Including Post-Hoc Temporal Information for Improved Video Segmentation}, doi = {10.48550/arXiv.2403.03120}, pages = {9}, abstract = {Real-time computational speed and a high degree of precision are requirements for computer-assisted interventions. Applying a segmentation network to a medical video processing task can introduce significant inter-frame prediction noise. 
Existing approaches can reduce inconsistencies by including temporal information but often impose requirements on the architecture or dataset. This paper proposes a method to include temporal information in any segmentation model and, thus, a technique to improve video segmentation performance without alterations during training or additional labeling. With Motion-Corrected Moving Average, we refine the exponential moving average between the current and previous predictions. Using optical flow to estimate the movement between consecutive frames, we can shift the prior term in the moving-average calculation to align with the geometry of the current frame. The optical flow calculation does not require the output of the model and can therefore be performed in parallel, leading to no significant runtime penalty for our approach. We evaluate our approach on two publicly available segmentation datasets and two proprietary endoscopic datasets and show improvements over a baseline approach.}, subject = {Deep Learning}, language = {en} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) - assisted vessel and tissue recognition during third space endoscopy (Smart ESD)}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1755110}, abstract = {Clinical setting Third space procedures such as endoscopic submucosal dissection (ESD) and peroral endoscopic myotomy (POEM) are complex minimally invasive techniques with an elevated risk for operator-dependent adverse events such as bleeding and perforation. This risk arises from accidental dissection into the muscle layer or through submucosal blood vessels as the submucosal cutting plane within the expanding resection site is not always apparent. Deep learning algorithms have shown considerable potential for the detection and characterization of gastrointestinal lesions. So-called AI - clinical decision support solutions (AI-CDSS) are commercially available for polyp detection during colonoscopy. Until now, these computer programs have concentrated on diagnostics whereas an AI-CDSS for interventional endoscopy has not yet been introduced. We aimed to develop an AI-CDSS („Smart ESD") for real-time intra-procedural detection and delineation of blood vessels, tissue structures and endoscopic instruments during third-space endoscopic procedures. Characteristics of Smart ESD An AI-CDSS was invented that delineates blood vessels, tissue structures and endoscopic instruments during third-space endoscopy in real-time. The output can be displayed by an overlay over the endoscopic image with different modes of visualization, such as a color-coded semitransparent area overlay, or border tracing (demonstration video). Hereby the optimal layer for dissection can be visualized, which is close above or directly at the muscle layer, depending on the applied technique (ESD or POEM). Furthermore, relevant blood vessels (thickness> 1mm) are delineated. Spatial proximity between the electrosurgical knife and a blood vessel triggers a warning signal. By this guidance system, inadvertent dissection through blood vessels could be averted. 
Technical specifications A DeepLabv3+ neural network architecture with KSAC and a 101-layer ResNeSt backbone was used for the development of Smart ESD. It was trained and validated with 2565 annotated still images from 27 full length third-space endoscopic videos. The annotation classes were blood vessel, submucosal layer, muscle layer, electrosurgical knife and endoscopic instrument shaft. A test on a separate data set yielded an intersection over union (IoU) of 68\%, a Dice Score of 80\% and a pixel accuracy of 87\%, demonstrating a high overlap between expert and AI segmentation. Further experiments on standardized video clips showed a mean vessel detection rate (VDR) of 85\% with values of 92\%, 70\% and 95\% for POEM, rectal ESD and esophageal ESD respectively. False positive measurements occurred 0.75 times per minute. 7 out of 9 vessels which caused intraprocedural bleeding were caught by the algorithm, as well as both vessels which required hemostasis via hemostatic forceps. Future perspectives Smart ESD performed well for vessel and tissue detection and delineation on still images, as well as on video clips. During a live demonstration in the endoscopy suite, clinical applicability of the innovation was examined. The lag time for processing of the live endoscopic image was too short to be visually detectable for the interventionist. Even though the algorithm could not be applied during actual dissection by the interventionist, Smart ESD appeared readily deployable during visual assessment by ESD experts. Therefore, we plan to conduct a clinical trial in order to obtain CE-certification of the algorithm. This new technology may improve procedural safety and speed, as well as training of modern minimally invasive endoscopic resection techniques.}, subject = {Bildgebendes Verfahren}, language = {en} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Barrett-Ampel}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1755109}, abstract = {Hintergrund Adenokarzinome des {\"O}sophagus sind bis heute mit einer infausten Prognose vergesellschaftet (1). Obwohl Endoskopiker mit Barrett-{\"O}sophagus als Pr{\"a}kanzerose konfrontiert werden, ist vor allem f{\"u}r nicht-Experten die Differenzierung zwischen Barrett-{\"O}sophagus ohne Dysplasie und assoziierten Neoplasien mitunter schwierig. Existierende Biopsieprotokolle (z.B. Seattle Protokoll) sind oftmals unzuverl{\"a}ssig (2). Eine fr{\"u}hzeitige Diagnose des Adenokarzinoms ist allerdings von fundamentaler Bedeutung f{\"u}r die Prognose des Patienten. Forschungsansatz Auf der Grundlage dieser Problematik, entwickelten wir in Kooperation mit dem Forschungslabor „Regensburg Medical Image Computing (ReMIC)" der OTH Regensburg ein auf k{\"u}nstlicher Intelligenz (KI) basiertes Entscheidungsunterst{\"u}tzungssystem (CDSS). Das auf einer DeepLabv3+ neuronalen Netzwerkarchitektur basierende CDSS differenziert mittels Mustererkennung Barrett-{\"O}sophagus ohne Dysplasie von Barrett-{\"O}sophagus mit Dysplasie bzw. Neoplasie („Klassifizierung"). Hierbei werden gemittelte Ausgabewahrscheinlichkeiten mit einem vom Benutzer definierten Schwellenwert verglichen.
F{\"u}r Vorhersagen, die den Schwellenwert {\"u}berschreiten, berechnen wir die Kontur der Region und die Fl{\"a}che. Sobald die vorhergesagte L{\"a}sion eine bestimmte Gr{\"o}ße in der Eingabe {\"u}berschreitet, heben wir sie und ihren Umriss hervor. So erm{\"o}glicht eine farbkodierte Visualisierung eine Abgrenzung zwischen Dysplasie bzw. Neoplasie und normalem Barrett-Epithel („Segmentierung"). In einer Studie an Bildern in „Weißlicht" (WL) und „Narrow Band Imaging" (NBI) demonstrierten wir eine Sensitivit{\"a}t von mehr als 90\% und eine Spezifit{\"a}t von mehr als 80\% (3). In einem n{\"a}chsten Schritt, differenzierte unser KI-Algorithmus Barrett- Metaplasien von assoziierten Neoplasien anhand von zuf{\"a}llig abgegriffenen Bildern in Echtzeit mit einer Accuracy von 89.9\% (4). Darauf folgend, entwickelten wir unser System dahingehend weiter, dass unser Algorithmus nun auch dazu in der Lage ist, Untersuchungsvideos in WL, NBI und „Texture and Color Enhancement Imaging" (TXI) in Echtzeit zu analysieren (5). Aktuell f{\"u}hren wir eine Studie in einem randomisiert-kontrollierten Ansatz an unver{\"a}nderten Untersuchungsvideos in WL, NBI und TXI durch. Ausblick Um Patienten mit aus Barrett-Metaplasien resultierenden Neoplasien fr{\"u}hestm{\"o}glich an „High-Volume"-Zentren {\"u}berweisen zu k{\"o}nnen, soll unser KI-Algorithmus zuk{\"u}nftig vor allem Endoskopiker ohne extensive Erfahrung bei der Beurteilung von Barrett- {\"O}sophagus in der Krebsfr{\"u}herkennung unterst{\"u}tzen.}, subject = {Speiser{\"o}hrenkrebs}, language = {de} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Intraprozedurale Strukturerkennung bei Third-Space Endoskopie mithilfe eines Deep-Learning Algorithmus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {04}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0042-1745652}, pages = {e250-e251}, abstract = {Einleitung Third-Space Interventionen wie die endoskopische Submukosadissektion (ESD) und die perorale endoskopische Myotomie (POEM) sind technisch anspruchsvoll und mit einem erh{\"o}hten Risiko f{\"u}r intraprozedurale Komplikationen wie Blutung oder Perforation assoziiert. Moderne Computerprogramme zur Unterst{\"u}tzung bei diagnostischen Entscheidungen werden unter Einsatz von k{\"u}nstlicher Intelligenz (KI) in der Endoskopie bereits erfolgreich eingesetzt. Ziel der vorliegenden Arbeit war es, relevante anatomische Strukturen mithilfe eines Deep-Learning Algorithmus zu detektieren und segmentieren, um die Sicherheit und Anwendbarkeit von ESD und POEM zu erh{\"o}hen. Methoden Zw{\"o}lf Videoaufnahmen in voller L{\"a}nge von Third-Space Endoskopien wurden aus der Datenbank des Universit{\"a}tsklinikums Augsburg extrahiert. 1686 Einzelbilder wurden f{\"u}r die Kategorien Submukosa, Blutgef{\"a}ß, Dissektionsmesser und endoskopisches Instrument annotiert und segmentiert. Mit diesem Datensatz wurde ein DeepLabv3+neuronales Netzwerk auf der Basis eines ResNet mit 101 Schichten trainiert und intern anhand der Parameter Intersection over Union (IoU), Dice Score und Pixel Accuracy validiert. Die F{\"a}higkeit des Algorithmus zur Gef{\"a}ßdetektion wurde anhand von 24 Videoclips mit einer Spieldauer von 7 bis 46 Sekunden mit 33 vordefinierten Gef{\"a}ßen evaluiert. 
Anhand dieses Tests wurde auch die Gef{\"a}ßdetektionsrate eines Experten in der Third-Space Endoskopie ermittelt. Ergebnisse Der Algorithmus zeigte eine Gef{\"a}ßdetektionsrate von 93,94\% mit einer mittleren Rate an falsch positiven Signalen von 1,87 pro Minute. Die Gef{\"a}ßdetektionsrate des Experten lag bei 90,1\% ohne falsch positive Ergebnisse. In der internen Validierung an Einzelbildern wurde eine IoU von 63,47\%, ein mittlerer Dice Score von 76,18\% und eine Pixel Accuracy von 86,61\% ermittelt. Zusammenfassung Dies ist der erste KI-Algorithmus, der f{\"u}r den Einsatz in der therapeutischen Endoskopie entwickelt wurde. Pr{\"a}limin{\"a}re Ergebnisse deuten auf eine mit Experten vergleichbare Detektion von Gef{\"a}ßen w{\"a}hrend der Untersuchung hin. Weitere Untersuchungen sind n{\"o}tig, um die Leistung des Algorithmus im Vergleich zum Experten genauer zu eruieren sowie einen m{\"o}glichen klinischen Nutzen zu ermitteln.}, language = {de} } @article{SouzaJrPachecoPassosetal., author = {Souza Jr., Luis Antonio de and Pacheco, Andr{\´e} G.C. and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {DeepCraftFuse: visual and deeply-learnable features work better together for esophageal cancer detection in patients with Barrett's esophagus}, series = {Neural Computing and Applications}, volume = {36}, journal = {Neural Computing and Applications}, publisher = {Springer}, address = {London}, doi = {10.1007/s00521-024-09615-z}, pages = {10445 -- 10459}, abstract = {Limitations in computer-assisted diagnosis include lack of labeled data and inability to model the relation between what experts see and what computers learn. Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency level must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis. While deep learning techniques are broad so that unseen information might help learn patterns of interest, human insights to describe objects of interest help in decision-making. This paper proposes a novel approach, DeepCraftFuse, to address the challenge of combining information provided by deep networks with visual-based features to significantly enhance the correct identification of cancerous tissues in patients affected with Barrett's esophagus (BE). We demonstrate that DeepCraftFuse outperforms state-of-the-art techniques on private and public datasets, reaching results of around 95\% when distinguishing patients affected by BE that is either positive or negative to esophageal cancer.}, subject = {Deep Learning}, language = {en} } @article{SouzaPachecodeSouzaetal., author = {Souza, Luis A. and Pacheco, Andr{\´e} G.C. and de Souza, Alberto F. 
and Oliveira-Santos, Thiago and Badue, Claudine and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {TransConv: a lightweight architecture based on transformers and convolutional neural networks for adenocarcinoma and Barrett's esophagus identification}, series = {Neural Computing and Applications}, journal = {Neural Computing and Applications}, number = {37}, publisher = {Springer}, doi = {10.1007/s00521-025-11299-y}, pages = {15535 -- 15546}, abstract = {Barrett's esophagus, also known as BE, is commonly associated with repeated exposure to stomach acid. If not treated properly, it may evolve into esophageal adenocarcinoma, aka esophageal cancer. This paper proposes TransConv, a hybrid architecture that benefits from features learned by pre-trained vision transformers (ViTs) and convolutional neural networks (CNNs), followed by a shallow neural network composed of three normalizations, ReLU activations, and fully connected layers, and a SoftMax head to distinguish between BE and esophageal cancer. TransConv is designed to be training-lightweight, and for the ViT and CNN backbone models, weights are kept frozen during training, i.e., the primary goal of TransConv is to learn the weights of the fully connected layer from both backbones only, avoiding the burden of updating their weights but still learning their final descriptions for the lightweight convolutional model. We report promising results with low computational training costs in two datasets, one public and another private. From our achievements, TransConv was able to deliver balanced accuracy results around 85\% and 86\% for each evaluated dataset, respectively, in a design that required only 50 epochs of model training, a very reduced number compared to state-of-the-art conducted studies in the same domain.}, language = {en} } @unpublished{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, doi = {10.48550/arXiv.2304.13014}, pages = {25}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images. Especially the determination of the position and type of the instruments is of great interest here. Current work involves both spatial and temporal information with the idea, that the prediction of movement of surgical tools over time may improve the quality of final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify datasets used for method development and evaluation, as well as quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images. The paper focuses on methods that work purely visually without attached markers of any kind on the instruments, taking into account both single-frame segmentation approaches as well as those involving temporal information. A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing available potential for future developments. The publications considered were identified through the platforms Google Scholar, Web of Science, and PubMed. 
The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking" and result in 408 articles published between 2015 and 2022 from which 109 were included using systematic selection criteria.}, language = {en} } @techreport{AlexEichingerHeyderetal., type = {Working Paper}, author = {Alex, Karla and Eichinger, Johanna and Heyder, Clemens and Kandlbinder, Agnes and Loder, Sandra and Rolfes, Vasilija and R{\"o}ttger, Sara and Scharf, Anna and Scorna, Ulrike and Weigold, Stefanie}, title = {Abstractband I: In-vitro-Gametogenese (IVG) und artifizieller Uterus (AU) - Problemausl{\"o}ser oder Probleml{\"o}ser? Ethische, soziale und rechtliche Aspekte zuk{\"u}nftiger reproduktionsmedizinischer Verfahren}, editor = {Cerullo, Laura and Gerhards, Helene and Weber, Karsten}, doi = {10.13140/RG.2.2.30303.53928}, pages = {21}, language = {de} } @misc{OrtnerBluemelLerneretal.2024, author = {Ortner, Martina and Bl{\"u}mel, Edwina and Lerner, Thomas and Stiegler, Elisabeth and Weger, Yannick and Graf, Michaela and Penn, Karolin and Resch, Maximilian and Riedl, Claudia and Schnurrer, Stefanie and Wilhelm, Lea and Beer, Theresa and K{\"o}kten, Rabia}, title = {BABS-Mi}, volume = {5}, editor = {Ortner, Martina}, publisher = {Ostbayerische Technische Hochschule Regensburg}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, doi = {10.35096/othr/pub-7707}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-77073}, pages = {55}, year = {2024}, abstract = {In dieser Ausgabe besch{\"a}ftigen wir uns mit der Frage, was es heißt, einen so genannten Migrationshintergrund zu haben. Ist dieser wie ein Schatten, der die Person immer begleitet? Was bedeutet das f{\"u}r die Person? Wie reagiert das Umfeld? Im berufsbegleitenden Studiengang Soziale Arbeit (BABS) an der OTH Regensburg gibt es im neunten Semester eine Lehrveranstaltung mit dem Titel „Gesellschaft und Migration" und im zehnten Semester mit dem Titel „Migrationssensible Soziale Arbeit". Beide Lehrveranstaltungen haben jeweils einen zeitlichen Umfang von sechs Semesterwochenstunden. Die Studierenden gingen in beiden Veranstaltungen, also {\"u}ber zwei Semester, der Frage nach, ob durch Begriffe wie Migrationshintergrund, Migrationsbiografie etc. Fremdheit konstruiert wird. Ist f{\"u}r die Person dieser Teil der Biografie oder der Familiengeschichte {\"u}berhaupt relevant oder interessant? Und welche Bedeutung hat dieser Aspekt des Lebens f{\"u}r das Umfeld, z.B. der regelm{\"a}ßige Besuch einer Ausl{\"a}nderbeh{\"o}rde zur Verl{\"a}ngerung des Aufenthaltes, die Kommentierung ihrer Deutschkenntnisse und/oder die Frage, wo sie denn eigentlich herkommen? Die Ergebnisse k{\"o}nnen Sie hier lesen.}, subject = {Migration}, language = {de} } @misc{CurrleHaugWeber, author = {Currle, Edda and Haug, Sonja and Weber, Karsten}, title = {Artificial Intelligence and anamnesis: Results of a population survey}, doi = {10.13140/RG.2.2.31952.01285}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-83325}, abstract = {Digital procedures are increasingly implemented to enhance efficiency in healthcare, with Artificial Intelligence (AI) — particularly chatbots — showing significant potential for future applications. However, little is known about patients' acceptance of such technologies. The study "AI and Anamnesis" addresses this gap by investigating the German population's acceptance of and willingness to use AI-driven technologies for digital anamnesis.
This poster presents results from the first wave of the survey, offering initial insights into public attitudes and potential barriers to adoption.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @article{LehlePhilippKrenkeletal., author = {Lehle, Karla and Philipp, Alois and Krenkel, Lars and Gruber, Michael and Hiller, Karl-Anton and M{\"u}ller, Thomas and Lubnow, Matthias}, title = {Thrombocytopenia During Venovenous Extracorporeal Membrane Oxygenation in Adult Patients With Bacterial, Viral, and COVID-19 Pneumonia}, series = {ASAIO Journal}, journal = {ASAIO Journal}, publisher = {Wolters Kluwer}, issn = {1058-2916}, doi = {10.1097/MAT.0000000000002383}, abstract = {Contact of blood with artificial surfaces triggers platelet activation. The aim was to compare platelet kinetics after venovenous extracorporeal membrane oxygenation (V-V ECMO) start and after system exchange in different etiologies of acute lung failure. Platelet counts and coagulation parameters were analyzed from adult patients with long and exchange-free (≥8 days) ECMO runs (n = 330) caused by bacterial (n = 142), viral (n = 76), or coronavirus disease 2019 (COVID-19) (n = 112) pneumonia. A subpopulation requiring a system exchange and with long, exchange-free runs of the second oxygenator (≥7 days) (n = 110) was analyzed analogously. Patients with COVID-19 showed the highest platelet levels before ECMO implantation. Independent of the underlying disease and ECMO type, platelet counts decreased significantly within 24 hours and reached a steady state after 5 days. In the subpopulation, at the day of a system exchange, platelet counts were lower compared with ECMO start, but without differences between underlying diseases. Subsequently, platelets remained unchanged in the bacterial pneumonia group, but increased in the COVID-19 and viral pneumonia groups within 2-4 days, whereas D-dimers decreased and fibrinogen levels increased. Thus, overall platelet counts on V-V ECMO show disease-specific initial dynamics followed by an ongoing consumption by the ECMO device, which is not boosted by new artificial surfaces after a system exchange.}, language = {en} } @misc{Weber, author = {Weber, Karsten}, title = {KI-Nutzung f{\"u}r (psychisch) kranke Menschen: Auswirkungen auf das Professionsverst{\"a}ndnis der Fachkr{\"a}fte}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {Elektrisch + digital = nachhaltig? 
Welche Nachhaltigkeitsgewinne haben neue Mobilit{\"a}tformen?}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {Medizin, Mobilit{\"a}t, Medien: Digitaler Wandel als Chance und Herausforderung}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {Der Mars zwischen Science und Fiction}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {M{\"o}gliche Zuk{\"u}nfte der K{\"u}nstlichen Intelligenz}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {Der Mensch nach Maß?!}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {The weapons of war and conflict are technology}, language = {en} } @misc{Weber, author = {Weber, Karsten}, title = {Social science research, research on acceptance, and applied ethics on technology and artificial intelligence in the health sector}, language = {en} } @misc{Weber, author = {Weber, Karsten}, title = {KI und Roboter in der Science Fiction - eine (augenzwinkernde) Einf{\"u}hrung zur KI-Forschung}, language = {de} } @misc{Weber, author = {Weber, Karsten}, title = {Die {\"u}blichen Verd{\"a}chtigen - automatisierte Strafverfolgung aus Perspektive der Technikfolgenabsch{\"a}tzung und Technikbewertung}, language = {de} }