@article{DesernoHandelsMaierHeinetal., author = {Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Mersmann, Sven and Palm, Christoph and Tolxdorff, Thomas and Wagenknecht, Gudrun and Wittenberg, Thomas}, title = {Viewpoints on Medical Image Processing}, series = {Current Medical Imaging Reviews}, volume = {9}, journal = {Current Medical Imaging Reviews}, number = {2}, doi = {10.2174/1573405611309020002}, pages = {79 -- 88}, abstract = {Medical image processing provides core innovation for medical imaging. This paper focuses on recent developments from science to applications, analyzing the past fifteen years of history of the proceedings of the German annual meeting on medical image processing (BVM). Furthermore, some members of the program committee present their personal points of view: (i) multi-modality for imaging and diagnosis, (ii) analysis of diffusion-weighted imaging, (iii) model-based image analysis, (iv) registration of section images, (v) from images to information in digital endoscopy, and (vi) virtual reality and robotics. Medical imaging and medical image computing is seen as a field of rapid development with clear trends towards integrated applications in diagnostics, treatment planning and treatment.}, subject = {Bildgebendes Verfahren}, language = {en} } @inproceedings{WeberDoenitzBrawanskietal., author = {Weber, Joachim and Doenitz, Christian and Brawanski, Alexander and Palm, Christoph}, title = {Data-Parallel MRI Brain Segmentation in Clinical Use}, series = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-46224-9_67}, pages = {389 -- 394}, abstract = {Structural MRI brain analysis and segmentation is a crucial part of the daily routine in neurosurgery for intervention planning. As an example, the free software FSL-FAST (FMRIB Software Library - FMRIB's Automated Segmentation Tool) in version 4 is used for segmentation of brain tissue types. To speed up the segmentation procedure by parallel execution, we transferred FSL-FAST to a General Purpose Graphics Processing Unit (GPGPU) using the Open Computing Language (OpenCL) [1]. The necessary steps for parallelization resulted in substantially different and less useful results. Therefore, the underlying methods were revised and adapted, yielding computational overhead. Nevertheless, we achieved a speed-up factor of 3.59 from CPU to GPGPU execution, while providing similarly useful or even better results.}, subject = {Kernspintomografie}, language = {en} } @inproceedings{SzaloZehnerPalm, author = {Szalo, Alexander Eduard and Zehner, Alexander and Palm, Christoph}, title = {GraphMIC: Medizinische Bildverarbeitung in der Lehre}, series = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-46224-9_68}, pages = {395 -- 400}, abstract = {The teaching of medical image processing imparts knowledge across a broad spectrum of methods.
Beyond the fundamentals of the individual methods, a feeling for a suitable execution order and its effect on medical image data is to be developed. The complexity of the methods requires advanced programming skills, so that even simple operations entail considerable programming effort. The software GraphMIC provides image processing operations in the form of interactive nodes and allows arranging, parameterizing and executing complex processing sequences in a graph. Through the focus on the design of a pipeline, away from language- and framework-specific implementation details, fundamental principles of image processing can be learned in an illustrative manner. In this contribution, we contrast visual programming with GraphMIC with the native implementation of equivalent functions. The application, developed in C++, is based on Qt, ITK, OpenCV, VTK and MITK.}, subject = {Bildverarbeitung}, language = {de} } @inproceedings{WoehlHuberLoibletal., author = {W{\"o}hl, Rebecca and Huber, Michaela and Loibl, Markus and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Palm, Christoph}, title = {The Impact of Semi-Automated Segmentation and 3D Analysis on Testing New Osteosynthesis Material}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_30}, pages = {122 -- 127}, abstract = {A new protocol for testing osteosynthesis material postoperatively, combining semi-automated segmentation and 3D analysis of surface meshes, is proposed. By various steps of transformation and measuring, objective data can be collected. In this study the specifications of a locking plate used for mediocarpal arthrodesis of the wrist were examined. The results show that union of the lunate, triquetrum, hamate and capitate was achieved and that the plate is comparable to coexisting arthrodesis systems. Additionally, it was shown that the complications detected correlate with the clinical outcome. In summary, this protocol is considered beneficial and should be taken into account in further studies.}, subject = {Osteosynthese}, language = {en} } @inproceedings{MendelEbigboProbstetal., author = {Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph}, title = {Barrett's Esophagus Analysis Using Convolutional Neural Networks}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_23}, pages = {80 -- 85}, abstract = {We propose an automatic approach for early detection of adenocarcinoma in the esophagus. High-definition endoscopic images (50 cancer, 50 Barrett) are partitioned into a dataset containing approximately equal amounts of patches showing cancerous and non-cancerous regions. A deep convolutional neural network is adapted to the data using a transfer learning approach.
The final classification of an image is determined by at least one patch for which the probability of being a cancer patch exceeds a given threshold. The model was evaluated with leave-one-patient-out cross-validation. With a sensitivity and specificity of 0.94 and 0.88, respectively, our findings considerably improve recently published results on the same image database. Furthermore, the visualization of the class probabilities of each individual patch indicates that our approach might be extensible to the segmentation domain.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @inproceedings{SouzaJrHookPapaetal., author = {Souza Jr., Luis Antonio de and Hook, Christian and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Barrett's Esophagus Analysis Using SURF Features}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_34}, pages = {141 -- 146}, abstract = {The development of adenocarcinoma in Barrett's esophagus is difficult to detect by endoscopic surveillance of patients with signs of dysplasia. Computer-assisted diagnosis (CAD) of endoscopic images could therefore be most helpful in the demarcation and classification of neoplastic lesions. In this study we tested the feasibility of a CAD method based on Speeded up Robust Feature detection (SURF). A given database containing 100 images from 39 patients served as benchmark for feature-based classification models. Half of the images had previously been diagnosed by five clinical experts as being "cancerous", the other half as "non-cancerous". Cancerous image regions had been visibly delineated (masked) by the clinicians. SURF features acquired from full images as well as from masked areas were utilized for the supervised training and testing of an SVM classifier. The predictive accuracy of the developed CAD system is illustrated by sensitivity and specificity values. The results based on full image matching were 0.78 (sensitivity) and 0.82 (specificity), while the masked region approach generated results of 0.90 and 0.95, respectively.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{FranzDreherPrinzenetal., author = {Franz, Daniela and Dreher, Maria and Prinzen, Martin and Teßmann, Matthias and Palm, Christoph and Katzky, Uwe and Perret, Jerome and Hofer, Mathias and Wittenberg, Thomas}, title = {CT-basiertes virtuelles Fr{\"a}sen am Felsenbein}, series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-56537-7}, doi = {10.1007/978-3-662-56537-7_51}, pages = {176 -- 181}, abstract = {As part of the development of a haptic-visual training system for milling at the petrous bone, a haptic arm and an autostereoscopic 3D monitor are used to enable surgeons to virtually manipulate bony structures in the context of a so-called serious game.
Among other things, resident physicians should be able to practice milling at the petrous bone for the surgical placement of a cochlear implant as part of their training. For this purpose, the visualization of the virtual milling has to be modeled, implemented and evaluated in real time and as realistically as possible. We use several raycasting methods with linear and nearest-neighbor interpolation and compare the visual quality and frame rates of the methods. All compared methods are real-time capable but differ in their visual quality.}, subject = {Felsenbein}, language = {de} } @article{WoehlMaierGehmertetal., author = {W{\"o}hl, Rebecca and Maier, Johannes and Gehmert, Sebastian and Palm, Christoph and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Huber, Michaela}, title = {3D Analysis of Osteosyntheses Material using semi-automated CT Segmentation}, series = {BMC Musculoskeletal Disorders}, volume = {19}, journal = {BMC Musculoskeletal Disorders}, publisher = {Springer Nature}, doi = {10.1186/s12891-018-1975-0}, pages = {1 -- 8}, abstract = {Background: Scaphoidectomy and midcarpal fusion can be performed using traditional fixation methods like K-wires, staples, screws or different dorsal (non)locking arthrodesis systems. The aim of this study is to test the Aptus four corner locking plate and to compare the clinical findings to the data revealed by CT scans and semi-automated segmentation. Methods: This is a retrospective review of eleven patients suffering from scapholunate advanced collapse (SLAC) or scaphoid non-union advanced collapse (SNAC) wrist, who received a four corner fusion between August 2011 and July 2014. The clinical evaluation consisted of measuring the range of motion (ROM), strength and pain on a visual analogue scale (VAS). Additionally, the Disabilities of the Arm, Shoulder and Hand (QuickDASH) and the Mayo Wrist Score were assessed. A computerized tomography (CT) of the wrist was obtained six weeks postoperatively. After semi-automated segmentation of the CT scans, the models were post-processed and surveyed. Results: During the six-month follow-up the mean range of motion (ROM) of the operated wrist was 60°, consisting of 30° extension and 30° flexion. While pain levels decreased significantly, 54\% of grip strength and 89\% of pinch strength were preserved compared to the contralateral healthy wrist. Union could be detected in all CT scans of the wrist. While X-ray pictures obtained postoperatively revealed no pathology, two user-related technical complications were found through the 3D analysis, which correlated with the clinical outcome. Conclusion: Due to semi-automated segmentation and 3D analysis it has been proved that the plate design can live up to the manufacturer's promises. Overall, this case series confirmed that the plate can compete with the coexisting techniques concerning clinical outcome, union and complication rate.}, subject = {Handchirurgie}, language = {en} } @inproceedings{MaierHuberKatzkyetal., author = {Maier, Johannes and Huber, Michaela and Katzky, Uwe and Perret, Jerome and Wittenberg, Thomas and Palm, Christoph}, title = {Force-Feedback-assisted Bone Drilling Simulation Based on CT Data}, series = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13.
M{\"a}rz 2018 in Erlangen}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 11. bis 13. M{\"a}rz 2018 in Erlangen}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-56537-7_78}, pages = {291 -- 296}, abstract = {In order to fix a fracture using minimally invasive surgery approaches, surgeons are drilling complex and tiny bones with a 2 dimensional X-ray as single imaging modality in the operating room. Our novel haptic force-feedback and visual assisted training system will potentially help hand surgeons to learn the drilling procedure in a realistic visual environment. Within the simulation, the collision detection as well as the interaction between virtual drill, bone voxels and surfaces are important. In this work, the chai3d collision detection and force calculation algorithms are combined with a physics engine to simulate the bone drilling process. The chosen Bullet-Physics-Engine provides a stable simulation of rigid bodies, if the collision model of the drill and the tool holder is generated as a compound shape. Three haptic points are added to the K-wire tip for removing single voxels from the bone. For the drilling process three modes are proposed to emulate the different phases of drilling in restricting the movement of a haptic device.}, subject = {Handchirurgie}, language = {en} } @article{GrassmannMengelkampBrandletal., author = {Graßmann, Felix and Mengelkamp, Judith and Brandl, Caroline and Harsch, Sebastian and Zimmermann, Martina E. and Linkohr, Birgit and Peters, Annette and Heid, Iris M. and Palm, Christoph and Weber, Bernhard H. F.}, title = {A Deep Learning Algorithm for Prediction of Age-Related Eye Disease Study Severity Scale for Age-Related Macular Degeneration from Color Fundus Photography}, series = {Ophtalmology}, volume = {125}, journal = {Ophtalmology}, number = {9}, publisher = {Elsevier}, doi = {10.1016/j.ophtha.2018.02.037}, pages = {1410 -- 1420}, abstract = {Purpose Age-related macular degeneration (AMD) is a common threat to vision. While classification of disease stages is critical to understanding disease risk and progression, several systems based on color fundus photographs are known. Most of these require in-depth and time-consuming analysis of fundus images. Herein, we present an automated computer-based classification algorithm. Design Algorithm development for AMD classification based on a large collection of color fundus images. Validation is performed on a cross-sectional, population-based study. Participants. We included 120 656 manually graded color fundus images from 3654 Age-Related Eye Disease Study (AREDS) participants. AREDS participants were >55 years of age, and non-AMD sight-threatening diseases were excluded at recruitment. In addition, performance of our algorithm was evaluated in 5555 fundus images from the population-based Kooperative Gesundheitsforschung in der Region Augsburg (KORA; Cooperative Health Research in the Region of Augsburg) study. Methods. We defined 13 classes (9 AREDS steps, 3 late AMD stages, and 1 for ungradable images) and trained several convolution deep learning architectures. An ensemble of network architectures improved prediction accuracy. An independent dataset was used to evaluate the performance of our algorithm in a population-based study. Main Outcome Measures. κ Statistics and accuracy to evaluate the concordance between predicted and expert human grader classification. Results. 
A network ensemble of 6 different neural net architectures predicted the 13 classes in the AREDS test set with a quadratic weighted κ of 92\% (95\% confidence interval, 89\%-92\%) and an overall accuracy of 63.3\%. In the independent KORA dataset, images wrongly classified as AMD were mainly the result of a macular reflex observed in young individuals. By restricting the KORA analysis to individuals >55 years of age and prior exclusion of other retinopathies, the weighted and unweighted κ increased to 50\% and 63\%, respectively. Importantly, the algorithm detected 84.2\% of all fundus images with definite signs of early or late AMD. Overall, 94.3\% of healthy fundus images were classified correctly. Conclusions: Our deep learning algorithm revealed a weighted κ outperforming human graders in the AREDS study and is suitable for classifying AMD fundus images in other datasets of individuals >55 years of age.}, subject = {Senile Makuladegeneration}, language = {en} } @article{SouzaJrPalmMendeletal., author = {Souza Jr., Luis Antonio de and Palm, Christoph and Mendel, Robert and Hook, Christian and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Weber, Silke A. T. and Papa, Jo{\~a}o Paulo}, title = {A survey on Barrett's esophagus analysis using machine learning}, series = {Computers in Biology and Medicine}, volume = {96}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2018.03.014}, pages = {203 -- 213}, abstract = {This work presents a systematic review concerning recent studies and technologies of machine learning for Barrett's esophagus (BE) diagnosis and treatment. The use of artificial intelligence is a brand new and promising way to evaluate such a disease. We compile works published in well-established databases, such as Science Direct, IEEEXplore, PubMed, Plos One, Multidisciplinary Digital Publishing Institute (MDPI), Association for Computing Machinery (ACM), Springer, and Hindawi Publishing Corporation. Each selected work has been analyzed to present its objective, methodology, and results. The BE progression to dysplasia or adenocarcinoma shows a complex pattern to be detected during endoscopic surveillance. Therefore, it is valuable to assist its diagnosis and automatic identification using computer analysis. The evaluation of BE dysplasia can be performed through manual or automated segmentation through machine learning techniques. Finally, in this survey, we reviewed recent studies focused on the automatic detection of the neoplastic region for classification purposes using machine learning methods.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{MaierWeihererHuberetal., author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Imitating human soft tissue on basis of a dual-material 3D print using a support-filled metamaterial to provide bimanual haptic for a hand surgery training system}, series = {Quantitative Imaging in Medicine and Surgery}, volume = {9}, journal = {Quantitative Imaging in Medicine and Surgery}, number = {1}, publisher = {AME Publishing Company}, doi = {10.21037/qims.2018.09.17}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-979}, pages = {30 -- 42}, abstract = {Background: Currently, it is common practice to use three-dimensional (3D) printers not only for rapid prototyping in industry, but also in the medical area to create medical applications for training inexperienced surgeons.
In a clinical training simulator for minimally invasive bone drilling to fix hand fractures with Kirschner wires (K-wires), a 3D-printed hand phantom must be not only geometrically but also haptically correct. Due to the limited view during an operation, surgeons need to precisely localize underlying risk structures solely by feeling specific bony protrusions of the human hand. Methods: The goal of this experiment is to imitate human soft tissue with its haptics and elasticity for realistic hand phantom fabrication, using only a dual-material 3D printer and support-material-filled metamaterial between skin and bone. We present our workflow to generate lattice structures between hard bone and soft skin with iterative cube edge (CE) or cube face (CF) unit cells. Cuboid and finger-shaped sample prints with and without an inner hard bone in different lattice thicknesses are constructed and 3D printed. Results: The most elastic available rubber-like material is too firm to imitate soft tissue. By reducing the amount of rubber in the inner volume through support material (SUP), objects become significantly softer. Without metamaterial, after disintegration, the SUP can be shifted through the volume and thus the body loses its original shape. Although the CE design increases the elasticity, it cannot restore the fabric form. In contrast to CE, the CF design not only increases the elasticity but also guarantees a local limitation of the SUP. Therefore, the body retains its shape and internal bones remain in their intended place. Various unit cell sizes, lattice thicknesses and skin thicknesses regulate the ratio of rubber material to SUP. Test prints with a higher SUP and lower rubber material percentage appear softer, and vice versa. This was confirmed by an expert surgeon evaluation. Subjects judged pure rubber-like material as too firm, and samples only filled with SUP or with a lattice structure in CE design as not suitable for imitating tissue. 3D-printed finger samples in CF design were rated as realistic compared to the haptics of human tissue, with a well palpable bone structure. Conclusions: We developed a new dual-material 3D print technique to imitate the soft tissue of the human hand with its haptic properties. Blowy SUP is trapped within a lattice structure to soften rubber-like 3D print material, which makes it possible to reproduce a realistic replica of human hand soft tissue.}, subject = {Handchirurgie}, language = {en} } @article{MaierPerretSimonetal., author = {Maier, Johannes and Perret, Jerome and Simon, Martina and Schmitt-R{\"u}th, Stephanie and Wittenberg, Thomas and Palm, Christoph}, title = {Force-feedback assisted and virtual fixtures based K-wire drilling simulation}, series = {Computers in Biology and Medicine}, volume = {114}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2019.103473}, pages = {1 -- 10}, abstract = {One common method to fix fractures of the human hand after an accident is an osteosynthesis with Kirschner wires (K-wires) to stabilize the bone fragments. The insertion of K-wires is a delicate minimally invasive surgery, because surgeons operate almost without sight. Since realistic training methods are time-consuming, costly and insufficient, a virtual-reality (VR) based training system for the placement of K-wires was developed. As part of this, the current work deals with the real-time bone drilling simulation using a haptic force-feedback device.
To simulate the drilling, we introduce a virtual-fixture-based force-feedback drilling approach. By decomposing the drilling task into individual phases, each phase can be handled individually to perfectly control the drilling procedure. We report on the related finite state machine (FSM), describe the haptic feedback of each state and explain how to avoid jerking of the haptic force-feedback during state transitions. The use of the virtual fixture approach results in good haptic performance and stable drilling behavior. This was confirmed by 26 expert surgeons, who evaluated the virtual drilling on the simulator and rated it as very realistic. To make the system even more convincing, we determined real drilling feed rates through experimental pig bone drilling and transferred them to our system. Due to a constant simulation thread, we can guarantee a precise drilling motion. Virtual-fixtures-based force-feedback calculation is able to simulate force-feedback assisted bone drilling with high quality and thus has great potential for the development of medical applications.}, subject = {Handchirurgie}, language = {en} } @inproceedings{MiddelPalmErdt, author = {Middel, Luise and Palm, Christoph and Erdt, Marius}, title = {Synthesis of Medical Images Using GANs}, series = {Uncertainty for safe utilization of machine learning in medical imaging and clinical image-based procedures. First International Workshop, UNSURE 2019, and 8th International Workshop, CLIP 2019, held in conjunction with MICCAI 2019, Shenzhen, China, October 17, 2019}, booktitle = {Uncertainty for safe utilization of machine learning in medical imaging and clinical image-based procedures. First International Workshop, UNSURE 2019, and 8th International Workshop, CLIP 2019, held in conjunction with MICCAI 2019, Shenzhen, China, October 17, 2019}, publisher = {Springer Nature}, address = {Cham}, isbn = {978-3-030-32688-3}, issn = {0302-9743}, doi = {10.1007/978-3-030-32689-0_13}, pages = {125 -- 134}, abstract = {The success of artificial intelligence in medicine depends on the availability of large amounts of high-quality training data. Sharing of medical image data, however, is often restricted by laws such as doctor-patient confidentiality. Although there are publicly available medical datasets, their quality and quantity are often low. Moreover, datasets are often imbalanced and only represent a fraction of the images generated in hospitals or clinics, and can thus usually only be used as training data for specific problems. The introduction of generative adversarial networks (GANs) provides a means to generate artificial images by training two convolutional networks. This paper proposes a method which uses GANs trained on medical images in order to generate a large number of artificial images that could be used to train other artificial intelligence algorithms. This work is a first step towards alleviating data privacy concerns and being able to publicly share data that still contains a substantial amount of the information in the original private data.
The method has been evaluated on several public datasets, with quantitative and qualitative tests showing promising results.}, subject = {Neuronale Netze}, language = {en} } @article{BrownConsortiumZhouetal., author = {Brown, Peter and Consortium, RELISH and Zhou, Yaoqi and Palm, Christoph}, title = {Large expert-curated database for benchmarking document similarity detection in biomedical literature search}, series = {Database}, volume = {2019}, journal = {Database}, publisher = {Oxford University Press}, doi = {10.1093/database/baz085}, pages = {1 -- 66}, abstract = {Document recommendation systems for locating relevant literature have mostly relied on methods developed a decade ago. This is largely due to the lack of a large offline gold-standard benchmark of relevant documents that cover a variety of research fields such that newly developed literature search techniques can be compared, improved and translated into practice. To overcome this bottleneck, we have established the RElevant LIterature SearcH consortium consisting of more than 1500 scientists from 84 countries, who have collectively annotated the relevance of over 180 000 PubMed-listed articles with regard to their respective seed (input) article/s. The majority of annotations were contributed by highly experienced, original authors of the seed articles. The collected data cover 76\% of all unique PubMed Medical Subject Headings descriptors. No systematic biases were observed across different experience levels, research fields or time spent on annotations. More importantly, annotations of the same document pairs contributed by different scientists were highly concordant. We further show that the three representative baseline methods used to generate recommended articles for evaluation (Okapi Best Matching 25, Term Frequency-Inverse Document Frequency and PubMed Related Articles) had similar overall performances. Additionally, we found that these methods each tend to produce distinct collections of recommended articles, suggesting that a hybrid method may be required to completely capture all relevant articles. The established database server located at https://relishdb.ict.griffith.edu.au is freely available for the downloading of annotation data and the blind testing of new methods. We expect that this benchmark will be useful for stimulating the development of new powerful techniques for title and title/abstract-based search engines for relevant articles in biomedical research.}, subject = {Information Retrieval}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Computer-aided diagnosis using deep learning in the evaluation of early oesophageal adenocarcinoma}, series = {Gut}, volume = {68}, journal = {Gut}, number = {7}, publisher = {British Society of Gastroenterology}, doi = {10.1136/gutjnl-2018-317573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-68}, pages = {1143 -- 1145}, abstract = {Computer-aided diagnosis using deep learning (CAD-DL) may be an instrument to improve endoscopic assessment of Barrett's oesophagus (BE) and early oesophageal adenocarcinoma (EAC).
Based on still images from two databases, the diagnosis of EAC by CAD-DL reached sensitivities/specificities of 97\%/88\% (Augsburg data) and 92\%/100\% (Medical Image Computing and Computer-Assisted Intervention [MICCAI] data) for white light (WL) images and 94\%/80\% for narrow band images (NBI) (Augsburg data), respectively. Tumour margins delineated by experts in the images were detected satisfactorily with a Dice coefficient (D) of 0.72. This could be a first step towards CAD-DL for BE assessment. If developed further, it could become a useful adjunctive tool for patient management.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @misc{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Artificial Intelligence in Early Barrett's Cancer: The Segmentation Task}, series = {Endoscopy}, volume = {51}, journal = {Endoscopy}, number = {04}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0039-1681187}, pages = {6}, abstract = {Aims: The delineation of outer margins of early Barrett's cancer can be challenging even for experienced endoscopists. Artificial intelligence (AI) could assist endoscopists faced with this task. To date, there is very limited experience in this domain. In this study, we demonstrate the measure of overlap (Dice coefficient = D) between highly experienced Barrett endoscopists and an AI system in the delineation of cancer margins (segmentation task). Methods: An AI system with a deep convolutional neural network (CNN) was trained and tested on high-definition endoscopic images of early Barrett's cancer (n = 33) and normal Barrett's mucosa (n = 41). The reference standard for the segmentation task was the manual delineation of tumor margins by three highly experienced Barrett endoscopists. Training of the AI system included patch generation, patch augmentation and adjustment of the CNN weights. The segmentation then resulted from patch classification and thresholding of the class probabilities. Segmentation results were evaluated using the Dice coefficient (D). Results: The Dice coefficient (D), which can range between 0 (no overlap) and 1 (complete overlap), was computed only for images correctly classified by the AI system as cancerous. At a threshold of t = 0.5, a mean value of D = 0.72 was computed. Conclusions: AI with CNN performed reasonably well in the segmentation of the tumor region in Barrett's cancer, at least when compared with expert Barrett's endoscopists.
AI holds a lot of promise as a tool for better visualization of tumor margins but may need further improvement and enhancement, especially in real-time settings.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{EbigboPalmProbstetal., author = {Ebigbo, Alanna and Palm, Christoph and Probst, Andreas and Mendel, Robert and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Siersema, Peter and Messmann, Helmut}, title = {A technical review of artificial intelligence as applied to gastrointestinal endoscopy: clarifying the terminology}, series = {Endoscopy International Open}, volume = {07}, journal = {Endoscopy International Open}, number = {12}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1010-5705}, pages = {1616 -- 1623}, abstract = {The growing number of publications on the application of artificial intelligence (AI) in medicine underlines the enormous importance and potential of this emerging field of research. In gastrointestinal endoscopy, AI has been applied to all segments of the gastrointestinal tract, most importantly in the detection and characterization of colorectal polyps. However, AI research has also been published on the stomach and esophagus for both neoplastic and non-neoplastic disorders. The various technical as well as medical aspects of AI, however, remain confusing, especially for non-expert physicians. This physician-engineer co-authored review explains the basic technical aspects of AI and provides a comprehensive overview of recent publications on AI in gastrointestinal endoscopy. Finally, a basic insight is offered into understanding publications on AI in gastrointestinal endoscopy.}, subject = {Diagnose}, language = {en} } @article{PassosSouzaJrMendeletal., author = {Passos, Leandro A. and Souza Jr., Luis Antonio de and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Barrett's esophagus analysis using infinity Restricted Boltzmann Machines}, series = {Journal of Visual Communication and Image Representation}, volume = {59}, journal = {Journal of Visual Communication and Image Representation}, publisher = {Elsevier}, doi = {10.1016/j.jvcir.2019.01.043}, pages = {475 -- 485}, abstract = {The number of patients with Barrett's esophagus (BE) has increased in the last decades. Considering the dangerousness of the disease and its evolution to adenocarcinoma, an early diagnosis of BE may provide a high probability of cancer remission. However, limitations regarding traditional methods of detection and management of BE demand alternative solutions. As such, computer-aided tools have recently been used to assist in this problem, but the challenge still persists. To manage the problem, we introduce the infinity Restricted Boltzmann Machines (iRBMs) to the task of automatic identification of Barrett's esophagus from endoscopic images of the lower esophagus. Moreover, since iRBM requires a proper selection of its meta-parameters, we also present a discriminative iRBM fine-tuning using six meta-heuristic optimization techniques.
We show that iRBMs are suitable for this context, providing competitive results, and that the meta-heuristic techniques are appropriate for such a task.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{HartmannWeihererSchiltzetal., author = {Hartmann, Robin and Weiherer, Maximilian and Schiltz, Daniel and Seitz, Stephan and Lotter, Luisa and Anker, Alexandra and Palm, Christoph and Prantl, Lukas and Br{\´e}bant, Vanessa}, title = {A Novel Method of Outcome Assessment in Breast Reconstruction Surgery: Comparison of Autologous and Alloplastic Techniques Using Three-Dimensional Surface Imaging}, series = {Aesthetic Plastic Surgery}, volume = {44}, journal = {Aesthetic Plastic Surgery}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s00266-020-01749-4}, pages = {1980 -- 1987}, abstract = {Background: Breast reconstruction is an important coping tool for patients undergoing a mastectomy. There are numerous surgical techniques in breast reconstruction surgery (BRS). Regardless of the technique used, creating a symmetric outcome is crucial for patients and plastic surgeons. Three-dimensional surface imaging enables surgeons and patients to assess the outcome's symmetry in BRS. To discriminate between autologous and alloplastic techniques, we analyzed both techniques using objective optical computerized symmetry analysis. Software was developed that enables clinicians to assess optical breast symmetry using three-dimensional surface imaging. Methods: Twenty-seven patients who had undergone autologous (n = 12) or alloplastic (n = 15) BRS received three-dimensional surface imaging. Anthropometric data were collected digitally using semiautomatic measurements and automatic measurements. Automatic measurements were taken using the newly developed software. To quantify symmetry, a Symmetry Index is proposed. Results: Statistical analysis revealed that there is no difference in the outcome symmetry between the two groups (t test for independent samples; p = 0.48, two-tailed). Conclusion: This study's findings provide a foundation for qualitative symmetry assessment in BRS using automatized digital anthropometry. In the present trial, no difference in the outcomes' optical symmetry was detected between autologous and alloplastic approaches.}, subject = {Mammoplastik}, language = {en} } @inproceedings{WeihererZornWittenbergetal., author = {Weiherer, Maximilian and Zorn, Martin and Wittenberg, Thomas and Palm, Christoph}, title = {Retrospective Color Shading Correction for Endoscopic Images}, series = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {10.1007/978-3-658-29267-6}, pages = {14 -- 19}, abstract = {In this paper, we address the problem of retrospective color shading correction. An extension of the established gray-level shading correction algorithm based on signal envelope (SE) estimation to color images is developed using principal color components.
Compared to the probably most general shading correction algorithm based on entropy minimization, SE estimation does not need any computationally expensive optimization and thus can be implemented more efficiently. We tested our new shading correction scheme on artificial as well as real endoscopic images and observed promising results. Additionally, an in-depth analysis of the stop criterion used in the SE estimation algorithm is provided, leading to the conclusion that a fixed, user-defined threshold is generally not feasible. Thus, we present new ideas on how to develop a non-parametric version of the SE estimation algorithm using entropy.}, subject = {Endoskopie}, language = {en} } @article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task. Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. Our pipeline offers greater flexibility and a lower entrance barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @article{MaierWeihererHuberetal., author = {Maier, Johannes and Weiherer, Maximilian and Huber, Michaela and Palm, Christoph}, title = {Optically tracked and 3D printed haptic phantom hand for surgical training system}, series = {Quantitative Imaging in Medicine and Surgery}, volume = {10}, journal = {Quantitative Imaging in Medicine and Surgery}, number = {02}, publisher = {AME Publishing Company}, address = {Hong Kong, China}, doi = {10.21037/qims.2019.12.03}, pages = {340 -- 355}, abstract = {Background: For surgical fixation of bone fractures of the human hand, so-called Kirschner wires (K-wires) are drilled through bone fragments. Due to the minimally invasive drilling procedures without a view of risk structures like vessels and nerves, a thorough training of young surgeons is necessary. For the development of a virtual reality (VR) based training system, a three-dimensional (3D) printed phantom hand is required. To ensure an intuitive operation, this phantom hand has to be realistic both in its position relative to the drill and in its haptic features. The softest 3D printing material available on the market, however, is too hard to imitate human soft tissue.
Therefore, a support-material (SUP) filled metamaterial is used to soften the raw material. Realistic haptic features are important for palpating protrusions of the bone to determine the drilling starting point and angle. Optical real-time tracking is used to transfer position and rotation to the training system. Methods: A metamaterial already developed in previous work is further improved by the use of a new unit cell. Thus, the amount of SUP within the volume can be increased and the tissue is softened further. In addition, the human anatomy is transferred to the entire hand model. A subcutaneous fat layer and the penetration of air through pores into the volume simulate the shiftability of the skin layers. For optical tracking, a rotationally symmetrical marker attached to the phantom hand with a corresponding reference marker is developed. In order to ensure trouble-free position transmission, various types of marker point applications are tested. Results: Several cuboid and forearm sample prints led to a final 30-centimeter-long hand model. The whole haptic phantom could be printed faultlessly within about 17 hours. The metamaterial consisting of the new unit cell results in an increased SUP share of 4.32\%. As validated in an expert surgeon study, this allows, in combination with a displaceable uppermost skin layer, good palpability of the bones. Tracking of the hand marker in dodecahedron design works trouble-free in conjunction with a reference marker attached to the worktop of the training system. Conclusions: In this work, an optically tracked and haptically correct phantom hand was developed using dual-material 3D printing, which can easily be integrated into a surgical training system.}, subject = {Handchirurgie}, language = {en} } @inproceedings{ChangLinLeeetal., author = {Chang, Ching-Sheng and Lin, Jin-Fa and Lee, Ming-Ching and Palm, Christoph}, title = {Semantic Lung Segmentation Using Convolutional Neural Networks}, series = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {10.1007/978-3-658-29267-6_17}, pages = {75 -- 80}, abstract = {Chest X-Ray (CXR) images as part of a non-invasive diagnosis method are commonly used in today's medical workflow. In traditional methods, physicians usually use their experience to interpret CXR images; however, there is large interobserver variance. Computer vision may be used as a standard for assisted diagnosis. In this study, we applied an encoder-decoder neural network architecture for automatic lung region detection. We compared a three-class approach (left lung, right lung, background) and a two-class approach (lung, background). The differentiation of left and right lungs as a direct result of semantic segmentation based on neural nets, rather than as post-processing of a lung-background segmentation, is done here for the first time. Our evaluation was done on the NIH Chest X-ray dataset, from which 1736 images were extracted and manually annotated.
We achieved 94.9\% mIoU and 92\% mIoU as segmentation quality measures for the two-class model and the three-class model, respectively. This result is very promising for the segmentation of lung regions, having the simultaneous classification of left and right lung in mind.}, subject = {Neuronales Netz}, language = {en} } @misc{EbigboMendelTziatziosetal., author = {Ebigbo, Alanna and Mendel, Robert and Tziatzios, Georgios and Probst, Andreas and Palm, Christoph and Messmann, Helmut}, title = {Real-Time Diagnosis of an Early Barrett's Carcinoma using Artificial Intelligence (AI) - Video Case Demonstration}, series = {Endoscopy}, volume = {52}, journal = {Endoscopy}, number = {S 01}, publisher = {Thieme}, doi = {10.1055/s-0040-1704075}, pages = {S23}, abstract = {Introduction: We present a clinical case showing the real-time detection, characterization and delineation of an early Barrett's cancer using AI. Patients and methods: A 70-year-old patient with a long-segment Barrett's esophagus (C5M7) was assessed with an AI algorithm. Results: The AI system detected a 10 mm focal lesion and AI characterization predicted cancer with a probability of >90\%. After ESD resection, histopathology showed mucosal adenocarcinoma (T1a (m), R0), confirming the AI diagnosis. Conclusion: We demonstrate the real-time AI detection, characterization and delineation of a small and early mucosal Barrett's cancer.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Real-time use of artificial intelligence in the evaluation of cancer in Barrett's oesophagus}, series = {Gut}, volume = {69}, journal = {Gut}, number = {4}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2019-319460}, pages = {615 -- 616}, abstract = {Based on previous work by our group with manual annotation of visible Barrett oesophagus (BE) cancer images, a real-time deep learning artificial intelligence (AI) system was developed. While an expert endoscopist conducts the endoscopic assessment of BE, our AI system captures random images from the real-time camera livestream and provides a global prediction (classification), as well as a dense prediction (segmentation) differentiating accurately between normal BE and early oesophageal adenocarcinoma (EAC). The AI system showed an accuracy of 89.9\% on 14 cases with neoplastic BE.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @inproceedings{MendelSouzaJrRauberetal., author = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Semi-supervised Segmentation Based on Error-Correcting Supervision}, series = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX}, booktitle = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-58525-9}, doi = {10.1007/978-3-030-58526-6_9}, pages = {141 -- 157}, abstract = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data.
Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network. The secondary correction network learns on the labeled data to optimally spot correct predictions, as well as to amend incorrect ones. As an auxiliary regularization term, the corrector directly influences the supervised training of the segmentation network. On unlabeled data, the output of the correction network is essential to create a proxy for the unknown truth. The corrector's output is combined with the segmentation network's prediction to form the new target. We propose a loss function that incorporates both the pseudo-labels as well as the predictive certainty of the correction network. Our approach can easily be added to supervised segmentation models. We show consistent improvements over a supervised baseline in experiments on both the Pascal VOC 2012 and the Cityscapes datasets with varying amounts of labeled data.}, subject = {Semi-Supervised Learning}, language = {en} } @article{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Assisting Barrett's esophagus identification using endoscopic data augmentation based on Generative Adversarial Networks}, series = {Computers in Biology and Medicine}, volume = {126}, journal = {Computers in Biology and Medicine}, number = {November}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2020.104029}, pages = {12}, abstract = {Barrett's esophagus has seen a swift rise in the number of cases in the past years. Although traditional diagnosis methods have played a vital role in early-stage treatment, they are generally time- and resource-consuming. In this context, computer-aided approaches for automatic diagnosis emerged in the literature, since early detection is intrinsically related to remission probabilities. However, they still suffer from drawbacks because of the lack of available data for machine learning purposes, thus implying reduced recognition rates. This work introduces Generative Adversarial Networks to generate high-quality endoscopic images, thereby identifying Barrett's esophagus and adenocarcinoma more precisely. Further, Convolutional Neural Networks are used for feature extraction and classification purposes. The proposed approach is validated over two datasets of endoscopic images, with the experiments conducted over the full and patch-split images. The application of Deep Convolutional Generative Adversarial Networks for the data augmentation step and LeNet-5 and AlexNet for the classification step allowed us to validate the proposed methodology over an extensive set of datasets (based on original and augmented sets), reaching 90\% accuracy for the patch-based approach and 85\% for the image-based approach. Both results are based on augmented datasets and are statistically different from the ones obtained in the original datasets of the same kind. Moreover, the impact of data augmentation was evaluated in the context of image description and classification, and the results obtained using synthetic images outperformed the ones over the original datasets, as well as other recent approaches from the literature.
Such results suggest promising insights into the importance of proper data for accurate classification in computer-assisted Barrett's esophagus and adenocarcinoma detection.}, subject = {Maschinelles Lernen}, language = {en} } @article{EbigboPalmMessmann, author = {Ebigbo, Alanna and Palm, Christoph and Messmann, Helmut}, title = {Barrett esophagus: What to expect from Artificial Intelligence?}, series = {Best Practice \& Research Clinical Gastroenterology}, volume = {52-53}, journal = {Best Practice \& Research Clinical Gastroenterology}, number = {June-August}, publisher = {Elsevier}, issn = {1521-6918}, doi = {10.1016/j.bpg.2021.101726}, abstract = {The evaluation and assessment of Barrett's esophagus is challenging for both expert and nonexpert endoscopists. However, the early diagnosis of cancer in Barrett's esophagus is crucial for its prognosis, and could save costs. Pre-clinical and clinical studies on the application of Artificial Intelligence (AI) in Barrett's esophagus have shown promising results. In this review, we focus on the current challenges and future perspectives of implementing AI systems in the management of patients with Barrett's esophagus.}, subject = {Deep Learning}, language = {en} } @article{ArribasAntonelliFrazzonietal., author = {Arribas, Julia and Antonelli, Giulio and Frazzoni, Leonardo and Fuccio, Lorenzo and Ebigbo, Alanna and van der Sommen, Fons and Ghatwary, Noha and Palm, Christoph and Coimbra, Miguel and Renna, Francesco and Bergman, Jacques J.G.H.M. and Sharma, Prateek and Messmann, Helmut and Hassan, Cesare and Dinis-Ribeiro, Mario J.}, title = {Standalone performance of artificial intelligence for upper GI neoplasia: a meta-analysis}, series = {Gut}, volume = {70}, journal = {Gut}, number = {8}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2020-321922}, pages = {1458 -- 1468}, abstract = {Objective: Artificial intelligence (AI) may reduce underdiagnosed or overlooked upper GI (UGI) neoplastic and preneoplastic conditions, due to subtle appearance and low disease prevalence. Only disease-specific AI performances have been reported, generating uncertainty on its clinical value. Design: We searched PubMed, Embase and Scopus until July 2020, for studies on the diagnostic performance of AI in detection and characterisation of UGI lesions. Primary outcomes were pooled diagnostic accuracy, sensitivity and specificity of AI. Secondary outcomes were pooled positive (PPV) and negative (NPV) predictive values. We calculated pooled proportion rates (\%), designed summary receiver operating characteristic curves with respective areas under the curve (AUCs) and performed metaregression and sensitivity analysis. Results: Overall, 19 studies on detection of oesophageal squamous cell neoplasia (ESCN) or Barrett's esophagus-related neoplasia (BERN) or gastric adenocarcinoma (GCA) were included, with 218, 445, and 453 patients and 7976, 2340, and 13 562 images, respectively. AI-sensitivity/specificity/PPV/NPV/positive likelihood ratio/negative likelihood ratio for UGI neoplasia detection were 90\% (CI 85\% to 94\%)/89\% (CI 85\% to 92\%)/87\% (CI 83\% to 91\%)/91\% (CI 87\% to 94\%)/8.2 (CI 5.7 to 11.7)/0.111 (CI 0.071 to 0.175), respectively, with an overall AUC of 0.95 (CI 0.93 to 0.97). No difference in AI performance across ESCN, BERN and GCA was found, AUC being 0.94 (CI 0.52 to 0.99), 0.96 (CI 0.95 to 0.98), 0.93 (CI 0.83 to 0.99), respectively. Overall, study quality was low, with high risk of selection bias.
No significant publication bias was found. Conclusion: We found a high overall AI accuracy for the diagnosis of any neoplastic lesion of the UGI tract that was independent of the underlying condition. This may be expected to substantially reduce the miss rate of precancerous lesions and early cancer when implemented in clinical practice.}, language = {en} } @article{HartmannWeihererSchiltzetal., author = {Hartmann, Robin and Weiherer, Maximilian and Schiltz, Daniel and Baringer, Magnus and Noisser, Vivien and H{\"o}sl, Vanessa and Eigenberger, Andreas and Seitz, Stefan and Palm, Christoph and Prantl, Lukas and Br{\´e}bant, Vanessa}, title = {New aspects in digital breast assessment: further refinement of a method for automated digital anthropometry}, series = {Archives of Gynecology and Obstetrics}, volume = {303}, journal = {Archives of Gynecology and Obstetrics}, publisher = {Springer Nature}, address = {Heidelberg}, issn = {1432-0711}, doi = {10.1007/s00404-020-05862-2}, pages = {721 -- 728}, abstract = {Purpose: In this trial, we used a previously developed prototype software to assess aesthetic results after reconstructive surgery for congenital breast asymmetry using automated anthropometry. To prove the consensus between the manual and automatic digital measurements, we evaluated the software by comparing the manual and automatic measurements of 46 breasts. Methods: Twenty-three patients who underwent reconstructive surgery for congenital breast asymmetry at our institution were examined and underwent 3D surface imaging. Per patient, 14 manual and 14 computer-based anthropometric measurements were obtained according to a standardized protocol. Manual and automatic measurements, as well as the previously proposed Symmetry Index (SI), were compared. Results: The Wilcoxon signed-rank test revealed no significant differences in six of the seven measurements between the automatic and manual assessments. The SI showed robust agreement between the automatic and manual methods. Conclusion: The present trial validates our method for digital anthropometry. Despite the discrepancy in one measurement, all remaining measurements, including the SI, showed high agreement between the manual and automatic methods. The proposed data bring us one step closer to the long-term goal of establishing robust instruments to evaluate the results of breast surgery.}, language = {en} } @misc{RoemmeleMendelRauberetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Rauber, David and R{\"u}ckert, Tobias and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Endoscopic Diagnosis of Eosinophilic Esophagitis Using a deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724274}, abstract = {Aims Eosinophilic esophagitis (EoE) is easily missed during endoscopy, either because physicians are not familiar with its endoscopic features or the morphologic changes are too subtle. In this preliminary paper, we present the first attempt to detect EoE in endoscopic white light (WL) images using a deep learning network (EoE-AI). Methods 401 WL images of eosinophilic esophagitis and 871 WL images of normal esophageal mucosa were evaluated. All images were assessed for the Endoscopic Reference score (EREFS) (edema, rings, exudates, furrows, strictures). Images with strictures were excluded. 
EoE was defined as the presence of at least 15 eosinophils per high-power field on biopsy. A convolutional neural network based on the ResNet architecture with several five-fold cross-validation runs was used. Adding auxiliary EREFS-classification branches to the neural network allowed the inclusion of the scores as optimization criteria during training. EoE-AI was evaluated for sensitivity, specificity, and F1-score. In addition, two human endoscopists evaluated the images. Results EoE-AI showed a mean sensitivity, specificity, and F1 of 0.759, 0.976, and 0.834, respectively, averaged over the five distinct cross-validation runs. With the EREFS-augmented architecture, a mean sensitivity, specificity, and F1-score of 0.848, 0.945, and 0.861, respectively, could be demonstrated. In comparison, the two human endoscopists had an average sensitivity, specificity, and F1-score of 0.718, 0.958, and 0.793. Conclusions To the best of our knowledge, this is the first application of deep learning to endoscopic images of EoE which were also assessed after augmentation with the EREFS-score. The next step is the evaluation of EoE-AI using an external dataset. We then plan to assess the EoE-AI tool on endoscopic videos, and also in real-time. This preliminary work is encouraging regarding the ability of AI to enhance physician detection of EoE, and potentially to perform a true "optical biopsy", but more work is needed.}, language = {en} } @misc{ScheppachRauberMendeletal., author = {Scheppach, Markus W. and Rauber, David and Mendel, Robert and Palm, Christoph and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection Of Celiac Disease Using A Deep Learning Algorithm}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {S 01}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0041-1724970}, abstract = {Aims Celiac disease (CD) is a complex condition caused by an autoimmune reaction to ingested gluten. Due to its polymorphic manifestation and subtle endoscopic presentation, the diagnosis is difficult and thus the disorder is underreported. We aimed to use deep learning to identify celiac disease on endoscopic images of the small bowel. Methods Patients with small intestinal histology compatible with CD (MARSH classification I-III) were extracted retrospectively from the database of Augsburg University Hospital. They were compared to patients with no clinical signs of CD and histologically normal small intestinal mucosa. In a first step, MARSH III and normal small intestinal mucosa were differentiated with the help of a deep learning algorithm. For this, the endoscopic white light images were divided into five equal-sized subsets. We avoided splitting the images of one patient into several subsets. A ResNet-50 model was trained with the images from four subsets and then validated with the remaining subset. This process was repeated for each subset, such that each subset was validated once. Sensitivity, specificity, and harmonic mean (F1) of the algorithm were determined. Results The algorithm showed values of 0.83, 0.88, and 0.84 for sensitivity, specificity, and F1, respectively. Further data showing a comparison between the detection rate of the AI model and that of experienced endoscopists will be available at the time of the upcoming conference. Conclusions We present the first clinical report on the use of a deep learning algorithm for the detection of celiac disease using endoscopic images.
Further evaluation on an external data set, as well as in the detection of CD in real-time, will follow. However, this work at least suggests that AI can assist endoscopists in the endoscopic diagnosis of CD, and ultimately may be able to do a true optical biopsy in real time.}, language = {en} } @inproceedings{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Fine-tuning Generative Adversarial Networks using Metaheuristics}, series = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, doi = {10.1007/978-3-658-33198-6_50}, pages = {205 -- 210}, abstract = {Barrett's esophagus denotes a disorder in the digestive system that affects the esophagus' mucosal cells, caused by reflux, and showing potential convergence to esophageal adenocarcinoma if not treated in initial stages. Thus, fast and reliable computer-aided diagnosis is highly desirable. Nevertheless, such approaches usually suffer from imbalanced datasets, which can be addressed through Generative Adversarial Networks (GANs). Such techniques generate realistic images based on observed samples, albeit at the cost of a proper selection of their hyperparameters. Many works employed a class of nature-inspired algorithms called metaheuristics to tackle the problem considering distinct deep learning approaches. Therefore, this paper's main contribution is to introduce metaheuristic techniques to fine-tune GANs in the context of Barrett's esophagus identification, as well as to investigate the feasibility of generating high-quality synthetic images for early-cancer assisted identification.}, subject = {Endoskopie}, language = {en} } @article{SouzaJrMendelStrasseretal., author = {Souza Jr., Luis Antonio de and Mendel, Robert and Strasser, Sophia and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Convolutional Neural Networks for the evaluation of cancer in Barrett's esophagus: Explainable AI to lighten up the black-box}, series = {Computers in Biology and Medicine}, volume = {135}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2021.104578}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-20126}, pages = {1 -- 14}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performances in medical image computing, their accountability and transparency must be ensured in such evaluations. The reliability of machine learning predictions must be explained and interpreted, especially if diagnosis support is addressed. For this task, the black-box nature of deep learning techniques must be lightened up to transfer their promising results into clinical practice. Hence, we aim to investigate the use of explainable artificial intelligence techniques to quantitatively highlight discriminative regions during the classification of early cancerous tissues in Barrett's esophagus-diagnosed patients.
Four Convolutional Neural Network models (AlexNet, SqueezeNet, ResNet50, and VGG16) were analyzed using five different interpretation techniques (saliency, guided backpropagation, integrated gradients, input × gradients, and DeepLIFT) to compare their agreement with experts' previous annotations of cancerous tissue. We could show that saliency attributes match best with the manual experts' delineations. Moreover, there is moderate to high correlation between the sensitivity of a model and the human-and-computer agreement. The results also highlighted that the higher the model's sensitivity, the stronger the correlation of human and computational segmentation agreement. We observed a relevant relation between computational learning and experts' insights, demonstrating how human knowledge may influence the correct computational learning.}, subject = {Deep Learning}, language = {en} } @article{EbigboMendelRueckertetal., author = {Ebigbo, Alanna and Mendel, Robert and R{\"u}ckert, Tobias and Schuster, Laurin and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Mende, Matthias and Steinbr{\"u}ck, Ingo and Faiss, Siegbert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Deprez, Pierre and Oyama, Tsuneo and Takahashi, Akiko and Seewald, Stefan and Sharma, Prateek and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Endoscopic prediction of submucosal invasion in Barrett's cancer with the use of Artificial Intelligence: A pilot Study}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {09}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-1311-8570}, pages = {878 -- 883}, abstract = {Background and aims: The accurate differentiation between T1a and T1b Barrett's cancer has both therapeutic and prognostic implications but is challenging even for experienced physicians. We trained an Artificial Intelligence (AI) system on the basis of deep artificial neural networks (deep learning) to differentiate between T1a and T1b Barrett's cancer on white-light images. Methods: Endoscopic images from three tertiary care centres in Germany were collected retrospectively. A deep learning system was trained and tested using the principles of cross-validation. A total of 230 white-light endoscopic images (108 T1a and 122 T1b) were evaluated with the AI-system. For comparison, the images were also classified by experts specialized in endoscopic diagnosis and treatment of Barrett's cancer. Results: The sensitivity, specificity, F1 and accuracy of the AI-system in the differentiation between T1a and T1b cancer lesions were 0.77, 0.64, 0.73 and 0.71, respectively. There was no statistically significant difference between the performance of the AI-system and that of human experts, with sensitivity, specificity, F1 and accuracy of 0.63, 0.78, 0.67 and 0.70, respectively. Conclusion: This pilot study demonstrates the first multicenter application of an AI-based system in the prediction of submucosal invasion in endoscopic images of Barrett's cancer. The AI system scored on par with international experts in the field, but more work is necessary to improve the system and apply it to video sequences and in a real-life setting.
Nevertheless, the correct prediction of submucosal invasion in Barrett's cancer remains challenging for both experts and AI.}, subject = {Maschinelles Lernen}, language = {en} } @inproceedings{WeberNunesHammerHammeretal., author = {Weber Nunes, Danilo and Hammer, Michael and Hammer, Simone and Uller, Wibke and Palm, Christoph}, title = {Classification of Vascular Malformations Based on T2 STIR Magnetic Resonance Imaging}, series = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-36932-3_57}, pages = {267 -- 272}, abstract = {Vascular malformations (VMs) are a rare condition. They can be categorized into high-flow and low-flow VMs, which is a challenging task for radiologists. In this work, a very heterogeneous set of MRI images with only rough annotations is used for classification with a convolutional neural network. The main focus is to describe the challenging data set and strategies to deal with such data in terms of preprocessing, annotation usage and choice of the network architecture. We achieved an F1-score of 89.47 \% with a 3D ResNet 18.}, language = {en} } @article{KnoedlerBaecherKaukeNavarroetal., author = {Knoedler, Leonard and Baecher, Helena and Kauke-Navarro, Martin and Prantl, Lukas and Machens, Hans-G{\"u}nther and Scheuermann, Philipp and Palm, Christoph and Baumann, Raphael and Kehrer, Andreas and Panayi, Adriana C. and Knoedler, Samuel}, title = {Towards a Reliable and Rapid Automated Grading System in Facial Palsy Patients: Facial Palsy Surgery Meets Computer Science}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {17}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/jcm11174998}, abstract = {Background: Reliable, time- and cost-effective, and clinician-friendly diagnostic tools are cornerstones in facial palsy (FP) patient management. Different automated FP grading systems have been developed but revealed persisting downsides such as insufficient accuracy and cost-intensive hardware. We aimed to overcome these barriers and programmed an automated grading system for FP patients utilizing the House and Brackmann scale (HBS). Methods: Image datasets of 86 patients seen at the Department of Plastic, Hand, and Reconstructive Surgery at the University Hospital Regensburg, Germany, between June 2017 and May 2021, were used to train the neural network and evaluate its accuracy. Nine facial poses per patient were analyzed by the algorithm. Results: The algorithm showed an accuracy of 100\%. Oversampling did not result in altered outcomes, while the direct form displayed superior accuracy levels when compared to the modular classification form (n = 86; 100\% vs. 99\%). The Early Fusion technique was linked to improved accuracy outcomes in comparison to the Late Fusion and sequential method (n = 86; 100\% vs. 96\% vs. 97\%). Conclusions: Our automated FP grading system combines high-level accuracy with cost- and time-effectiveness. Our algorithm may accelerate the grading process in FP patients and facilitate the FP surgeon's workflow.}, language = {en} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W.
and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) - assisted vessel and tissue recognition during third space endoscopy (Smart ESD)}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1755110}, abstract = {Clinical setting Third space procedures such as endoscopic submucosal dissection (ESD) and peroral endoscopic myotomy (POEM) are complex minimally invasive techniques with an elevated risk for operator-dependent adverse events such as bleeding and perforation. This risk arises from accidental dissection into the muscle layer or through submucosal blood vessels, as the submucosal cutting plane within the expanding resection site is not always apparent. Deep learning algorithms have shown considerable potential for the detection and characterization of gastrointestinal lesions. So-called AI clinical decision support solutions (AI-CDSS) are commercially available for polyp detection during colonoscopy. Until now, these computer programs have concentrated on diagnostics, whereas an AI-CDSS for interventional endoscopy has not yet been introduced. We aimed to develop an AI-CDSS („Smart ESD") for real-time intra-procedural detection and delineation of blood vessels, tissue structures and endoscopic instruments during third-space endoscopic procedures. Characteristics of Smart ESD An AI-CDSS was developed that delineates blood vessels, tissue structures and endoscopic instruments during third-space endoscopy in real-time. The output can be displayed by an overlay over the endoscopic image with different modes of visualization, such as a color-coded semitransparent area overlay, or border tracing (demonstration video). Hereby the optimal layer for dissection can be visualized, which lies just above or directly at the muscle layer, depending on the applied technique (ESD or POEM). Furthermore, relevant blood vessels (thickness > 1 mm) are delineated. Spatial proximity between the electrosurgical knife and a blood vessel triggers a warning signal. By this guidance system, inadvertent dissection through blood vessels could be averted. Technical specifications A DeepLabv3+ neural network architecture with KSAC and a 101-layer ResNeSt backbone was used for the development of Smart ESD. It was trained and validated with 2565 annotated still images from 27 full-length third-space endoscopic videos. The annotation classes were blood vessel, submucosal layer, muscle layer, electrosurgical knife and endoscopic instrument shaft. A test on a separate data set yielded an intersection over union (IoU) of 68\%, a Dice Score of 80\% and a pixel accuracy of 87\%, demonstrating a high overlap between expert and AI segmentation. Further experiments on standardized video clips showed a mean vessel detection rate (VDR) of 85\% with values of 92\%, 70\% and 95\% for POEM, rectal ESD and esophageal ESD, respectively. False positive measurements occurred 0.75 times per minute. Seven out of nine vessels that caused intraprocedural bleeding were caught by the algorithm, as well as both vessels that required hemostasis via hemostatic forceps. Future perspectives Smart ESD performed well for vessel and tissue detection and delineation on still images, as well as on video clips.
During a live demonstration in the endoscopy suite, the clinical applicability of the innovation was examined. The lag time for processing of the live endoscopic image was too short to be visually perceptible to the interventionist. Even though the algorithm could not be applied during actual dissection by the interventionist, Smart ESD appeared readily deployable during visual assessment by ESD experts. Therefore, we plan to conduct a clinical trial in order to obtain CE-certification of the algorithm. This new technology may improve procedural safety and speed, as well as training of modern minimally invasive endoscopic resection techniques.}, subject = {Bildgebendes Verfahren}, language = {en} } @misc{MeinikheimMendelScheppachetal., author = {Meinikheim, Michael and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Prinz, Friederike and Schwamberger, Tanja and Schlottmann, Jakob and G{\"o}lder, Stefan Karl and Walter, Benjamin and Steinbr{\"u}ck, Ingo and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {INFLUENCE OF AN ARTIFICIAL INTELLIGENCE (AI) BASED DECISION SUPPORT SYSTEM (DSS) ON THE DIAGNOSTIC PERFORMANCE OF NON-EXPERTS IN BARRETT'S ESOPHAGUS RELATED NEOPLASIA (BERN)}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {S 01}, publisher = {Thieme}, doi = {10.1055/s-00000012}, pages = {S39}, abstract = {Aims Barrett's esophagus related neoplasia (BERN) is difficult to detect and characterize during endoscopy, even for expert endoscopists. We aimed to assess the add-on effect of an Artificial Intelligence (AI) algorithm (Barrett-Ampel) as a decision support system (DSS) for non-expert endoscopists in the evaluation of Barrett's esophagus (BE) and BERN. Methods Twelve videos with multimodal imaging (white light (WL), narrow-band imaging (NBI), and texture and color enhanced imaging (TXI)) of histologically confirmed BE and BERN were assessed by expert and non-expert endoscopists. For each video, endoscopists were asked to identify the area of BERN and decide on the biopsy spot. Videos were assessed by the AI algorithm and regions of BERN were highlighted in real-time by a transparent overlay. Finally, endoscopists were shown the AI videos and asked to either confirm or change their initial decision based on the AI support. Results Barrett-Ampel correctly identified all areas of BERN, irrespective of the imaging modality (WL, NBI, TXI), but misinterpreted two inflammatory lesions (Accuracy=75\%). Expert endoscopists had a similar performance (Accuracy=70.8\%), while non-experts had an accuracy of 58.3\%. When AI was implemented as a DSS, non-expert endoscopists improved their diagnostic accuracy to 75\%. Conclusions AI may have the potential to support non-expert endoscopists in the assessment of videos of BE and BERN. Limitations of this study include the low number of videos used. Randomized clinical trials in a real-life setting should be performed to confirm these results.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{ScheppachMendelProbstetal., author = {Scheppach, Markus W.
and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {ARTIFICIAL INTELLIGENCE (AI) - ASSISTED VESSEL AND TISSUE RECOGNITION IN THIRD-SPACE ENDOSCOPY}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {S01}, publisher = {Thieme}, doi = {10.1055/s-0042-1745037}, pages = {S175}, abstract = {Aims Third-space endoscopy procedures such as endoscopic submucosal dissection (ESD) and peroral endoscopic myotomy (POEM) are complex interventions with elevated risk of operator-dependent adverse events, such as intra-procedural bleeding and perforation. We aimed to design an artificial intelligence clinical decision support solution (AI-CDSS, "Smart ESD") for the detection and delineation of vessels, tissue structures, and instruments during third-space endoscopy procedures. Methods Twelve full-length third-space endoscopy videos were extracted from the Augsburg University Hospital database. 1686 frames were annotated for the following categories: Submucosal layer, blood vessels, electrosurgical knife and endoscopic instrument. A DeepLabv3+ neural network with a 101-layer ResNet backbone was trained and validated internally. Finally, the ability of the AI system to detect visible vessels during ESD and POEM was determined on 24 separate video clips of 7 to 46 seconds duration showing 33 predefined vessels. These video clips were also assessed by an expert in third-space endoscopy. Results Smart ESD showed a vessel detection rate (VDR) of 93.94\%, while an average of 1.87 false positive signals were recorded per minute. VDR of the expert endoscopist was 90.1\% with no false positive findings. On the internal validation data set using still images, the AI system demonstrated an Intersection over Union (IoU), mean Dice score and pixel accuracy of 63.47\%, 76.18\% and 86.61\%, respectively. Conclusions This is the first AI-CDSS aiming to mitigate operator-dependent limitations during third-space endoscopy. Further clinical trials are underway to better understand the role of AI in such procedures.}, language = {en} } @article{RoemmeleMendelBarrettetal., author = {R{\"o}mmele, Christoph and Mendel, Robert and Barrett, Caroline and Kiesl, Hans and Rauber, David and R{\"u}ckert, Tobias and Kraus, Lisa and Heinkele, Jakob and Dhillon, Christine and Grosser, Bianca and Prinz, Friederike and Wanzl, Julia and Fleischmann, Carola and Nagl, Sandra and Schnoy, Elisabeth and Schlottmann, Jakob and Dellon, Evan S. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {An artificial intelligence algorithm is highly accurate for detecting endoscopic features of eosinophilic esophagitis}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, publisher = {Nature Portfolio}, address = {London}, doi = {10.1038/s41598-022-14605-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-46928}, pages = {10}, abstract = {The endoscopic features associated with eosinophilic esophagitis (EoE) may be missed during routine endoscopy. We aimed to develop and evaluate an Artificial Intelligence (AI) algorithm for detecting and quantifying the endoscopic features of EoE in white light images, supplemented by the EoE Endoscopic Reference Score (EREFS). An AI algorithm (AI-EoE) was constructed and trained to differentiate between EoE and normal esophagus using endoscopic white light images extracted from the database of the University Hospital Augsburg.
In addition to binary classification, a second algorithm was trained with specific auxiliary branches for each EREFS feature (AI-EoE-EREFS). The AI algorithms were evaluated on an external data set from the University of North Carolina, Chapel Hill (UNC), and compared with the performance of human endoscopists with varying levels of experience. The overall sensitivity, specificity, and accuracy of AI-EoE were 0.93 for all measures, while the AUC was 0.986. With additional auxiliary branches for the EREFS categories, the AI algorithm (AI-EoE-EREFS) performance improved to 0.96, 0.94, 0.95, and 0.992 for sensitivity, specificity, accuracy, and AUC, respectively. AI-EoE and AI-EoE-EREFS performed significantly better than endoscopy beginners and senior fellows on the same set of images. An AI algorithm can be trained to detect and quantify endoscopic features of EoE with excellent performance scores. The addition of the EREFS criteria improved the performance of the AI algorithm, which performed significantly better than endoscopists with a lower or medium experience level.}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Byrne, Michael F. and Messmann, Helmut and Palm, Christoph}, title = {Multimodal imaging for detection and segmentation of Barrett's esophagus-related neoplasia using artificial intelligence}, series = {Endoscopy}, volume = {54}, journal = {Endoscopy}, number = {10}, edition = {E-Video}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1704-7885}, pages = {1}, abstract = {The early diagnosis of cancer in Barrett's esophagus is crucial for improving the prognosis. However, identifying Barrett's esophagus-related neoplasia (BERN) is challenging, even for experts [1]. Four-quadrant biopsies may improve the detection of neoplasia, but they can be associated with sampling errors. The application of artificial intelligence (AI) to the assessment of Barrett's esophagus could improve the diagnosis of BERN, and this has been demonstrated in both preclinical and clinical studies [2] [3]. In this video demonstration, we show the accurate detection and delineation of BERN in two patients ([Video 1]). In part 1, the AI system detects a mucosal cancer about 20 mm in size and accurately delineates the lesion in both white-light and narrow-band imaging. In part 2, a small island of BERN with high-grade dysplasia is detected and delineated in white-light, narrow-band, and texture and color enhancement imaging. The video shows the results using a transparent overlay of the mucosal cancer in real time as well as a full segmentation preview. Additionally, the optical flow allows for the assessment of endoscope movement, which is inversely related to the reliability of the AI prediction. We demonstrate that multimodal imaging can be applied to the AI-assisted detection and segmentation of even small focal lesions in real time.}, language = {en} } @inproceedings{RauberMendelScheppachetal., author = {Rauber, David and Mendel, Robert and Scheppach, Markus W.
and Ebigbo, Alanna and Messmann, Helmut and Palm, Christoph}, title = {Analysis of Celiac Disease with Multimodal Deep Learning}, series = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2022: Proceedings, German Workshop on Medical Image Computing, Heidelberg, June 26-28, 2022}, publisher = {Springer Vieweg}, address = {Wiesbaden}, doi = {10.1007/978-3-658-36932-3_25}, pages = {115 -- 120}, abstract = {Celiac disease is an autoimmune disorder caused by gluten that results in an inflammatory response of the small intestine. We investigated whether celiac disease can be detected using endoscopic images through a deep learning approach. The results show that additional clinical parameters can improve the classification accuracy. In this work, we distinguished between healthy tissue and Marsh III, according to the Marsh score system. We first trained a baseline network to classify endoscopic images of the small bowel into these two classes and then augmented the approach with a multimodality component that took the antibody status into account.}, language = {en} } @misc{MeinikheimMendelScheppachetal., author = {Meinikheim, Michael and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Prinz, Friederike and Schwamberger, Tanja and Schlottmann, Jakob and G{\"o}lder, Stefan Karl and Walter, Benjamin and Steinbr{\"u}ck, Ingo and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Einsatz von k{\"u}nstlicher Intelligenz (KI) als Entscheidungsunterst{\"u}tzungssystem f{\"u}r nicht-Experten bei der Beurteilung von Barrett-{\"O}sophagus assoziierten Neoplasien (BERN)}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {4}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0042-1745653}, pages = {251}, abstract = {Einleitung Die sichere Detektion und Charakterisierung von Barrett-{\"O}sophagus assoziierten Neoplasien (BERN) stellt selbst f{\"u}r erfahrene Endoskopiker eine Herausforderung dar. Ziel Ziel dieser Studie ist es, den Add-on-Effekt eines auf k{\"u}nstlicher Intelligenz (KI) basierenden Systems (Barrett-Ampel) als Entscheidungsunterst{\"u}tzungssystem f{\"u}r Endoskopiker ohne Expertise bei der Untersuchung von BERN zu evaluieren. Material und Methodik Zw{\"o}lf Videos in „Weißlicht" (WL), „narrow-band imaging" (NBI) und „texture and color enhanced imaging" (TXI) von histologisch best{\"a}tigten Barrett-Metaplasien oder BERN wurden von Experten und Untersuchern ohne Barrett-Expertise evaluiert. Die Probanden wurden dazu aufgefordert, in den Videos auftauchende BERN zu identifizieren und gegebenenfalls die optimale Biopsiestelle zu markieren. Unser KI-System wurde demselben Test unterzogen, wobei dieses BERN in Echtzeit segmentierte und farblich von umliegendem Epithel differenzierte. Anschließend wurden den Probanden die Videos mit zus{\"a}tzlicher KI-Unterst{\"u}tzung gezeigt. Basierend auf dieser neuen Information wurden die Probanden zu einer Reevaluation ihrer initialen Beurteilung aufgefordert. Ergebnisse Die „Barrett-Ampel" identifizierte unabh{\"a}ngig von den verwendeten Darstellungsmodi (WL, NBI, TXI) alle BERN. Zwei entz{\"u}ndlich ver{\"a}nderte L{\"a}sionen wurden fehlinterpretiert (Genauigkeit=75\%).
W{\"a}hrend Experten vergleichbare Ergebnisse erzielten (Genauigkeit=70,8\%), hatten Endoskopiker ohne Expertise bei der Beurteilung von Barrett-Metaplasien eine Genauigkeit von lediglich 58,3\%. Wurden die nicht-Experten allerdings von unserem KI-System unterst{\"u}tzt, erreichten diese eine Genauigkeit von 75\%. Zusammenfassung Unser KI-System hat das Potential als Entscheidungsunterst{\"u}tzungssystem bei der Differenzierung zwischen Barrett-Metaplasie und BERN zu fungieren und so Endoskopiker ohne entsprechende Expertise zu assistieren. Eine Limitation dieser Studie ist die niedrige Anzahl an eingeschlossenen Videos. Um die Ergebnisse dieser Studie zu best{\"a}tigen, m{\"u}ssen randomisierte kontrollierte klinische Studien durchgef{\"u}hrt werden.}, language = {de} } @misc{ScheppachMendelProbstetal., author = {Scheppach, Markus W. and Mendel, Robert and Probst, Andreas and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Intraprozedurale Strukturerkennung bei Third-Space Endoskopie mithilfe eines Deep-Learning Algorithmus}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {04}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0042-1745652}, pages = {e250-e251}, abstract = {Einleitung Third-Space Interventionen wie die endoskopische Submukosadissektion (ESD) und die perorale endoskopische Myotomie (POEM) sind technisch anspruchsvoll und mit einem erh{\"o}hten Risiko f{\"u}r intraprozedurale Komplikationen wie Blutung oder Perforation assoziiert. Moderne Computerprogramme zur Unterst{\"u}tzung bei diagnostischen Entscheidungen werden unter Einsatz von k{\"u}nstlicher Intelligenz (KI) in der Endoskopie bereits erfolgreich eingesetzt. Ziel der vorliegenden Arbeit war es, relevante anatomische Strukturen mithilfe eines Deep-Learning Algorithmus zu detektieren und segmentieren, um die Sicherheit und Anwendbarkeit von ESD und POEM zu erh{\"o}hen. Methoden Zw{\"o}lf Videoaufnahmen in voller L{\"a}nge von Third-Space Endoskopien wurden aus der Datenbank des Universit{\"a}tsklinikums Augsburg extrahiert. 1686 Einzelbilder wurden f{\"u}r die Kategorien Submukosa, Blutgef{\"a}ß, Dissektionsmesser und endoskopisches Instrument annotiert und segmentiert. Mit diesem Datensatz wurde ein DeepLabv3+neuronales Netzwerk auf der Basis eines ResNet mit 101 Schichten trainiert und intern anhand der Parameter Intersection over Union (IoU), Dice Score und Pixel Accuracy validiert. Die F{\"a}higkeit des Algorithmus zur Gef{\"a}ßdetektion wurde anhand von 24 Videoclips mit einer Spieldauer von 7 bis 46 Sekunden mit 33 vordefinierten Gef{\"a}ßen evaluiert. Anhand dieses Tests wurde auch die Gef{\"a}ßdetektionsrate eines Experten in der Third-Space Endoskopie ermittelt. Ergebnisse Der Algorithmus zeigte eine Gef{\"a}ßdetektionsrate von 93,94\% mit einer mittleren Rate an falsch positiven Signalen von 1,87 pro Minute. Die Gef{\"a}ßdetektionsrate des Experten lag bei 90,1\% ohne falsch positive Ergebnisse. In der internen Validierung an Einzelbildern wurde eine IoU von 63,47\%, ein mittlerer Dice Score von 76,18\% und eine Pixel Accuracy von 86,61\% ermittelt. Zusammenfassung Dies ist der erste KI-Algorithmus, der f{\"u}r den Einsatz in der therapeutischen Endoskopie entwickelt wurde. Pr{\"a}limin{\"a}re Ergebnisse deuten auf eine mit Experten vergleichbare Detektion von Gef{\"a}ßen w{\"a}hrend der Untersuchung hin. 
Weitere Untersuchungen sind n{\"o}tig, um die Leistung des Algorithmus im Vergleich zum Experten genauer zu eruieren sowie einen m{\"o}glichen klinischen Nutzen zu ermitteln.}, language = {de} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Barrett-Ampel}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1755109}, abstract = {Hintergrund Adenokarzinome des {\"O}sophagus sind bis heute mit einer infausten Prognose vergesellschaftet (1). Obwohl Endoskopiker mit Barrett-{\"O}sophagus als Pr{\"a}kanzerose konfrontiert werden, ist vor allem f{\"u}r nicht-Experten die Differenzierung zwischen Barrett-{\"O}sophagus ohne Dysplasie und assoziierten Neoplasien mitunter schwierig. Existierende Biopsieprotokolle (z.B. Seattle Protokoll) sind oftmals unzuverl{\"a}ssig (2). Eine fr{\"u}hzeitige Diagnose des Adenokarzinoms ist allerdings von fundamentaler Bedeutung f{\"u}r die Prognose des Patienten. Forschungsansatz Auf der Grundlage dieser Problematik, entwickelten wir in Kooperation mit dem Forschungslabor „Regensburg Medical Image Computing (ReMIC)" der OTH Regensburg ein auf k{\"u}nstlicher Intelligenz (KI) basiertes Entscheidungsunterst{\"u}tzungssystem (CDSS). Das auf einer DeepLabv3+ neuronalen Netzwerkarchitektur basierende CDSS differenziert mittels Mustererkennung Barrett- {\"O}sophagus ohne Dysplasie von Barrett-{\"O}sophagus mit Dysplasie bzw. Neoplasie („Klassifizierung"). Hierbei werden gemittelte Ausgabewahrscheinlichkeiten mit einem vom Benutzer definierten Schwellenwert verglichen. F{\"u}r Vorhersagen, die den Schwellenwert {\"u}berschreiten, berechnen wir die Kontur der Region und die Fl{\"a}che. Sobald die vorhergesagte L{\"a}sion eine bestimmte Gr{\"o}ße in der Eingabe {\"u}berschreitet, heben wir sie und ihren Umriss hervor. So erm{\"o}glicht eine farbkodierte Visualisierung eine Abgrenzung zwischen Dysplasie bzw. Neoplasie und normalem Barrett-Epithel („Segmentierung"). In einer Studie an Bildern in „Weißlicht" (WL) und „Narrow Band Imaging" (NBI) demonstrierten wir eine Sensitivit{\"a}t von mehr als 90\% und eine Spezifit{\"a}t von mehr als 80\% (3). In einem n{\"a}chsten Schritt, differenzierte unser KI-Algorithmus Barrett- Metaplasien von assoziierten Neoplasien anhand von zuf{\"a}llig abgegriffenen Bildern in Echtzeit mit einer Accuracy von 89.9\% (4). Darauf folgend, entwickelten wir unser System dahingehend weiter, dass unser Algorithmus nun auch dazu in der Lage ist, Untersuchungsvideos in WL, NBI und „Texture and Color Enhancement Imaging" (TXI) in Echtzeit zu analysieren (5). Aktuell f{\"u}hren wir eine Studie in einem randomisiert-kontrollierten Ansatz an unver{\"a}nderten Untersuchungsvideos in WL, NBI und TXI durch. Ausblick Um Patienten mit aus Barrett-Metaplasien resultierenden Neoplasien fr{\"u}hestm{\"o}glich an „High-Volume"-Zentren {\"u}berweisen zu k{\"o}nnen, soll unser KI-Algorithmus zuk{\"u}nftig vor allem Endoskopiker ohne extensive Erfahrung bei der Beurteilung von Barrett- {\"O}sophagus in der Krebsfr{\"u}herkennung unterst{\"u}tzen.}, subject = {Speiser{\"o}hrenkrebs}, language = {de} } @misc{MeinikheimMendelProbstetal., author = {Meinikheim, Michael and Mendel, Robert and Probst, Andreas and Scheppach, Markus W. 
and Messmann, Helmut and Palm, Christoph and Ebigbo, Alanna}, title = {Optical Flow als Methode zur Qualit{\"a}tssicherung KI-unterst{\"u}tzter Untersuchungen von Barrett-{\"O}sophagus und Barrett-{\"O}sophagus assoziierten Neoplasien}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {60}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {08}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0042-1754997}, abstract = {Einleitung {\"U}berm{\"a}ßige Bewegung im Bild kann die Performance von auf k{\"u}nstlicher Intelligenz (KI) basierenden klinischen Entscheidungsunterst{\"u}tzungssystemen (CDSS) reduzieren. Optical Flow (OF) ist eine Methode zur Lokalisierung und Quantifizierung von Bewegungen zwischen aufeinanderfolgenden Bildern. Ziel Ziel ist es, die Mensch-Computer-Interaktion (HCI) zu verbessern und Endoskopikern, die unser KI-System „Barrett-Ampel" zur Unterst{\"u}tzung bei der Beurteilung von Barrett-{\"O}sophagus (BE) verwenden, ein Echtzeit-Feedback zur aktuellen Datenqualit{\"a}t anzubieten. Methodik Dazu wurden unver{\"a}nderte Videos in „Weißlicht" (WL), „Narrow Band Imaging" (NBI) und „Texture and Color Enhancement Imaging" (TXI) von acht endoskopischen Untersuchungen von histologisch gesichertem BE und mit Barrett-{\"O}sophagus assoziierten Neoplasien (BERN) durch unseren KI-Algorithmus analysiert. Der zur Bewertung der Bildqualit{\"a}t verwendete OF beinhaltete die mittlere Magnitude und die Entropie des Histogramms der Winkel. Frames wurden automatisch extrahiert, wenn die vordefinierten Schwellenwerte von 3,0 f{\"u}r die mittlere Magnitude und 9,0 f{\"u}r die Entropie des Histogramms der Winkel {\"u}berschritten wurden. Experten sahen sich zun{\"a}chst die Videos ohne KI-Unterst{\"u}tzung an und bewerteten, ob St{\"o}rfaktoren die Sicherheit, mit der eine Diagnose im vorliegenden Fall gestellt werden kann, negativ beeinflussen. Anschließend {\"u}berpr{\"u}ften sie die extrahierten Frames. Ergebnis Gleichm{\"a}ßige Bewegung in eine Richtung, wie etwa beim Vorschieben des Endoskops, spiegelte sich, bei insignifikant ver{\"a}nderter Entropie, in einer Erh{\"o}hung der Magnitude wider. Chaotische Bewegung, zum Beispiel w{\"a}hrend des Sp{\"u}lens, war mit erh{\"o}hter Entropie assoziiert. Insgesamt war eine unruhige endoskopische Darstellung, Fl{\"u}ssigkeit sowie {\"u}berm{\"a}ßige {\"O}sophagusmotilit{\"a}t mit erh{\"o}htem OF assoziiert und korrelierte mit der Meinung der Experten {\"u}ber die Qualit{\"a}t der Videos. Der OF und die subjektive Wahrnehmung der Experten {\"u}ber die Verwertbarkeit der vorliegenden Bildsequenzen korrelierten direkt proportional. Wenn die vordefinierten Schwellenwerte des OF {\"u}berschritten wurden, war die damit verbundene Bildqualit{\"a}t in 94\% der F{\"a}lle f{\"u}r eine definitive Interpretation auch f{\"u}r Experten unzureichend. Schlussfolgerung OF hat das Potenzial, Endoskopikern ein Echtzeit-Feedback {\"u}ber die Qualit{\"a}t des Dateninputs zu bieten und so nicht nur die HCI zu verbessern, sondern auch die optimale Performance von KI-Algorithmen zu erm{\"o}glichen.}, language = {de} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Byrne, Michael F.
and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations, for example bleeding and perforation, during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, procedure time and training.}, language = {en} } @article{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Stallmach, Andreas and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Detection of duodenal villous atrophy on endoscopic images using a deep learning algorithm}, series = {Gastrointestinal Endoscopy}, journal = {Gastrointestinal Endoscopy}, publisher = {Elsevier}, doi = {10.1016/j.gie.2023.01.006}, abstract = {Background and aims Celiac disease with its endoscopic manifestation of villous atrophy is underdiagnosed worldwide. The application of artificial intelligence (AI) for the macroscopic detection of villous atrophy at routine esophagogastroduodenoscopy may improve diagnostic performance. Methods A dataset of 858 endoscopic images of 182 patients with villous atrophy and 846 images from 323 patients with normal duodenal mucosa was collected and used to train a ResNet 18 deep learning model to detect villous atrophy. An external data set was used to test the algorithm, in addition to six fellows and four board-certified gastroenterologists. Fellows could consult the AI algorithm's result during the test. From their consultation distribution, a stratification of test images into "easy" and "difficult" was performed and used for stratified performance measurement. Results External validation of the AI algorithm yielded values of 90 \%, 76 \%, and 84 \% for sensitivity, specificity, and accuracy, respectively. Fellows scored values of 63 \%, 72 \% and 67 \%, while the corresponding values in experts were 72 \%, 69 \% and 71 \%, respectively. AI consultation significantly improved all trainee performance statistics. While fellows and experts showed significantly lower performance for "difficult" images, the performance of the AI algorithm was stable. Conclusion In this study, an AI algorithm outperformed endoscopy fellows and experts in the detection of villous atrophy on endoscopic still images.
AI decision support significantly improved the performance of non-expert endoscopists. The stable performance on "difficult" images suggests a further positive add-on effect in challenging cases.}, language = {en} }
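Several entries above report segmentation quality as Intersection over Union (IoU), Dice score and pixel accuracy (for example, the Smart ESD and third-space endoscopy abstracts). As a reading aid, the following minimal Python sketch shows how these three metrics are commonly computed from binary masks; it illustrates the standard definitions only and is not the authors' implementation, and the function name segmentation_metrics is ours.

    import numpy as np

    def segmentation_metrics(pred, target):
        """Compute IoU, Dice score and pixel accuracy for two binary masks.

        pred and target are arrays of the same shape with values in {0, 1}.
        Empty-mask edge cases are scored as perfect agreement by convention.
        """
        pred = np.asarray(pred, dtype=bool)
        target = np.asarray(target, dtype=bool)
        intersection = np.logical_and(pred, target).sum()
        union = np.logical_or(pred, target).sum()
        iou = intersection / union if union else 1.0
        denom = pred.sum() + target.sum()
        dice = 2.0 * intersection / denom if denom else 1.0
        pixel_accuracy = (pred == target).mean()
        return iou, dice, pixel_accuracy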
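Likewise, the EoE and celiac disease abstracts report mean sensitivity, specificity and F1 averaged over five cross-validation folds. Below is a minimal sketch of the per-fold computation from a binary confusion matrix, assuming labels where 1 denotes disease; fold_metrics is a hypothetical helper name and no specific framework is implied.

    import numpy as np

    def fold_metrics(y_true, y_pred):
        """Sensitivity, specificity and F1 for one validation fold.

        Assumes both classes occur in the fold, so the denominators
        below are nonzero.
        """
        y_true = np.asarray(y_true, dtype=bool)
        y_pred = np.asarray(y_pred, dtype=bool)
        tp = np.sum(y_true & y_pred)    # true positives
        tn = np.sum(~y_true & ~y_pred)  # true negatives
        fp = np.sum(~y_true & y_pred)   # false positives
        fn = np.sum(y_true & ~y_pred)   # false negatives
        sensitivity = tp / (tp + fn)
        specificity = tn / (tn + fp)
        precision = tp / (tp + fp)
        f1 = 2.0 * precision * sensitivity / (precision + sensitivity)
        return sensitivity, specificity, f1

    # Mean over folds, as reported in the abstracts:
    # np.mean([fold_metrics(t, p) for t, p in folds], axis=0)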
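The Optical Flow abstract above thresholds the mean flow magnitude at 3.0 and the entropy of the angle histogram at 9.0 to flag frames of insufficient quality. The sketch below reproduces that idea under stated assumptions: the abstract does not name the flow estimator, so OpenCV's Farneback optical flow serves as a stand-in, and the bin count (1024) and log base (2) of the entropy are our choices, not the authors'.

    import cv2
    import numpy as np

    MAGNITUDE_THRESHOLD = 3.0  # thresholds taken from the abstract above
    ENTROPY_THRESHOLD = 9.0    # entropy of the angle histogram

    def flow_quality(prev_gray, curr_gray, bins=1024):
        """Mean flow magnitude and Shannon entropy of the angle histogram
        for two consecutive 8-bit grayscale frames."""
        flow = cv2.calcOpticalFlowFarneback(prev_gray, curr_gray, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hist, _ = np.histogram(angle, bins=bins, range=(0.0, 2 * np.pi))
        p = hist / hist.sum()
        p = p[p > 0]                      # drop empty bins before the log
        entropy = -np.sum(p * np.log2(p))
        return magnitude.mean(), entropy

    def is_low_quality(prev_gray, curr_gray):
        """Flag a frame pair whose motion exceeds either threshold."""
        mag, ent = flow_quality(prev_gray, curr_gray)
        return mag > MAGNITUDE_THRESHOLD or ent > ENTROPY_THRESHOLD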