@misc{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Artificial Intelligence in Early Barrett's Cancer: The Segmentation Task}, series = {Endoscopy}, volume = {51}, journal = {Endoscopy}, number = {04}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0039-1681187}, pages = {6}, abstract = {Aims: The delineation of the outer margins of early Barrett's cancer can be challenging even for experienced endoscopists. Artificial intelligence (AI) could assist endoscopists faced with this task. To date, there is very limited experience in this domain. In this study, we demonstrate the measure of overlap (Dice coefficient = D) between highly experienced Barrett endoscopists and an AI system in the delineation of cancer margins (segmentation task). Methods: An AI system with a deep convolutional neural network (CNN) was trained and tested on high-definition endoscopic images of early Barrett's cancer (n = 33) and normal Barrett's mucosa (n = 41). The reference standard for the segmentation task was the manual delineation of tumor margins by three highly experienced Barrett endoscopists. Training of the AI system included patch generation, patch augmentation and adjustment of the CNN weights. The segmentation was then obtained from patch classification and thresholding of the class probabilities. Segmentation results were evaluated using the Dice coefficient (D). Results: The Dice coefficient (D), which can range between 0 (no overlap) and 1 (complete overlap), was computed only for images correctly classified by the AI system as cancerous. At a threshold of t = 0.5, a mean value of D = 0.72 was computed. Conclusions: AI with CNN performed reasonably well in the segmentation of the tumor region in Barrett's cancer, at least when compared with expert Barrett's endoscopists. AI holds a lot of promise as a tool for better visualization of tumor margins but may need further improvement and enhancement, especially in real-time settings.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} }

@inproceedings{PalmLehmannBrednoetal., author = {Palm, Christoph and Lehmann, Thomas M. and Bredno, J. and Neuschaefer-Rube, C. and Klajman, S. and Spitzer, Klaus}, title = {Automated Analysis of Stroboscopic Image Sequences by Vibration Profiles}, series = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 5th International Workshop}, booktitle = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 5th International Workshop}, abstract = {A method for automated segmentation of vocal cords in stroboscopic video sequences is presented. In contrast to earlier approaches, the inner and outer contours of the vocal cords are independently delineated. Automatic segmentation of the low-contrast images is carried out by connecting the shape constraint of a point distribution model to a multi-channel region-based balloon model. This enables us to robustly compute a vibration profile that is used as a new diagnostic tool to visualize several vibration parameters in only one graphic. The vibration profiles are studied in two cases: one physiological vibration and one functional pathology.}, language = {en} }

@inproceedings{PalmFischerLehmannetal., author = {Palm, Christoph and Fischer, B. and Lehmann, Thomas M.
and Spitzer, Klaus}, title = {Hierarchische Wasserscheiden-Transformation zur Lippensegmentierung in Farbbildern}, series = {Bildverarbeitung f{\"u}r die Medizin 2000}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2000}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-642-59757-2_20}, pages = {106 -- 110}, abstract = {Zur L{\"o}sung komplexer Segmentierungsprobleme wird eine hierarchische und farbbasierte Wasserscheidentransformation vorgestellt. Geringe Modifikationen bez{\"u}glich Startpunktwahl und Flutungsprozess resultieren in signifikanten Verbesserungen der Segmentierung. Das Verfahren wurde zur Lippendetektion in Farbbildsequenzen eingesetzt, die zur quantitativen Beschreibung von Sprechbewegungsabl{\"a}ufen automatisch ausgewertet werden. Die Experimente mit 245 Bildern aus 6 Sequenzen zeigten eine Fehlerrate von 13\%.}, language = {de} }

@inproceedings{PalmLehmannSpitzer, author = {Palm, Christoph and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Color Texture Analysis of Moving Vocal Cords Using Approaches from Statistics and Signal Theory}, series = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 4th International Workshop, Friedrich Schiller University, Jena}, booktitle = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Procs. 4th International Workshop, Friedrich Schiller University, Jena}, pages = {49 -- 56}, abstract = {Textural features are applied for the detection of morphological pathologies of vocal cords. Co-occurrence matrices as statistical features are presented as well as filter bank analysis by Gabor filters. Both methods are extended to handle color images. Their robustness against camera movement and vibration of vocal cords is evaluated. Classification results for three in vivo sequences are between 94.4\% and 98.9\%. The classification errors decrease if color features are used instead of grayscale features, for both statistical and Fourier features.}, language = {en} }

@article{NeuschaeferRubeLehmannPalmetal., author = {Neuschaefer-Rube, C. and Lehmann, Thomas M. and Palm, Christoph and Bredno, J. and Klajman, S. and Spitzer, Klaus}, title = {3D-Visualisierung glottaler Abduktionsbewegungen}, series = {Aktuelle phoniatrisch-p{\"a}daudiologische Aspekte}, volume = {2001/2002}, journal = {Aktuelle phoniatrisch-p{\"a}daudiologische Aspekte}, number = {9}, publisher = {Median}, isbn = {3-922766-76-5}, pages = {58 -- 61}, language = {de} }

@inproceedings{FischerPalmLehmannetal., author = {Fischer, B. and Palm, Christoph and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Selektion von Farbtexturmerkmalen zur Tumorklassifikation dermatoskopischer Fotografien}, series = {Bildverarbeitung f{\"u}r die Medizin 2002}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2002}, publisher = {Springer}, address = {Berlin}, pages = {238 -- 241}, language = {de} }

@book{Palm, author = {Palm, Christoph}, title = {Integrative Auswertung von Farbe und Textur}, publisher = {Der Andere Verlag}, language = {de} }

@article{PalmDehnhardtVietenetal., author = {Palm, Christoph and Dehnhardt, Markus and Vieten, Andrea and Pietrzyk, Uwe and Bauer, Andreas and Zilles, Karl}, title = {3D rat brain tumors}, series = {Naunyn-Schmiedebergs Archives of Pharmacology}, volume = {371}, journal = {Naunyn-Schmiedebergs Archives of Pharmacology}, number = {R103}, language = {en} }

@inproceedings{PalmSchollLehmannetal., author = {Palm, Christoph and Scholl, Ingrid and Lehmann, Thomas M.
and Spitzer, Klaus}, title = {Nutzung eines Farbkonstanz-Algorithmus zur Entfernung von Glanzlichtern in laryngoskopischen Bildern}, series = {Methoden der Medizinischen Informatik, Biometrie und Epidemiologie in der modernen Informationsgesellschaft}, booktitle = {Methoden der Medizinischen Informatik, Biometrie und Epidemiologie in der modernen Informationsgesellschaft}, editor = {Greiser, E. and Wischnewsky, M.}, publisher = {MMV Medien und Medizin}, address = {M{\"u}nchen}, isbn = {9783820813357}, pages = {300 -- 303}, abstract = {1 Einf{\"u}hrung Funktionelle und organische St{\"o}rungen im Larynx beeintr{\"a}chtigen die Ausdrucksf{\"a}higkeit des Menschen. Zur Diagnostik und Verlaufskontrolle werden die Stimmlippen im Larynx mit Hilfe der Video-Laryngoskopie aufgenommen. Zur optimalen Farbmessung wird dazu an das Lupenendoskop eine 3-Chip-CCD-Kamera angeschlossen, die eine unabh{\"a}ngige Aufnahme der drei Farbkan{\"a}le erlaubt. Die bisherige subjektive Befundung ist von der Erfahrung des Untersuchers abh{\"a}ngig und l{\"a}ßt nur eine grobe Klassifikation der Krankheitsbilder zu. Zur Objektivierung werden daher quantitative Parameter f{\"u}r Farbe, Textur und Schwingung entwickelt. Neben dem Einfluß der wechselnden Lichtquellenfarbe auf den Farbeindruck ist die Sekretauflage auf den Stimmlippen ein Problem bei der Farb- und Texturanalyse. Sie kann zu ausgedehnten Glanzlichtern f{\"u}hren und so weite Bereiche der Stimmlippen f{\"u}r die Farb- und Texturanalyse unbrauchbar machen. Dieser Beitrag stellt einen Farbkonstanz-Algorithmus vor, der unabh{\"a}ngig von der Lichtquelle quantitative Farbwerte des Gewebes liefert und die Glanzlichtdetektion und -elimination erm{\"o}glicht. 2 Methodik Ziel des Farbkonstanz-Algorithmus ist die Trennung von Lichtquellen- und Gewebefarbe. Unter Verwendung des dichromatischen Reflexionsmodells [1] kann die Oberfl{\"a}chenreflexion mit der Farbe der Lichtquelle und die K{\"o}rperreflexion mit der Gewebefarbe identifiziert werden. Der Farbeindruck entsteht aus der Linearkombination beider Farbkomponenten. Ihre Gewichtung ist von der Aufnahmegeometrie abh{\"a}ngig, insbesondere vom Winkel zwischen Oberfl{\"a}chennormalen und dem Positionsvektor der Lichtquelle. In einem zweistufigen Verfahren wird zun{\"a}chst die Lichtquellenfarbe gesch{\"a}tzt, dann die Gewebefarbe ermittelt. Hieraus k{\"o}nnen beide Farbanteile durch die Berechnung der Gewichtsfaktoren pixelweise getrennt werden.}, language = {de} }

@inproceedings{ChangLinLeeetal., author = {Chang, Ching-Sheng and Lin, Jin-Fa and Lee, Ming-Ching and Palm, Christoph}, title = {Semantic Lung Segmentation Using Convolutional Neural Networks}, series = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {10.1007/978-3-658-29267-6_17}, pages = {75 -- 80}, abstract = {Chest X-Ray (CXR) images, as part of a non-invasive diagnosis method, are commonly used in today's medical workflow. In traditional methods, physicians usually use their experience to interpret CXR images; however, there is a large interobserver variance.
Computer vision may be used as a standard for assisted diagnosis. In this study, we applied an encoder-decoder neural network architecture for automatic lung region detection. We compared a three-class approach (left lung, right lung, background) and a two-class approach (lung, background). The differentiation of left and right lungs as a direct result of semantic segmentation based on neural nets, rather than as post-processing of a lung-background segmentation, is done here for the first time. Our evaluation was done on the NIH Chest X-ray dataset, from which 1736 images were extracted and manually annotated. We achieved 94.9\% mIoU and 92\% mIoU as segmentation quality measures for the two-class model and the three-class model, respectively. This result is very promising for the segmentation of lung regions, having the simultaneous classification of left and right lung in mind.}, subject = {Neuronales Netz}, language = {en} }

@inproceedings{SouzaJrEbigboProbstetal., author = {Souza Jr., Luis Antonio de and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Mendel, Robert and Palm, Christoph}, title = {Barrett's Esophagus Identification Using Color Co-occurrence Matrices}, series = {31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Parana, 2018}, booktitle = {31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Parana, 2018}, doi = {10.1109/SIBGRAPI.2018.00028}, pages = {166 -- 173}, abstract = {In this work, we propose the use of single-channel Color Co-occurrence Matrices for texture description of Barrett's Esophagus (BE) and adenocarcinoma images. Further classification using supervised learning techniques, such as Optimum-Path Forest (OPF), Support Vector Machines with Radial Basis Function (SVM-RBF) and a Bayesian classifier, supports the context of automatic BE and adenocarcinoma diagnosis. We validated three approaches of classification based on patches, patients and images in two datasets (MICCAI 2015 and Augsburg) using the color-and-texture descriptors and the machine learning techniques. Concerning the MICCAI 2015 dataset, the best results were obtained using the blue channel for the descriptors and the supervised OPF for classification purposes in the patch-based approach, with sensitivity of nearly 73\% for positive adenocarcinoma identification and specificity close to 77\% for BE (non-cancerous) patch classification. Regarding the Augsburg dataset, the most accurate results were also obtained using both the OPF classifier and the blue channel descriptor for the feature extraction, with sensitivity close to 67\% and specificity around 76\%. Our work highlights new advances in the related research area and provides a promising technique that combines color and texture information, allied to three different approaches of dataset pre-processing, aiming to configure robust scenarios for the classification step.}, language = {en} }

@inproceedings{MiddelPalmErdt, author = {Middel, Luise and Palm, Christoph and Erdt, Marius}, title = {Synthesis of Medical Images Using GANs}, series = {Uncertainty for safe utilization of machine learning in medical imaging and clinical image-based procedures. First International Workshop, UNSURE 2019, and 8th International Workshop, CLIP 2019, held in conjunction with MICCAI 2019, Shenzhen, China, October 17, 2019}, booktitle = {Uncertainty for safe utilization of machine learning in medical imaging and clinical image-based procedures.
First International Workshop, UNSURE 2019, and 8th International Workshop, CLIP 2019, held in conjunction with MICCAI 2019, Shenzhen, China, October 17, 2019}, publisher = {Springer Nature}, address = {Cham}, isbn = {978-3-030-32688-3}, issn = {0302-9743}, doi = {10.1007/978-3-030-32689-0_13}, pages = {125 -- 134}, abstract = {The success of artificial intelligence in medicine depends on the availability of large amounts of high-quality training data. Sharing of medical image data, however, is often restricted by laws such as doctor-patient confidentiality. Although there are publicly available medical datasets, their quality and quantity are often low. Moreover, datasets are often imbalanced and only represent a fraction of the images generated in hospitals or clinics and can thus usually only be used as training data for specific problems. The introduction of generative adversarial networks (GANs) provides a means to generate artificial images by training two convolutional networks. This paper proposes a method which uses GANs trained on medical images in order to generate a large number of artificial images that could be used to train other artificial intelligence algorithms. This work is a first step towards alleviating data privacy concerns and being able to publicly share data that still contains a substantial amount of the information in the original private data. The method has been evaluated on several public datasets, with quantitative and qualitative tests showing promising results.}, subject = {Neuronale Netze}, language = {en} }

@inproceedings{PalmLehmannSpitzer, author = {Palm, Christoph and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Bestimmung der Lichtquellenfarbe bei der Endoskopie mikrotexturierter Oberfl{\"a}chen des Kehlkopfes}, series = {5. Workshop Farbbildverarbeitung, Ilmenau, 1999}, booktitle = {5. Workshop Farbbildverarbeitung, Ilmenau, 1999}, pages = {3 -- 10}, abstract = {Zur Unterst{\"u}tzung der Diagnose von Stimmlippenerkrankungen werden innerhalb des Forschungsprojektes Quantitative Digitale Laryngoskopie objektive Parameter zur Beschreibung der Bewegung, der Farbe sowie der Form der Stimmlippen entwickelt und klinisch evaluiert. W{\"a}hrend die Bewegungsanalyse Aufschluß {\"u}ber funktionelle Stimmst{\"o}rungen gibt, beschreiben Parameter der Farb- und Formanalyse morphologische Ver{\"a}nderungen des Stimmlippengewebes. In diesem Beitrag werden die Methoden und bisherigen Ergebnisse zur Bewegungs- und Farbanalyse vorgestellt. Die Bewegungsanalyse wurde mit einem erweiterten Konturmodell (Snakes) durchgef{\"u}hrt. Aufgrund des modifizierten Konturmodells konnten die Konturen der Stimmlippen automatisch {\"u}ber die gesamte Bildsequenz zuverl{\"a}ssig detektiert werden. Die Vermessung der Konturen liefert neue quantitative Parameter zur Befundung von laryngoskopischen Stimmlippenaufnahmen. Um die Farbeigenschaften der Stimmlippen zu bestimmen, wurde ausgehend vom RGB-Bild die Objektfarbe unabh{\"a}ngig von der Farbe der Lichtquelle durch Verwendung von Clusterverfahren und der Viertelkreisanalyse berechnet. Mit dieser Farbanalyse konnte die Farbe der Lichtquelle ermittelt und das beleuchtungsunabh{\"a}ngige Farbbild berechnet werden. Die Quantifizierung der R{\"o}tung der Stimmlippen ist z.B.
ein entscheidendes Kriterium zur Diagnostik der akuten Laryngitis.}, language = {de} }

@inproceedings{WeihererZornWittenbergetal., author = {Weiherer, Maximilian and Zorn, Martin and Wittenberg, Thomas and Palm, Christoph}, title = {Retrospective Color Shading Correction for Endoscopic Images}, series = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020. Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {10.1007/978-3-658-29267-6}, pages = {14 -- 19}, abstract = {In this paper, we address the problem of retrospective color shading correction. An extension of the established gray-level shading correction algorithm based on signal envelope (SE) estimation to color images is developed using principal color components. Compared to the probably most general shading correction algorithm based on entropy minimization, SE estimation does not need any computationally expensive optimization and thus can be implemented more efficiently. We tested our new shading correction scheme on artificial as well as real endoscopic images and observed promising results. Additionally, an in-depth analysis of the stop criterion used in the SE estimation algorithm is provided, leading to the conclusion that a fixed, user-defined threshold is generally not feasible. Thus, we present new ideas on how to develop a non-parametric version of the SE estimation algorithm using entropy.}, subject = {Endoskopie}, language = {en} }

@inproceedings{PalmKeysersLehmannetal., author = {Palm, Christoph and Keysers, Daniel and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Gabor Filtering of Complex Hue/Saturation Images for Color Texture Classification}, series = {Proceedings of the 5th Joint Conference on Information Science (JCIS) 2, The Association for Intelligent Machinery, Atlantic City, NJ, 2000}, booktitle = {Proceedings of the 5th Joint Conference on Information Science (JCIS) 2, The Association for Intelligent Machinery, Atlantic City, NJ, 2000}, pages = {45 -- 49}, abstract = {Objective: Complex hue/saturation images as a new approach for color texture classification using Gabor filters are introduced and compared with common techniques. Method: The interpretation of hue and saturation as polar coordinates allows direct use of the HSV-colorspace for the Fourier transform. This technique is applied for Gabor feature extraction of color textures. In contrast to other color features based on the RGB-colorspace [1], the combination of color bands is done prior to the filtering. Results: The performance of the new HS-features is compared with that of RGB-based as well as grayscale Gabor features by evaluating the classification of 30 natural textures. The new HS-features show the same results as the best RGB features but allow a more compact representation. On average, the color features improve the results of the grayscale features. Conclusion: The consideration of the color information enhances the classification of color texture.
The choice of colorspace cannot be judged conclusively, but the introduced features suggest the use of the HSV-colorspace with fewer features than RGB.}, language = {en} }

@article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Real-time use of artificial intelligence in the evaluation of cancer in Barrett's oesophagus}, series = {Gut}, volume = {69}, journal = {Gut}, number = {4}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2019-319460}, pages = {615 -- 616}, abstract = {Based on previous work by our group with manual annotation of visible Barrett oesophagus (BE) cancer images, a real-time deep learning artificial intelligence (AI) system was developed. While an expert endoscopist conducts the endoscopic assessment of BE, our AI system captures random images from the real-time camera livestream and provides a global prediction (classification), as well as a dense prediction (segmentation) differentiating accurately between normal BE and early oesophageal adenocarcinoma (EAC). The AI system showed an accuracy of 89.9\% on 14 cases with neoplastic BE.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} }

@article{PalmLehmann, author = {Palm, Christoph and Lehmann, Thomas M.}, title = {Classification of Color Textures by Gabor Filtering}, series = {Machine GRAPHICS \& VISION}, volume = {11}, journal = {Machine GRAPHICS \& VISION}, number = {2/3}, pages = {195 -- 219}, language = {en} }

@article{ArribasAntonelliFrazzonietal., author = {Arribas, Julia and Antonelli, Giulio and Frazzoni, Leonardo and Fuccio, Lorenzo and Ebigbo, Alanna and van der Sommen, Fons and Ghatwary, Noha and Palm, Christoph and Coimbra, Miguel and Renna, Francesco and Bergman, Jacques J.G.H.M. and Sharma, Prateek and Messmann, Helmut and Hassan, Cesare and Dinis-Ribeiro, Mario J.}, title = {Standalone performance of artificial intelligence for upper GI neoplasia: a meta-analysis}, series = {Gut}, volume = {70}, journal = {Gut}, number = {8}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2020-321922}, pages = {1458 -- 1468}, abstract = {Objective: Artificial intelligence (AI) may reduce underdiagnosed or overlooked upper GI (UGI) neoplastic and preneoplastic conditions, due to subtle appearance and low disease prevalence. Only disease-specific AI performances have been reported, generating uncertainty on its clinical value. Design: We searched PubMed, Embase and Scopus until July 2020, for studies on the diagnostic performance of AI in detection and characterisation of UGI lesions. Primary outcomes were pooled diagnostic accuracy, sensitivity and specificity of AI. Secondary outcomes were pooled positive (PPV) and negative (NPV) predictive values. We calculated pooled proportion rates (\%), designed summary receiver operating characteristic curves with respective areas under the curve (AUCs) and performed metaregression and sensitivity analysis. Results: Overall, 19 studies on detection of oesophageal squamous cell neoplasia (ESCN) or Barrett's esophagus-related neoplasia (BERN) or gastric adenocarcinoma (GCA) were included with 218, 445, 453 patients and 7976, 2340, 13 562 images, respectively.
AI sensitivity/specificity/PPV/NPV/positive likelihood ratio/negative likelihood ratio for UGI neoplasia detection were 90\% (CI 85\% to 94\%)/89\% (CI 85\% to 92\%)/87\% (CI 83\% to 91\%)/91\% (CI 87\% to 94\%)/8.2 (CI 5.7 to 11.7)/0.111 (CI 0.071 to 0.175), respectively, with an overall AUC of 0.95 (CI 0.93 to 0.97). No difference in AI performance across ESCN, BERN and GCA was found, with AUCs of 0.94 (CI 0.52 to 0.99), 0.96 (CI 0.95 to 0.98) and 0.93 (CI 0.83 to 0.99), respectively. Overall, study quality was low, with high risk of selection bias. No significant publication bias was found. Conclusion: We found a high overall AI accuracy for the diagnosis of any neoplastic lesion of the UGI tract that was independent of the underlying condition. This may be expected to substantially reduce the miss rate of precancerous lesions and early cancer when implemented in clinical practice.}, language = {en} }

@article{EbigboMendelRueckertetal., author = {Ebigbo, Alanna and Mendel, Robert and R{\"u}ckert, Tobias and Schuster, Laurin and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Mende, Matthias and Steinbr{\"u}ck, Ingo and Faiss, Siegbert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Deprez, Pierre and Oyama, Tsuneo and Takahashi, Akiko and Seewald, Stefan and Sharma, Prateek and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Endoscopic prediction of submucosal invasion in Barrett's cancer with the use of Artificial Intelligence: A pilot study}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {09}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-1311-8570}, pages = {878 -- 883}, abstract = {Background and aims: The accurate differentiation between T1a and T1b Barrett's cancer has both therapeutic and prognostic implications but is challenging even for experienced physicians. We trained an Artificial Intelligence (AI) system on the basis of deep artificial neural networks (deep learning) to differentiate between T1a and T1b Barrett's cancer white-light images. Methods: Endoscopic images from three tertiary care centres in Germany were collected retrospectively. A deep learning system was trained and tested using the principles of cross-validation. A total of 230 white-light endoscopic images (108 T1a and 122 T1b) was evaluated with the AI system. For comparison, the images were also classified by experts specialized in endoscopic diagnosis and treatment of Barrett's cancer. Results: The sensitivity, specificity, F1 and accuracy of the AI system in the differentiation between T1a and T1b cancer lesions were 0.77, 0.64, 0.73 and 0.71, respectively. There was no statistically significant difference between the performance of the AI system and that of human experts, with sensitivity, specificity, F1 and accuracy of 0.63, 0.78, 0.67 and 0.70, respectively. Conclusion: This pilot study demonstrates the first multicenter application of an AI-based system in the prediction of submucosal invasion in endoscopic images of Barrett's cancer. The AI system scored on par with international experts in the field, but more work is necessary to improve the system and apply it to video sequences and real-life settings. Nevertheless, the correct prediction of submucosal invasion in Barrett's cancer remains challenging for both experts and AI.}, subject = {Maschinelles Lernen}, language = {en} }

@article{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A.
and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Assisting Barrett's esophagus identification using endoscopic data augmentation based on Generative Adversarial Networks}, series = {Computers in Biology and Medicine}, volume = {126}, journal = {Computers in Biology and Medicine}, number = {November}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2020.104029}, pages = {12}, abstract = {Barrett's esophagus has seen a swift rise in the number of cases in the past years. Although traditional diagnosis methods have played a vital role in early-stage treatment, they are generally time- and resource-consuming. In this context, computer-aided approaches for automatic diagnosis emerged in the literature, since early detection is intrinsically related to remission probabilities. However, they still suffer from drawbacks because of the lack of available data for machine learning purposes, thus implying reduced recognition rates. This work introduces Generative Adversarial Networks to generate high-quality endoscopic images, thereby identifying Barrett's esophagus and adenocarcinoma more precisely. Further, Convolutional Neural Networks are used for feature extraction and classification purposes. The proposed approach is validated over two datasets of endoscopic images, with the experiments conducted over the full and patch-split images. The application of Deep Convolutional Generative Adversarial Networks for the data augmentation step and LeNet-5 and AlexNet for the classification step allowed us to validate the proposed methodology over an extensive set of datasets (based on original and augmented sets), reaching 90\% accuracy for the patch-based approach and 85\% for the image-based approach. Both results are based on augmented datasets and are statistically different from the ones obtained in the original datasets of the same kind. Moreover, the impact of data augmentation was evaluated in the context of image description and classification, and the results obtained using synthetic images outperformed the ones over the original datasets, as well as other recent approaches from the literature. Such results suggest promising insights related to the importance of proper data for the accurate classification concerning computer-assisted Barrett's esophagus and adenocarcinoma detection.}, subject = {Maschinelles Lernen}, language = {en} }

@unpublished{WeihererEigenbergerBrebantetal., author = {Weiherer, Maximilian and Eigenberger, Andreas and Br{\'e}bant, Vanessa and Prantl, Lukas and Palm, Christoph}, title = {Learning the shape of female breasts: an open-access 3D statistical shape model of the female breast built from 110 breast scans}, pages = {15}, abstract = {We present the Regensburg Breast Shape Model (RBSM) - a 3D statistical shape model of the female breast built from 110 breast scans, and the first ever publicly available. Together with the model, a fully automated, pairwise surface registration pipeline used to establish correspondence among 3D breast scans is introduced. Our method is computationally efficient and requires only four landmarks to guide the registration process. In order to weaken the strong coupling between breast and thorax, we propose to minimize the variance outside the breast region as much as possible. To achieve this goal, a novel concept called breast probability masks (BPMs) is introduced.
A BPM assigns probabilities to each point of a 3D breast scan, telling how likely it is that a particular point belongs to the breast area. During registration, we use BPMs to align the template to the target as accurately as possible inside the breast region and only roughly outside. This simple yet effective strategy significantly reduces the unwanted variance outside the breast region, leading to better statistical shape models in which breast shapes are quite well decoupled from the thorax. The RBSM is thus able to produce a variety of different breast shapes as independently as possible from the shape of the thorax. Our systematic experimental evaluation reveals a generalization ability of 0.17 mm and a specificity of 2.8 mm for the RBSM. Ultimately, our model is seen as a first step towards combining physically motivated deformable models of the breast and statistical approaches in order to enable more realistic surgical outcome simulation.}, language = {en} }