@article{GrassmannMengelkampBrandletal.,
  author = {Graßmann, Felix and Mengelkamp, Judith and Brandl, Caroline and Harsch, Sebastian and Zimmermann, Martina E. and Linkohr, Birgit and Peters, Annette and Heid, Iris M. and Palm, Christoph and Weber, Bernhard H. F.},
  title = {A Deep Learning Algorithm for Prediction of Age-Related Eye Disease Study Severity Scale for Age-Related Macular Degeneration from Color Fundus Photography},
  series = {Ophthalmology},
  volume = {125},
  journal = {Ophthalmology},
  number = {9},
  publisher = {Elsevier},
  year = {2018},
  doi = {10.1016/j.ophtha.2018.02.037},
  pages = {1410 -- 1420},
  abstract = {Purpose: Age-related macular degeneration (AMD) is a common threat to vision. Although classification of disease stages is critical to understanding disease risk and progression, and several grading systems based on color fundus photographs exist, most of them require in-depth and time-consuming analysis of fundus images. Herein, we present an automated computer-based classification algorithm. Design: Algorithm development for AMD classification based on a large collection of color fundus images, with validation performed on a cross-sectional, population-based study. Participants: We included 120,656 manually graded color fundus images from 3654 Age-Related Eye Disease Study (AREDS) participants. AREDS participants were >55 years of age, and non-AMD sight-threatening diseases were excluded at recruitment. In addition, the performance of our algorithm was evaluated on 5555 fundus images from the population-based Kooperative Gesundheitsforschung in der Region Augsburg (KORA; Cooperative Health Research in the Region of Augsburg) study. Methods: We defined 13 classes (9 AREDS steps, 3 late AMD stages, and 1 for ungradable images) and trained several convolutional deep learning architectures. An ensemble of network architectures improved prediction accuracy. An independent dataset was used to evaluate the performance of our algorithm in a population-based study. Main Outcome Measures: κ statistics and accuracy to evaluate the concordance between predicted and expert human grader classification. Results: A network ensemble of 6 different neural net architectures predicted the 13 classes in the AREDS test set with a quadratic weighted κ of 92\% (95\% confidence interval, 89\%-92\%) and an overall accuracy of 63.3\%. In the independent KORA dataset, images wrongly classified as AMD were mainly the result of a macular reflex observed in young individuals. By restricting the KORA analysis to individuals >55 years of age and prior exclusion of other retinopathies, the weighted and unweighted κ increased to 50\% and 63\%, respectively. Importantly, the algorithm detected 84.2\% of all fundus images with definite signs of early or late AMD. Overall, 94.3\% of healthy fundus images were classified correctly. Conclusions: Our deep learning algorithm achieved a weighted κ that outperforms human graders in the AREDS study and is suitable for classifying AMD fundus images in other datasets of individuals >55 years of age.},
  subject = {Senile Makuladegeneration},
  language = {en}
}
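The headline metric in the entry above, the quadratic weighted κ, weights disagreements by the squared distance between ordinal classes, so confusing adjacent AREDS steps costs little while confusing distant ones costs a lot. A minimal Python sketch (not the authors' code; the labels and 10 percent error rate below are synthetic stand-ins), using scikit-learn's cohen_kappa_score:

import numpy as np
from sklearn.metrics import accuracy_score, cohen_kappa_score

rng = np.random.default_rng(0)
human = rng.integers(0, 13, size=1000)          # expert grades for the 13 classes (synthetic)
pred = human.copy()
idx = rng.choice(1000, size=100, replace=False)
pred[idx] = rng.integers(0, 13, size=100)       # perturb 10 percent to mimic model error

# weights="quadratic" penalizes each misgrading by the squared class distance
kappa = cohen_kappa_score(human, pred, weights="quadratic")
print(f"quadratic weighted kappa = {kappa:.3f}, accuracy = {accuracy_score(human, pred):.3f}")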
M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_23}, pages = {80 -- 85}, abstract = {We propose an automatic approach for early detection of adenocarcinoma in the esophagus. High-definition endoscopic images (50 cancer, 50 Barrett) are partitioned into a dataset containing approximately equal amounts of patches showing cancerous and non-cancerous regions. A deep convolutional neural network is adapted to the data using a transfer learning approach. The final classification of an image is determined by at least one patch, for which the probability being a cancer patch exceeds a given threshold. The model was evaluated with leave one patient out cross-validation. With sensitivity and specificity of 0.94 and 0.88, respectively, our findings improve recently published results on the same image data base considerably. Furthermore, the visualization of the class probabilities of each individual patch indicates, that our approach might be extensible to the segmentation domain.}, subject = {Speiser{\"o}hrenkrebs}, language = {en} } @misc{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Artificial Intelligence in Early Barrett's Cancer: The Segmentation Task}, series = {Endoscopy}, volume = {51}, journal = {Endoscopy}, number = {04}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/s-0039-1681187}, pages = {6}, abstract = {Aims: The delineation of outer margins of early Barrett's cancer can be challenging even for experienced endoscopists. Artificial intelligence (AI) could assist endoscopists faced with this task. As of date, there is very limited experience in this domain. In this study, we demonstrate the measure of overlap (Dice coefficient = D) between highly experienced Barrett endoscopists and an AI system in the delineation of cancer margins (segmentation task). Methods: An AI system with a deep convolutional neural network (CNN) was trained and tested on high-definition endoscopic images of early Barrett's cancer (n = 33) and normal Barrett's mucosa (n = 41). The reference standard for the segmentation task were the manual delineations of tumor margins by three highly experienced Barrett endoscopists. Training of the AI system included patch generation, patch augmentation and adjustment of the CNN weights. Then, the segmentation results from patch classification and thresholding of the class probabilities. Segmentation results were evaluated using the Dice coefficient (D). Results: The Dice coefficient (D) which can range between 0 (no overlap) and 1 (complete overlap) was computed only for images correctly classified by the AI-system as cancerous. At a threshold of t = 0.5, a mean value of D = 0.72 was computed. Conclusions: AI with CNN performed reasonably well in the segmentation of the tumor region in Barrett's cancer, at least when compared with expert Barrett's endoscopists. 
@article{SouzaJrPalmMendeletal.,
  author = {Souza Jr., Luis Antonio de and Palm, Christoph and Mendel, Robert and Hook, Christian and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Weber, Silke A. T. and Papa, Jo{\~a}o Paulo},
  title = {A survey on Barrett's esophagus analysis using machine learning},
  series = {Computers in Biology and Medicine},
  volume = {96},
  journal = {Computers in Biology and Medicine},
  publisher = {Elsevier},
  year = {2018},
  doi = {10.1016/j.compbiomed.2018.03.014},
  pages = {203 -- 213},
  abstract = {This work presents a systematic review of recent studies and technologies of machine learning for Barrett's esophagus (BE) diagnosis and treatment. The use of artificial intelligence is a new and promising way to evaluate this disease. We compile works published in well-established databases, such as ScienceDirect, IEEE Xplore, PubMed, PLOS ONE, Multidisciplinary Digital Publishing Institute (MDPI), Association for Computing Machinery (ACM), Springer, and Hindawi Publishing Corporation. Each selected work has been analyzed to present its objective, methodology, and results. The progression of BE to dysplasia or adenocarcinoma shows a pattern that is complex to detect during endoscopic surveillance; therefore, it is valuable to assist its diagnosis and automatic identification by computer analysis. The evaluation of BE dysplasia can be performed through manual segmentation or automated segmentation using machine learning techniques. Finally, in this survey, we review recent studies focused on the automatic detection of the neoplastic region for classification purposes using machine learning methods.},
  subject = {Speiser{\"o}hrenkrankheit},
  language = {en}
}
@inproceedings{SouzaJrAfonsoPalmetal.,
  author = {Souza Jr., Luis Antonio de and Afonso, Luis Claudio Sugi and Palm, Christoph and Papa, Jo{\~a}o Paulo},
  title = {Barrett's Esophagus Identification Using Optimum-Path Forest},
  series = {Proceedings of the 30th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T 2017), Niter{\'o}i, Rio de Janeiro, Brazil, 2017, 17-20 October},
  booktitle = {Proceedings of the 30th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T 2017), Niter{\'o}i, Rio de Janeiro, Brazil, 2017, 17-20 October},
  year = {2017},
  doi = {10.1109/SIBGRAPI.2017.47},
  pages = {308 -- 314},
  abstract = {Computer-assisted analysis of endoscopic images can be helpful for the automatic diagnosis and classification of neoplastic lesions. Barrett's esophagus (BE) is a common reflux-related condition that is not straightforward to detect by endoscopic surveillance and is thus highly susceptible to erroneous diagnosis, which can lead to cancer when not treated properly. In this work, we introduce the Optimum-Path Forest (OPF) classifier to the task of automatic identification of Barrett's esophagus, with promising results that outperform the well-known Support Vector Machines (SVM) in this context. We describe endoscopic images by means of feature extractors based on key point information, such as Speeded Up Robust Features (SURF) and the Scale-Invariant Feature Transform (SIFT), and then design a bag-of-visual-words that is used to feed both the OPF and SVM classifiers. The best results were obtained with the OPF classifier for both feature extractors, with sensitivity of 0.732 (SURF) and 0.735 (SIFT), specificity of 0.782 (SURF) and 0.806 (SIFT), and accuracy of 0.738 (SURF) and 0.732 (SIFT).},
  subject = {Speiser{\"o}hrenkrankheit},
  language = {en}
}
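The feature pipeline in the last entry is a standard bag-of-visual-words construction: extract SIFT (or SURF) descriptors per image, cluster them into a visual vocabulary, and represent each image as a word histogram fed to a classifier. A minimal Python sketch under assumed parameters (vocabulary size, SVM kernel); since OPF is not part of scikit-learn, the SVM baseline stands in here, with OPF available from implementations such as LibOPF or OPFython:

import cv2
import numpy as np
from sklearn.cluster import KMeans
from sklearn.svm import SVC

def sift_descriptors(image_bgr):
    """Extract SIFT key-point descriptors (128-dim each) from one endoscopic image."""
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    _, desc = cv2.SIFT_create().detectAndCompute(gray, None)
    return desc if desc is not None else np.empty((0, 128), np.float32)

def bovw_histogram(desc, kmeans):
    """Map one image's descriptors to a normalized visual-word histogram."""
    if len(desc) == 0:
        return np.zeros(kmeans.n_clusters)
    hist = np.bincount(kmeans.predict(desc), minlength=kmeans.n_clusters).astype(float)
    return hist / hist.sum()

def train_bovw_classifier(images, labels, vocabulary_size=500):
    """Build the visual vocabulary on all training descriptors, then fit the classifier."""
    per_image = [sift_descriptors(img) for img in images]
    kmeans = KMeans(n_clusters=vocabulary_size, n_init=10).fit(np.vstack(per_image))
    features = np.array([bovw_histogram(d, kmeans) for d in per_image])
    return kmeans, SVC(kernel="rbf").fit(features, labels)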