@article{EbigboMendelRueckertetal.,
  author = {Ebigbo, Alanna and Mendel, Robert and R{\"u}ckert, Tobias and Schuster, Laurin and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Mende, Matthias and Steinbr{\"u}ck, Ingo and Faiss, Siegbert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Deprez, Pierre and Oyama, Tsuneo and Takahashi, Akiko and Seewald, Stefan and Sharma, Prateek and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut},
  title = {Endoscopic prediction of submucosal invasion in Barrett's cancer with the use of Artificial Intelligence: A pilot study},
  series = {Endoscopy},
  volume = {53},
  journal = {Endoscopy},
  number = {09},
  publisher = {Thieme},
  address = {Stuttgart},
  doi = {10.1055/a-1311-8570},
  pages = {878--883},
  abstract = {Background and aims: The accurate differentiation between T1a and T1b Barrett's cancer has both therapeutic and prognostic implications but is challenging even for experienced physicians. We trained an Artificial Intelligence (AI) system based on deep artificial neural networks (deep learning) to differentiate between T1a and T1b Barrett's cancer on white-light images. Methods: Endoscopic images from three tertiary care centres in Germany were collected retrospectively. A deep learning system was trained and tested using the principles of cross-validation. A total of 230 white-light endoscopic images (108 T1a and 122 T1b) were evaluated with the AI system. For comparison, the images were also classified by experts specialized in the endoscopic diagnosis and treatment of Barrett's cancer. Results: The sensitivity, specificity, F1 score, and accuracy of the AI system in differentiating between T1a and T1b cancer lesions were 0.77, 0.64, 0.73, and 0.71, respectively. There was no statistically significant difference between the performance of the AI system and that of the human experts, whose sensitivity, specificity, F1 score, and accuracy were 0.63, 0.78, 0.67, and 0.70, respectively. Conclusion: This pilot study demonstrates the first multicenter application of an AI-based system for the prediction of submucosal invasion in endoscopic images of Barrett's cancer. The AI scored on par with international experts in the field, but more work is necessary to improve the system and to apply it to video sequences and real-life settings. Nevertheless, the correct prediction of submucosal invasion in Barrett's cancer remains challenging for both experts and AI.},
  subject = {Maschinelles Lernen},
  language = {en}
}

@article{SouzaJrPassosMendeletal.,
  author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo},
  title = {Assisting Barrett's esophagus identification using endoscopic data augmentation based on Generative Adversarial Networks},
  series = {Computers in Biology and Medicine},
  volume = {126},
  journal = {Computers in Biology and Medicine},
  number = {November},
  publisher = {Elsevier},
  doi = {10.1016/j.compbiomed.2020.104029},
  pages = {12},
  abstract = {Barrett's esophagus has seen a swift rise in the number of cases in recent years. Although traditional diagnosis methods play a vital role in early-stage treatment, they are generally time- and resource-consuming. In this context, computer-aided approaches for automatic diagnosis have emerged in the literature, since early detection is intrinsically related to remission probabilities.
    However, such approaches still suffer from the lack of data available for machine learning purposes, which implies reduced recognition rates. This work introduces Generative Adversarial Networks to generate high-quality endoscopic images, thereby supporting the more precise identification of Barrett's esophagus and adenocarcinoma. Further, Convolutional Neural Networks are used for feature extraction and classification. The proposed approach is validated on two datasets of endoscopic images, with experiments conducted on both full and patch-split images. The application of Deep Convolutional Generative Adversarial Networks for the data augmentation step, and of LeNet-5 and AlexNet for the classification step, allowed the proposed methodology to be validated over an extensive set of datasets (based on original and augmented sets), reaching 90\% accuracy for the patch-based approach and 85\% for the image-based approach. Both results were obtained on augmented datasets and are statistically different from those obtained on the original datasets of the same kind. Moreover, the impact of data augmentation was evaluated in the context of image description and classification, and the results obtained using synthetic images outperformed those on the original datasets, as well as other recent approaches from the literature. These results suggest promising insights into the importance of proper data for the accurate computer-assisted detection of Barrett's esophagus and adenocarcinoma.},
  subject = {Maschinelles Lernen},
  language = {en}
}
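
Both abstracts above report sensitivity, specificity, F1 score, and accuracy for a binary classification task (T1a vs. T1b, or Barrett's esophagus vs. adenocarcinoma). The short Python sketch below is not taken from either paper; it only illustrates, under assumed 0/1 labels and made-up example counts, how these four metrics are derived from a confusion matrix.

# Minimal sketch (illustrative only): derive the metrics reported in the
# abstracts above from binary labels and predictions. The example data are
# made up and do not reproduce either study's results.

def binary_metrics(y_true, y_pred):
    """Return sensitivity, specificity, F1 score, and accuracy for 0/1 labels."""
    tp = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
    tn = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 0)
    fp = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
    fn = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)

    sensitivity = tp / (tp + fn) if tp + fn else 0.0  # recall on the positive class
    specificity = tn / (tn + fp) if tn + fp else 0.0
    precision = tp / (tp + fp) if tp + fp else 0.0
    f1 = (2 * precision * sensitivity / (precision + sensitivity)
          if precision + sensitivity else 0.0)
    accuracy = (tp + tn) / len(y_true)
    return sensitivity, specificity, f1, accuracy

if __name__ == "__main__":
    # Hypothetical labels: 1 = positive class (e.g. T1b), 0 = negative class (e.g. T1a).
    labels = [1, 1, 1, 0, 0, 0, 1, 0]
    predictions = [1, 0, 1, 0, 1, 0, 1, 0]
    print(binary_metrics(labels, predictions))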