@inproceedings{MendelSouzaJrRauberetal.2020,
  author    = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph},
  title     = {Semi-supervised Segmentation Based on Error-Correcting Supervision},
  series    = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX},
  booktitle = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX},
  publisher = {Springer},
  address   = {Cham},
  year      = {2020},
  isbn      = {978-3-030-58525-9},
  doi       = {10.1007/978-3-030-58526-6_9},
  pages     = {141 -- 157},
  abstract  = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data. Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network. The secondary correction network learns on the labeled data to optimally spot correct predictions, as well as to amend incorrect ones. As an auxiliary regularization term, the corrector directly influences the supervised training of the segmentation network. On unlabeled data, the output of the correction network is essential to create a proxy for the unknown truth. The corrector's output is combined with the segmentation network's prediction to form the new target. We propose a loss function that incorporates both the pseudo-labels as well as the predictive certainty of the correction network. Our approach can easily be added to supervised segmentation models. We show consistent improvements over a supervised baseline in experiments on both the Pascal VOC 2012 and the Cityscapes datasets with varying amounts of labeled data.},
  subject   = {Semi-Supervised Learning},
  language  = {en}
}
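The Error-Correcting Supervision entry above describes a correction network whose output is combined with the segmentation prediction to form a training target on unlabeled data. The following is a minimal PyTorch sketch of one way such a corrector-weighted pseudo-label loss could look; the function name, tensor shapes, and confidence threshold are assumptions for illustration, not the paper's exact formulation.

import torch
import torch.nn.functional as F

def corrector_weighted_pseudo_label_loss(seg_logits, corrector_conf, threshold=0.5):
    """Hypothetical unsupervised loss term: hard pseudo-labels from the
    segmentation network, weighted per pixel by the correction network's
    estimate that the prediction is correct (illustrative only)."""
    # seg_logits:     (B, C, H, W) raw segmentation outputs on unlabeled images
    # corrector_conf: (B, 1, H, W) corrector estimate in [0, 1] per pixel
    pseudo = seg_logits.argmax(dim=1)                                   # (B, H, W) pseudo-labels
    per_pixel = F.cross_entropy(seg_logits, pseudo, reduction="none")   # (B, H, W) per-pixel loss
    weight = corrector_conf.squeeze(1).detach()                         # no gradients through the corrector
    mask = (weight > threshold).float()                                 # trust only confident pixels
    return (weight * mask * per_pixel).sum() / mask.sum().clamp(min=1.0)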
@inproceedings{SouzaJrEbigboProbstetal.,
  author    = {Souza Jr., Luis Antonio de and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Mendel, Robert and Palm, Christoph},
  title     = {Barrett's Esophagus Identification Using Color Co-occurrence Matrices},
  series    = {31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Parana, 2018},
  booktitle = {31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Parana, 2018},
  year      = {2018},
  doi       = {10.1109/SIBGRAPI.2018.00028},
  pages     = {166 -- 173},
  abstract  = {In this work, we propose the use of single-channel Color Co-occurrence Matrices for texture description of Barrett's Esophagus (BE) and adenocarcinoma images. Further classification using supervised learning techniques, such as Optimum-Path Forest (OPF), Support Vector Machines with Radial Basis Function (SVM-RBF), and a Bayesian classifier, supports the context of automatic BE and adenocarcinoma diagnosis. We validated three approaches of classification based on patches, patients, and images in two datasets (MICCAI 2015 and Augsburg) using the color-and-texture descriptors and the machine learning techniques. Concerning the MICCAI 2015 dataset, the best results were obtained using the blue channel for the descriptors and the supervised OPF for classification purposes in the patch-based approach, with sensitivity close to 73\% for positive adenocarcinoma identification and specificity close to 77\% for BE (non-cancerous) patch classification. Regarding the Augsburg dataset, the most accurate results were also obtained using both the OPF classifier and the blue channel descriptor for feature extraction, with sensitivity close to 67\% and specificity around 76\%. Our work highlights new advances in the related research area and provides a promising technique that combines color and texture information, allied to three different approaches of dataset pre-processing aimed at configuring robust scenarios for the classification step.},
  language  = {en}
}
@inproceedings{SouzaJrAfonsoPalmetal.,
  author    = {Souza Jr., Luis Antonio de and Afonso, Luis Claudio Sugi and Palm, Christoph and Papa, Jo{\~a}o Paulo},
  title     = {Barrett's Esophagus Identification Using Optimum-Path Forest},
  series    = {Proceedings of the 30th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T 2017), Niter{\'o}i, Rio de Janeiro, Brazil, 2017, 17-20 October},
  booktitle = {Proceedings of the 30th Conference on Graphics, Patterns and Images Tutorials (SIBGRAPI-T 2017), Niter{\'o}i, Rio de Janeiro, Brazil, 2017, 17-20 October},
  year      = {2017},
  doi       = {10.1109/SIBGRAPI.2017.47},
  pages     = {308 -- 314},
  abstract  = {Computer-assisted analysis of endoscopic images can be helpful for the automatic diagnosis and classification of neoplastic lesions. Barrett's esophagus (BE) is a common type of reflux that is not straightforward to detect by endoscopic surveillance, and it is thus susceptible to erroneous diagnosis, which can lead to cancer when not treated properly. In this work, we introduce the Optimum-Path Forest (OPF) classifier for the task of automatic identification of Barrett's esophagus, with promising results that outperform the well-known Support Vector Machines (SVM) in the aforementioned context. We describe endoscopic images by means of feature extractors based on key point information, such as Speeded Up Robust Features (SURF) and the Scale-Invariant Feature Transform (SIFT), and then design a bag-of-visual-words that is used to feed both the OPF and SVM classifiers. The best results were obtained with the OPF classifier for both feature extractors, with values of 0.732 (SURF) and 0.735 (SIFT) for sensitivity, 0.782 (SURF) and 0.806 (SIFT) for specificity, and 0.738 (SURF) and 0.732 (SIFT) for accuracy.},
  subject   = {Speiser{\"o}hrenkrankheit},
  language  = {en}
}
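The SIBGRAPI-T 2017 entry above rests on a standard bag-of-visual-words pipeline: local descriptors (SIFT or SURF), a learned visual vocabulary, and per-image word histograms fed to a classifier. The sketch below illustrates that pipeline under assumed parameters; the vocabulary size and helper names are hypothetical, and an SVM with RBF kernel stands in for OPF, which is not available in scikit-learn.

import numpy as np
import cv2
from sklearn.cluster import KMeans
from sklearn.svm import SVC

def sift_descriptors(gray_images):
    """Extract SIFT descriptors from a list of grayscale uint8 images."""
    sift = cv2.SIFT_create()
    per_image = []
    for img in gray_images:
        _, des = sift.detectAndCompute(img, None)
        per_image.append(des if des is not None else np.zeros((0, 128), np.float32))
    return per_image

def bovw_histograms(per_image_descriptors, n_words=500):
    """Build a visual vocabulary with k-means and encode each image as a normalized word histogram."""
    stacked = np.vstack([d for d in per_image_descriptors if len(d) > 0])
    vocab = KMeans(n_clusters=n_words, n_init=10, random_state=0).fit(stacked)
    hists = []
    for des in per_image_descriptors:
        words = vocab.predict(des) if len(des) > 0 else np.array([], dtype=int)
        h = np.bincount(words, minlength=n_words).astype(np.float64)
        hists.append(h / max(h.sum(), 1.0))  # L1-normalize so image size does not dominate
    return np.array(hists)

# Usage sketch (gray_images and labels are placeholders):
# descriptors = sift_descriptors(gray_images)
# X = bovw_histograms(descriptors, n_words=500)
# clf = SVC(kernel="rbf").fit(X, labels)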
@misc{MendelSouzaJrRauberetal.2021,
  author    = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph},
  title     = {Abstract: Semi-supervised Segmentation Based on Error-correcting Supervision},
  series    = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021},
  journal   = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021},
  publisher = {Springer Vieweg},
  address   = {Wiesbaden},
  year      = {2021},
  isbn      = {978-3-658-33197-9},
  doi       = {10.1007/978-3-658-33198-6_43},
  pages     = {178},
  abstract  = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data. Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network.},
  subject   = {Deep Learning},
  language  = {en}
}
@article{SouzaJrPassosMendeletal.,
  author    = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo},
  title     = {Assisting Barrett's esophagus identification using endoscopic data augmentation based on Generative Adversarial Networks},
  series    = {Computers in Biology and Medicine},
  volume    = {126},
  journal   = {Computers in Biology and Medicine},
  number    = {November},
  publisher = {Elsevier},
  year      = {2020},
  doi       = {10.1016/j.compbiomed.2020.104029},
  pages     = {12},
  abstract  = {Barrett's esophagus has seen a swift rise in the number of cases in recent years. Although traditional diagnosis methods play a vital role in early-stage treatment, they are generally time- and resource-consuming. In this context, computer-aided approaches for automatic diagnosis have emerged in the literature, since early detection is intrinsically related to remission probabilities. However, they still suffer from drawbacks because of the lack of available data for machine learning purposes, thus implying reduced recognition rates. This work introduces Generative Adversarial Networks to generate high-quality endoscopic images, thereby identifying Barrett's esophagus and adenocarcinoma more precisely. Further, Convolutional Neural Networks are used for feature extraction and classification purposes. The proposed approach is validated on two datasets of endoscopic images, with the experiments conducted on both the full and patch-split images. The application of Deep Convolutional Generative Adversarial Networks for the data augmentation step and LeNet-5 and AlexNet for the classification step allowed us to validate the proposed methodology over an extensive set of datasets (based on original and augmented sets), reaching 90\% accuracy for the patch-based approach and 85\% for the image-based approach. Both results are based on augmented datasets and are statistically different from the ones obtained on the original datasets of the same kind. Moreover, the impact of data augmentation was evaluated in the context of image description and classification, and the results obtained using synthetic images outperformed the ones over the original datasets, as well as other recent approaches from the literature. Such results suggest promising insights related to the importance of proper data for accurate classification concerning computer-assisted Barrett's esophagus and adenocarcinoma detection.},
  subject   = {Maschinelles Lernen},
  language  = {en}
}
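The last entry uses Deep Convolutional Generative Adversarial Networks to synthesize additional endoscopic training images. Below is a compact PyTorch sketch of a DCGAN-style generator of the kind commonly used for such augmentation; the latent size, feature widths, and 64x64 output resolution are assumptions, not the architecture reported in the paper.

import torch
import torch.nn as nn

class DCGANGenerator(nn.Module):
    """Illustrative DCGAN generator: latent vector -> 64x64 RGB image."""
    def __init__(self, nz=100, ngf=64, nc=3):
        super().__init__()
        self.net = nn.Sequential(
            # latent z (nz x 1 x 1) -> 4x4 feature map
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8), nn.ReLU(True),
            # 4x4 -> 8x8
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4), nn.ReLU(True),
            # 8x8 -> 16x16
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2), nn.ReLU(True),
            # 16x16 -> 32x32
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf), nn.ReLU(True),
            # 32x32 -> 64x64 RGB image in [-1, 1]
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, z):
        return self.net(z)

# Usage sketch: sample synthetic images to augment a training set.
# g = DCGANGenerator()
# fake = g(torch.randn(16, 100, 1, 1))   # (16, 3, 64, 64)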