@article{TanSchreinerHausladenetal., author = {Tan, Jing Jie and Schreiner, Rupert and Hausladen, Matthias and Asgharzade, Ali and Edler, Simon and Bartsch, Julian and Bachmann, Michael and Schels, Andreas and Kwan, Ban-Hoe and Ng, Danny Wee-Kiat and Hum, Yan-Chai}, title = {SiMiC: Context-aware silicon microstructure characterization using attention-based convolutional neural networks for field-emission tip analysis}, series = {Journal of Vacuum Science \& Technology B}, volume = {43}, journal = {Journal of Vacuum Science \& Technology B}, number = {6}, publisher = {AVS}, doi = {10.1116/6.0005068}, abstract = {Accurate characterization of silicon microstructures is essential for advancing microscale fabrication, quality control, and device performance. Traditional analysis using scanning electron microscopy (SEM) often requires labor-intensive, manual evaluation of feature geometry, limiting throughput and reproducibility. In this study, we propose SiMiC: Context-aware Silicon Microstructure Characterization Using Attention-based Convolutional Neural Networks for Field-Emission Tip Analysis. By leveraging deep learning, our approach efficiently extracts morphological features such as size, shape, and apex curvature from SEM images, significantly reducing human intervention while improving measurement consistency. A specialized dataset of silicon-based field-emitter tips was developed, and a customized convolutional neural network architecture incorporating attention mechanisms was trained for multiclass microstructure classification and dimensional prediction. Comparative analysis with classical image processing techniques demonstrates that SiMiC achieves high accuracy while maintaining interpretability. The proposed framework establishes a foundation for data-driven microstructure analysis directly linked to field-emission performance, opening avenues for correlating emitter geometry with emission behavior and guiding the design of optimized cold-cathode and SEM electron sources. The related dataset and algorithm repository that could serve as a baseline in this area can be found at https://research.jingjietan.com/?q=SIMIC.}, language = {en} } @article{deSouzaJuniorPachecoOliveiradosSantosetal., author = {de Souza J{\'u}nior, Luis Antonio and Pacheco, Andr{\'e} Georghton Cardoso and Oliveira dos Santos, Thiago and Fogos da Rocha, Wyctor and Bouzon, Pedro Henrique and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {LiwTERM-r: a Revised Lightweight Transformer-based Model for Multimodal Skin Lesion Detection Robust to Incomplete Input}, series = {Journal of the Brazilian Computer Society}, volume = {32}, journal = {Journal of the Brazilian Computer Society}, number = {1}, publisher = {Brazilian Computer Society}, doi = {10.5753/jbcs.2026.5871}, pages = {11}, abstract = {As the most common type of cancer in the world, skin cancer accounts for approximately 30\% of all diagnosed tumor-based lesions. Early diagnosis can reduce mortality and prevent disfigurement in different skin regions. With the application of machine learning techniques in recent years, especially deep learning, promising results have been achieved in this task, with studies demonstrating that combining patients' clinical anamneses with images of the lesion is essential for improving the correct classification of skin lesions. Despite that, meaningful use of anamneses together with multiple collected images of the same skin lesion is mandatory and requires further investigation.
Thus, this project aims to contribute to developing multimodal machine learning-based models to solve the skin lesion classification problem by employing a lightweight transformer model that is robust to missing clinical information in the input. As a main hypothesis, models can be fed multiple images from different sources as input, along with clinical anamneses from the patient's historical evaluations, leading to a more factual and trustworthy diagnosis. Our model deals with the non-trivial task of combining images and clinical information concerning the skin lesions in a lightweight transformer architecture that does not demand high computation resources, or even all the information from the anamneses, but still presents competitive classification results.}, language = {en} } @inproceedings{AliAhmmedAlEmranetal., author = {Ali, Md. Wajed and Ahmmed, Tanvir and Al Emran, Md and Roy, Dipon and Refat, Kawsar Ahmed and Khan, Robiul}, title = {Driver Fatigue Detection using CWT-Extracted Features and a Deep Learning Approach (CNNLSTM)}, series = {2024 IEEE International Conference on Biomedical Engineering, Computer and Information Technology for Health (BECITHCON), 28-29 Nov. 2024, Dhaka, Bangladesh}, booktitle = {2024 IEEE International Conference on Biomedical Engineering, Computer and Information Technology for Health (BECITHCON), 28-29 Nov. 2024, Dhaka, Bangladesh}, publisher = {IEEE}, address = {Piscataway, NJ}, isbn = {979-8-3315-3435-6}, doi = {10.1109/BECITHCON64160.2024.10962632}, pages = {77 -- 82}, abstract = {Driver fatigue is a leading cause of traffic accidents globally, resulting in significant threats to public safety and substantial economic costs. Electroencephalography (EEG) has proven to be a critical tool for identifying driver fatigue as it can record brain activity associated with sleepiness, which gives it an advantage over other physiological modalities. Although raw EEG data can provide insightful information, accurate fatigue identification requires strong feature extraction techniques because of the inherent complexity of the data. This emphasizes the urgency of investigating novel deep-learning architectures that can successfully extract discriminative features from raw EEG data. This study provides an innovative framework for driver fatigue identification from EEG that combines the continuous wavelet transform (CWT) with convolutional neural networks (CNN) and long short-term memory (LSTM). To overcome the drawbacks of manual feature extraction, this study generates time-frequency spectrum representations of the EEG data using the CWT. Following feature extraction, each channel's time-frequency images are concatenated and fed into a CNN-LSTM architecture. This combination models the temporal and spatial characteristics of the EEG data and automatically learns discriminative features for identifying drivers' normal and fatigued states. A publicly available EEG dataset with recordings from twelve subjects is used to evaluate the proposed CWT-CNN-LSTM architecture. The results show a classification accuracy of 98.34\%, both averaged per subject and for all subjects combined. These findings show how well the CNN-LSTM framework captures EEG patterns associated with fatigue, which may result in more reliable driver fatigue detection systems and improved traffic safety.}, language = {en} } @inproceedings{SouzaPachecodeAngeloetal., author = {Souza, Luis A. and Pacheco, Andr{\'e} G.C. and de Angelo, Gabriel G. 
and Oliveira-Santos, Thiago and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {LiwTERM: A Lightweight Transformer-Based Model for Dermatological Multimodal Lesion Detection}, series = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, booktitle = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, publisher = {IEEE}, isbn = {979-8-3503-7603-6}, doi = {10.1109/SIBGRAPI62404.2024.10716324}, pages = {1 -- 6}, abstract = {Skin cancer is the most common type of cancer in the world, accounting for approximately 30\% of all diagnosed tumors. Early diagnosis reduces mortality rates and prevents disfiguring effects in different body regions. In recent years, machine learning techniques, particularly deep learning, have shown promising results in this task, with studies demonstrating that combining a patient's clinical information with images of the lesion is crucial for improving the classification of skin lesions. Despite that, meaningful use of clinical information with multiple images is mandatory, requiring further investigation. Thus, this project aims to contribute to developing multimodal machine learning-based models that cope with the skin lesion classification task by employing a lightweight transformer model. As a main hypothesis, models can take multiple images from different sources as input, along with clinical information from the patient's history, leading to a more reliable diagnosis. Our model deals with the non-trivial task of combining images and clinical information (from anamneses) concerning the skin lesions in a lightweight transformer architecture that does not demand high computation resources but still presents competitive classification results.}, language = {en} } @article{SouzaJrPassosSantanaetal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Santana, Marcos Cleison S. and Mendel, Robert and Rauber, David and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Layer-selective deep representation to improve esophageal cancer classification}, series = {Medical \& Biological Engineering \& Computing}, volume = {62}, journal = {Medical \& Biological Engineering \& Computing}, publisher = {Springer Nature}, address = {Heidelberg}, doi = {10.1007/s11517-024-03142-8}, pages = {3355 -- 3372}, abstract = {Even though artificial intelligence and machine learning have demonstrated remarkable performance in medical image computing, their accountability and transparency must be improved to transfer this success into clinical practice. The reliability of machine learning decisions must be explained and interpreted, especially for supporting the medical diagnosis. For this task, the black-box nature of deep learning techniques must somehow be lightened up to clarify their promising results. Hence, we aim to investigate the impact of the ResNet-50 deep convolutional design for Barrett's esophagus and adenocarcinoma classification. For such a task, and aiming at proposing a two-step learning technique, the output of each convolutional layer that composes the ResNet-50 architecture was trained and classified in order to identify the layers that provide the most impact in the architecture. We showed that local information and high-dimensional features are essential to improve the classification for our task. 
Moreover, we observed a significant improvement when the most discriminative layers had a greater impact on the training and classification of ResNet-50 for Barrett's esophagus and adenocarcinoma classification, demonstrating that both human knowledge and computational processing may influence the correct learning of such a problem.}, language = {en} } @article{MoserJobstBierletal., author = {Moser, Elisabeth and Jobst, Simon and Bierl, Rudolf and Jenko, Frank}, title = {A Deep Learning System to Transform Cross-Section Spectra to Varying Environmental Conditions}, series = {Vibrational Spectroscopy}, volume = {122}, journal = {Vibrational Spectroscopy}, number = {September}, publisher = {Elsevier}, issn = {0924-2031}, doi = {10.1016/j.vibspec.2022.103410}, abstract = {Absorption cross-sections provide a basis for many gas sensing applications. Therefore, any error in molecular cross-sections caused by varying environmental conditions propagates to spectroscopic applications. Original molecular cross-sections in varying environmental conditions can only be simulated for some molecules, whereas for most multi-atom molecules, one must rely on high-precision measurements at certain environmental configurations. In this study, a deep learning system trained with simulated absorption cross-sections for predicting cross-sections at a different pressure configuration is presented. The system's capability to transfer to measured, multi-atom cross-sections is demonstrated. Thus, it provides an alternative to (pseudo-) line lists whenever the required information for simulation is unavailable. The predictive performance of the system was evaluated on validation data via simulation, and its transfer learning capabilities were demonstrated on measured chlorine nitrate data. In comparison with line lists, the system performs slightly worse than pseudo-line lists, but its predictive quality is still deemed acceptable, with less than 5\% relative integral change and a highly localized error around the peak center. This opens a promising way for further research to use deep learning to simulate the effect of varying environmental conditions on absorption cross-sections.}, language = {en} } @article{EbigboPalmMessmann, author = {Ebigbo, Alanna and Palm, Christoph and Messmann, Helmut}, title = {Barrett esophagus: What to expect from Artificial Intelligence?}, series = {Best Practice \& Research Clinical Gastroenterology}, volume = {52-53}, journal = {Best Practice \& Research Clinical Gastroenterology}, number = {June-August}, publisher = {Elsevier}, issn = {1521-6918}, doi = {10.1016/j.bpg.2021.101726}, abstract = {The evaluation and assessment of Barrett's esophagus is challenging for both expert and nonexpert endoscopists. However, the early diagnosis of cancer in Barrett's esophagus is crucial for its prognosis and could save costs. Pre-clinical and clinical studies on the application of Artificial Intelligence (AI) in Barrett's esophagus have shown promising results. 
In this review, we focus on the current challenges and future perspectives of implementing AI systems in the management of patients with Barrett's esophagus.}, subject = {Deep Learning}, language = {en} } @article{EbigboMendelProbstetal., author = {Ebigbo, Alanna and Mendel, Robert and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph and Messmann, Helmut}, title = {Real-time use of artificial intelligence in the evaluation of cancer in Barrett's oesophagus}, series = {Gut}, volume = {69}, journal = {Gut}, number = {4}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2019-319460}, pages = {615 -- 616}, abstract = {Based on previous work by our group with manual annotation of visible Barrett oesophagus (BE) cancer images, a real-time deep learning artificial intelligence (AI) system was developed. While an expert endoscopist conducts the endoscopic assessment of BE, our AI system captures random images from the real-time camera livestream and provides a global prediction (classification), as well as a dense prediction (segmentation), differentiating accurately between normal BE and early oesophageal adenocarcinoma (EAC). The AI system showed an accuracy of 89.9\% on 14 cases with neoplastic BE.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{EbigboPalmProbstetal., author = {Ebigbo, Alanna and Palm, Christoph and Probst, Andreas and Mendel, Robert and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Siersema, Peter and Messmann, Helmut}, title = {A technical review of artificial intelligence as applied to gastrointestinal endoscopy: clarifying the terminology}, series = {Endoscopy International Open}, volume = {07}, journal = {Endoscopy International Open}, number = {12}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1010-5705}, pages = {1616 -- 1623}, abstract = {The growing number of publications on the application of artificial intelligence (AI) in medicine underlines the enormous importance and potential of this emerging field of research. In gastrointestinal endoscopy, AI has been applied to all segments of the gastrointestinal tract, most importantly in the detection and characterization of colorectal polyps. However, AI research has also been published on the stomach and esophagus, for both neoplastic and non-neoplastic disorders. The various technical as well as medical aspects of AI, however, remain confusing, especially for non-expert physicians. This physician-engineer co-authored review explains the basic technical aspects of AI and provides a comprehensive overview of recent publications on AI in gastrointestinal endoscopy. Finally, a basic insight is offered into understanding publications on AI in gastrointestinal endoscopy.}, subject = {Diagnose}, language = {en} } @article{PassosSouzaJrMendeletal., author = {Passos, Leandro A. 
and Souza Jr., Luis Antonio de and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Barrett's esophagus analysis using infinity Restricted Boltzmann Machines}, series = {Journal of Visual Communication and Image Representation}, volume = {59}, journal = {Journal of Visual Communication and Image Representation}, publisher = {Elsevier}, doi = {10.1016/j.jvcir.2019.01.043}, pages = {475 -- 485}, abstract = {The number of patients with Barrett's esophagus (BE) has increased in the last decades. Considering the severity of the disease and its evolution to adenocarcinoma, an early diagnosis of BE may provide a high probability of cancer remission. However, limitations regarding traditional methods of detection and management of BE demand alternative solutions. As such, computer-aided tools have recently been used to assist in this problem, but the challenge still persists. To manage the problem, we introduce infinity Restricted Boltzmann Machines (iRBMs) for the task of automatic identification of Barrett's esophagus from endoscopic images of the lower esophagus. Moreover, since the iRBM requires a proper selection of its meta-parameters, we also present a discriminative iRBM fine-tuning using six meta-heuristic optimization techniques. We showed that iRBMs are suitable for this context since they provide competitive results, and the meta-heuristic techniques proved appropriate for such a task.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} }