@techreport{SchmittBundschererBocklet2024,
  author = {Schmitt, Thomas and Bundscherer, Maximilian and Bocklet, Tobias},
  title = {Semmeldetector: Application of Machine Learning in Commercial Bakeries},
  doi = {10.48550/arXiv.2406.04050},
  pages = {6},
  year = {2024},
  abstract = {The Semmeldetector is a machine learning application that uses object detection models to detect, classify, and count baked goods in images. Our application allows commercial bakers to track unsold baked goods, enabling them to optimize production and increase resource efficiency. We compiled a dataset of 1151 images distinguishing between 18 different types of baked goods to train our detection models. To facilitate model training, we used a Copy-Paste augmentation pipeline to expand our dataset. We trained the state-of-the-art object detection model YOLOv8 on our detection task and tested the impact of different training data, model scales, and online image augmentation pipelines on model performance. Our overall best-performing model achieved an AP@0.5 of 89.1\% on our test set. Based on our results, we conclude that machine learning can be a valuable tool even for unexpected industries like bakeries, and even with very limited datasets.},
  language = {en}
}

@article{KammerbauerSchmittBocklet2023,
  author = {Kammerbauer, Roland and Schmitt, Thomas and Bocklet, Tobias},
  title = {Segmenting Wood Rot using Computer Vision Models},
  pages = {14},
  year = {2023},
  abstract = {In the woodworking industry, a large amount of effort has to be invested into the initial quality assessment of the raw material. In this study we present an AI model to detect, quantify, and localize defects on wooden logs. The model aims to both automate the quality control process and provide a more consistent and reliable quality assessment. For this purpose, a dataset of 1424 sample images of wood logs is created. A total of 5 annotators with different levels of expertise are involved in dataset creation. An inter-annotator agreement analysis is conducted to analyze the impact of expertise on the annotation task and to highlight subjective differences in annotator judgement. We explore, train, and fine-tune the state-of-the-art InternImage and ONE-PEACE architectures for semantic segmentation. The best model achieves an average IoU of 0.71 and shows detection and quantification capabilities close to those of the human annotators.},
  language = {en}
}

@inproceedings{BaumannUngerWagneretal.2024,
  author = {Baumann, Ilja and Unger, Nicole and Wagner, Dominik and Riedhammer, Korbinian and Bocklet, Tobias},
  title = {Automatic Evaluation of a Sentence Memory Test for Preschool Children},
  doi = {10.21437/Interspeech.2024-2125},
  pages = {5158 -- 5162},
  year = {2024},
  abstract = {Assessment of memory capabilities in preschool-aged children is crucial for early detection of potential speech development impairments or delays. We present an approach for the automatic evaluation of a standardized sentence memory test specifically for preschool children. Our methodology leverages automatic transcription of recited sentences and evaluation based on natural language processing techniques. We demonstrate the effectiveness of our approach on a dataset comprising recited sentences from preschool-aged children, incorporating ratings of semantic and syntactic correctness. The best-performing systems achieve an F1 score of 91.7\% for semantic correctness and 86.1\% for syntactic correctness using automatic transcripts.
Our results showcase the potential of automated evaluation systems in providing reliable and efficient assessments of memory capabilities in early childhood, facilitating timely interventions and support for children with language development needs.},
  language = {en}
}

@article{SchmittBundschererBocklet2024a,
  author = {Schmitt, Thomas and Bundscherer, Maximilian and Bocklet, Tobias},
  title = {Training a Computer Vision Model for Commercial Bakeries with Primarily Synthetic Images},
  doi = {10.48550/arXiv.2409.20122},
  pages = {10},
  year = {2024},
  abstract = {In the food industry, reprocessing returned products is a vital step toward increasing resource efficiency. [SBB23] presented an AI application that automates the tracking of returned bread buns. We extend their work by creating an expanded dataset comprising 2432 images and a wider range of baked goods. To increase model robustness, we use the generative models pix2pix and CycleGAN to create synthetic images. We train the state-of-the-art object detection models YOLOv9 and YOLOv8 on our detection task. Our overall best-performing model achieved an average precision AP@0.5 of 90.3\% on our test set.},
  language = {en}
}

@inproceedings{BaumannWagnerSchusteretal.2024,
  author = {Baumann, Ilja and Wagner, Dominik and Schuster, Maria and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias},
  title = {Towards Self-Attention Understanding for Automatic Articulatory Processes Analysis in Cleft Lip and Palate Speech},
  doi = {10.21437/Interspeech.2024-2134},
  pages = {2430 -- 2434},
  year = {2024},
  abstract = {Cleft lip and palate (CLP) speech presents unique challenges for automatic phoneme analysis due to its distinct acoustic characteristics and articulatory anomalies. We perform phoneme analysis in CLP speech using a pre-trained wav2vec 2.0 model with a multi-head self-attention classification module to capture long-range dependencies within the speech signal, thereby enabling better contextual understanding of phoneme sequences. We demonstrate the effectiveness of our approach in the classification of various articulatory processes in CLP speech. Furthermore, we investigate the interpretability of self-attention to gain insights into the model's understanding of CLP speech characteristics. Our findings highlight the potential of self-attention mechanisms for improving automatic phoneme analysis in CLP speech, paving the way for enhanced diagnostics and adding interpretability for therapists and affected patients.},
  language = {en}
}

@inproceedings{WagnerBaumannRiedhammeretal.2024,
  author = {Wagner, Dominik and Baumann, Ilja and Riedhammer, Korbinian and Bocklet, Tobias},
  title = {Outlier Reduction with Gated Attention for Improved Post-training Quantization in Large Sequence-to-sequence Speech Foundation Models},
  editor = {Pesak, Krisztina},
  doi = {10.21437/Interspeech.2024-2105},
  pages = {4623 -- 4627},
  year = {2024},
  abstract = {This paper explores the improvement of post-training quantization (PTQ) after knowledge distillation in the Whisper speech foundation model family. We address the challenge of outliers in weight and activation tensors, which are known to impede quantization quality in transformer-based language and vision models. Extending this observation to Whisper, we demonstrate that these outliers are also present when transformer-based models are trained to perform automatic speech recognition, necessitating mitigation strategies for PTQ.
We show that outliers can be reduced by a recently proposed gating mechanism in the attention blocks of the student model, enabling effective 8-bit quantization and lower word error rates compared to student models without the gating mechanism in place.},
  language = {en}
}

@article{FreierBockletHeltenetal.2023,
  author = {Freier, Carolin and Bocklet, Tobias and Helten, Anne-Kathrin and Hoffmann, Franziska and Hunger, Marianne and Kov{\'a}cs, L{\'a}szl{\'o} and Richter, Florian and Riedhammer, Korbinian and Schmohl, Tobias and Simon, Claudia},
  title = {Wie kann videogest{\"u}tztes Lernen die Erwartungen Studierender und Dozierender erf{\"u}llen?},
  journal = {Soziale Passagen},
  volume = {15},
  number = {2},
  publisher = {Springer VS},
  issn = {1867-0180},
  doi = {10.1007/s12592-023-00478-0},
  pages = {631 -- 635},
  year = {2023},
  abstract = {In the BMBF joint project HAnS, nine universities and three cross-university institutions are developing and implementing an intelligent university assistance system as an open-source solution. Video-based teaching materials are transcribed, and indexing enables keyword searches; the automated generation of practice exercises via an AI tutor is planned. The goal is to support students digitally in their self-study. The technical development is accompanied by interdisciplinary work, including social-science and pedagogical perspectives, and is adapted in an iterative, evidence-based process following a design-based research approach. The knowledge and value systems of the users, as well as didactics, ethics, acceptance, and data protection, are taken into account throughout the development process.},
  language = {de}
}

@inproceedings{RanzenbergerFreierReinoldetal.2024,
  author = {Ranzenberger, Thomas and Freier, Carolin and Reinold, Luca and Riedhammer, Korbinian and Schneider, Fabian and Simic, Christopher and Simon, Claudia and Freisinger, Steffen and Georges, Munir and Bocklet, Tobias},
  title = {A Multidisciplinary Approach to AI-based self-motivated Learning and Teaching with Large Language Models},
  booktitle = {Proceedings of DELFI 2024},
  publisher = {Gesellschaft f{\"u}r Informatik e.V.},
  address = {Bonn},
  issn = {2944-7682},
  doi = {10.18420/delfi2024_11},
  pages = {133 -- 140},
  year = {2024},
  abstract = {We present a learning experience platform that uses machine learning methods to support students and lecturers in self-motivated online learning and teaching processes. The platform is being developed as an agile open-source collaborative project supported by multiple universities and partners. The development is guided didactically, reviewed, and scientifically evaluated in several cycles. Transparency, data protection, and copyright-compliant use of the system are central to the project. The system further employs large language models (LLMs). Due to privacy concerns, we utilize locally hosted LLM instances and explicitly do not rely on available cloud products. In the current prototype, students and lecturers can interact with an LLM-based chatbot. The AI-generated outputs contain cross-references to the current educational video's context, indicating whether sections are based on the lecture's context or on world knowledge.
We present the prototype and the results of our qualitative evaluation from the perspective of lecturers and students.},
  language = {en}
}

@article{EscobarGrisalesRiosUrregoBaumannetal.2024,
  author = {Escobar-Grisales, Daniel and R{\'i}os-Urrego, Cristian-David and Baumann, Ilja and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias and Garcia, Adolfo and Orozco-Arroyave, Juan Rafael},
  title = {It's Time to Take Action: Acoustic Modeling of Motor Verbs to Detect Parkinson's Disease},
  doi = {10.21437/Interspeech.2024-2205},
  year = {2024},
  abstract = {Pre-trained models generate speech representations that are used in different tasks, including the automatic detection of Parkinson's disease (PD). Although these models can yield high accuracy, their interpretation is still challenging. This paper used a pre-trained wav2vec 2.0 model to represent speech frames of 25 ms length and perform a frame-by-frame discrimination between PD patients and healthy control (HC) subjects. This fine-grained prediction enabled us to identify specific linguistic segments with high discrimination capability. Speech representations of all produced verbs were compared with those of nouns, and the former yielded higher accuracies. To gain a deeper understanding of this pattern, representations of motor and non-motor verbs were compared, and the former yielded better results, with accuracies of around 83\% on an independent test set. These findings support well-established neurocognitive models that highlight action-related language as a key marker of PD.},
  language = {en}
}

@inproceedings{BayerlWagnerNoethetal.2022,
  author = {Bayerl, Sebastian Peter and Wagner, Dominik and N{\"o}th, Elmar and Riedhammer, Korbinian},
  title = {Detecting Dysfluencies in Stuttering Therapy Using wav2vec 2.0},
  booktitle = {Interspeech 2022},
  publisher = {ISCA},
  doi = {10.21437/Interspeech.2022-10908},
  pages = {2868 -- 2872},
  year = {2022},
  abstract = {Stuttering is a varied speech disorder that harms an individual's communication ability. Persons who stutter (PWS) often use speech therapy to cope with their condition. Improving speech recognition systems for people with such non-typical speech, or tracking the effectiveness of speech therapy, requires systems that can detect dysfluencies and, at the same time, the speech techniques acquired in therapy. This paper shows that fine-tuning wav2vec 2.0 [1] for the classification of stuttering on a sizeable English corpus containing stuttered speech, in conjunction with multi-task learning, boosts the effectiveness of the general-purpose wav2vec 2.0 features for detecting stuttering in speech, both within and across languages. We evaluate our method on FluencyBank [2] and the German therapy-centric Kassel State of Fluency (KSoF) [3] dataset by training Support Vector Machine classifiers on features extracted from the fine-tuned models for six different stuttering-related event types: blocks, prolongations, sound repetitions, word repetitions, interjections, and, specific to therapy, speech modifications. Using embeddings from the fine-tuned models leads to relative classification performance gains of up to 27\% w.r.t. F1-score.},
  language = {en}
}