@inproceedings{VasquezCorreaBockletOrozcoArroyaveetal.2020,
  author    = {Vasquez-Correa, J. C. and Bocklet, T. and Orozco-Arroyave, J. R. and N{\"o}th, E.},
  title     = {Comparison of User Models Based on GMM-UBM and I-Vectors for Speech, Handwriting, and Gait Assessment of Parkinson's Disease Patients},
  series    = {ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  booktitle = {ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  publisher = {IEEE},
  isbn      = {978-1-5090-6631-5},
  doi       = {10.1109/icassp40776.2020.9054348},
  pages     = {6544 -- 6548},
  year      = {2020},
  abstract  = {Parkinson's disease is a neurodegenerative disorder characterized by the presence of different motor impairments. Information from speech, handwriting, and gait signals has been considered to evaluate the neurological state of the patients. On the other hand, user models based on Gaussian mixture models - universal background models (GMM-UBM) and i-vectors are considered the state of the art in biometric applications like speaker verification because they are able to model specific speaker traits. This study introduces the use of GMM-UBM and i-vectors to evaluate the neurological state of Parkinson's patients using information from speech, handwriting, and gait. The results show the importance of different feature sets from each type of signal in the assessment of the neurological state of the patients.},
  language  = {en}
}

@inproceedings{PerezToroBayerlAriasVergaraetal.2021,
  author    = {P{\'e}rez-Toro, P. A. and Bayerl, Sebastian P. and Arias-Vergara, T. and V{\'a}squez-Correa, J. C. and Klumpp, P. and Schuster, M. and N{\"o}th, Elmar and Orozco-Arroyave, J. R. and Riedhammer, Korbinian},
  title     = {Influence of the Interviewer on the Automatic Assessment of Alzheimer's Disease in the Context of the ADReSSo Challenge},
  series    = {Proceedings Interspeech 2021},
  booktitle = {Proceedings Interspeech 2021},
  publisher = {ISCA},
  address   = {ISCA},
  issn      = {2958-1796},
  doi       = {10.21437/Interspeech.2021-1589},
  pages     = {3785 -- 3789},
  year      = {2021},
  abstract  = {Alzheimer's Disease (AD) results from the progressive loss of neurons in the hippocampus, which affects the capability to produce coherent language. It affects lexical, grammatical, and semantic processes as well as speech fluency. This paper considers the analyses of speech and language for the assessment of AD in the context of the Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSSo) 2021 challenge. We propose to extract acoustic features such as X-vectors, prosody, and emotional embeddings as well as linguistic features such as perplexity and word embeddings. The data consist of speech recordings from AD patients and healthy controls. The transcriptions are obtained using a commercial automatic speech recognition system. We outperform baseline results on the test set, both for the classification and the Mini-Mental State Examination (MMSE) prediction. We achieved a classification accuracy of 80\% and an RMSE of 4.56 in the regression. Additionally, we found strong evidence for the influence of the interviewer on classification results. In cross-validation on the training set, we get classification results of 85\% accuracy using the combined speech of the interviewer and the participant. Using interviewer speech only, we still get an accuracy of 78\%.
Thus, we provide strong evidence for interviewer influence on classification results.},
  language  = {en}
}

@inproceedings{PerezToroVasquezCorreaAriasVergaraetal.2021,
  author    = {Perez-Toro, P. A. and Vasquez-Correa, J. C. and Arias-Vergara, T. and Klumpp, P. and Sierra-Castrillon, M. and Roldan-Lopez, M. E. and Aguillon, D. and Hincapie-Henao, L. and Tobon-Quintero, C. A. and Bocklet, T. and Schuster, M. and Orozco-Arroyave, J. R. and N{\"o}th, E.},
  title     = {Acoustic and Linguistic Analyses to Assess Early-Onset and Genetic Alzheimer's Disease},
  series    = {ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  booktitle = {ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  publisher = {IEEE},
  isbn      = {978-1-7281-7605-5},
  doi       = {10.1109/ICASSP39728.2021.9414009},
  pages     = {8338 -- 8342},
  year      = {2021},
  abstract  = {The PSEN1-E280A or Paisa mutation is responsible for most of the Early-Onset Alzheimer's (EOA) disease cases in Colombia. It affects a large kindred of over 5000 members who present the same phenotype. The most common symptoms are related to language disorders, where speech fluency is also affected due to the difficulty in accessing semantic information intentionally. This study proposes the use of acoustic and linguistic methods to extract features from speech recordings and their transcriptions to discriminate people with conditions related to the Paisa mutation. We consider state-of-the-art word-embedding methods like Word2Vec and Bidirectional Encoder Representations from Transformers to process the transcripts. The speech signals are modeled by using traditional acoustic features and speaker embeddings. To the best of our knowledge, this is the first study focused on evaluating genetic Alzheimer's and EOA using acoustics and linguistics.},
  language  = {en}
}

@inproceedings{KlumppBockletAriasVergaraetal.2021,
  author    = {Klumpp, P. and Bocklet, T. and Arias-Vergara, T. and V{\'a}squez-Correa, J. C. and P{\'e}rez-Toro, P. A. and Bayerl, Sebastian P. and Orozco-Arroyave, J. R. and N{\"o}th, Elmar},
  title     = {The Phonetic Footprint of Covid-19?},
  series    = {Interspeech 2021},
  booktitle = {Interspeech 2021},
  publisher = {ISCA},
  address   = {ISCA},
  issn      = {2958-1796},
  doi       = {10.21437/Interspeech.2021-1488},
  pages     = {441 -- 445},
  year      = {2021},
  abstract  = {Against the background of the ongoing pandemic, this year's Computational Paralinguistics Challenge featured a classification problem to detect Covid-19 from speech recordings. The presented approach is based on a phonetic analysis of speech samples; thus, it enabled us not only to discriminate between Covid and non-Covid samples, but also to better understand how the condition influenced an individual's speech signal. Our deep acoustic model was trained with datasets collected exclusively from healthy speakers. It served as a tool for segmentation and feature extraction on the samples from the challenge dataset. Distinct patterns were found in the embeddings of phonetic classes that have their place of articulation deep inside the vocal tract. We observed profound differences in classification results for development and test splits, similar to the baseline method. We concluded that, based on our phonetic findings, it was safe to assume that our classifier was able to reliably detect a pathological condition located in the respiratory tract.
However, we found no evidence to claim that the system was able to discriminate between Covid-19 and other respiratory diseases.},
  language  = {en}
}