@phdthesis{SchulzeDieckhoff2021,
  author   = {Schulze Dieckhoff, Max},
  title    = {From bag-of-words towards natural language: adapting topic models to avoid stop word removal},
  doi      = {10.17904/ku.opus-726},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:824-opus4-7266},
  school   = {Katholische Universit{\"a}t Eichst{\"a}tt-Ingolstadt},
  pages    = {xvi, 159},
  note     = {Illustrations, diagrams},
  year     = {2021},
  abstract = {Topic models such as latent Dirichlet allocation (LDA) aim to identify latent topics within text corpora. However, although LDA-type models fall under Natural Language Processing, the actual model input is heavily modified from the original natural language. Among other things, specific terms are typically removed, which arguably discards information as well. In this work, an extension to LDA called uLDA is proposed, which seeks to incorporate some of these formerly eliminated terms, namely stop words, to match natural topics more closely. After the new extension is developed and evaluated on established fit measures, uLDA is tasked with approximating human-perceived topics. For this, a ground truth for topic labels is generated in a human-based experiment; these values then serve as the reference to be matched by the model output. Results show that the new extension outperforms traditional topic models in out-of-sample fit across all data sets and in human topic approximation for most data sets. These findings demonstrate that the novel extension can extract valuable information from the additional data conveyed by stop words, and they show its potential for better modeling natural language in the future.},
  keywords = {Bayesian learning},
  language = {en}
}