@masterthesis{Ossner2024,
  type     = {Bachelor Thesis},
  author   = {O{\ss}ner, Regina},
  title    = {A comparative analysis of tiny transformers and large language models: efficiency, effectiveness and applications},
  doi      = {10.57688/408},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:860-opus4-4086},
  school   = {Hochschule f{\"u}r Angewandte Wissenschaften Landshut},
  pages    = {62},
  year     = {2024},
  abstract = {The application of large language models (LLMs) to small, specific use cases necessitates considerable computational resources and time for fine-tuning the model on a custom dataset. Small transformers may prove an effective solution, as they require less data and less time to train. This work investigates the effectiveness of employing small transformer models for specific use cases involving document classification, named entity recognition, and relation extraction. While large language models demonstrate superior generalization capabilities and can therefore be employed for a diverse range of tasks, the comparison reveals that they are not optimal for specific use cases that necessitate a custom dataset and a particular output format. In contrast, small transformers offer a more cost-effective, accurate, and computationally efficient alternative in terms of training and storage.},
  subject  = {Artificial Intelligence},
  language = {en}
}