@masterthesis{Scheil, type = {Bachelor Thesis}, author = {Scheil, Jonathan}, title = {Analyse der aktuellen und vergangenen politischen Landschaft Deutschlands im Hinblick auf die aktuelle Bundestagswahl mit Fokus auf die ansteigende Bedeutung von Social-Media im Wahlkampf}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:1383-opus4-13687}, school = {Hochschule Rhein-Waal}, pages = {84}, abstract = {Since the last federal election in Germany, the CDU is no longer the strongest force and has suffered heavy losses of votes. The SPD, B{\"u}ndnis 90/Die Gr{\"u}nen and the FDP now form the governing coalition. Parts of the election campaign take place via social media. But how do the individual parties use modern media? Do the parties differ in the way they communicate their messages and, as a result, do they generate votes in different ways? I examine these questions in this bachelor's thesis, starting with the reunification of Germany and covering the period up to the most recent federal elections. The social media platform I investigate for political communication is Twitter. I present past and current parliamentary election results and the differences between eastern and western Germany. Using sentiment analysis, I examine tweets extracted from Twitter at the federal and local/state political level for their content and the accompanying reactions. I create a detailed representation of politicians' different social media appearances and determine whether appropriate conclusions can be drawn from my results and data. My analysis shows that at the parliamentary level, Annalena Baerbock of B{\"u}ndnis 90/Die Gr{\"u}nen, followed by Dr. Alice Weidel of the AfD and Christian Lindner of the FDP, has the most likes and retweets. Olaf Scholz, with 826 tweets, was the most active on Twitter in 2021. At the federal and local/state levels, the topics of all parties are often similar; differences appear with regard to the core issues of the respective parties, which I illustrate in each case. Based on the politicians' use of Twitter, I could not draw any concrete conclusions about voter behaviour; for this, it would be important to evaluate other media such as Facebook.}, language = {de} } @masterthesis{Okos, type = {Bachelor Thesis}, author = {Okos, Martin}, title = {Ethische und rechtliche Rahmenbedingungen des Maschinellen Lernens - eine systematische Analyse zur Konzeptualisierung und Regulation innerhalb der EU}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:1383-opus4-15170}, school = {Hochschule Rhein-Waal}, pages = {80}, abstract = {Developments in machine learning, and with them in artificial intelligence, cover more and more use cases in everyday life. The more complex and autonomous these systems become, the harder it is to predict their behaviour, and it cannot be ruled out that situations will arise that raise ethical and legal questions. The aim of this thesis is to examine the framework conditions required for handling these technologies. In addition to explaining essential terms from the fields of AI and ethics, a review of suitable literature on care robots and autonomous driving introduces two application areas that demonstrate a need for moral machines.
Regulatory measures within the European Union take the form of applying existing laws and enacting new ones, as well as defining ethical guidelines, which are likewise addressed in this thesis.}, language = {de} } @masterthesis{Salobir, type = {Bachelor Thesis}, author = {Salobir, Jan}, title = {Relaunch eines Kommunalen Webauftritts beim KRZN}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:1383-opus4-15214}, school = {Hochschule Rhein-Waal}, pages = {42}, abstract = {This thesis analyses the process of relaunching a municipal web presence step by step, paying particular attention to how the legal requirements for such a web presence are implemented.}, language = {de} } @masterthesis{Kottek, type = {Bachelor Thesis}, author = {Kottek, Nick}, title = {Echtzeit-Erkennung von Gesten des deutschen Fingeralphabets mithilfe eines Convolutional Neural Networks}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:1383-opus4-19768}, school = {Hochschule Rhein-Waal}, pages = {58}, abstract = {Sign language is an important factor in the integration of deaf and hard-of-hearing people into society. Due to the limited number of people who speak sign language, there is a communication barrier that needs to be addressed. In recent years, sign language has been an important field of research, and there have been numerous attempts to find a solution to this problem. This thesis focuses on the German finger alphabet, which has not been researched as much. It examines whether it is possible to recognize the gestures of the German finger alphabet in real time with a convolutional neural network. To this end, literature is reviewed and a prototype is developed. The prototype includes a newly created dataset with 2,500 images distributed over 25 gestures, a convolutional neural network, and software for real-time translation of a video stream. The prototype demonstrates the feasibility of such a task. The convolutional neural network achieves an accuracy of 99.61\%. On a powerful desktop computer, the prediction of a single frame takes between 36 and 40 ms. However, there are some limitations and restrictions to the quality of prediction caused by factors such as lighting and background. Additionally, certain similar gestures, such as M and N, are difficult to distinguish. Based on these results, ideas and suggestions for future studies are presented.}, language = {de} } @misc{Shrestha, type = {Master Thesis}, author = {Shrestha, Sabita}, title = {Named Entity Recognition for Nepali Text Using Pre-Trained BERT-Based Model}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:1383-opus4-19870}, school = {Hochschule Rhein-Waal}, abstract = {The popularity of transformer-based models like Bidirectional Encoder Representations from Transformers (BERT) for various Natural Language Processing (NLP) tasks is increasing rapidly. Unfortunately, research is very limited for low-resource languages like Nepali. This study examines the use of the pre-trained BERT-based NERNepal model for Named Entity Recognition (NER) tasks in Nepali text. The main goal is to investigate the efficiency of the NERNepal model, which has been pre-trained and fine-tuned on Nepali corpus data for NER. This research provides new insights by evaluating the NERNepal model on two distinct datasets and addresses unique linguistic challenges specific to Nepali. The study also offers a detailed analysis of the model's strengths and weaknesses.
By focusing on diverse datasets, this study shows how adaptable the model is and how its performance varies; these aspects have not been explored extensively before. The EverestNER dataset is one of the largest human-annotated datasets for Nepali so far, and the Nepali_NER dataset is also BIO-annotated for the NER task, which helped in comparing the model's predictions. The performance was better on the Nepali_NER dataset than on the other selected dataset. The EverestNER dataset contained many complex ORG (organisation) entities spanning many tokens per entity, with different contextual meanings and with the same word annotated differently in different tokens depending on context. This caused more confusion in the predictions, especially for the ORG entity. A similar issue occurred with the other dataset as well, but its label annotation was better in comparison. Furthermore, the research tries to clarify the difficulties and constraints of utilising pre-trained BERT models for NER in low-resource languages such as Nepali. The study focuses on the efficacy of the model and its performance on two different datasets. Post-training with the EverestNER training set was attempted, but due to computational resource limitations only a maximum of 3 epochs was possible, which did not improve the evaluation results. The work also has implications for enhancing language processing tools for Nepali. The results show the ability of a pre-trained BERT-based model to improve NER for Nepali text. However, further study is required to overcome the current obstacles in identifying complex words. The availability of greater computational resources and the possibility of combining other NER approaches with a transformer-based model could increase the performance and robustness of the model. Keywords: Named Entity Recognition, Natural Language Processing, BERT, low-resource languages}, language = {en} } @misc{Gurung, type = {Master Thesis}, author = {Gurung, Purnima}, title = {Sentiment Analysis in Nepali Tweets: Leveraging Transformer-Based Pre-trained Models}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:1383-opus4-19677}, school = {Hochschule Rhein-Waal}, abstract = {Despite the remarkable achievements of large transformer-based pre-trained models such as BERT and GPT in several Natural Language Processing (NLP) tasks, including Sentiment Analysis (SA), challenges remain for low-resource languages like Nepali. The Nepali language is written in the Devanagari script and has a complex grammatical structure and diverse linguistic features. Due to the absence of balanced datasets and computational resources for Nepali, achieving optimal results with the latest architectures remains challenging. For this reason, very few publicly available NLP models exist for Nepali, making research in this area difficult. This thesis attempts to address this gap by using pre-trained transformer models from Hugging Face specially tailored for Nepali, including BERT, DistilBERT, ALBERT, and DeBERTa, for sentiment analysis of Nepali tweets on relatively balanced datasets. The models are trained on large Nepali datasets and optimized for NLP tasks involving the Devanagari script. To evaluate the models' performance, various tokenization strategies are investigated in order to capitalize on transformer-based embeddings with the SoftMax function, and the results are assessed with a confusion matrix. The outcomes of the models are compared on the same datasets.
The study's results show that DistilBERT achieved the highest accuracy of 88\% on the Nepali sentiment analysis task, followed by BERT and DeBERTa at 83\% and 80\%, respectively, while ALBERT showed a lower accuracy of 70\%. The results of this approach offer valuable insights for the field of sentiment analysis in diverse linguistic contexts.}, language = {en} }