@phdthesis{Alhamzeh2023, author = {Alhamzeh, Alaa}, title = {Language Reasoning by means of Argument Mining and Argument Quality}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-12699}, school = {Universit{\"a}t Passau}, pages = {ix, 154 Seiten}, year = {2023}, abstract = {Understanding financial data has always been a point of interest for market participants to make better-informed decisions. Recently, different cutting-edge technologies have been addressed in the Financial Technology (FinTech) domain, including numeracy understanding, opinion mining, and financial document processing. In this thesis, we are interested in analyzing the arguments of financial experts with the goal of supporting investment decisions. Although various business studies confirm the crucial role of argumentation in financial communications, no work has addressed this problem as a computational argumentation task, that is, the automatic analysis of arguments. In this regard, this thesis presents contributions along the three essential axes of theory, data, and evaluation to fill the gap between argument mining and financial text. First, we propose a method for determining the structure of the arguments stated by company representatives during the public announcement of their quarterly results and future estimations through earnings conference calls. The proposed scheme is derived from argumentation theory at the micro-structure level of discourse. We further conducted the corresponding annotation study and published the first financial dataset annotated with arguments: FinArg. Moreover, we investigate the question of evaluating the quality of arguments in this financial genre of text. To tackle this challenge, we suggest using two levels of quality metrics, considering both the Natural Language Processing (NLP) literature on argument quality assessment and the peculiarities of the financial domain. Hence, we have also enriched the FinArg data with our quality dimensions to produce the FinArgQuality dataset. In terms of evaluation, we validate the principle of ensemble learning on the argument identification and argument unit classification tasks. We show that combining a traditional machine learning model with a deep learning one, via an integration model (stacking), improves the overall performance, especially in small dataset settings. In addition, although argument mining is mainly a domain-dependent task, to date the number of studies that tackle the generalization of argument mining models is still relatively small. Therefore, using our stacking approach and comparing it to the transfer learning model of DistilBert, we address and analyze three real-world scenarios concerning model robustness over completely unseen domains and unseen topics. Furthermore, with the aim of automatically assessing argument strength, we have investigated and compared different (refined) versions of Bert-based models that incorporate external knowledge in the decision layer. Consequently, our method outperforms the baseline model by 13 ± 2\% in terms of F1-score through integrating Bert with encoded categorical features. Beyond our theoretical and methodological proposals, our model of argument quality assessment, annotated corpora, and evaluation approaches are publicly available, and can serve as strong baselines for future work in both the FinNLP and computational argumentation domains.
Hence, building directly on this thesis, we proposed to the community a new task/challenge related to the analysis of financial arguments, FinArg-1, within the framework of the NTCIR-17 conference. We also used our proposals to participate in the Touch{\'e} challenge at the CLEF 2021 conference. Our contribution was selected among the «Best of Labs».}, language = {en} } @phdthesis{Grassl2024, author = {Graßl, Isabella}, title = {Diversity in Programming Education: Effects of Topic and Group Constellation on Young Programming Novices}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15049}, school = {Universit{\"a}t Passau}, pages = {xi, 256 Seiten}, year = {2024}, abstract = {The field of software engineering faces a significant diversity crisis, characterized by a critical lack of heterogeneity despite ongoing efforts to promote gender equality. The persistent male dominance in this domain has created an urgent need for more heterogeneous groups in software engineering. This lack of diversity not only hinders underrepresented groups from entering the field but also prevents them from gaining initial programming experiences, which are a core component of software engineering and essential for developing computational thinking. To address this crisis and its implications, early interventions are key in shaping positive perceptions, building confidence, and sparking initial interest in programming among underrepresented groups before societal stereotypes of programming as a nerdy field manifest. This means starting with basic programming courses for children and continuing through to first-year university students in order to foster technical skills and computational thinking, alongside creativity and collaboration. However, there is limited understanding of how introductory programming course designs impact diversity-dependent characteristics to create welcoming and learning-friendly environments. This understanding is particularly important for underrepresented groups, especially girls, to benefit from their first programming experiences, as they are often hindered by the initial perception of programming as (1) abstract and unappealing, and (2) non-social to novices. Engaging, creative, and relatable topics in programming courses might demystify complex programming concepts, making them more accessible, less intimidating, and appealing. However, understanding programming is not just about the content---it is also about the context in which it is learned. Introducing programming as a social activity is important, particularly for young learners. By emphasizing teamwork, we might encourage collaboration and peer support, counteracting the lone-wolf programmer stereotype. Therefore, this doctoral thesis investigates the effects of both key aspects in programming courses---(1) topic choices and (2) group constellations---on young programming novices. The aim is to provide a holistic understanding of how different course designs can support diverse learners and promote gender equality in programming education. While this research primarily addresses gender diversity due to the persistent gender gap in software engineering, it also examines additional diversity dimensions, including age, ethnicity, prior programming experience, disabilities, and educational background.
A total of 13 studies were conducted within this thesis, examining the current state of educational settings and utilizing various introductory programming courses designed for children aged 8 to 18, as well as first-year university students. These studies employed different programming environments, such as Scratch and Sonic Pi, and incorporated a variety of topics and group constellations to observe their effects on student outcomes. By using a mixed-methods design, data were gathered through surveys, observations, and both data-driven and manual code analysis. Key findings reveal that children utilize the programming environment to engage with and creatively express topics aligned with their interests, which also mostly align with gender stereotypes and include elements from internet and popular culture as well as socio-cultural narratives. However, gender-sensitive and neutral topic choices enhance engagement, self-efficacy, contribution, code quality, and creative output, while also helping to reduce stereotypical beliefs about programming, particularly among girls. In line with the findings for the course topic, group constellations also influence programming experiences. In particular, introducing pair programming in courses is a promising approach for young learners, but attention must be paid to mitigating socially learned gender-stereotypical behaviours. Another finding indicates that, unlike professional software teams, mixed-diverse student teams often encounter substantial challenges and thus benefit from clear communication guidelines and supportive environments to promote better collaboration. This doctoral thesis concludes with guidelines for designing more effective and inclusive introductory programming courses. These recommendations include using gender-sensitive course materials, allowing for creative freedom through topic choices while encouraging the use of advanced programming concepts, promoting collaboration through pair programming while fostering enhanced communication, boosting self-efficacy with quick positive feedback for girls in particular, and providing emotional support for underrepresented groups. By following these guidelines, educators can create more engaging, inclusive, and effective programming courses. This may ultimately promote a more equitable and diverse future generation of professional software developers while also fostering computational thinking and encouraging a broader interest in programming among all young learners.}, subject = {Softwareentwicklung}, language = {en} } @article{BecherGerl2022, author = {Becher, Stefan and Gerl, Armin}, title = {ConTra Preference Language: Privacy Preference Unification via Privacy Interfaces}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {14}, editor = {Sarne, Giuseppe Maria Luigi and Ma, Jianhua and Rosaci, Domenico and Srivastava, Gautam}, publisher = {MDPI}, address = {Basel, Switzerland}, issn = {1424-8220}, doi = {10.3390/s22145428}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11218}, pages = {18 Seiten}, year = {2022}, abstract = {After the enactment of the GDPR in 2018, many companies were forced to rethink their privacy management in order to comply with the new legal framework. These changes mostly affect the Controller, which has to achieve GDPR-compliant privacy policies and management. However, measures to give users a better understanding of privacy, which is essential to generate legitimate interest in the Controller, are often skipped.
We recommend addressing this issue through the use of privacy preference languages, whereby users define rules regarding their preferences for privacy handling. In the literature, preference languages only work with their corresponding privacy language, which limits their applicability. In this paper, we propose the ConTra preference language, which we envision will support users during privacy policy negotiation while meeting current technical and legal requirements. Therefore, ConTra preferences are defined, showing the language's expressiveness, extensibility, and applicability in resource-limited IoT scenarios. In addition, we introduce a generic approach that provides privacy language compatibility for unified preference matching.}, language = {en} } @phdthesis{Lachat2024, author = {Lachat, Paul}, title = {Detecting Inference Attacks Involving Sensor Data}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14149}, school = {Universit{\"a}t Passau}, pages = {xiii, 141 Seiten}, year = {2024}, abstract = {The collection of personal information by organizations has become increasingly essential for social interactions. Nevertheless, according to the GDPR (General Data Protection Regulation), organizations have to protect the collected data. Access Control (AC) mechanisms are traditionally used to secure information systems against unauthorized access to sensitive data. The increased availability of personal sensor data, thanks to IoT-oriented applications, motivates new services to offer insights about individuals. Consequently, data mining algorithms have been proposed to infer personal insights from collected sensor data. Although these algorithms can be used for genuine purposes, attackers can leverage their outcomes, combining them with other types of data, to further breach individuals' privacy. Thus, bypassing AC mechanisms thanks to such insights is a concrete problem. We propose an inference detection system based on the analysis of queries issued on a sensor database. The knowledge obtained through these queries, and the inference channels corresponding to the use of data mining algorithms on sensor data to infer individual information, are described using the Raw sensor data based Inference ChannEl Model (RICE-M). The detection is carried out by the RICE-M based inference detection System (RICE-Sy). RICE-Sy considers, at the time of a query, the knowledge that a user obtains via the new query and has obtained via their query history, and determines whether this knowledge is sufficient to allow that user to operate a channel.
Thus, privacy protection systems can take advantage of the inferences detected by RICE-Sy, taking into account the information about individuals that attackers obtain via a sensor database, to further protect these individuals.}, language = {en} } @article{AnagnostopoulosTeymuriSeratietal.2023, author = {Anagnostopoulos, Nikolaos Athanasios and Teymuri, Benyamin and Serati, Reza and Rasti, Mehdi}, title = {LP-MAB: Improving the Energy Efficiency of LoRaWAN Using a Reinforcement-Learning-Based Adaptive Configuration Algorithm}, series = {Sensors}, volume = {23}, journal = {Sensors}, number = {4}, editor = {Xie, Bin and Wang, Ning and Gu, Yi and Stefanidis, Angelos}, publisher = {MDPI}, address = {Basel, Switzerland}, issn = {1424-8220}, doi = {10.3390/s23042363}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11853}, pages = {22 Seiten}, year = {2023}, abstract = {In the Internet of Things (IoT), Low-Power Wide-Area Networks (LPWANs) are designed to provide low energy consumption while maintaining a long communication range for End Devices (EDs). LoRa is a communication protocol that can cover a wide range with low energy consumption. To evaluate the efficiency of the LoRa Wide-Area Network (LoRaWAN), three criteria can be considered, namely, the Packet Delivery Rate (PDR), Energy Consumption (EC), and coverage area. A set of transmission parameters has to be configured to establish a communication link. These parameters can affect the data rate, noise resistance, receiver sensitivity, and EC. The Adaptive Data Rate (ADR) algorithm is a mechanism to configure the transmission parameters of EDs, aiming to improve the PDR. Therefore, we introduce a new algorithm using the Multi-Armed Bandit (MAB) technique to configure the EDs' transmission parameters in a centralized manner on the Network Server (NS) side, while also improving the EC. The performance of the proposed algorithm, the Low-Power Multi-Armed Bandit (LP-MAB), is evaluated through simulation results and is compared with other approaches in different scenarios. The simulation results indicate that LP-MAB outperforms other algorithms in terms of EC while maintaining a relatively high PDR in various circumstances.}, language = {en} } @phdthesis{Auer2024, author = {Auer, Michael}, title = {Improving Automated Android Test Generation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14955}, school = {Universit{\"a}t Passau}, pages = {x, 140 Seiten}, year = {2024}, abstract = {Mobile apps are nowadays the preferred means to accomplish ubiquitous tasks like messaging, e-commerce and even playing games. Often, there exist multiple apps for the same purpose, and it is the choice of the end user to pick an appropriate app. Apps that behave unexpectedly, e.g., crash frequently, are sooner or later replaced, which is undesirable for the companies developing such apps. Thus, it is essential to test apps properly before they are released onto the market. However, testing manually is often not only too cost-intensive but also too time-consuming in the short development phase; thus, an automated solution is preferred. Testing mobile apps automatically has received increased attention in the last decade, primarily from academia, and several testing techniques have evolved. One technique that has yielded promising results across different domains is search-based software testing, in which a metaheuristic, e.g., a genetic algorithm, is applied to solve an optimisation problem, e.g., test generation.
A main objective of test generation is to produce tests that reveal as many faults as possible. This in turn requires the generation of tests that deeply explore the tested app. The core metric to quantify how much code tests cover is code coverage, which can be computed at different levels of granularity, ranging from the fraction of covered activities to a very fine-grained measurement of the percentage of covered lines. This coverage information is then often used to guide the search of the employed metaheuristic. However, current automated test generation approaches produce tests with a rather low code coverage. Thus, a substantial part of tested apps remains unexplored, which in turn prevents deeply residing faults from being revealed. We identified three core issues that are directly related to the generation of low-coverage tests. First, the applicability of current test generators is often limited. This comprises the fact that current state-of-the-art code coverage tools are incapable of instrumenting a substantial number of apps; consequently, test generators cannot utilise detailed coverage information during exploration. In addition, test generators are often only equipped with a primitive set of actions that are insufficient to simulate system events and complex user inputs. Second, the test execution is extremely time-consuming. This includes, among other things, the overhead associated with executing individual actions, intermediate restart operations, and fitness evaluations. Since search-based algorithms require a substantial number of test executions to play out their strengths, the slow test execution impedes the effectiveness of the search. Third, the guidance offered by search-based algorithms is often hampered by applying inadequate fitness functions or by using non-representation-specific variation operators. In this thesis, we address the problem of low-coverage tests in the Android domain by proposing several enhancements for the three identified core issues. Concerning the applicability problem, we provide the implementation of a robust code coverage tool that is capable of measuring coverage at different levels of granularity and requires no access to the source code. We also propose to include actions that can simulate system events as well as complex user inputs. Regarding the performance issue, we suggest the integration of a surrogate model that is capable of predicting the outcome of individual actions or complete tests over time in order to reduce the overall test execution costs. With respect to the lack of guidance offered by traditional search-based algorithms, we suggest alternative search strategies. In the case of a deceptive fitness landscape, we propose using novelty search algorithms. Alternatively, we suggest utilising estimation of distribution algorithms that require no crossover or mutation operators to sample new tests. While all those enhancements had a positive impact on the Android test generation process, the individual empirical studies highlighted that further research is necessary to unleash the full power of the proposed search-based algorithms. In particular, exploring complex user interfaces meaningfully requires more attention, whether by introducing additional actions or by extracting valuable hints to infer reasonable text inputs.
In addition, the guidance offered by fitness functions is often limited because they are either designed too coarsely or do not accurately reflect the search objectives.}, language = {en} } @article{vonderHeydeGerl2022, author = {von der Heyde, Markus and Gerl, Armin}, title = {Entwicklungsstand der CIO-Funktion und hochschul{\"u}bergreifenden IT-Governance im Kontext der Digitalen Transformation an Hochschulen in Bayern}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {2022}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {59}, publisher = {Springer Nature}, address = {Berlin}, doi = {10.1365/s40702-022-00872-x}, url = {http://nbn-resolving.de/urn:nbn:de:101:1-2022072222053919381076}, pages = {881 -- 895}, year = {2022}, abstract = {Die Hochschulen befinden sich durch vielf{\"a}ltige Ver{\"a}nderungsprozesse in Verbindung mit dem Einsatz von Informationstechnologien (IT) auf dem Weg der Digitalen Transformation. Diese Digitale Transformation der Hochschulen umfasst intensive Ver{\"a}nderungsprozesse in der gesamten Hochschulkultur in Lehre, Forschung und Verwaltung in {\"u}bergreifender und strukturierter Weise. Seit vielen Jahren werden vielf{\"a}ltige Digitalisierungsvorhaben zur Modernisierung von einzelnen Prozessen an den Hochschulen umgesetzt. Die Leitungen der Rechenzentren leisten mit der Umsetzung von IT-Projekten einen zentralen Beitrag zu diesem Wandel. Mit der Einf{\"u}hrung der CIO-Funktion in den Hochschulleitungen und der hochschul{\"u}bergreifenden Kooperationen hat sich die IT-Governance weiterentwickelt. Insbesondere f{\"u}r die Digitale Transformation werden Strukturen zur Koordination der {\"u}bergreifenden Vorhaben ben{\"o}tigt, wobei zus{\"a}tzlich zur IT-Leitung eine Vielzahl von Funktionstr{\"a}gern mit fachlichen Aufgaben aus Forschung, Lehre und Verwaltung involviert ist. Es stellt sich die Frage, wie die Digitale Transformation an Hochschulen gesteuert werden kann und in welcher organisatorischen Form sich die Aufgaben und Verantwortlichkeiten im Hochschulkontext realisieren lassen. An der Weiterentwicklung der IT-Governance an bayerischen Hochschulen wird beispielhaft erl{\"a}utert, welche {\"u}bergreifenden Aufgaben der Koordination von Bedarf und Versorgung mit IT-Services zwischen und innerhalb der Hochschulen bestehen. Die CIO-Funktion wird durch die Verankerung in der Leitungsebene der Funktion des Chief Digital Officers (CDO) aus der Wirtschaft {\"a}hnlicher, auch wenn in Hochschulen aufgrund der klassischen Ressort-Einteilung die Rolle oft als Vizepr{\"a}sident:in f{\"u}r Digitalisierung bezeichnet wird.}, language = {de} } @phdthesis{Kochendoerfer2026, author = {Kochend{\"o}rfer, Laura}, title = {Rethinking systems use in information systems research - theories on individuals' use of multiple information systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-20164}, school = {Universit{\"a}t Passau}, pages = {vi, 99 Seiten}, year = {2026}, abstract = {While individuals use multiple information systems (IS) every day, research on IS use predominantly investigates use with respect to only one information system at a time. In light of this "single-IS paradigm" (Gerlach \& Cenfetelli, 2022), theoretical and empirical insights into the nature and behavioral manifestations of multiple IS use remain limited. To advance the discipline's understanding of the multiple IS use reality, this dissertation theorizes mechanisms that are idiosyncratic to the context of individuals' multiple IS use.
Based on grounded theory methodology and interview data from individuals using multiple IS, this dissertation contributes two theories on multiple IS use in two essays. The first essay introduces an analytical theory of eight different interdependencies-in-use as core mechanisms that emerge as users engage with multiple IS. The second essay builds on these interdependencies-in-use and examines how one type of interdependency manifests in behavior. The resulting process theory explains a behavioral phenomenon resulting from multiple IS use: users transferring usage behaviors from one IS to another. This dissertation contributes a theoretical framework for conceptualizing multiple IS use with its underlying mechanisms, enabling future research to systematically investigate multiple IS use and related phenomena. It further enriches insights on usage behavior with a multiple IS perspective, indicating that multiple IS use contexts give rise to unique behavioral dynamics. With that, the current conversation in IS use research, which focuses on single IS use, is extended with new theoretical insights on the use of multiple IS. The dissertation offers additional recommendations for practitioners to consider the interdependent way individuals use multiple IS.}, language = {en} } @phdthesis{Frank2026, author = {Frank, Florian}, title = {Integrating physical unclonable functions from novel nanomaterials, circuit elements, and memory technologies into future hardware architectures}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-20104}, school = {Universit{\"a}t Passau}, pages = {XVI, 190 Seiten}, year = {2026}, abstract = {Cryptographic keys are fundamental components for ensuring security in digital systems. To ensure reliable key generation and management, various technical concepts have been developed, primarily based on dedicated hardware components such as Trusted Platform Modules (TPMs). However, many modern systems, especially small resource-constrained devices, typically lack hardware support for secure key generation and management. To address these limitations, Physical Unclonable Functions (PUFs) have proven to be an effective solution for key generation, device authentication, and identification tasks. PUFs leverage inherent variations in hardware components to produce unique, device-specific keys. For a well-designed PUF, these keys can be reproduced reliably on the same device but are practically impossible to clone. Various types of PUFs exist, including those that exploit slight delay differences in circuits with symmetric paths. Others rely on physical characteristics of components already present in the computing system, such as SRAM or DRAM. However, many of these constructions rely on technologies that could be replaced by emerging ones in the future. Such a replacement may involve a transition from traditional memory technologies, such as SRAM, DRAM, and flash memory, to emerging Non-Volatile Memories (NVMs), including Ferroelectric RAM (FRAM), Magnetoresistive RAM (MRAM), and Resistive RAM (ReRAM). These new technologies, in turn, necessitate innovative hardware security solutions for generating intrinsic hardware fingerprints, ensuring security for next-generation embedded devices. Furthermore, the integration of nanomaterials, such as carbon nanotubes, into processor architectures and the adoption of reconfigurable hardware platforms like Field-Programmable Gate Arrays (FPGAs) require the development of specifically tailored cybersecurity solutions.
This dissertation aims to develop hardware-based security mechanisms for these types of devices by designing new PUF constructions and demonstrating their practical applications. One focus lies on PUFs extracted from nanomaterials and emerging circuit elements, particularly memristive devices and Carbon NanoTube Field-Effect Transistors (CNT-FETs). For memristive devices, which form the basis of ReRAM memory, this work analyzes methods ranging from simple binary quantization to advanced techniques exploiting device-specific response patterns. In the case of CNT-FETs, custom-fabricated wafers are developed to construct PUFs with optimal properties, such as high robustness, uniformity, and entropy, even under varying environmental conditions, including fluctuations in ambient temperature. Based on an analysis of fundamental system components, this work evaluates the feasibility of deriving PUFs from fully integrated circuits. A specific focus is placed on emerging non-volatile memory technologies, assessing their potential for PUF applications. To achieve PUF behavior in these memory devices, techniques such as intentional timing manipulation, induced bit flips through row hammering, and variations in supply voltage are examined. The resulting bit flips can be exploited as PUF responses. Additionally, transforming raw PUF responses into cryptographically usable keys and integrating specific PUFs into practical applications are core components of this work. The demonstrated practical applications include an innovative architecture for encrypting and binding data to non-volatile memory modules, implemented on Multiprocessor System-on-Chips (MPSoCs) incorporating FPGAs. This architecture enables the storage of confidential data on non-volatile memory while simultaneously using the same module as a PUF, without requiring separate memory partitions solely for the PUF functionality. Finally, practical applications of hardware fingerprints in the automotive sector are demonstrated, including an FPGA-based implementation that maintains security while preserving the temporal determinism of time-critical messages. These goals are met through the use of hardware-implemented cryptographic algorithms coupled with an FPGA-based ring oscillator PUF. To summarize, this work presents new types of PUF implementations, starting with nanomaterials and emerging circuit elements, extending to PUFs derived from integrated circuits, and demonstrates innovative solutions for their integration into MPSoC-based architectures.}, language = {en} }