@phdthesis{Frank2026, author = {Frank, Florian}, title = {Integrating physical unclonable functions from novel nanomaterials, circuit elements, and memory technologies into future hardware architectures}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-20104}, school = {Universit{\"a}t Passau}, pages = {XVI, 190 Seiten}, year = {2026}, abstract = {Cryptographic keys are fundamental components for ensuring security in digital systems. To ensure reliable key generation and management, various technical concepts have been developed, primarily based on dedicated hardware components such as Trusted Platform Modules (TPMs). However, many modern systems, especially small resource-constrained devices, typically lack hardware support for secure key generation and management. To address these limitations, Physical Unclonable Functions (PUFs) have proven to be an effective solution for key generation, device authentication, and identification tasks. PUFs leverage inherent variations in hardware components to produce unique, device-specific keys. For a well-designed PUF, these keys can be reproduced reliably on the same device but are practically impossible to clone. Various types of PUFs exist, including those that exploit slight delay differences in circuits with symmetric paths. Others rely on physical characteristics of components already present in the computing system, such as SRAM or DRAM. However, many of these constructions rely on technologies that could be replaced by emerging ones in the future. Such a replacement may involve a transition from traditional memory technologies, such as SRAM, DRAM, and flash memory, to emerging Non-Volatile Memories (NVMs), including Ferroelectric RAM (FRAM), Magnetoresistive RAM (MRAM), and Resistive RAM (ReRAM). These new technologies, in turn, necessitate innovative hardware security solutions for generating intrinsic hardware fingerprints, ensuring security for next-generation embedded devices. Furthermore, the integration of nanomaterials, such as carbon nanotubes, into processor architectures and the adoption of reconfigurable hardware platforms like Field-Programmable Gate Arrays (FPGAs) require the development of specifically tailored cybersecurity solutions. This dissertation aims to develop hardware-based security mechanisms for these types of devices by designing new PUF constructions and demonstrating their practical applications. One focus lies on PUFs extracted from nanomaterials and emerging circuit elements, particularly memristive devices and Carbon NanoTube Field-Effect Transistors (CNT-FETs). For memristive devices, which form the basis of ReRAM, this work analyzes methods ranging from simple binary quantization to advanced techniques exploiting device-specific response patterns. In the case of CNT-FETs, custom-fabricated wafers are developed to construct PUFs with optimal properties, such as high robustness, uniformity, and entropy, even under varying environmental conditions, including fluctuations in ambient temperature. Based on an analysis of fundamental system components, this work evaluates the feasibility of deriving PUFs from fully integrated circuits. A specific focus is placed on emerging non-volatile memory technologies, assessing their potential for PUF applications. To achieve PUF behavior in these memory devices, techniques such as intentional timing manipulation, induced bit flips through row hammering, and variations in supply voltage are examined.
The resulting bit flips can be exploited as PUF responses. Additionally, transforming raw PUF responses into cryptographically usable keys and integrating specific PUFs into practical applications are core components of this work. The demonstrated practical applications include an innovative architecture for encrypting and binding data to non-volatile memory modules, implemented on Multiprocessor System-on-Chips (MPSoCs) incorporating FPGAs. This architecture enables the storage of confidential data on non-volatile memory while simultaneously using the same module as a PUF, without requiring separate memory partitions solely for the PUF functionality. Finally, practical applications of hardware fingerprints in the automotive sector are demonstrated, including an FPGA-based implementation to maintain security while preserving the temporal determinism of time-critical messages. These goals are met through the use of hardware-implemented cryptographic algorithms coupled with an FPGA-based ring oscillator PUF. To summarize, this work presents new types of PUF implementations, starting with nanomaterials and emerging circuit elements, extending to PUFs derived from integrated circuits, and demonstrates innovative solutions for their integration into MPSoC-based architectures.}, language = {en} } @phdthesis{Wendlinger2026, author = {Wendlinger, Lorenz}, title = {Structure-aware Deep Learning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19892}, school = {Universit{\"a}t Passau}, pages = {XX, 176 Seiten}, year = {2026}, abstract = {Graph structures permeate the digital landscape in explicit and implicit forms. They connect or construct artifacts by combining semantic and structural information. We also observe them in the systems designed to process this data, in their learning algorithms and the very nature of the tasks they solve. At the same time, machine learning methods are extremely data-hungry, requiring petabytes of data for training. Due to their complexity, graphs remain an under-utilized resource in this regard. Many approaches cannot incorporate them because they are fully structurally unaware or not suited to the specific flavour of graphs encountered in some domains. This disconnect is sub-optimal from an effectiveness and efficiency perspective. We present methods that extend the scope of structure-aware deep learning through structural knowledge integration and enrichment, structural performance prediction, and synergistic transfer learning. Knowledge graphs organize information and make it directly available for querying. They provide a structured inference interface for manual and automated inspection, though they can suffer from data quality issues and require careful schema design. We rephrase the reconciliation of knowledge in knowledge graphs as a link prediction task, making it tractable with adapted graph neural networks, while also benefiting conventional link prediction tasks. We further combine textual semantics and structural expression for legal reference prediction via adapted heterogeneous graph neural networks operating on complex meta-information enriched graphs. Additionally, we explore methods for the integration of intermediary expressions in strongly typed heterogeneous graphs, improving prediction via meta-path-based processing. We also develop methods for automated machine learning workflow analysis and performance prediction.
This includes learning salient representations for the management of workflows as well as their improvement through automatic suggestion and refinement of components. These are then extended to Neural Architecture Search performance prediction, including adaptation to operation-on-edge spaces. Finally, we investigate the transfer capability of pre-trained attention structures for text-based prediction tasks and find it to be both inferior to directly optimized attention masks and highly dependent on inherent domain knowledge. We also show that the exploitation of hierarchical task formulation can improve prediction performance through joint learning in diverse learning domains, including link prediction, performance prediction, and specialized and general argumentation mining. The dissertation contains previously published or submitted texts: Wendlinger, L., H{\"u}bscher, G., Ekelhart, A., Granitzer, M. (2022). Reconciliation of Mental Concepts with Graph Neural Networks. In: Strauss, C., Cuzzocrea, A., Kotsis, G., Tjoa, A.M., Khalil, I. (eds) Database and Expert Systems Applications. DEXA 2022. Lecture Notes in Computer Science, vol 13427, p 133-146. Springer, Cham. https://doi.org/10.1007/978-3-031-12426-6_11; Wendlinger, L., Granitzer, M. (2024). Informed Heterogeneous Attention Networks for Metapath Based Learning. In: SAC '24: Proceedings of the 39th ACM/SIGAPP Symposium on Applied Computing, p 458-465, ACM, New York. https://doi.org/10.1145/3605098.3635890; Wendlinger, L., Nonn, S.A., Al Zubaer, A., Granitzer, M. (2026). The Missing Link: Joint Legal Citation Prediction Using Heterogeneous Graph Enrichment. In: Wrembel, R., Kotsis, G., Tjoa, A.M., Khalil, I. (eds) Database and Expert Systems Applications. DEXA 2025. Lecture Notes in Computer Science, vol 16047, p 197-211. Springer, Cham. https://doi.org/10.1007/978-3-032-02088-8_14; Wendlinger, L., Stier, J., Granitzer, M. (2021). Evofficient: Reproducing a Cartesian Genetic Programming Method. In: Hu, T., Louren{\c{c}}o, N., Medvet, E. (eds) Genetic Programming. EuroGP 2021. Lecture Notes in Computer Science, vol 12691, p 162-178. Springer, Cham. https://doi.org/10.1007/978-3-030-72812-0_11; Wendlinger, L., Berndl, E., Granitzer, M. (2021). Methods for Automatic Machine-Learning Workflow Analysis. In: Dong, Y., Kourtellis, N., Hammer, B., Lozano, J.A. (eds) Machine Learning and Knowledge Discovery in Databases. Applied Data Science Track. ECML PKDD 2021. Lecture Notes in Computer Science, vol 12979, p 52-67. Springer, Cham. https://doi.org/10.1007/978-3-030-86517-7_4; Wendlinger, L., Granitzer, M., Fellicious, C. (2023). Pooling Graph Convolutional Networks for Structural Performance Prediction. In: Nicosia, G., et al. (eds) Machine Learning, Optimization, and Data Science. LOD 2022. Lecture Notes in Computer Science, vol 13811, p 1-16. Springer, Cham. https://doi.org/10.1007/978-3-031-25891-6_1; Wendlinger, L., Braun, C., Al Zubaer, A., Nonn, S.A., Großkopf, S., Fellicious, C., Granitzer, M. On the Suitability of pre-trained foundational LLMs for Analysis in German Legal Education, submitted to the proceedings of the International Conference on Machine Learning, Optimization, and Data Science 2025, preprint published: https://doi.org/10.48550/arXiv.2412.15902; Wendlinger, L., Kuhn, R., Mitrovic, J., Granitzer, M. (2025). Joint Learning for Efficient German Argument Mining. In: 2025 IEEE 37th International Conference on Tools with Artificial Intelligence (ICTAI), Athens, Greece, 2025, p 770-777. IEEE, Los Alamitos.
https://doi.org/10.1109/ICTAI66417.2025.00111.}, language = {en} } @phdthesis{Neuwirth2026, author = {Neuwirth, Daniel}, title = {Einbettung und Charakterisierung von aligned bar 1-visibility Graphen und outer fan free Graphen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19930}, school = {Universit{\"a}t Passau}, pages = {VII, 247 Seiten}, year = {2026}, abstract = {This thesis studies three different classes of graphs: the bar (1;∞)-visibility graphs, the aligned bar 1-visibility graphs, and the outer fan free graphs. The classes are characterized by their possible embeddings. In a bar (1;j)-visibility representation, every vertex is drawn as a horizontal bar and every edge as a vertical segment. An edge may cross a vertex exactly once, and a vertex may be crossed up to j times. We extend the results of Dean et al. and give examples of maximum density for bar (1;2)-visibility, bar (1;3)-visibility, and bar (1;4)-visibility graphs, and we give a maximally sparse graph for the class of bar (1;∞)-visibility graphs. We show that the classes of bar (1;j)-visibility graphs form an infinite hierarchy for 1 ≤ j < ∞. Finally, we prove that the recognition problem of whether a graph has a bar (1;∞)-visibility representation is NP-complete. The class of aligned bar 1-visibility graphs (AB1V) is obtained by rotating the bar (1;∞)-visibility representation by 90 degrees and extending all vertices so that they all start at y-coordinate 0. The relative position with respect to the x-coordinate is described by the t-order, and the relative position with respect to the y-coordinate by the r-order. We extend the results of Felsner and Massow for the class of AB1V graphs concerning their maximum density and the minimum degree of a vertex. We introduce the method of path addition, whose closure properties allow us to distinguish whether or not a graph lies in a given class. We use this method to examine the relationship between the class of AB1V graphs and other classes. For the class of maximal graphs, we give a sparse graph and a lower bound on the density. We present an algorithm that computes a book embedding from an AB1V embedding. For the class of optimal AB1V graphs, we give an embedding algorithm. We improve the recognition algorithm of Felsner and Massow that decides whether a graph with a given t-order admits an AB1V embedding. For the class of distinct strong AB1V graphs, i.e., graphs in which every vertex has a distinct r-order and which are maximal with respect to the r-order, we give an algorithm that computes a possible embedding in O(n^6) time. Finally, we show for this class that there are exponentially many different embeddings. A graph has an outer fan free embedding if all vertices are incident to one face and no edge is crossed by two edges that are adjacent to a common vertex. We first study the density of this class. Furthermore, we explore the relationship between the classes of AB1V, RAC, and k-planar graphs.
Finally, we give a reduction from NAE-3-SAT to the recognition problem of outer fan free graphs.}, language = {de} } @phdthesis{Schiermeier2025, author = {Schiermeier, Kathrin}, title = {Multidimensional Wavelets and Neural Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19742}, school = {Universit{\"a}t Passau}, pages = {xx, 168 Seiten}, year = {2025}, abstract = {The construction of scaling functions and wavelets in multiple dimensions and for arbitrary scaling matrices is a challenging task that entails considerable complexity. Existing approaches mainly focus on the two-dimensional case using dyadic or quincunx sampling. This thesis aims to develop a method to construct multidimensional scaling and wavelet filters yielding orthogonal scaling functions and wavelets using convolutional neural networks. We start by recalling essential fundamentals of ideals, modules, Fourier analysis, filterbanks and multiresolution analyses, where the mentioned concepts are already considered in an arbitrary-dimensional setting to prepare the proof of the main result. There, we show the connection between multivariate scaling functions and multidimensional filters possessing certain properties. This enables us to construct scaling functions and corresponding wavelets by discrete filter design. Exploiting the link between the discrete wavelet decomposition, filterbanks and neural networks, we utilize the latter to do so. We especially focus on the Cohen criterion, the main difficulty of this process, which concerns the zeros of the modulus of the Fourier transform of the scaling filter, a multivariate trigonometric polynomial. After transferring the Bernstein inequality for univariate trigonometric polynomials to multiple dimensions, we present a method to derive a finite set of inequality constraints implying that the Cohen criterion holds true for a given multivariate cosine sum. Afterwards, we introduce neural networks and TensorFlow as the main tools to execute the described approach, formulate this objective as an optimization problem and present some smaller numerical experiments and their results. A second objective of this thesis is the construction of filters possessing a unimodular modulation vector and therefore the ability to be completed to a perfect reconstruction filterbank. Both the construction and the filterbank completion can also be considered in a neural network framework, as we detail in the last section of this thesis alongside corresponding numerical experiments. In the context of filterbank completion, a further observation is presented that allows any given interpolatory filter to be completed to a perfect reconstruction filterbank in a very intuitive and simple way. Furthermore, we explain that any given unimodular filter can be rendered interpolatory through prefiltering.}, language = {en} } @phdthesis{Henle2025, author = {Henle, Mona}, title = {Multi-Leader Congestion Games with an Adversary}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19683}, school = {Universit{\"a}t Passau}, pages = {112 Seiten}, year = {2025}, abstract = {In this thesis, we introduced a congestion game with multiple leaders and a single follower (adversary), which is motivated by security applications with congestion effects. Our objective was to understand the outcome and the impact of selfishly acting individuals in these games.
In this regard, we analyzed the existence, the computation and the quality of (approximate) pure Nash equilibria (PNE). First, we observed that an exact pure Nash equilibrium always exists in the resulting strategic game among the leaders if the resource cost coefficients are identical and the underlying congestion game is a matroid congestion game. If one of these two conditions is not fulfilled, the existence of a PNE is in general no longer guaranteed. Consequently, we focused on approximate equilibria. For the case of symmetric singleton strategies, one of our main results established that K ≈ 1.1974, the unique solution of a cubic polynomial equation, is the smallest possible factor such that the existence of a K-approximate equilibrium is guaranteed for all instances of the game. To this end, we presented an efficient algorithm which computes a K-approximate PNE. Furthermore, we showed that the factor K is tight by providing an instance where no α-approximate PNE with α < K exists. However, for a specific symmetric singleton instance there might be a better α-approximate PNE, i.e., with α < K. A given instance could even admit an exact PNE. We therefore provided a polynomial-time procedure that computes a best approximate PNE of a given instance. In particular, this procedure can verify the existence of an exact PNE in a given instance efficiently and, if it exists, can also determine the corresponding load vector. Finally, for symmetric singleton instances with two resources, we compared the total cost of a best (cheapest) and worst (most expensive) PNE to the total cost of an optimal outcome, termed the price of stability (PoS) and the price of anarchy (PoA), respectively. In particular, we verified that the PoS and the PoA are both 4/3.}, subject = {Spieltheorie}, language = {en} } @phdthesis{Ellinger2025, author = {Ellinger, Simon}, title = {On optimal error rates for strong approximation of stochastic differential equations with irregular drift coefficients}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19634}, school = {Universit{\"a}t Passau}, pages = {179 Seiten in verschiedenen Seitenz{\"a}hlungen}, year = {2025}, abstract = {In this dissertation we study strong approximation of stochastic differential equations (SDEs) with irregular drift coefficients at the final time point or globally in time by methods that use only finitely many evaluations of the driving Brownian motion. We show the optimality of well-known methods, such as the Euler-Maruyama scheme or a transformed Milstein scheme, for classes of piecewise Lipschitz continuous, H{\"o}lder continuous and Sobolev regular drift coefficients. To do this, we derive the optimal error rates for the different classes of irregular drift coefficients. Furthermore, we show that the solution of an SDE with piecewise H{\"o}lder continuous drift coefficient has a regular local density, which is used in the proofs of the lower bounds.}, language = {en} } @phdthesis{Prummer2025, author = {Prummer, Michael}, title = {Asset Tokenization and Authentication in the Industrial Metaverse}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19566}, school = {Universit{\"a}t Passau}, pages = {xviii, 196 Seiten}, year = {2025}, abstract = {The Industrial Revolution is a crucial development step in human history that started three centuries ago and is still ongoing. It continually influences and shapes the globalized world.
Today, industries account for 20\% of carbon dioxide emissions worldwide and consume more than a third of the energy produced globally. Current problems, such as climate change, increasing waste, and pollution, require simultaneous optimization across all industrial domains, infrastructure, and systems as they depend on each other. The global industry faces the immense challenge of providing for a surging world population expected to peak at 10.4 billion people in the mid-2080s, as reported by the United Nations. Hence, industries are expected to become less resource-intensive, more sustainable, and more resilient to disrupted supply chains while producing for a growing population over the coming decades. The Fourth Industrial Revolution, or Industry 4.0 (I4.0), started around 2010 and is an ongoing transformation of industrial processes towards digitalization, creating smart factories in which digital data is integrated across the entire manufacturing cycle. I4.0 is highly information-intensive and requires immense amounts of data to simulate and predict essential operations based on a digital shadow of the factory, a so-called digital twin. The Metaverse is considered a digitalization megatrend merging digital and physical worlds, creating immersive experiences and new opportunities for interaction and innovation across various sectors and industries. The vision of the Metaverse promotes interconnected and interoperable real-time 3D virtual worlds that can be frictionlessly traversed while sustaining ownership of one's assets under a self-sovereign identity in a decentralized environment without platform lock-in to a specific ecosystem. Therefore, the Metaverse creates an immersive parallel reality with collective virtually shared spaces for entertainment, social interactions, education, and a new working environment. The Industrial Metaverse synthesizes Metaverse concepts with current industrial automation, such as I4.0, to deepen the digital-physical convergence by interconnecting internal and external systems to enable decision-making and predictions based on significantly broader knowledge. An Industrial Metaverse factory is mirrored in its entirety, integrating digital twins of all types of equipment, assets, and other entities that can communicate vertically and horizontally, as well as knowledge about relevant external systems and industrial core sectors. Through the comprehensive data integration of the Industrial Metaverse, AI-driven applications can predict future events, reducing system and hardware failures. Furthermore, the interconnected virtual environments create a meta-ecosystem for global collaboration, providing spaces for solving complex problems such as engineering and product design tasks and for simulating product twins, reducing development time and costs. The connected industrial ecosystems create a token-based digital economy for exchanging data, assets, and services across metaverses, connecting isolated data silos. Sharing digital twin resources and services with other systems enables innovative applications and growing ecosystems. The theoretical part of this thesis defines the essential characteristics and key technologies of the Industrial Metaverse to derive a reference architecture for a decentralized system of systems, outlining the fundamental Industrial Metaverse building blocks. Interoperable data exchange, access management, and system communication are critical challenges.
In particular, the interoperability of assets, such as 3D files that come in different formats, and of identities must be ensured to allow movement between virtual environments. The unique fusion of technologies leverages interconnected digital twins in the context of immersion, interaction, and collaboration for secure, autonomously governed, decentralized industrial applications. Hence, the Industrial Metaverse requires the possibility of exchanging assets, products, and services across all systems in a secure manner. Distributed ledger technology enables tamper-proof transactions of assets and value in a decentralized token economy. Therefore, we investigate the feasibility of current tokenization methods for industrial assets, in particular, Printed Circuit Board (PCB) designs and 3D models. We contribute methods to create unique fingerprints of PCB designs to enable their exchange in the token economy. We investigate how to bind files in different formats and quality representations to the same token. A robust multi-file binding based on the copper layers of a PCB design was achieved by calculating an adaptive perceptual hash of all files. The adaptive perceptual hash was evaluated against numerous tamperings of the routing layout of a PCB, showing decent resistance to layout changes. The resulting adaptive perceptual hash can be used as an additional identification attribute in a tokenized asset. Furthermore, assets must be authenticatable and verifiable by marketplaces, manufacturers, and other participants to create trust in a decentralized environment. While assets can be tampered with to manipulate, for example, the cryptographic hashes that link a file to its token, perceptual hashes capture the perceived or functional similarity of two objects rather than plain file integrity. Without the possibility of verifying and protecting intellectual property, mass adoption of the Metaverse and Industrial Metaverse is unlikely. Therefore, we contribute to detecting tampering attacks on 3D models by introducing a 3D perceptual hash that is robust to a set of mesh manipulations, enabling the trusted exchange and authentication of 3D data in the Metaverse.}, language = {en} } @phdthesis{Danner2025, author = {Danner, Julian}, title = {SAT Solving Using XOR-OR-AND Normal Forms and Cryptographic Fault Attacks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19171}, school = {Universit{\"a}t Passau}, pages = {vi, 237 Seiten}, year = {2025}, abstract = {The Boolean satisfiability problem (SAT) lies at the core of computational logic and has found many applications in verification, cryptography, and artificial intelligence. While conflict-driven clause learning (CDCL) SAT solvers excel on large industrial instances, they struggle with XOR-rich instances arising frequently in cryptanalysis, due to the inefficiency of conjunctive normal form (CNF) encodings of linear constraints. Conversely, algebraic approaches can work with linear XOR constraints naturally but fail to scale to relevant sizes. Bridging these complementary paradigms with a focus on cryptographic problems is at the heart of this thesis. On the one hand, this dissertation advances SAT solving by introducing the XOR-OR-AND normal form (XNF) as a generalization of CNF, where literals are replaced by XOR chains of literals. This allows for a native representation of XOR constraints. We generalize the CDCL architecture to the richer language of XNFs.
The underlying reasoning is based on the proof system SRES, which is shown to be exponentially stronger than classical resolution. An implementation demonstrates competitive performance and often surpasses state-of-the-art algebraic and logic solvers on random and cryptographic benchmarks. Furthermore, we prove that every XNF formula can be converted in polynomial time to a formula in 2-XNF, enabling a graph-based approach similar to 2-SAT. Building on this, we propose advanced in- and pre-processing techniques and construct a simple DPLL-based solving framework. Our implementation, 2-Xornado, outperforms modern algebraic and logic solving approaches on many random and some structured cryptographic problems. On the other hand, we apply combined algebraic and logical techniques to cryptanalysis of stream ciphers. We introduce a formal guess-and-determine (GD) framework using a logical abstraction of the information flow in the internal state. From an algebraic point of view, we can then find optimal GD attacks utilizing a Gr{\"o}bner basis. As a case study, we apply this method to aid in the construction of novel fault attacks on the ciphers KCipher-2 and Enocoro-128v2. Using ad hoc methods combining algebraic and logical approaches, we show that both ciphers are vulnerable to active side-channel attacks under rather weak fault models.}, language = {en} } @phdthesis{Fellicious2025, author = {Fellicious, Christofer}, title = {Bridging the gap: Applying machine learning techniques in digital forensics}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-18473}, school = {Universit{\"a}t Passau}, pages = {112 Seiten}, year = {2025}, abstract = {With the increasing adoption of virtualization technologies across various industries, virtual machines (VMs) offer cost-effective solutions for obtaining computing power without the burden of initial investment or ongoing maintenance. However, the widespread use of VMs also increases the risk of malicious actors attempting to gain unauthorized access due to the possibility of accessing the VMs via standard internet protocols. Virtual Machine Introspection (VMI) and Forensic Memory Analysis (FMA) are two key cybersecurity methods for addressing these threats. While FMA leverages digital forensic techniques to extract and analyse information from system memory to explain security incidents, VMI typically works with live systems, analysing running processes to detect real-time threats. Both approaches face a significant challenge known as the "semantic gap," which arises from the need to infer high-level system information from low-level data such as physical memory and CPU registers. This dissertation explores using machine learning to bridge the semantic gap in FMA and VMI applications. The research uses OpenSSH process heap dumps as a use case to extract high-level structures, such as OpenSSH encryption keys, from raw process memory dumps. The study employs various techniques to isolate relevant memory sections, from basic memory chunking and entropy analysis to more advanced methods utilizing pointers and malloc headers. During this research, we also identified the need for a foundation model in memory forensics. Foundation models are general-purpose models trained on large amounts of data, which users can later adapt to different tasks through fine-tuning. This research also addresses the challenge of detecting malware by analysing system-level API calls and employing custom feature engineering techniques.
Given that the threat landscape is constantly evolving, we also investigate concept drift, a phenomenon where changes in the input data distribution degrade the performance of predictive models. To mitigate the degradation in performance due to concept drift, we introduce a concept drift detection algorithm complemented by a custom sampling method that optimizes training data selection. This approach reduces the training dataset size by one-third, enhancing the efficiency of model training while maintaining high performance.}, language = {en} } @phdthesis{Zerhoudi2025, author = {Zerhoudi, Saber}, title = {User Simulation in Interactive Information Retrieval: methods and frameworks for simulating complex search behavior}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-18936}, school = {Universit{\"a}t Passau}, pages = {xii, 196 Seiten}, year = {2025}, abstract = {Modern information retrieval (IR) systems, including web search engines and digital libraries, face challenges in simulating realistic user search behavior. Evolving interaction patterns and the integration of AI-powered interfaces make these challenges even harder. Traditional evaluation methods struggle to capture the dynamic nature of user interactions, particularly in complex search tasks and multi-stage information-seeking processes. User simulation offers a promising solution, providing a controlled environment for experimentation and allowing customization to model specific user behaviors and task contexts. This research develops advanced techniques for user simulation in IR, creating more realistic and dynamic models than were previously possible. Key contributions include new methods for representing query reformulation, modeling how information needs change, and measuring the impact of different search environments on simulated user behavior. Specifically, this work introduces contextual Markov models, cognitive state models, and embedding space alignment techniques to accurately represent interactive search behavior. Beyond model development, new evaluation methods and metrics are proposed for assessing the quality of simulated search sessions. These include statistical comparisons of session characteristics and classification-based approaches to distinguish between simulated and real user behavior. Additionally, this work leverages emerging technologies, such as large language models (LLMs) and retrieval-augmented generation, to improve the realism of user search behavior simulation. The practical outcome of this research is a modular and extensible simulation framework. This framework incorporates techniques such as user type-specific Markov models, advanced query generation using LLMs, and conversational user models.}, language = {en} } @phdthesis{Klement2025, author = {Klement, Felix}, title = {Strengthening Security Foundations in Next-G Wireless Telecommunication Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-18669}, school = {Universit{\"a}t Passau}, pages = {XV, 161 Seiten}, year = {2025}, abstract = {Digital transformation fundamentally impacts our everyday lives and creates significant efficiency gains in the economy. At the same time, however, it is also increasing the complexity of wireless networks, particularly in the case of the sixth generation (6G) mobile communications standard. The use of different technologies in these networks poses a growing challenge and increases the risk of security threats and vulnerabilities.
Wireless communication networks are particularly vulnerable to cyber attacks as they are an integral part of critical infrastructures. Robust methods must, therefore, be developed to overcome these challenges. Consequently, this dissertation is focused on the security fundamentals of next-generation (Next-G) wireless telecommunication systems. It addresses the security challenges resulting from emerging and innovative concepts within these systems. We use case studies for in-depth investigation and analysis, with a particular focus on the Open Radio Access Network (O-RAN) approach. The aim of this concept is to enable open and interoperable network architectures, in contrast to traditional RAN systems, which are often proprietary and manufacturer-specific. In this respect, it is essential to address fundamental questions, like the efficient assessment of the threat landscape, the impact of potential attacks, and the mechanisms that can be used to ensure security at the system level. Initially, an empirical approach is developed to analyze threats within telecommunications systems, such as the O-RAN. The procedure we have developed enables automated, programmatically executable vulnerability management. This methodology is further enhanced by integrating Natural Language Processing (NLP), leading to the creation of a fully automated, iteratively executable framework for security analysis within O-RANs. The framework allows for the direct incorporation of our methods into the deployment process, facilitating rapid and efficient comparison of all components against the latest security vulnerabilities. The current approach of fully deploying all components in virtualized environments, such as cloud infrastructures, introduces new and unprecedented security challenges. In response, we investigate current deployment strategies within the O-RAN infrastructure and establish best practices to mitigate these security issues. In the course of the dissertation, we identify security vulnerabilities in wireless telecommunication systems by executing different attack scenarios. We present a detailed procedure for carrying out the attacks as well as effective methodologies for detecting or avoiding the vulnerabilities we have identified. In the first study, we analyze the security of a key component, the Near-Real-Time RAN Intelligent Controller (Near-RT RIC), within O-RAN. We show how a subscription Denial of Service (DoS) attack can render current implementations of this component unusable. In the second analysis, we investigate the robustness of new standards in wireless networks against jamming attacks using the open-source connectivity standard Matter. The final section of this dissertation explores innovative security research approaches for enhancing the system security of future communication systems. Initially, a novel concept is introduced that facilitates the comprehensive and efficient management and assurance of security within O-RAN systems through the use of Security Platforms (SPs). Furthermore, two developed methodologies for this approach are presented: firstly, a method for programmatically analyzing eXtended Applications (xApps) to identify vulnerabilities, and secondly, an approach for conducting comparative assessments of these vulnerabilities. Additionally, a strategy is proposed to ensure that only secure xApps, such as those that have been pre-tested, are deployed for use in Near-RT RIC. 
Overall, this dissertation makes an important contribution to research on security principles for next-generation wireless telecommunication systems. With the help of our approaches for a better and more concrete assessment of threats in such networks, we directly contribute to a clearer and more manageable picture of the vulnerability landscape. Our two publications on vulnerability research also provide valuable insights for addressing future security problems in the respective areas. In summary, our approaches to system security, through which security principles can be implemented and integrated into modern system designs such as O-RAN, contribute to ensuring a secure transition to 6G.}, language = {en} } @phdthesis{Patil2025, author = {Patil, Amit Dilip}, title = {Towards Resilient Protection of Interconnected ICT and Power Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-16088}, school = {Universit{\"a}t Passau}, pages = {xxii, 173 Seiten}, year = {2025}, abstract = {Due to the increasing number of distributed renewable energy sources, the distribution grid faces new operational challenges. Information and Communication Technology (ICT) systems can resolve these challenges through grid services that use automation, monitoring, and real-time decision-making, helping maintain an acceptable operational state of the distribution grid. However, the reliance of the power system on the ICT system and vice versa in the so-called smart grid creates interdependencies between the systems, which present new pathways for failure propagation. Therefore, these interdependencies require special attention to ensure stable system operation in the face of these challenges. Yet these interdependencies have not been studied extensively in the literature. This thesis investigates approaches to model, quantify and improve the performance and resilience of the smart grid infrastructure. The interdependencies are formalised as interconnectors, entities that exist in all the connected systems. These interconnectors consist of components from both systems, where the components are modelled as state variables. These state variables determine the interconnector state and the service delivered. Failures represented by a change in state variables may impact the state. These state variables are deployed in a discrete event simulation framework to determine the system performance over time. The simulation result is represented on a two-dimensional state-space diagram depicting the operational state and service delivered. This allows for the resilience analysis of a system under various scenarios. The interdependencies are further investigated by exploring the role of ICT-based grid services in power grids, whose state is defined based on ICT properties, such as latency. These properties are formalised using property graphs. The ICT properties obtained from these graphs are used to parameterise a finite state automaton model of a grid service's states, which are then used to determine the state of the entire smart grid. Case studies of state estimation and adaptive protection highlight the application of this approach. The use of ICT in state estimation allows the distinction between a global and a perceived view of the power grid, which influences decision-making in the face of challenges. This thesis further investigates the protection system in detail. The overcurrent protection system is adversely impacted by distributed generation, resulting in undesired phenomena such as protection blinding.
This thesis characterises this phenomenon by proposing two indices that capture the protection trip time under the influence of distributed generation. These indices consider the electrical distance between faults, protection devices, and distributed generation. These indices and simulation results identify the worst-impacted locations in the power grid in terms of protection trip time. They also identify, under given assumptions, fault locations that do not cause protection blinding. ICT can resolve protection blinding by adapting the sensitivity of protection relays. However, since faults must be cleared in a short timeframe, communication delays may adversely impact the fault-clearing time. A discrete event simulation model is proposed to study protection performance in distribution grids. Investigation of time distribution assumptions reveals that the lognormal distribution accurately captures the circuit breaker trip time. The impact of the distributed generation and communication delay on the protection system is determined by measuring fault-clearing times using discrete event simulation. Results show that for the system studied, protection blinding is critical for low impedance faults in grids with high fault levels, while high impedance faults are critical in grids with low fault levels. Moreover, sympathetic tripping is seen at increased distribution grid fault levels and fault impedance. Furthermore, while communication systems reduce fault-clearing times, increased delays harm protection systems. Finally, communication system components like sensors can fail, preventing fault detection. This thesis proposes a genetic algorithm-based approach to optimally place redundant sensors, minimising protection blinding under communication uncertainty within a redundancy budget. Results demonstrate the algorithm's effectiveness in optimising redundant sensor locations, reducing system costs, and improving fault tolerance. For the system and scenarios investigated, an average of 60\% of the redundant sensors are relocated, reducing the average protection trip time by 36.65\% compared to a baseline approach that does not consider communication uncertainty. This encourages incorporating communication component failure considerations in power system planning.}, language = {en} } @phdthesis{Julka2025, author = {Julka, Sahib}, title = {Towards Data Efficiency and Controllable Representations for Deep Learning in Resource-Constrained Domains}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-16030}, school = {Universit{\"a}t Passau}, pages = {20 ungez{\"a}hlte Seiten, 131 Seiten, 17 ungez{\"a}hlte Seiten}, year = {2025}, abstract = {The deployment of artificial intelligence (AI) in specialised domains such as planetary science and healthcare, as well as in low-resource NLP settings, faces two fundamental challenges: label scarcity and data scarcity. Label scarcity stems from the high cost of expert annotation, the scarcity of domain experts, and the infeasibility of crowdsourcing, particularly in complex tasks requiring specialised knowledge. In parallel, data scarcity stems from the inherent difficulty of acquiring sufficient raw data, whether due to limited observational opportunities, environmental and technical barriers, or stringent privacy constraints. Together, these limitations impede the broader adoption of AI in these fields.
Many existing approaches to label efficiency, such as active learning, rely on problem-specific heuristics and often, as a design choice, employ naive uncertainty estimates, typically at the instance level. However, such methods can lead to redundant or suboptimal sample selection by ignoring structural data properties and failing to account for representational diversity. In practice, they often perform no better than random sampling. For data synthesis, generative models face their own set of challenges. Despite their promise for synthetic data generation, these models frequently lack mechanisms to disentangle generative factors at the representation level, limiting their controllability. Additionally, standardised evaluation metrics to assess the quality of disentanglement remain underdeveloped, limiting their practical utility. These limitations highlight the need for advancements in data-efficient machine learning and controllable generative modelling, focusing on domain-specific validity and rigorous evaluation. This thesis contributes to addressing these challenges by proposing tailored solutions in two key directions. First, for data-efficient learning, a deep active learning (DAL) framework is introduced to enhance label efficiency by prioritising the most informative samples for annotation. Unlike traditional per-sample approaches, this framework aggregates uncertainty across larger data segments, such as orbital intervals in planetary science, allowing it to capture contextual variations. This method reduces labelled data requirements by up to 90\% in the case of boundary crossing detection at Mercury's magnetosphere. To further improve sampling diversity, a GAN-based concept drift detection method is integrated into the DAL framework, leveraging uncertainty and diversity together to offer a sampling method that outperforms random sampling. Additionally, foundation models such as the Segment Anything Model (SAM) are employed for zero-shot annotation to generate high-quality pseudo-labels, which are subsequently used to train a domain-specific model via knowledge distillation. This approach significantly enhances data efficiency, reducing the need for annotated samples several times over in the tested scenario of image segmentation for geological mapping. Furthermore, large language models (LLMs) are explored as active annotators for linguistic tasks in low-resource languages, achieving near-baseline performance while reducing annotation costs by up to 40x. Second, the thesis investigates methods to induce controllability in generative models, enabling the production of high-fidelity, controllable synthetic data. Conditional generative adversarial networks (CGANs) and disentangled representation learning (DRL) techniques are explored, particularly in the context of pedestrian trajectory prediction in the mobility domain, where controlled synthesis of diverse motion patterns is critical. Additionally, the work examines existing metrics for evaluating disentanglement and identifies critical limitations in them. A novel metric, the Exclusivity Disentanglement Index (EDI), is proposed as an improved standardised measure. Based on the principle of exclusivity in factor-code relationships, this metric offers advantages over existing alternatives in terms of efficiency and robustness. By advancing data-efficient learning and controllable generation strategies, this thesis aims to bridge the gap between AI's vast potential and its practical adoption in resource-constrained environments.
These contributions pave the way for transformative applications in planetary science, healthcare, and beyond, where label and data scarcity have long been barriers to progress.}, language = {en} } @phdthesis{Wilhelm2025, author = {Wilhelm, Sebastian}, title = {Emergency Detection in Private Households Utilizing Existing Data Sources for Human Activity Event Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15992}, school = {Universit{\"a}t Passau}, pages = {xvii, 156 Seiten}, year = {2025}, abstract = {In an aging society, the need for efficient emergency detection systems in smart homes is becoming increasingly important. Over 30\% of those aged 65 and older experience at least one fall per year, often resulting in the inability to rise without assistance, leading to 'long lies' lasting hours or even days. Systems for detecting such emergency events usually rely on wearable sensors or specific installations of ambient sensors, which can be intrusive and complex, hindering acceptance. This thesis proposes a novel approach that utilizes existing digital data sources within the residential infrastructure to detect human activities and identify potential emergencies. A survey identifies 44 potential data sources in private households for recognizing human activity. However, extracting activity information often requires complex preprocessing. In this thesis, methodologies are developed for three of these data sources to highlight practical applications: Smart Power Meters, Smart Water Meters, and Home Weather Stations. It is shown that detecting human activities using these sources is feasible in a practical environment, although accuracy and reliability vary. Notably, Smart Water Meters demonstrate high reliability, with a precision of 0.86 and a recall of 1.00, making them particularly suitable for emergency detection. Existing emergency detection methods are not designed to handle uncertain activity data. This thesis introduces a novel approach based on probabilistic activity information, employing an Inactivity Score that provides a probabilistic weighting of inactivity periods based on the reliability of sensor measurements. By analyzing historical Inactivity Scores, anomalies that potentially represent an emergency can be identified. Evaluations across seven datasets show this approach outperforms existing methods, achieving a mean time to detect emergencies of approximately 05:23:28 hours and producing 0.09 false positives per day under noise-free conditions. Moreover, unlike related approaches, the proposed method remains effective with noisy data. This thesis demonstrates that emergencies in private households can be detected using existing data sources from the home infrastructure, offering a cost-effective and non-intrusive solution to enhance the safety and autonomy of the elderly at home.}, language = {en} } @phdthesis{Hasenpflug2025, author = {Hasenpflug, Mareike}, title = {Slice sampling on Riemannian manifolds}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15903}, school = {Universit{\"a}t Passau}, pages = {iii, 118 Seiten}, year = {2025}, abstract = {This thesis is concerned with hybrid slice samplers for approximate sampling of distributions on Riemannian manifolds. First for distributions on the Euclidean unit sphere and then for distributions on general Riemannian manifolds, we introduce a geodesic-based hybrid slice sampler, called the geodesic slice sampler.
Under mild regularity assumptions, we establish reversibility with respect to the target distribution for this sampler and positive semi-definiteness of the corresponding operator. Moreover, on compact Riemannian manifolds we show uniform ergodicity with explicit constants for the geodesic slice sampler if the target distribution has a bounded density with respect to the Riemannian measure. As an important building block of this sampler, we provide an explicit expression for the shrinkage procedure proposed in (Neal, 2003) in terms of a Markov kernel. We establish that this kernel is reversible with respect to the uniform distribution on the target set and that its corresponding operator is positive semi-definite. Beyond the geodesic slice sampler, we also apply these results to elliptical slice sampling (Murray, Adams, MacKay, 2010) to obtain a proof for its reversibility with respect to the target distribution and positive semi-definiteness of the corresponding operator.}, language = {en} } @phdthesis{Prakash2025, author = {Prakash, Jyoti}, title = {Static Analyses of Interlanguage Interoperations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15736}, school = {Universit{\"a}t Passau}, pages = {ix, 126 Seiten}, year = {2025}, abstract = {Software developers are moving towards multilingual development, combining two languages in a single application to harness the strengths of each language. For example, performance-critical components of a Java application can be implemented in C. This provides flexibility; at the same time, it makes such applications difficult to analyze statically. The amalgamation of two languages in a single application may introduce bugs ranging from type mismatches to security vulnerabilities. Therefore, it is necessary to develop static analysis techniques to aid developers in multilingual development. In this thesis, we develop techniques to study and analyze these applications. In the first part of the thesis, we study the prevalence of security and privacy vulnerabilities in hybrid apps. Hybrid apps are Android apps that combine both Java and JavaScript components, where the Android part is secured (on the device), while the JavaScript part is exposed to the web. Additionally, some of the Java functions are available to the JavaScript component through an interface called the bridge interface. In pursuit of this goal, we adopt static backtracking of data dependencies to determine the flow of information from the Android component to the web component. Our study revealed the potential sources of unsoundness in the existing static analyses. Static backtracking also induces imprecision in the analysis, i.e., it may report flows that are not possible at runtime. These were mitigated through manual verification. This work reveals that Android-web hybridization can lead to (potential) vulnerabilities that might impact the confidentiality as well as the integrity properties of these apps. From the communication patterns occurring in Android WebView, we noticed that it is feasible for an attacker to jeopardize the integrity of apps by corrupting some value, say an input on the web, through bridge interfaces. Motivated by this, we define an information flow analysis of the bridge interfaces and the associated data flows in hybrid apps.
In the first step, we propose a novel threat model where we model the attacker as someone who wants to influence the behavior of the Android app, i.e., as an integrity violation. Based on this threat model, we then propose a demand-driven analysis technique to detect confidentiality and integrity violations. Our analysis leverages a demand-driven technique that analyzes only the relevant parts of the app for the information flow analysis with the help of function summaries, escaping the need for a whole-program analysis. In the second part of the thesis, we generalize the approach to static analysis of multilingual applications. To this end, we investigate the question of combining existing single-language analyses to analyze multilingual programs. To provide an affirmative answer, we define an analysis that leverages single-language analyses for call-graph and pointer analysis of multilingual programs. Our analysis takes two existing unilingual analyses and analyzes the complete multilingual program. It uses a novel summary specialization technique that resolves the information flows at the bridge interfaces by utilizing independent pre-analyses (modulo foreign function interfaces) of each language component. We apply this technique to analyze Android-NDK and GraalVM Java-Python multilingual applications for generating call-graphs. In summary, we have developed novel techniques for information flow and call-graph analysis for multilingual programs. With this, we motivate the need for static analyses of multilingual applications and their uses, which include vulnerability detection and program understanding, amongst others.}, language = {en} } @phdthesis{Soller2025, author = {Soller, Sebastian}, title = {Anomaly Detection and Forecasting Techniques and their Applications Scenarios, Challenges and Limits in Industrial Production Settings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15659}, school = {Universit{\"a}t Passau}, pages = {vi, 136 Seiten}, year = {2025}, abstract = {What needs to be done to get machine learning and artificial intelligence from the lab to the shop floor? This work and its affiliated publications focus on challenges and solutions in applying machine learning inside industrial setups and on what steps are needed to improve those setups. In industrial setups it is easy to run into a "chicken-and-egg" problem: ideally, the information about which data to gather is available beforehand, but it is not when studying new setups and machines. In this work, setups and concepts are created to dynamically connect to a network and start gathering data from available endpoints inside a manufacturing setup. The data streams of these endpoints are then examined to give an initial analysis of the data and to guide further processing. To further analyze these data streams with current advances in machine learning and AI, plug-and-play solutions are offered by manufacturers and scientific research. Limits of this plug-and-play capability are determined, and solutions are provided to improve upon the base approaches. The applicability of commonly used methods was analyzed and initially yielded insufficient results. In the sub-fields of anomaly detection, regression analysis, forecasting, and classification, the addition of context information, such as production-specific information and time-dependent analysis, was used to improve the results.
Context information, especially periodic information, was further conceptualized and integrated into the initial data analysis. Difficulties with the correct labeling of ground truth due to differing biases of participants were encountered, and countermeasures were proposed. Results of the classification, regression, forecasting, and context information extraction were investigated for their influence on the human operator. A significant change could be measured in multiple cases, just by providing information about underlying problems and errors. The aforementioned machine learning methods further improved the performance of machine and operator.}, subject = {Anomalieerkennung}, language = {en} } @phdthesis{Fruehwirth2025, author = {Fr{\"u}hwirth, Lorenz}, title = {The Asymptotic Behavior of Birkhoff- and Lacunary Sums}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15677}, school = {Universit{\"a}t Passau}, pages = {109 Seiten}, year = {2025}, abstract = {This doctoral thesis consists of three independently published research articles on the asymptotic behavior of lacunary and Birkhoff sums. The former are sums formed from periodic functions and exponentially growing sequences of natural numbers. The corresponding summands often exhibit behavior typical of independent and identically distributed random variables. The methods used are of an analytical and probabilistic nature. The Birkhoff sums considered in this work are generated by the Kronecker sequence and by discontinuous functions. The methods employed are from the field of metric number theory; specifically, classical results from continued fraction theory are utilized.}, language = {en} } @phdthesis{Gruber2025, author = {Gruber, Martin}, title = {Tackling Test Flakiness: Understanding the Problem and Providing Practical Mitigations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15549}, school = {Universit{\"a}t Passau}, pages = {127 Seiten}, year = {2025}, abstract = {"Software is eating the world". With this phrase from his 2011 Wall Street Journal interview, Marc Andreessen predicted a decade of disruptive software-based innovations affecting various industries. Today, over ten years later, many of his predictions have come true: six of the seven most valuable companies worldwide are computer technology firms, and more than half of the world's population has access to the internet and owns a smartphone, with numbers still growing rapidly. The increasing importance of software has also changed software development. To ensure product quality despite high complexity and fast product cycles, software developers started to adopt continuous integration and regression testing practices: each change to an existing system is automatically tested and reverted in case it breaks any existing functionality. As a result, large software projects are conducting millions of test executions each day. One obstacle to such extensive testing is non-deterministic tests that can pass and fail without any changes to the underlying system or the test itself. These tests are commonly referred to as flaky tests. Flaky tests break regression testing, as they cause test failures that are unrelated to the changes that are being tested. Developers are forced to investigate these intermittent failures, wasting their time and decreasing their trust in testing. This thesis presents our research that aims at understanding and mitigating test flakiness. 
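A fabricated toy example of the asynchronous-waiting flakiness that the following findings identify as one of the most prevalent causes: the first test passes or fails depending on scheduling, because it waits a fixed time instead of synchronizing on the result; the second variant is the stable fix.

```python
import threading
import time

def fetch_data(result):
    # Simulates an asynchronous operation with variable latency.
    time.sleep(0.09 + 0.02 * (time.time_ns() % 2))  # sleeps 0.09 s or 0.11 s
    result["value"] = 42

def test_fetch_data_flaky():
    result = {}
    threading.Thread(target=fetch_data, args=(result,)).start()
    time.sleep(0.1)              # fixed wait is sometimes too short -> flaky
    assert result.get("value") == 42

def test_fetch_data_stable():
    result = {}
    t = threading.Thread(target=fetch_data, args=(result,))
    t.start()
    t.join(timeout=5)            # synchronize on completion instead of sleeping
    assert result.get("value") == 42
```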
To comprehend the nature of flaky tests, we conducted both code-based studies on open-source projects and a developer survey. All our investigations confirmed that flakiness is a frequently occurring and severe issue. The causes of flakiness, however, depend on the domain of the project and the source of the test: while asynchronous waiting and concurrency are overall the most prevalent causes aside from test order dependencies, Python projects tend to experience more flakiness caused by networking and randomness. Flaky tests that were not written by developers but generated automatically tend to be caused more often by randomness or unspecified behavior. To avoid test flakiness in generated tests, developers can use the existing flakiness suppression mechanisms of test generation frameworks, which we found to be effective. In general, however, most developers currently address the issue of test flakiness by rerunning failing tests. Nevertheless, they would like more support when dealing with test flakiness, namely better visualizations, automated detection and debugging techniques, and education on the topic. In response to this feedback, we developed and evaluated a generic flakiness prediction approach, as well as an automated flakiness debugging technique. Our flakiness prediction method is easy to use and widely applicable. In contrast to previous techniques, it avoids any form of static or dynamic analysis. Instead, it relies solely on a test's execution result history and version control information, two commonly available artifacts. Additionally, it aims to classify real-world failures as either caused by flakiness or by a regression. Previous techniques mainly focused on identifying potential flaky test cases in test suites, a related but less actionable question. An evaluation on a large-scale automotive software project yielded positive results. Our approach showed a strong predictive performance (95.5\% F1-score), outperforming the previously used heuristic. We also introduced Spectrum-based Flaky Fault Localization (SFFL), an automated debugging technique that aims to pinpoint the specific lines in the source code that cause a flaky test's non-deterministic behavior. SFFL extends traditional Spectrum-based Fault Localization (SFL) by considering multiple coverage behaviors of the same test case, a highly common phenomenon among flaky tests. Our evaluation on 101 flaky Python tests showed that SFFL outperforms traditional SFL and was able to narrow down the flaky fault's location to 3.5\% of a project's code base on average.}, subject = {Softwareentwicklung}, language = {en} } @phdthesis{GhoshDastidar2025, author = {Ghosh Dastidar, Kanishka}, title = {Using Context for Credit Card Fraud Detection}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15561}, school = {Universit{\"a}t Passau}, pages = {147 Seiten}, year = {2025}, abstract = {Online payment fraud is one of the biggest challenges accompanying the ubiquitous adoption of digital payment methods. The academic literature shows that providing data-driven models with additional context about a transaction results in significant improvements in fraud detection performance. However, the methods used to generate suitable context representations often rely on human expert knowledge, which is expensive and suffers from several limitations. In this thesis, we propose different methods to automate this process by learning these context representations end-to-end on the fraud detection objective. 
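The following is a minimal sketch of what learning a context representation end-to-end on the fraud objective can look like; this is not the NAG architecture described next, and all dimensions and data are hypothetical. A recurrent encoder summarizes a card's recent transactions, and its parameters are trained jointly with the fraud classifier, so the "aggregate" features are learned rather than hand-crafted:

```python
import torch
import torch.nn as nn

class ContextFraudModel(nn.Module):
    def __init__(self, n_features=8, context_dim=32):
        super().__init__()
        # Encoder over the card's transaction history, trained end-to-end
        # with the classifier on the fraud label.
        self.encoder = nn.GRU(n_features, context_dim, batch_first=True)
        self.classifier = nn.Linear(context_dim + n_features, 1)

    def forward(self, history, current):
        # history: (batch, seq_len, n_features); current: (batch, n_features)
        _, h = self.encoder(history)
        context = h[-1]                      # learned context representation
        return self.classifier(torch.cat([context, current], dim=1)).squeeze(1)

model = ContextFraudModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.BCEWithLogitsLoss()

# Hypothetical toy batch: 16 cards, 20 past transactions each, 8 features.
history = torch.randn(16, 20, 8)
current = torch.randn(16, 8)
labels = torch.randint(0, 2, (16,)).float()

logits = model(history, current)
loss = loss_fn(logits, labels)
loss.backward()                              # gradients flow into the encoder
optimizer.step()
print(float(loss))
```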
Each of these methods is evaluated on millions of real-world transactions from Worldline, our industrial partner. Central to this thesis is our proposal of the Neural Aggregate Generator (NAG), a neural network that learns context representations automatically. The architecture of the NAG is designed to resemble the structure of expert feature aggregates, while also addressing their limitations. Our evaluation of the NAG reveals that it outperforms both approaches that use expert aggregates and other end-to-end methods across several months of testing. A thorough evaluation shows that the NAG improves over other approaches on several key factors, including model size and robustness to shorter sequences. We propose several extensions to the NAG with the dual goals of improved alignment with expert aggregates and improved expressiveness. Our evaluation of these extensions shows comparable performance to the NAG with ancillary benefits in terms of prospective interpretability and model size. We also introduce the novel paradigm of using \lq future' transactions as context. Our analysis of real-world data from Worldline shows that the verification of transactions is often delayed by several days and that within this delay there are often several transactions booked on the card which can be used as additional context. We show that this future context improves the performance of sequence models. Moreover, we also show that a balance between past and future context yields the best results and that using future context allows the use of shorter sequences overall. Beyond context-based fraud detection, we also provide an initial proposal for generating synthetic credit card data using Generative Adversarial Networks (GANs), showing that a Wasserstein GAN can be used to generate synthetic data similar to a popular publicly available credit card fraud dataset. We also describe several possible directions for future work, including the incorporation of an adapted self-attention mechanism into the NAG and the use of transformers for synthetic data generation.}, language = {en} } @phdthesis{Raich2025, author = {Raich, Krispin}, title = {Multimodal Data Space for Cooperative Intelligent Transport Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15496}, school = {Universit{\"a}t Passau}, pages = {xv, 111 Seiten}, year = {2025}, abstract = {Modern Cooperative Intelligent Transport Systems (C-ITSs) are comprehensive applications that must cope with a multitude of challenges while meeting strict service and security standards. One of these challenges is a fast, secure, reliable, and universal way to store and exchange data in such a traffic system. Furthermore, multimodal scenarios, where different types of vehicles (e.g., cars and Unmanned Aerial Systems) interact with each other, are increasingly emerging. To overcome these challenges, this thesis presents a set of key innovations to establish a multimodal-capable data space for transport applications. To this end, a geographic model optimized for multimodal scenarios is presented, called SpatialJSON, that is capable of depicting two- and three-dimensional geometries. To accomplish this feat, SpatialJSON extends the popular GeoJSON format with two new data types: area and corridor. Exchanging, managing, and storing data is handled in a novel data-centric middleware, called Large Scale Multimodal Data Processing Middleware for Intelligent Transport Systems (LDPM). 
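Since SpatialJSON is described as a GeoJSON extension, a hypothetical document with a corridor geometry might look as follows; the exact SpatialJSON syntax is not specified here, so every field name in this sketch is illustrative only. Documents of this shape are the kind of data the middleware would exchange and store:

```python
import json

# Illustrative only: a GeoJSON-style feature extended with a hypothetical
# "Corridor" geometry, i.e., a 3D path with a lateral extent that could
# describe the flight tube of an Unmanned Aerial System.
corridor_feature = {
    "type": "Feature",
    "geometry": {
        "type": "Corridor",                  # hypothetical SpatialJSON type
        "coordinates": [                     # lon, lat, altitude in meters
            [13.4050, 52.5200, 80.0],
            [13.4100, 52.5230, 90.0],
            [13.4170, 52.5260, 90.0],
        ],
        "radius": 15.0,                      # lateral extent around the path
    },
    "properties": {"vehicle": "UAS", "validUntil": "2025-01-01T12:00:00Z"},
}

print(json.dumps(corridor_feature, indent=2))
```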
This LDPM uses cryptographic and trust-based schemas to allow secure data exchange and to provide data quality assessment. Furthermore, a service architecture is introduced that fulfils modern service requirements. Trust management is another essential part of a C-ITS. Hence, a novel schema to describe traffic-related evidence in a multimodal environment is introduced. This schema allows assessing arbitrary traffic-related data. This information is then processed in a specialized and modified Bayesian Inference (BI) function. Subsequently, a comprehensive data-centric trust management method is introduced. Finally, a use case is presented that relies on the aforementioned technologies to collect data in a hazardous environment. This data is then distributed and managed via the LDPM, and finally visualized.}, language = {en} } @phdthesis{Greifenstein2024, author = {Greifenstein, Luisa}, title = {Supporting Primary School Programming Education through Formative Feedback}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15188}, school = {Universit{\"a}t Passau}, pages = {viii, 204 Seiten}, year = {2024}, abstract = {Children are increasingly surrounded by computer science aspects in their everyday life. Primary school education aims at empowering children to participate in and reflect on their environment. Therefore, computer science-related content such as programming is increasingly introduced into primary school curricula. However, this also involves challenges, in particular for teachers, who need to familiarise themselves with the new curriculum. As a result, primary school teachers often struggle to help primary school children with their programming issues. Corrective feedback given during the learning process (i.e., formative feedback) can help by promoting cognitive factors such as content knowledge. This thesis therefore aims to support primary school programming education through formative feedback. In order to shed light on different perspectives, both teachers and children participated in the studies in a mixed-methods design. Teachers' challenges and children's programming issues were explored as a basis for knowing what both target groups struggle with. This was done by conducting content analysis on the challenges and issues collected and using the resulting categories for further quantitative analysis. The effects of different characteristics of feedback on the effectiveness and efficiency of teaching and learning programming were then explored. This was done by asking the teachers and children to explain their ratings and conducting content analysis on their explanations. The support of primary school programming education through formative feedback builds on the major challenge of teachers' lack of content knowledge and the corresponding strategies of teacher training and automated feedback. Indeed, automated feedback was found to be mostly helpful for debugging and task creation. In order to provide direct support to children, common programming issues were identified in terms of the understanding of programming concepts and the usage of the programming environment. Effects on children's learning and their preferences were identified for several feedback characteristics, leading, for example, to elaborated hints instead of simple direct instructions. Based on these results, a formative feedback approach of hint cards was developed and evaluated. 
The hint cards approach proved to be a useful example strategy for supporting primary school programming education through formative feedback.}, subject = {R{\"u}ckmeldung}, language = {en} } @phdthesis{Stumpf2024, author = {Stumpf, Peter Frederik}, title = {Partial Representation Extension and Simultaneous Representation of Intersection Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15201}, school = {Universit{\"a}t Passau}, pages = {IX, 208 Seiten}, year = {2024}, abstract = {Many real-world problems can be modeled with geometric intersection graphs. A (geometric) intersection representation of a graph G=(V,E) is a family {R_v}_{v\in V} of geometric objects such that two geometric objects R_u, R_v intersect if and only if the corresponding vertices u, v are adjacent in G. The most prominent class of intersection graphs are interval graphs, which have representations consisting only of intervals on the real line. Interval graphs have applications in genetics, scheduling, archaeology, and many more fields. The recognition problem asks whether a given graph belongs to a certain graph class. Two natural generalizations of the recognition problem are the partial representation extension problem and the simultaneous representation problem. In the partial representation extension problem, one is given a graph G and a partial representation, i.e., a representation of a subgraph of G. The question then is whether the partial representation can be extended to the whole graph G without changing the given partial representation. In the simultaneous representation problem, one is given multiple graphs G_1,...,G_k that can have shared parts, and the question is whether there are representations of all input graphs such that shared vertices are represented by the same geometric objects. Often the sunflower case is considered, where the shared part of any two input graphs is the same. We determine the complexity of the partial representation extension problem and the simultaneous representation problem, especially in the sunflower case, for various intersection graph classes, and we also improve known running times for several of these classes. In particular, we show that the partial representation extension problem for circular-arc graphs is NP-complete and that the simultaneous representation problem for interval graphs can be solved in linear time in the sunflower case, answering open questions from 2014 and 2010.}, language = {en} } @phdthesis{Hofstadler2024, author = {Hofstadler, Julian}, title = {Qualitative and quantitative convergence results for randomised integration methods}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15196}, school = {Universit{\"a}t Passau}, pages = {III, 80 Seiten}, year = {2024}, abstract = {In this thesis, different randomised integration methods based on either randomised Quasi-Monte Carlo or (adaptive) Markov chain Monte Carlo methods are studied. Depending on the underlying integration problem, we show qualitative and quantitative results, which ensure the asymptotic correctness of an algorithm or provide explicit error bounds. The first problem we consider is Lebesgue integration in the unit cube. We prove that a class of structured randomised integration methods is consistent w.r.t. convergence in mean and in probability for any integrable function. Under slightly stronger integrability conditions, we show that one also has almost sure convergence for median-modified methods. 
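To make the randomised Quasi-Monte Carlo setting concrete, the sketch below (an illustration, not one of the thesis's algorithms) combines a randomly shifted rank-1 lattice rule with a median modification: the final estimate is the median over independent random shifts, which is more robust to occasional bad shifts than the mean. The generating vector is an arbitrary choice, not an optimized one.

```python
import numpy as np

def shifted_lattice_estimate(f, z, n, shift):
    """Rank-1 lattice rule with a random shift: x_i = frac(i*z/n + shift)."""
    i = np.arange(n)[:, None]
    x = (i * z[None, :] / n + shift[None, :]) % 1.0
    return f(x).mean()

def median_qmc(f, d=2, n=2**10, replications=11, seed=0):
    rng = np.random.default_rng(seed)
    z = np.array([1, 182667])[:d]        # arbitrary generating vector (illustrative)
    estimates = [
        shifted_lattice_estimate(f, z, n, rng.uniform(size=d))
        for _ in range(replications)
    ]
    return np.median(estimates)          # median over independent randomisations

# Example: integrate f(x, y) = exp(x + y) over [0,1]^2; exact value is (e - 1)^2.
f = lambda x: np.exp(x.sum(axis=1))
print(median_qmc(f), (np.e - 1) ** 2)
```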
We demonstrate the applicability of our theoretical results by considering randomly shifted lattice rules, randomised (t,d)-sequences, Latin hypercube samples, and randomised Frolov points. Secondly, we study integration w.r.t. probability measures which are available only via their non-normalised density. In this context, we investigate Markov chain Monte Carlo methods which satisfy a spectral gap condition and functions which do not need to have a finite second moment. We prove error bounds for the absolute mean error where the rate of convergence is optimal. Illustrative scenarios where our theory is applicable are the random walk Metropolis algorithm as well as slice samplers. Finally, we study so-called adaptive increasingly rare Markov chain Monte Carlo algorithms. Based on a simultaneous Wasserstein contraction assumption, we estimate the mean squared error and also prove bounds which characterise the path-wise convergence of the estimator. To demonstrate the applicability of our results, we consider a number of examples, among which are doubly intractable distributions.}, language = {en} } @phdthesis{Walsh2024, author = {Walsh, Florian}, title = {Computing the Binomial Part of Polynomial Ideals}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15096}, school = {Universit{\"a}t Passau}, pages = {vi, 131 Seiten}, year = {2024}, abstract = {Given an ideal in a polynomial ring over a field, we present a complete algorithm to compute its binomial part.}, language = {en} } @phdthesis{Berger2024, author = {Berger, Christian}, title = {Towards Fast and Adaptive Byzantine State Machine Replication for Planetary-Scale Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15059}, school = {Universit{\"a}t Passau}, pages = {ix, 181 Seiten}, year = {2024}, abstract = {State machine replication (SMR) is a classical approach for building resilient distributed systems. In Byzantine fault-tolerant (BFT) systems, no concrete assumptions are made about the behavior of faulty replicas. With the advancement of distributed ledger technologies (DLT), planetary-scale BFT SMR is becoming practical and necessary, as it can serve as a consensus primitive to keep the ledger consistent. In our view, the alignment of BFT SMR to DLT brings new challenges, for instance scalability, where recent research addresses latency improvements less frequently than throughput improvements. Further challenges include the geographic dispersion of replicas within a planetary-scale system and the need for a BFT SMR protocol to react to environmental changes during runtime. The objective of this thesis is to improve BFT SMR for planetary-scale systems by lowering the protocol latency observed by clients and by making the BFT SMR system adaptive, i.e., enabling replicas to react to perceived changes such as changing network characteristics or faulty replicas. As a first contribution of this thesis, we discover that fast, consensus-free (read-only) operations are a flawed optimization in seminal BFT SMR frameworks, such as PBFT and BFT-SMaRt. We explain how the read-only optimization can violate the protocol's liveness by showing an attack, and then present a solution that makes the overall, optimized protocol both live and linearizable. The second contribution is Adaptive Wide-Area Replication (AWARE), which enables a geo-replicated system to adapt to its environment, thus improving the geographical scalability of consensus if replicas are dispersed across the world. 
Essentially, AWARE is an automated and dynamic voting-weight tuning and leader positioning scheme, which supports the emergence of fast consensus quorums in the system and builds upon previous work, the WHEAT protocol. AWARE combines reliable self-monitoring with a consensus latency prediction model, thus striving to minimize the system's consensus latency at runtime. This subsequently results in latency improvements observed by clients scattered across the globe, as we validate through experiments. The third contribution presents FlashConsensus, a protocol derived from AWARE, which also adjusts the resilience threshold. The core idea is the tentative use of a lower resilience threshold, which leads to smaller consensus quorums and thus consensus acceleration in common-case scenarios where we expect only a few faulty replicas. FlashConsensus achieves threat-level awareness through the incorporation of two modes of operation and BFT forensic support, and guarantees liveness and linearizability under optimal resilience. Moreover, FlashConsensus allows for client-side speculation by using incremental consistency guarantees to further lower request latency. Additionally, we investigate the question of whether we can reason about the performance of large-scale systems utilizing simulations. We discover that we can faithfully forecast the performance of BFT protocols by plugging real protocol implementations into a high-performance network simulator. For instance, simulation results reveal that, using 51 replicas scattered across the planet, FlashConsensus can finalize operations in less than 0.4 s, which is half of the time required for a PBFT-like protocol in the same network and matches the latency of this protocol running on the best possible internet links (transmitting at 67\% of the speed of light).}, language = {en} } @phdthesis{Grassl2024, author = {Graßl, Isabella}, title = {Diversity in Programming Education: Effects of Topic and Group Constellation on Young Programming Novices}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15049}, school = {Universit{\"a}t Passau}, pages = {xi, 256 Seiten}, year = {2024}, abstract = {The field of software engineering faces a significant diversity crisis, characterized by a critical lack of heterogeneity despite ongoing efforts to promote gender equality. The persistent male dominance in this domain has created an urgent need for more heterogeneous groups in software engineering. This lack of diversity not only hinders underrepresented groups from entering the field but also prevents them from gaining initial programming experiences, which are a core component of software engineering and essential for developing computational thinking. To address this crisis and its implications, early interventions are key in shaping positive perceptions, building confidence, and sparking initial interest in programming among underrepresented groups before societal stereotypes of programming as a nerdy field manifest. This means starting with basic programming courses for children and continuing through to first-year university students in order to foster technical skills and computational thinking, alongside creativity and collaboration. However, there is limited understanding of how introductory programming course designs impact diversity-dependent characteristics to create welcoming and learning-friendly environments. 
This understanding is particularly important for underrepresented groups, especially girls, to benefit from their first programming experiences, as they are often hindered by the initial perception of programming as (1) abstract and unappealing, and (2) non-social to novices. Engaging, creative, and relatable topics in programming courses might demystify complex programming concepts, making them more accessible, less intimidating, and appealing. However, understanding programming is not just about the content---it is also about the context in which it is learned. Introducing programming as a social activity is important, particularly for young learners. By emphasizing teamwork, we might encourage collaboration and peer support, counteracting the lone-wolf programmer stereotype. Therefore, this doctoral thesis investigates the effects of both key aspects in programming courses---(1) topic choices and (2) group constellations---on young programming novices. The aim is to provide a holistic understanding of how different course designs can support diverse learners and promote gender equality in programming education. While this research primarily addresses gender diversity due to the persistent gender gap in software engineering, it also examines additional diversity dimensions, including age, ethnicity, prior programming experience, disabilities, and educational background. A total of 13 studies were conducted within this thesis, examining the current state of educational settings and utilizing various introductory programming courses designed for children aged 8 to 18, as well as first-year university students. These studies employed different programming environments, such as Scratch and Sonic Pi, and incorporated a variety of topics and group constellations to observe their effects on student outcomes. By using a mixed-methods design, data were gathered through surveys, observations, and both data-driven and manual code analysis. A particularly noteworthy finding is how children utilize the programming environment to engage with and creatively express topics aligned with their interests, which also mostly align with gender stereotypes and include elements from internet and popular culture as well as socio-cultural narratives. However, gender-sensitive and neutral topic choices enhance engagement, self-efficacy, contribution, code quality, and creative output, while also helping to reduce stereotypical beliefs about programming, particularly among girls. In line with the findings for the course topic, group constellations also influence programming experiences. In particular, introducing pair programming in courses is a promising approach for young learners, but attention must be paid to mitigating socially learned gender-stereotypical behaviours. Another finding indicates that, unlike professional software teams, mixed-diverse student teams often encounter substantial challenges and thus benefit from clear communication guidelines and supportive environments to promote better collaboration. This doctoral thesis concludes with guidelines for designing more effective and inclusive introductory programming courses. 
These recommendations include using gender-sensitive course materials, allowing for creative freedom through topic choices while encouraging the use of advanced programming concepts, promoting collaboration through pair programming while fostering enhanced communication, boosting self-efficacy with quick positive feedback for girls in particular, and providing emotional support for underrepresented groups. By following these guidelines, educators can create more engaging, inclusive, and effective programming courses. This may ultimately promote a more equitable and diverse future generation of professional software developers while also fostering computational thinking, encouraging a broader interest in programming among all young learners.}, subject = {Softwareentwicklung}, language = {en} } @phdthesis{Sentanoe2024, author = {Sentanoe, Stewart}, title = {VMIaaS: Virtual Machine Introspection as a Service}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15027}, school = {Universit{\"a}t Passau}, pages = {vi, 121 Seiten}, year = {2024}, abstract = {In this digital era, communication in the digital world is becoming part of our daily lives. One key technology that is part of this digital transformation is cloud computing. It allows users to have a running system as a virtual machine (VM) in the cloud without owning a physical server. Unfortunately, adversaries can also use those systems to conduct criminal activities. Therefore, developing methods to extract evidence from those systems is also necessary. One way is through digital forensics, and one method for performing digital forensics on a VM is virtual machine introspection (VMI). However, VMI has yet to be made available by any public cloud provider. This thesis addresses this issue by introducing methods for deploying VMI at public cloud providers. Four main challenges have to be solved. Firstly, VMI requires access to the hypervisor, which in practice can access all VMs running on the same server. This leads to security and privacy issues, since customers could introspect each other's VMs. To solve this problem, this thesis introduces KVMIveggur, a versatile access-control mechanism for VMI. It comes with different options that every customer can choose from based on their needs. Secondly, VMI introduces overhead to the running VM. This is because most introspection mechanisms perform data accesses on the monitored VM. Performing data access on a running VM can cause data inconsistency; hence, it is better to pause the VM before executing the data access. However, when the VM pausing frequency is high, it affects the performance of the monitored VM. Current state-of-the-art techniques use caching to reduce the VM pausing frequency. However, they face a problem: the cached data may be outdated compared to the actual data. Therefore, this thesis introduces VMIFresh, a better caching mechanism. We leverage both active and passive tracing mechanisms to ensure high performance and consistency (freshness) of the data. Thirdly, many state-of-the-art VMI libraries and applications run perfectly only on Intel processors because Intel CPUs provide the best hardware support for VMI. However, AMD and ARM processors are becoming more popular in cloud computing. Thus, it is necessary to retrofit VMI capabilities to support AMD and ARM processors. This thesis describes the requirements to employ VMI on AMD and ARM processors. We also provide the implementation of those requirements. 
Finally, to do introspection using VMI, it is crucial to have proper symbol information (layout and location of data structures) of the introspected operating system (OS) and user applications. While many existing VMI approaches concentrate primarily on analyzing OS data structures, analyzing user application data often receives no attention. In our approach, we address this gap by focussing on application-level introspection. We have identified several use cases that require this kind of introspection. We focus on cryptographic key extraction for two specific instances, secure shell (SSH) and transport layer security (TLS), by leveraging the power of machine learning techniques to locate those keys in main memory effectively and efficiently. After solving those challenges, we combined several of our approaches and introduced two VMI applications: Sarracenia and VMIGuard. Sarracenia is a deception technology that tracks activities performed in an SSH session. The main goal of Sarracenia is to attract adversaries away from the production system and to learn about their behavior. VMIGuard, on the other hand, also monitors SSH traffic, but specifically tracks any Git-related activity. The main goal of VMIGuard is to ensure the integrity of the hosted data against any internal malicious actor.}, subject = {Cloud Computing}, language = {en} } @phdthesis{Stier2024, author = {Stier, Julian}, title = {Structure of Artificial Neural Networks : Empirical Investigations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14968}, school = {Universit{\"a}t Passau}, pages = {xxvi, 350 Seiten}, year = {2024}, abstract = {Within one decade, Deep Learning overtook the dominating solution methods of countless problems of artificial intelligence. "Deep" refers to the deep architectures with operations in manifolds of which there are no immediate observations. For these deep architectures some kind of structure is pre-defined -- but what is this structure? With a formal definition for structures of neural networks, neural architecture search problems and solution methods can be formulated under a common framework. Both practical and theoretical questions arise from closing the gap between applied neural architecture search and learning theory. Does structure make a difference, or can it be chosen arbitrarily? This work is concerned with deep structures of artificial neural networks and examines automatic construction methods under empirical principles to shed light on the so-called ``black-box models''. Our contributions include a formulation of graph-induced neural networks that is used to pose optimisation problems for neural architecture. We analyse structural properties for different neural network objectives such as correctness, robustness, or energy consumption and discuss how structure affects them. Selected automation methods for neural architecture optimisation problems are discussed and empirically analysed. With the insights gained from formalising graph-induced neural networks, analysing structural properties, and comparing the applicability of neural architecture search methods qualitatively and quantitatively, we advance these methods in two ways. 
First, new predictive models are presented for replacing computationally expensive evaluation schemes, and second, new generative models for informed sampling during neural architecture search are analysed and discussed.}, language = {en} } @phdthesis{Juhos2024, author = {Juhos, Michael}, title = {Probabilistic and geometric aspects of classical and non-commutative lp-type spaces in high dimensions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14857}, school = {Universit{\"a}t Passau}, pages = {viii, 157 Seiten}, year = {2024}, abstract = {This cumulative dissertation contains selected contributions to the field of asymptotic geometric analysis and high-dimensional probability. It is divided into two chapters: Chapter 1 explains some of the necessary theoretical background. In Section 1.1 it first gives a very concise history of asymptotic geometric analysis in general and then of the objects under study in particular, setting out some cornerstones in the discovery of the functional-analytic, geometric, and probabilistic properties of the spaces under consideration. The next section (1.2) gives the precise definitions and very basic properties of the three lp-type spaces that play a role in the contributed articles: the classical lp-sequence spaces, the mixed-norm sequence spaces, and the Schatten classes Sp, each in its infinite- and finite-dimensional version. Section 1.3 is dedicated to the interplay between geometry and probability, expounding the general idea, introducing a few of the common tools, and exemplifying these on two kinds of limit theorems: Schechtman-Schmuckenschl{\"a}ger-type results and Poincar{\´e}-Maxwell-Borel lemmas. The first chapter concludes with Section 1.4, addressing a small sample of open questions pertaining to the contributed articles which are not answered in said articles and may be of interest for future research. The entirety of Chapter 2 consists of the contributed articles.}, language = {en} } @phdthesis{Auer2024, author = {Auer, Michael}, title = {Improving Automated Android Test Generation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14955}, school = {Universit{\"a}t Passau}, pages = {x, 140 Seiten}, year = {2024}, abstract = {Mobile apps are nowadays the preferred means of accomplishing ubiquitous tasks like messaging, e-commerce, and even playing games. Often, multiple apps exist for the same purpose, and it is the end user's choice to pick an appropriate one. Apps that behave unexpectedly, e.g., crash frequently, are sooner or later replaced, which is undesirable for the companies developing such apps. Thus, it is essential to test apps properly before they are released onto the market. However, manual testing is often not only too cost-intensive but also too time-consuming for the short development phase; thus, an automated solution is preferred. Testing mobile apps automatically has received increased attention in the last decade, primarily from academia, and several testing techniques have evolved. One technique that has yielded promising results across different domains is search-based software testing, in which a metaheuristic, e.g., a genetic algorithm, is applied to solve an optimisation problem, e.g., test generation. A main objective of test generation is to produce tests that reveal as many faults as possible. This in turn requires the generation of tests that deeply explore the tested app. 
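As a minimal sketch of how a genetic algorithm can drive test generation (purely illustrative; the action set, the encoding, and the fitness stand-in below are assumptions, not the thesis's implementation), tests are encoded as action sequences and evolved towards higher coverage:

```python
import random

random.seed(0)
ACTIONS = ["tap_ok", "tap_menu", "swipe", "back", "type_text"]

def run_and_measure_coverage(test):
    # Stand-in fitness: a real setup would execute the action sequence on
    # the app under test and return the measured code coverage.
    target = ["tap_menu", "type_text", "tap_ok"]
    hits = sum(a == b for a, b in zip(test, target))
    return hits / len(target)

def mutate(test):
    t = list(test)
    t[random.randrange(len(t))] = random.choice(ACTIONS)
    return t

def crossover(a, b):
    cut = random.randrange(1, len(a))
    return a[:cut] + b[cut:]

population = [[random.choice(ACTIONS) for _ in range(3)] for _ in range(20)]
for generation in range(30):
    population.sort(key=run_and_measure_coverage, reverse=True)
    parents = population[:10]                    # truncation selection
    offspring = [mutate(crossover(random.choice(parents), random.choice(parents)))
                 for _ in range(10)]
    population = parents + offspring

best = max(population, key=run_and_measure_coverage)
print(best, run_and_measure_coverage(best))
```

The expensive part in practice is the fitness evaluation, since each candidate test has to be executed on a device or emulator; this is exactly what motivates the surrogate models and alternative search strategies discussed below.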
The core metric for quantifying how much code tests cover is code coverage, which can be computed at different levels of granularity, ranging from determining the fraction of covered activities to a very fine-grained measurement that calculates the percentage of covered lines. This coverage information is then often used to guide the search of the employed metaheuristic. However, current automated test generation approaches produce tests with rather low code coverage. Thus, a substantial part of tested apps remains unexplored, which in turn fails to reveal deeply residing faults. We identified three core issues that are directly related to the generation of low-coverage tests. First, the applicability of current test generators is often limited. This comprises the fact that current state-of-the-art code coverage tools are incapable of instrumenting a substantial number of apps; consequently, test generators cannot utilise detailed coverage information during exploration. In addition, test generators are often only equipped with a primitive set of actions that are insufficient to simulate system events and complex user inputs. Second, the test execution is extremely time-consuming. This includes, among other things, the overhead associated with executing individual actions, intermediate restart operations, as well as fitness evaluations. Since search-based algorithms require a substantial number of test executions to play out their strengths, the slow test execution impedes the effectiveness of the search. Third, the guidance offered by search-based algorithms is often hampered by applying inadequate fitness functions or by using non-representation-specific variation operators. In this thesis, we address the problem of low-coverage tests in the Android domain by proposing several enhancements for the three identified core issues. Concerning the applicability problem, we provide the implementation of a robust code coverage tool that is capable of measuring coverage at different levels of granularity and requires no access to the source code. We also propose to include actions that can simulate system events as well as complex user inputs. Regarding the performance issue, we suggest the integration of a surrogate model that is capable of predicting the outcome of individual actions or complete tests over time in order to reduce the overall test execution costs. With respect to the lack of guidance offered by traditional search-based algorithms, we suggest alternative search strategies. In the case of a deceptive fitness landscape, we propose using novelty search algorithms. Alternatively, we suggest utilising estimation of distribution algorithms that require no crossover or mutation operators to sample new tests. While all those enhancements had a positive impact on the Android test generation process, the individual empirical studies highlighted that further research is necessary to unleash the full power of the proposed search-based algorithms. In particular, exploring complex user interfaces meaningfully requires more attention, whether by introducing additional actions or by extracting valuable hints to infer reasonable text inputs. 
In addition, the guidance offered by fitness functions is often limited because they are either designed too coarsely or do not accurately reflect the search objectives.}, language = {en} } @phdthesis{Puellen2024, author = {P{\"u}llen, Dominik}, title = {Holistic Security Engineering for Software-Defined Vehicles}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14497}, school = {Universit{\"a}t Passau}, pages = {XXIII, 161 Seiten}, year = {2024}, abstract = {With the increasing use of digital technologies in the automotive sector, the traditional automobile is undergoing a structural transformation, requiring new technologies and enabling innovative mobility concepts. In particular, the ability to drive automatically or even fully autonomously, update control software, and remain connected to the environment allows attackers, in the absence of adequate protection, to infiltrate highly critical vehicle systems and take control. Once not only individual vehicles but entire fleets are dominated by software, cyberattacks could disrupt a significant portion of the infrastructure and expose passengers to substantial risks. This work follows a holistic approach to protecting highly automated Software-Defined Vehicles (SDVs) from cyberattacks by designing and implementing security concepts in the main phases of a vehicle's lifecycle. We use SAE level 4 prototype vehicles to evaluate our proposed techniques. We start with a systematic security requirement analysis using the ISA-62443 standard series, demonstrating how threats can be identified in a collaborative, hierarchical process and how the resulting security risks impact the software and hardware architecture of a self-driving vehicle. We show how this analysis process results in concrete requirements whose consideration reduces the overall security risk to a tolerable level. Subsequently, we develop technical solutions for selected requirements. We begin by securing the CAN and FlexRay legacy protocols, which we foresee being used in specific areas of SDVs during a transitional period despite technological changes. To enable vehicle-wide security management, we address the management and distribution of cryptographic keys within such networks, mainly focusing on resource-constrained devices. We propose using lightweight implicit certificates for deriving cryptographic group keys that can be used in CAN networks. Additionally, we demonstrate how the slot-based frame structure of the FlexRay protocol allows for efficient "multi-slot" authentication, for which we calculate cryptographic keys using hash-based key chains. SDVs use Ethernet-based communication protocols and custom middleware stacks to transmit large amounts of data in real time. We develop a three-stage security process for the novel ASOA, which enables the development and central orchestration of system-agnostic functional software components on embedded systems and HPC platforms. After the central specification of the security architecture at the data-flow level, security tokens are automatically calculated and distributed for runtime protection of the service-oriented, DDS-based data transmission. Our process ensures the strict separation of function and system knowledge, allowing for cost-effective and adaptable security architecture management. The evaluation in four self-driving, software-defined vehicles demonstrates an average runtime overhead of approximately 5.71\%. 
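The hash-based key chains mentioned above follow a well-known one-way-chain pattern; the sketch below is a generic illustration of that pattern, not the thesis's concrete FlexRay scheme. Keys are generated by repeated hashing and then released in reverse order, so receivers can verify each newly disclosed key against the previously released one but cannot predict future keys.

```python
import hashlib

def build_key_chain(seed: bytes, length: int) -> list[bytes]:
    """k[0] = H(seed), k[i] = H(k[i-1]); keys are later released in reverse."""
    chain = [hashlib.sha256(seed).digest()]
    for _ in range(length - 1):
        chain.append(hashlib.sha256(chain[-1]).digest())
    return chain

chain = build_key_chain(b"hypothetical-seed", 8)
anchor = chain[-1]  # distributed authentically once, e.g., at setup time

# Keys are disclosed from the end of the chain backwards; each one is
# verified by hashing it and comparing against the previously known key.
known = anchor
for key in reversed(chain[:-1]):
    assert hashlib.sha256(key).digest() == known  # one-way verification
    known = key
print("all released keys verified against the chain anchor")
```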
As the initial risk analysis and actual cyberattacks have shown, protective measures against the compromise of control units must be taken alongside communication security. To address this, we develop a method for verifying and validating the software integrity of control units. A governmental third party confirms a measurement through a digital certificate, proving the examined vehicle's trustworthiness and suitability for participation in automated traffic. In the final step of this work, we present an assessment scheme that allows software-defined vehicles to evaluate security incidents during operation in terms of their maximum expected damage and to initiate appropriate countermeasures. We follow the ISO/SAE 21434 standard and model attack paths using a graph representing dependencies among internal vehicle assets to account for the propagation effects of cyberattacks. The assessment of a security incident considers not only the probability of individual attack paths but also the vehicle context. Our practical evaluation demonstrates that we can detect, report, and assess security incidents in less than the human reaction time in the aforementioned prototype vehicles.}, language = {en} } @phdthesis{Lachat2024, author = {Lachat, Paul}, title = {Detecting Inference Attacks Involving Sensor Data}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14149}, school = {Universit{\"a}t Passau}, pages = {xiii, 141 Seiten}, year = {2024}, abstract = {The collection of personal information by organizations has become increasingly essential for social interactions. Nevertheless, according to the GDPR (General Data Protection Regulation), organizations have to protect the data they collect. Access Control (AC) mechanisms are traditionally used to secure information systems against unauthorized access to sensitive data. The increased availability of personal sensor data, thanks to IoT-oriented applications, motivates new services to offer insights about individuals. Consequently, data mining algorithms have been proposed to infer personal insights from collected sensor data. Although they can be used for genuine purposes, attackers can leverage those outcomes, combining them with other types of data, to further breach individuals' privacy. Thus, bypassing AC mechanisms thanks to such insights is a concrete problem. We propose an inference detection system based on the analysis of queries issued on a sensor database. The knowledge obtained through these queries, and the inference channels corresponding to the use of data mining algorithms on sensor data to infer individual information, are described using the Raw sensor data based Inference ChannEl Model (RICE-M). The detection is carried out by the RICE-M based inference detection System (RICE-Sy). RICE-Sy considers, at the time of a query, the knowledge that a user obtains via the new query and has obtained via their query history, and determines whether this is sufficient to allow that user to operate a channel. 
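A minimal sketch of this style of query-time inference detection (illustrative only; RICE-M's actual channel descriptions are richer than the flat attribute sets used here, and the channels below are invented): each channel is modelled as the set of knowledge items it requires, and a query is flagged when the user's accumulated knowledge would complete a channel.

```python
# Hypothetical inference channels: the knowledge items an attacker needs
# to operate each channel, e.g., as inputs to a data mining algorithm.
CHANNELS = {
    "occupancy_inference": {"power_consumption", "room_temperature"},
    "sleep_pattern_inference": {"heart_rate", "motion", "light_level"},
}

class InferenceDetector:
    def __init__(self):
        self.history = {}  # user -> accumulated knowledge items

    def check_query(self, user, requested_items):
        known = self.history.setdefault(user, set())
        candidate = known | set(requested_items)
        # Flag every channel that this query would complete.
        triggered = [name for name, needs in CHANNELS.items()
                     if needs <= candidate and not needs <= known]
        known |= set(requested_items)
        return triggered

detector = InferenceDetector()
print(detector.check_query("alice", ["power_consumption"]))   # []
print(detector.check_query("alice", ["room_temperature"]))    # ['occupancy_inference']
```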
Thus, privacy protection systems can take advantage of the inferences detected by RICE-Sy, which account for the information about individuals that attackers obtain via a sensor database, to further protect these individuals.}, language = {en} } @phdthesis{Bermeitinger2024, author = {Bermeitinger, Bernhard}, title = {Investigating a Second-Order Optimization Strategy for Neural Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14087}, school = {Universit{\"a}t Passau}, pages = {xv, 59 Seiten}, year = {2024}, abstract = {In summary, this cumulative dissertation investigates the application of the conjugate gradient method (CG) for the optimization of artificial neural networks (NNs) and compares this method with common first-order optimization methods, especially stochastic gradient descent (SGD). The presented research results show that CG can effectively optimize both small and very large networks. However, the default machine precision of 32 bits can lead to problems; the best results are only achieved in 64-bit computations. The research also emphasizes the importance of the initialization of the NNs' trainable parameters and shows that an initialization using singular value decomposition (SVD) leads to drastically lower error values. Surprisingly, shallow but wide NNs, both in Transformer and CNN architectures, often perform better than their deeper counterparts. Overall, the research results recommend a re-evaluation of the previous preference for extremely deep NNs and emphasize the potential of CG as an optimization method.}, language = {en} } @phdthesis{Fink2023, author = {Fink, Simon Dominik}, title = {Constrained Planarity Algorithms in Theory and Practice}, doi = {10.15475/cpatp.2024}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13817}, school = {Universit{\"a}t Passau}, pages = {216 Seiten}, year = {2023}, abstract = {In the constrained planarity setting, we ask whether a graph admits a crossing-free drawing that additionally satisfies a given set of constraints. These constraints are often derived from very natural problems; prominent examples are Level Planarity, where vertices have to lie on given horizontal lines indicating a hierarchy, Partially Embedded Planarity, where we extend a given drawing without modifying already-drawn parts, and Clustered Planarity, where we additionally draw the boundaries of clusters which recursively group the vertices in a crossing-free manner. In recent years, the family of constrained planarity problems has received a lot of attention in the field of graph drawing. Efficient algorithms were discovered for many of them, while a few others turned out to be NP-complete. In contrast to the extensive theoretical considerations and the direct motivation by applications, only very few of the discovered algorithms have been implemented and evaluated in practice. The goal of this thesis is to advance the research on both theoretical as well as practical aspects of constrained planarity. On the theoretical side, we consider two types of constrained planarity problems. The first type are problems that individually constrain the rotations of vertices, that is, they restrict the counter-clockwise cyclic orders of the edges incident to vertices. We give a simple linear-time algorithm for the problem Partially Embedded Planarity, which also generalizes to further constrained planarity variants of this type. 
The second type of constrained planarity problem concerns more involved planarity variants that come down to the question of whether there are embeddings of one or multiple graphs such that the rotations of certain vertices are in sync in a certain way. Clustered Planarity and a variant of the Simultaneous Embedding with Fixed Edges problem (Connected SEFE-2) are well-known problems of this type. Both are generalized by our Synchronized Planarity problem, for which we give a quadratic algorithm. Through reductions from various other problems, we provide a unified modelling framework for almost all known efficiently solvable constrained planarity variants that also directly provides a quadratic-time solution to all of them. For both our algorithms, a key ingredient for reaching an efficient solution is the usage of the right data structure for the problem at hand. In this case, these data structures are the SPQR-tree and the PC-tree, which describe planar embedding possibilities from a global and a local perspective, respectively. More specifically, PC-trees can be used to locally describe the possible cyclic orders of edges around vertices in all planar embeddings of a graph. This makes it a key component for our algorithms, as it allows us to test planarity while also respecting further constraints, and to communicate constraints arising from the surrounding graph structure between vertices with synchronized rotation. Bridging over to the practical side, we present the first correct implementation of PC-trees. We also describe further improvements, which allow us to outperform all implementations of alternative data structures (of which we found only very few to be fully correct) by at least a factor of 4. We show that this yields a simple and competitive planarity test that can also yield an embedding to certify planarity. We also use our PC-tree implementation to implement our quadratic algorithm for solving Synchronized Planarity. Here, we show that our algorithm greatly outperforms previous attempts at solving related problems like Clustered Planarity in practice. We also engineer its running time and show how degrees of freedom in the theoretical algorithm can be leveraged to yield an up to tenfold speed-up in practice.}, language = {en} } @phdthesis{Danner2023, author = {Danner, Dominik}, title = {Towards Quality of Service and Fairness in Smart Grid Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13731}, school = {Universit{\"a}t Passau}, pages = {xx, 172 Seiten}, year = {2023}, abstract = {Due to the increasing amount of distributed renewable energy generation and the emerging high demand at consumer connection points, e.g., electric vehicles, the power distribution grid will reach its capacity limit at peak load times if it is not expensively enhanced. Alternatively, smart flexibility management that controls user assets can help to better utilize the existing power grid infrastructure, for example by sharing available grid capacity among connected electric vehicles or by disaggregating flexibility requests to hybrid photovoltaic battery energy storage systems in households. Besides maintaining an acceptable state of the power distribution grid, these smart grid applications also need to ensure a certain quality of service and provide fairness between the individual participants, both of which are not extensively discussed in the literature. 
This thesis investigates two smart grid applications, namely electric vehicle charging-as-a-service and flexibility-provision-as-a-service from distributed energy storage systems in private households. The electric vehicle charging service allocation is modeled with distributed queuing-based allocation mechanisms, which are compared to new probabilistic algorithms. Both integrate user constraints (arrival time, departure time, and energy required) to manage the quality of service and fairness. In the queuing-based allocation mechanisms, electric vehicle charging requests are packetized into logical charging current packets, representing the smallest controllable size of the charging process. These packets are queued at hierarchically distributed schedulers, which allocate the available charging capacity using the time and frequency division multiplexing technique known from the networking domain. This allows multiple electric vehicles to be charged simultaneously with variable charging currents. To achieve high quality of service and fairness among electric vehicle charging processes, dynamic weights are introduced into a weighted fair queuing scheduler that considers electric vehicle departure time and required energy for prioritization. The distributed probabilistic algorithms are inspired by medium access protocols from computer networking, such as binary exponential backoff, and control the quality of service and fairness by adjusting sampling windows and waiting periods based on user requirements. The second smart grid application under investigation aims to provide flexibility-provision-as-a-service that disaggregates power flexibility requests to distributed battery energy storage systems in private households. Commonly, the main purpose of stationary energy storage is to store energy from a local photovoltaic system for later use, e.g., for overnight charging of an electric vehicle. This is optimized locally by a home energy management system, which also allows the scheduling of external flexibility requests defined by the deviation from the optimal power profile at the grid connection point, for example, to perform peak shaving at the transformer. This thesis discusses a linear heuristic and a meta-heuristic to disaggregate a flexibility request to the individual participating energy management systems that are grouped into a flexibility pool. Thereby, the linear heuristic iteratively assigns portions of the power flexibility to the most appropriate energy management system for one time slot after another, minimizing the total flexibility cost or maximizing the probability of flexibility delivery. In addition, a multi-objective genetic algorithm is proposed that also takes into account power grid aspects, quality of service, and fairness among participating households. The genetic operators are tailored to the flexibility disaggregation search space, taking into account flexibility and energy management system constraints, and enable power-optimized buffering of fitness values. Both smart grid applications are validated on a realistic power distribution grid with real driving patterns and energy profiles for photovoltaic generation and household consumption. The results of all proposed algorithms are analyzed with respect to a set of newly defined metrics on quality of service, fairness, efficiency, and utilization of the power distribution grid. 
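To illustrate the idea of deadline-aware dynamic weights in capacity sharing (a simplified proportional-share sketch of the weighted-fairness idea, not the thesis's packet-level scheduler; the weight formula is an assumption), each vehicle's weight can grow with its urgency, i.e., the energy still required relative to the time left until departure, and the available charging capacity is then split proportionally:

```python
def dynamic_weight(energy_needed_kwh, hours_until_departure, max_rate_kw):
    # Assumed urgency heuristic: higher when the remaining time barely
    # suffices to deliver the remaining energy (laxity-based).
    required_rate = energy_needed_kwh / max(hours_until_departure, 1e-6)
    return max(required_rate / max_rate_kw, 1e-3)

def allocate(capacity_kw, vehicles):
    """Split available capacity proportionally to dynamic weights,
    capped at each vehicle's maximum charging rate."""
    weights = {v["id"]: dynamic_weight(v["energy_kwh"], v["hours_left"],
                                       v["max_kw"]) for v in vehicles}
    total = sum(weights.values())
    return {v["id"]: min(capacity_kw * weights[v["id"]] / total, v["max_kw"])
            for v in vehicles}

vehicles = [
    {"id": "EV1", "energy_kwh": 30, "hours_left": 2, "max_kw": 11},  # urgent
    {"id": "EV2", "energy_kwh": 10, "hours_left": 8, "max_kw": 11},  # relaxed
]
print(allocate(11.0, vehicles))  # EV1 receives most of the shared capacity
```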
One of the main findings is that none of the tested algorithms outperforms the others in all quality of service metrics; however, integrating user expectations improves service quality compared to simpler approaches. Furthermore, smart grid control that incorporates users and their flexibility allows the integration of high-load applications, such as electric vehicle charging and flexibility aggregation from distributed energy storage systems, into the existing electricity distribution infrastructure. However, there is a trade-off between power grid aspects, e.g., grid losses and voltage values, and the quality of service provided. Whenever active user interaction is required, means of controlling the quality of service of users' smart grid applications are necessary to ensure user satisfaction with the services provided.}, language = {en} } @phdthesis{Welearegai2023, author = {Welearegai, Gebrehiwet Biyane}, title = {Precise Detection of Injection Attacks in Real-world Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-12926}, school = {Universit{\"a}t Passau}, pages = {xvi, 120 Seiten}, year = {2023}, abstract = {Code injection attacks, like the one used in the high-profile 2017 Equifax breach, have become increasingly common, ranking at the top of OWASP's list of critical web application vulnerabilities. Injection attacks can also target embedded applications running on processors like ARM and Xtensa by exploiting memory bugs, maliciously altering the program's behavior or even taking full control over a system. In particular, ARM's support for low power consumption without sacrificing performance is leading the industry to shift towards ARM processors, which attracts the attention of attackers as well. In this thesis, we consider web applications and embedded applications (running on ARM and Xtensa processors) as the targets of injection attacks. To detect injection attacks in web applications, taint analysis is the most commonly proposed technique, but the precision, scalability, and runtime overhead of the detection depend on the type of analysis (e.g., static vs. dynamic, sound vs. unsound). Moreover, among existing dynamic taint tracking approaches for Java-based applications, even the most performant can impose a slowdown of at least 10-20\% and often far more. On the other hand, considering embedded applications, while some initial research has tried to detect injection attacks (i.e., ROP and JOP) on ARM, these approaches suffer from high performance or storage overhead. Moreover, the Xtensa platform has been neglected, even though it is used in most firmware-based embedded WiFi home automation devices. This thesis aims to provide novel approaches to precisely detect injection attacks on both web and embedded applications. To that end, we assess JavaScript static analysis frameworks in order to evaluate the security of a hybrid app (JS \& native) from an industrial partner, provide Rivulet, a tool that precisely detects injection attacks in real-world Java applications, and investigate the detection of injection attacks on ARM and Xtensa platforms using hardware performance counters (HPCs) and machine learning (ML) techniques. To evaluate the security of the hybrid application, we initially compare the precision, scalability, and code coverage of two widely used static analysis frameworks, WALA and SAFE. The result of our comparison shows that SAFE provides higher precision and better code coverage at the cost of somewhat lower scalability. 
Based on these results, we analyze the data flows of the hybrid app by extending SAFE's taint analysis and detect potential injection attacks in the hybrid application. Similarly, to detect injection attacks in Java-based applications, we provide Rivulet, which monitors the execution of developer-written functional tests using dynamic taint tracking. Rivulet uses a white-box test generation technique to re-purpose those functional tests to check if any vulnerable flow could be exploited. We compared Rivulet to the state-of-the-art static vulnerability detector Julia on benchmarks, where Rivulet outperformed Julia in terms of both false positives and false negatives. We also used Rivulet to detect new vulnerabilities. Moreover, for applications running on ARM and Xtensa platforms, we investigate ROP attack detection by combining HPCs and ML techniques. We collect data for training the ML models by exploiting real-world vulnerable applications and small benchmarks. For ROP attack detection on ARM, we also implement an online monitor which labels a program's execution as benign or under attack and stops its execution once the latter is detected. Evaluating our ROP attack detection approach on ARM provides a detection accuracy of 92\% for the offline training and 75\% for the online monitoring. Similarly, our ROP attack detection on the firmware-only Xtensa processor provides an overall average detection accuracy of 79\%. Last but not least, this thesis shows how relevant taint analysis is for precisely detecting injection attacks on web applications, and how powerful HPCs combined with machine learning are for detecting control-flow injection attacks on ARM and Xtensa platforms.}, language = {en} } @phdthesis{Alhamzeh2023, author = {Alhamzeh, Alaa}, title = {Language Reasoning by means of Argument Mining and Argument Quality}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-12699}, school = {Universit{\"a}t Passau}, pages = {ix, 154 Seiten}, year = {2023}, abstract = {Understanding financial data has always been of interest to market participants seeking to make better-informed decisions. Recently, different cutting-edge technologies have been addressed in the Financial Technology (FinTech) domain, including numeracy understanding, opinion mining, and financial document processing. In this thesis, we are interested in analyzing the arguments of financial experts with the goal of supporting investment decisions. Although various business studies confirm the crucial role of argumentation in financial communications, no work has addressed this problem as a computational argumentation task, i.e., the automatic analysis of arguments. In this regard, this thesis presents contributions along the three essential axes of theory, data, and evaluation to fill the gap between argument mining and financial text. First, we propose a method for determining the structure of the arguments stated by company representatives during the public announcement of their quarterly results and future estimations through earnings conference calls. The proposed scheme is derived from argumentation theory at the micro-structure level of discourse. We further conducted the corresponding annotation study and published the first financial dataset annotated with arguments: FinArg. Moreover, we investigate the question of evaluating the quality of arguments in this financial genre of text. 
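As a rough illustration of the HPC-plus-ML ROP detection idea from the injection-attack entry above, the following Python sketch trains a classifier on synthetic stand-in counter readings. The feature choice and data are invented for illustration; the thesis collects real hardware performance counter traces on ARM and Xtensa.

```python
# A hedged sketch of the HPC+ML idea (not the thesis's pipeline):
# windows of hardware-performance-counter readings, e.g. instruction,
# branch-miss, and return counts, classified as benign vs. attack.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
# Synthetic stand-in data: ROP gadget chains tend to distort branch
# and return behaviour, so the "attack" class is shifted.
benign = rng.normal(loc=[100, 5, 2], scale=10, size=(500, 3))
attack = rng.normal(loc=[100, 25, 9], scale=10, size=(500, 3))
X = np.vstack([benign, attack])
y = np.array([0] * 500 + [1] * 500)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X_tr, y_tr)
print(f"detection accuracy: {clf.score(X_te, y_te):.2f}")
```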
To tackle this challenge, we suggest using two levels of quality metrics, considering both the Natural Language Processing (NLP) literature on argument quality assessment and the peculiarities of the financial domain. Hence, we have also enriched the FinArg data with our quality dimensions to produce the FinArgQuality dataset. In terms of evaluation, we validate the principle of ensemble learning on the argument identification and argument unit classification tasks. We show that combining a traditional machine learning model with a deep learning one, via an integration model (stacking), improves the overall performance, especially in small-dataset settings. In addition, despite the fact that argument mining is mainly a domain-dependent task, to date, the number of studies that tackle the generalization of argument mining models is still relatively small. Therefore, using our stacking approach, and in comparison to the transfer learning model DistilBert, we address and analyze three real-world scenarios concerning model robustness on completely unseen domains and unseen topics. Furthermore, with the aim of automatically assessing argument strength, we have investigated and compared different (refined) versions of Bert-based models that incorporate external knowledge in the decision layer. Consequently, our method outperforms the baseline model by 13 ± 2\% in terms of F1-score through integrating Bert with encoded categorical features. Beyond our theoretical and methodological proposals, our model of argument quality assessment, annotated corpora, and evaluation approaches are publicly available and can serve as strong baselines for future work in both the FinNLP and computational argumentation domains. Hence, building directly on this thesis, we proposed to the community a new task/challenge related to the analysis of financial arguments, FinArg-1, within the framework of the NTCIR-17 conference. We also used our proposals to participate in the Touch{\´e} challenge at the CLEF 2021 conference, where our contribution was selected among the «Best of Labs».}, language = {en} } @phdthesis{Brummer2022, author = {Brummer, Stephan}, title = {Numerisch robuste Berechnung der zirkul{\"a}ren Sichtbarkeitsmenge}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-12299}, school = {Universit{\"a}t Passau}, pages = {viii, 170 Seiten}, year = {2022}, abstract = {Visibility problems such as the following are among the fundamental problems of computational geometry: given a simple polygon, the so-called channel, and a point contained in it, compute the set of points visible from that point. Here, a point is visible from another point if the line segment connecting them does not leave the channel. In this thesis we study circular visibility, where not only line segments but also circular arcs are admissible for connecting two points. Moreover, as the origin of these so-called visibility arcs and visibility segments we consider an edge of the channel rather than a single point. Concretely, this thesis contributes to the numerically robust computation of the circular visibility set seen from an edge of the channel. To this end, we present an algorithm that decides, for a given point, whether it is visible from the start edge. If the point is visible, a visibility arc with two channel contacts is computed. 
With a suitable choice of the query point, which acts as a third channel contact, the algorithm can therefore be used directly to compute so-called boundary arcs of the visibility set. These define the boundary of the circular visibility set and are characterized by touching the channel three times, alternating between the left and the right side. The algorithm is based on examining those arcs that connect the start edge with the point whose visibility is to be determined, but that do not necessarily lie entirely within the channel. In particular, it examines the regions in which such an arc leaves the channel, the so-called violations. Since the "severity" of such a violation can be quantified, an iterative procedure becomes possible: the arc is modified iteratively so that, while keeping its endpoint fixed, it leaves the channel less and less. If the endpoint, and thus the point under consideration, is not visible, the algorithm detects that no such improvement is possible. The presented algorithm is numerically robust, easy to implement, and its running time is linear in the number of channel vertices.}, language = {de} } @phdthesis{Liang2023, author = {Liang, Hanning}, title = {Deflectometric Measurement of the Topography of Reflecting Freeform Surfaces in Motion}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11672}, school = {Universit{\"a}t Passau}, pages = {xiv, 157 Seiten}, year = {2023}, abstract = {Measuring the topography of specular surfaces with strong surface structures in motion was impossible before this research. A new method based on single-shot phase-measuring deflectometry (SSPMD) that combines different solution aspects is presented.}, language = {en} } @phdthesis{Sonnleitner2022, author = {Sonnleitner, Mathias}, title = {The power of random information for numerical approximation and integration}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11305}, school = {Universit{\"a}t Passau}, pages = {x, 165 Seiten}, year = {2022}, abstract = {This thesis investigates the quality of randomly collected data by employing a framework built on information-based complexity, a field related to the numerical analysis of abstract problems. The quality or power of gathered information is measured by its radius, which is the uniform error obtainable by the best possible algorithm using it. The main aim is to present progress towards understanding the power of random information for approximation and integration problems. In the first problem considered, information given by linear functionals is used to recover vectors, in particular from generalized ellipsoids. This is related to the approximation of diagonal operators, which are important objects of study in the theory of function spaces. We obtain upper bounds on the radius of random information, both in a convex and a quasi-normed setting, which extend and, in some cases, improve existing results. We conjecture and partially establish that the power of random information is subject to a dichotomy determined by the decay of the lengths of the semiaxes of the generalized ellipsoid. Second, we study multivariate approximation and integration using information given by function values at sampling point sets. 
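A toy Python sketch of the "random information" setting just described: numerical integration by an equal-weight (linear) algorithm using function values at i.i.d. uniform sampling points, compared with an equispaced point set. The function and point counts are arbitrary choices for illustration, not taken from the thesis.

```python
# A toy illustration (not from the thesis) of equal-weight integration
# from random "information": function values at n sample points.
import numpy as np

f = lambda x: np.sin(2 * np.pi * x)   # true integral over [0, 1] is 0
n = 1024
rng = np.random.default_rng(1)

random_pts = rng.uniform(0, 1, n)     # i.i.d. uniform information
grid_pts = (np.arange(n) + 0.5) / n   # equispaced comparison set

for name, pts in [("random", random_pts), ("grid", grid_pts)]:
    estimate = f(pts).mean()          # equal weights 1/n
    print(f"{name}: |error| = {abs(estimate):.2e}")
# Random points give an error on the order of n**-0.5 here; which kind
# of point set is (near-)optimal depends on the function class, which
# is exactly what the radius-of-information analysis quantifies.
```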
We obtain an asymptotic characterization of the radius of information in terms of a geometric measure of equidistribution, the distortion, which is well known in the theory of quantization of measures. This holds for isotropic Sobolev as well as H{\"o}lder and Triebel-Lizorkin spaces on bounded convex domains. We find that for these spaces, depending on the parameters involved, typical point sets are either asymptotically optimal or worse by a logarithmic factor, again extending and improving existing results. Further, we study isotropic discrepancy, which is related to numerical integration using linear algorithms with equal weights. In particular, we analyze the quality of lattice point sets with respect to this criterion and obtain that they are suboptimal compared to uniform random points. This is in contrast to the approximation of Sobolev functions and resolves an open question raised in the context of a possible low-discrepancy construction on the two-dimensional sphere.}, subject = {Komplexit{\"a}t / Algorithmus}, language = {en} } @phdthesis{Schlenker2022, author = {Schlenker, Florian}, title = {Delaunay Configuration B-Splines}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11225}, school = {Universit{\"a}t Passau}, pages = {xxiii, 223 Seiten}, year = {2022}, abstract = {The generalization of univariate splines to higher dimensions is not straightforward. There are different approaches, each with its own advantages and drawbacks. A promising approach using Delaunay configurations and simplex splines is due to Neamtu. After recalling fundamentals of univariate splines, simplex splines, and the well-known multivariate DMS-splines, we address Neamtu's DCB-splines. He defined two variants that we refer to as the nonpooled and the pooled approach, respectively. Regarding these spline spaces, we contribute the following results. We prove that, under suitable assumptions on the knot set, both variants exhibit the local finiteness property, i.e., these spline spaces are locally finite-dimensional and at each point only a finite number of candidate basis functions have a nonzero value. Additionally, we establish a criterion guaranteeing these properties within a compact region under mitigated assumptions. Moreover, we show that the knot insertion process known from univariate splines does not work for DCB-splines, and we argue why this behavior is inherent to these spline spaces. Furthermore, we provide a necessary criterion for the knot insertion property to hold true for a specific inserted knot. This criterion is also sufficient for bivariate, nonpooled DCB-splines of degrees zero and one. Numerical experiments suggest that the sufficiency also holds true for arbitrary spline degrees. Univariate functions can be approximated in terms of splines using the Schoenberg operator, where the approximation error decreases quadratically as the maximum distance between consecutive knots is reduced. We show that the Schoenberg operator can be defined analogously for both variants of DCB-splines with a similar error bound. Additionally, we provide a counterexample showing that the candidate basis functions of nonpooled DCB-splines are not necessarily linearly independent, contrary to earlier statements in the literature. 
In particular, this implies that the corresponding functions are not a basis for the space of nonpooled DCB-splines.}, subject = {Spline}, language = {en} } @phdthesis{Schmid2022, author = {Schmid, Josef}, title = {Learning-Based Quality of Service Prediction in Cellular Vehicle Communication}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10772}, school = {Universit{\"a}t Passau}, pages = {xvi, 147 Seiten}, year = {2022}, abstract = {Network communication has become a part of everyday life, and the interconnection among devices and people will increase even more in the future. A new area where this development is on the rise is the field of connected vehicles. It is especially useful for automated vehicles, as it connects them with other road users and cloud services. For the latter in particular, a mobile network connection is beneficial, as it is already widely used and no additional infrastructure is needed. Network communication, however, comes with certain requirements, one of them being the reliability of the connection: certain Quality of Service (QoS) parameters need to be met. In case of degraded QoS, according to the SAE level specification, a downgrade of the automated system can be required, which may lead to a takeover maneuver, in which control is returned to the driver. Since such a handover takes time, prediction is necessary to forecast the network quality for the next few seconds. Prediction of QoS parameters, especially in terms of Throughput (TP) and Latency (LA), is still a challenging task, as the wireless transmission properties of a moving mobile network connection are subject to fluctuation. In this thesis, a new approach for predicting Network Quality Parameters (NQPs) on the Transmission Control Protocol (TCP) level is presented. It combines knowledge of the environment with the low-level parameters of the mobile network. The aim of this work is to perform a comprehensive study of various models, including both Location Smoothing (LS) grid maps and Learning-Based (LB) regression models. Moreover, the location independence of the models as well as their suitability for automated driving is evaluated.}, language = {en} } @phdthesis{Silva2022, author = {Silva, Vivian dos Santos}, title = {A Composite Syntactic-Semantic Interpretable Text Entailment Approach Exploring Commonsense Knowledge Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10706}, school = {Universit{\"a}t Passau}, pages = {xiv, 229 Seiten}, year = {2022}, abstract = {Natural Language Processing has an important role in Artificial Intelligence for easing human-machine interaction. Processing human language, though, poses many challenges, among which is the semantics-related phenomenon known as language variability, the fact that the same thing can be said in several ways. NLP applications' inputs and outputs can be expressed in different forms, whose equivalence can be verified through inference. The textual entailment paradigm was established to enable the creation of a unifying framework for applied inference, providing a means of freeing other NLP tasks from handling inference issues in an ad-hoc manner, using instead the outputs of a dedicated inference mechanism. Text entailment, the task of determining whether a piece of text logically follows from another piece of text, involves different scenarios, which can range from a simple syntactic variation to more complex semantic relationships between sentences. 
However, most approaches try a one-size-fits-all solution that usually favors some scenario to the detriment of another. The commonsense world knowledge necessary to support more complex inferences is also usually employed in a limited way, with most approaches sticking to shallow semantic information, leaving more elaborate semantic relationships aside. Furthermore, most systems still work as a "black box", providing a yes/no answer that does not explain the underlying reasoning process. This thesis aims at addressing these issues by proposing a composite interpretable approach for recognizing text entailment, in which the entailment pair is analyzed so that the most relevant phenomenon is detected and a suitable method can be used to solve it. Syntactic variations are dealt with through the analysis of the sentences' syntactic structures, and semantic relationships are detected with the aid of a knowledge graph built from natural language dictionary definitions. Also, if semantic matching is involved, the answer is made interpretable through the generation of natural language justifications that explain the semantic relationship between the pieces of text. The result is XTE - Explainable Text Entailment - a system that outperforms well-established tools based on single-technique entailment algorithms, and that also takes an important step towards Explainable AI, allowing the inference model to be interpreted and making the semantic reasoning process explicit and understandable.}, language = {en} } @phdthesis{NorbertoSales2022, author = {Norberto Sales, Juliano Efson}, title = {An Explainable Semantic Parser for End-User Development}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10718}, school = {Universit{\"a}t Passau}, pages = {xvi, 165 Seiten}, year = {2022}, abstract = {Programming is a key skill in a world where businesses are driven by digital transformations. Although much of the demand for programming can be addressed by simple sets of instructions composing libraries and services available on the web, non-technical professionals, such as domain experts and analysts, are still unable to construct their own programs due to the intrinsic complexity of coding. Among other types of end-user development, natural language programming has emerged to allow users to program without the formalism of traditional programming languages: a tailored semantic parser translates a natural language utterance into a formal command representation that can be processed by a computational machine. Currently, semantic parsers are typically built on top of a learning method that defines its behaviour based on the patterns behind large training data, whose production is frequently costly and time-consuming. Our research is devoted to studying and proposing a semantic parser for natural language commands targeting a scenario with low availability of training data. Our proposed semantic parser follows a multi-component architecture, composed of a specialised shallow parser that associates natural language commands with predicate-argument structures, integrated with a distributional ranking model that matches the command to a function signature available from an API knowledge base. Systems developed with statistical learning models and complex linguistic resources, like the proposed semantic parser, do not natively provide an easy way to associate a single feature of the input data with its impact on system behaviour. 
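A highly simplified Python sketch of the ranking step in the multi-component parser described above. All names, the API knowledge base, and the bag-of-words similarity are illustrative assumptions; the thesis uses a distributional model over predicate-argument structures rather than word counts.

```python
# A simplified sketch (hypothetical names, not the thesis system) of
# ranking API function signatures against a parsed command: a shallow
# parse yields a predicate and arguments, and a similarity model
# ranks candidate signatures from a knowledge base.
from collections import Counter
import math

def bow_cosine(a: str, b: str) -> float:
    va, vb = Counter(a.lower().split()), Counter(b.lower().split())
    dot = sum(va[w] * vb[w] for w in va)
    norm = (math.sqrt(sum(v * v for v in va.values()))
            * math.sqrt(sum(v * v for v in vb.values())))
    return dot / norm if norm else 0.0

api_kb = [  # signature -> short natural language description
    ("send_email(recipient, subject, body)", "send an email message to a contact"),
    ("create_event(title, date)", "create a calendar event on a date"),
    ("play_song(title)", "play a song by title"),
]

predicate, args = "send", ["message", "to John"]  # shallow-parser output
query = predicate + " " + " ".join(args)
ranked = sorted(api_kb, key=lambda kv: bow_cosine(query, kv[1]), reverse=True)
print(ranked[0][0])  # -> send_email(recipient, subject, body)
```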
In this scenario, end-user explanations for intelligent systems have become a strong requirement to increase user confidence and system literacy. Thus, our research designed an explanation model for the proposed semantic parser that fits the heterogeneity of its multi-component architecture. The explanation model explores a hierarchical representation with an increasing degree of technical depth, providing higher-level explanations in the initial layers and gradually moving to those that demand technical knowledge, applying different explanation strategies to better express the approach behind each component. With the support of a user-centred experiment, we compared the utility of different types of explanations and the impact of background knowledge on user preferences.}, language = {en} } @phdthesis{Opris2022, author = {Opris, Andre}, title = {Holomorphic Extensions in the Structure R_{an,exp}}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10691}, school = {Universit{\"a}t Passau}, pages = {233 Seiten}, year = {2022}, abstract = {In this thesis we consider real analytic functions, i.e. functions which can be described locally as convergent power series, and ask the following: which real analytic functions definable in R_{an,exp} have a holomorphic extension which is again definable in R_{an,exp}? Finding a holomorphic extension is of course not difficult, simply by power series expansion; the difficulty is to construct it in a definable way. We will not answer the question above completely, but introduce a large non-trivial class of definable functions in R_{an,exp} which contains, for example, functions that are iterated compositions, from either side, of globally subanalytic functions and the global logarithm. We call them restricted log-exp-analytic. After giving some preliminary results, like preparation theorems and Tamm's Theorem for this class of functions, we are able to show that real analytic restricted log-exp-analytic functions have a holomorphic extension which is again restricted log-exp-analytic.}, subject = {O-Minimalit{\"a}t}, language = {en} } @phdthesis{Mandarawi2022, author = {Mandarawi, Waseem}, title = {Multi-objective Network Virtualization and its Applicability to Industrial Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10606}, school = {Universit{\"a}t Passau}, pages = {xv, 156 Seiten}, year = {2022}, abstract = {Network virtualization provides high flexibility for deploying communication services in dense and heterogeneous environments. Two main approaches (dimensions), which are usually combined, exist: Network Function Virtualization (NFV) technologies for functionality virtualization and Virtual Network Embedding (VNE) algorithms for resource virtualization. These approaches can be applied at different network levels, such as the factory and enterprise levels of industrial networks. Several objectives and constraints, which might be conflicting, must be considered when network virtualization is applied, especially in complex topologies. This thesis proposes a network virtualization model that considers both virtualization dimensions, two network levels, and different objectives and constraints. The network levels considered are the two primary levels of industrial networks. However, this consideration does not restrict the model to a particular environment or certain levels. The considered objectives/constraints are topology, reliability, security, performance, and resource usage. 
Based on this model, we first build an overall combined solution for autonomic and composite virtual networking. This solution considers both virtualization dimensions, two network levels, and the target objectives. Furthermore, this solution combines three novel virtualization sub-approaches that consider performance, reliability, and security. However, the sub-approaches apply to different combinations of levels and dimensions, and the reliability approach additionally considers the resource usage objective. After presenting all solutions, we map them to the defined model. Regarding applicability to industrial networks, the combined approach is applied to an enterprise-level Industrial Internet of Things (IIoT) use case inspired by the smart factory concept in Industry 4.0. The sub-approaches, however, are applied to more specific use cases. The performance and reliability solutions are integrated with relevant components of the Time-Sensitive Networking (TSN) standard as a modern technology for industrial networks. The goal is to enrich the reliability and performance capabilities of TSN with the flexibility of network virtualization. In the combined approach, we compose and embed an environment-aware Extended Virtual Network (EVN) that represents the physical devices, virtual application functions, and required Service Function Chains (SFCs). We use the graph transformation method to transform abstract application requirements (represented by an Application Request (AR)) into an EVN. Both the EVN composition and embedding methods consider the Substrate Network (SN) topology and different security, reliability, performance, and resource usage policies. These policies are applied with a certain priority and depend on the properties of the communicating entities, such as location and type. The EVN is embedded using property-based node mapping, reliability-aware branching, and a greedy chain embedding heuristic. The chain embedding heuristic is evaluated using a random topology that represents the use case. The performance sub-approach is NFV-based and is applied to a specific use case with Time-critical Traffic (TCT) flows. We develop and evaluate a complete framework for virtualizing the Time-aware Shaper (TAS) using high-performance NFV. The reliability sub-approach is VNE-based and is applied to a specific factory-level use case. We develop minimal and maximal branching heuristics based on a reliability-aware k-shortest-path algorithm and compare them using a typical factory topology. We then integrate these algorithms with a Frame Replication and Elimination for Reliability (FRER) simulator to realize reliability policies through the autonomic and efficient configuration of a supporting technology. The security sub-approaches are related to both virtualization dimensions and are applied to generic enterprise-level use cases. However, the applicability of the security aspect to industrial networks is only shown in the combined (EVN) approach and its use case. We research autonomic security management in the Network Function Virtualization Infrastructure (NFVI) with the main goal of reacting early to threats through SFC reconfiguration via Virtual Network Function (VNF) live migration. This goal is approached by supporting the security measurements with a decision-making architecture that considers, on the one hand, the threats and events in the environment and, on the other hand, the Service Level Agreement (SLA) between the NFVI provider and user. 
For this purpose, we classify the VNF-specific attacks and define possible early-detectable behavior patterns. Finally, we develop a security-aware VNE heuristic that considers the security requirements of the Virtual Network (VN) and the security capabilities of the SN. This approach is modified in the combined approach to consider the deployment of virtualized security VNFs.}, language = {en} } @phdthesis{Niklaus2022, author = {Niklaus, Christina}, title = {From Complex Sentences to a Formal Semantic Representation using Syntactic Text Simplification and Open Information Extraction}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10540}, school = {Universit{\"a}t Passau}, pages = {xxi, 301 Seiten}, year = {2022}, abstract = {Sentences that present a complex linguistic structure act as a major stumbling block for Natural Language Processing (NLP) applications, whose predictive quality deteriorates with sentence length and complexity. The task of Text Simplification (TS) may remedy this situation. It aims to modify sentences in order to make them easier to process, using a set of rewriting operations, such as reordering, deletion, or splitting. These transformations are executed with the objective of converting the input into a simplified output, while preserving its main idea and keeping it grammatically sound. State-of-the-art syntactic TS approaches suffer from two major drawbacks: first, they follow a very conservative approach in that they tend to retain the input rather than transform it, and second, they ignore the cohesive nature of texts, where context spread across clauses or sentences is needed to infer the true meaning of a statement. To address these problems, we present a discourse-aware TS framework that is able to split and rephrase complex English sentences within the semantic context in which they occur. By generating a fine-grained output with a simple canonical structure that is easy to analyze by downstream applications, we tackle the first issue. For this purpose, we decompose a source sentence into smaller units by using a linguistically grounded transformation stage. The result is a set of self-contained propositions, each of them presenting a minimal semantic unit. To address the second concern, we suggest not only splitting the input into isolated sentences, but also incorporating the semantic context in the form of hierarchical structures and semantic relationships between the split propositions. In that way, we generate a semantic hierarchy of minimal propositions that benefits downstream Open Information Extraction (IE) tasks. To function well, the TS approach that we propose requires syntactically well-formed input sentences. It targets general-purpose texts in English, such as newswire or Wikipedia articles, which commonly contain a high proportion of complex assertions. In a second step, we present a method that allows state-of-the-art Open IE systems to leverage the semantic hierarchy of simplified sentences created by our discourse-aware TS approach in constructing a lightweight semantic representation of complex assertions in the form of semantically typed predicate-argument structures. In that way, important contextual information of the extracted relations is preserved, allowing for a proper interpretation of the output. Thus, we address the problem of extracting incomplete, uninformative, or incoherent relational tuples that is commonly observed in existing Open IE approaches. 
Moreover, assuming that shorter sentences with a more regular structure are easier to process, the extraction of relational tuples is facilitated, leading to a higher coverage and accuracy of the extracted relations when operating on the simplified sentences. Aside from taking advantage of the semantic hierarchy of minimal propositions in existing Open IE approaches, we also develop an Open IE reference system, Graphene, which implements a relation extraction pattern on top of the simplified sentences. The framework we propose is evaluated within our reference TS implementation DisSim. In a comparative analysis, we demonstrate that our approach outperforms the state of the art in structural TS in both an automatic and a manual analysis. It obtains the highest score on three simplification datasets from two different domains with regard to SAMSA (0.67, 0.57, 0.54), a recently proposed metric targeted at automatically measuring the syntactic complexity of sentences, which highly correlates with human judgments on structural simplicity and grammaticality. These findings are supported by the ratings from the human evaluation, which indicate that our baseline implementation DisSim returns fine-grained simplified sentences that achieve a high level of syntactic correctness and largely preserve the meaning of the input. Furthermore, a comparative analysis with the annotations contained in the RST Discourse Treebank (RST-DT) reveals that we are able to capture the contextual hierarchy between the split sentences with a precision of approximately 90\% and reach an average precision of almost 70\% for the classification of the rhetorical relations that hold between them. Finally, an extrinsic evaluation shows that, when applying our TS framework as a pre-processing step, the performance of state-of-the-art Open IE systems can be improved by up to 32\% in precision and 30\% in recall of the extracted relational tuples. Accordingly, we can conclude that our proposed discourse-aware TS approach succeeds in transforming sentences that present a complex linguistic structure into a sequence of simplified sentences that are to a large extent grammatically correct, represent atomic semantic units, and preserve the meaning of the input. Moreover, the evaluation provides sufficient evidence that our framework is able to establish a semantic hierarchy between the split sentences, generating a fine-grained representation of complex assertions in the form of hierarchically ordered and semantically interconnected propositions. Finally, we demonstrate that state-of-the-art Open IE systems benefit from using our TS approach as a pre-processing step, increasing both the accuracy and coverage of the extracted relational tuples for the majority of the Open IE approaches under consideration. In addition, we outline that the semantic hierarchy of simplified sentences can be leveraged to enrich the output of existing Open IE systems with additional meta information, thus transforming the shallow semantic representation of state-of-the-art approaches into a canonical context-preserving representation of relational tuples.}, language = {en} } @phdthesis{Schmid2021, author = {Schmid, Matthias}, title = {Towards Storing 3D Model Graphs in Relational Databases}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10353}, school = {Universit{\"a}t Passau}, pages = {243 Seiten}, year = {2021}, abstract = {The increasing relevance of massive graph data reinforces the need for adequate graph data management. 
While several graph database engines have been developed, the storage of graph data in a relational database management system, and therefore the seamless integration into existing information systems, remains an open challenge. Motivated by the use case of integrating Building Information Modeling (BIM) data into the MonArch system, we propose a solution that transforms the BIM data into a property graph and stores this graph in the database system. We present a novel approach to efficiently store property graph data in a relational database management system using JSON functionality and redundant storage of edges in adjacency lists, and we show how to import huge data sets into this schema. Applying this approach, we import data sets of up to nearly 1 TB into the relational database, while having only 96 GB of main memory available. We also present a new approach for retrieving data from this database schema, translating queries written in the popular property graph query language Cypher into SQL. Hence, we provide an intuitive way to write semantically complex queries. We also demonstrate the efficiency of our approach using the standardized Linked Data Benchmark Council - Social Network Benchmark (LDBC - SNB) framework. Our approach increases the throughput for this benchmark by a factor of up to 85 compared to existing approaches for RDBMSs. In addition, we propose a new method to transform BIM data into the property graph model and show how to apply the aforementioned property graph storage to this data. We can import IFC models of up to 300 MB within five minutes. We show the suitability of our approach using our own use-case-specific benchmark, which we integrated into the previously mentioned Social Network Benchmark. For our interactive use-case-specific queries, we achieve response times faster than 5 ms in 99\% of all executions. Finally, we present how the aforementioned approach to storing BIM data in a relational database management system is integrated into the existing MonArch system by splitting the different functionalities of our approach into a microservice architecture.}, language = {en} } @phdthesis{Alshawish2021, author = {Alshawish, Ali}, title = {Risk-based Security Management in Critical Infrastructure Organizations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10026}, school = {Universit{\"a}t Passau}, pages = {xii, 181 Seiten}, year = {2021}, abstract = {Critical infrastructure and contemporary business organizations are experiencing an ongoing paradigm shift of business towards more collaboration and agility. On the one hand, this shift seeks to enhance business efficiency, coordinate large-scale distribution operations, and manage complex supply chains. On the other hand, it makes traditional security practices such as firewalls and other perimeter defenses insufficient. Therefore, concerns over risks like terrorism, crime, and business revenue loss increasingly impose the need for enhancing and managing security within the boundaries of these systems, so that unwanted incidents (e.g., potential intrusions) can still be detected with higher probabilities. To this end, critical infrastructure organizations step up their efforts to investigate new possibilities for actively engaging in situational awareness practices to ensure a high level of persistent monitoring as well as on-site observation. Compliance with security standards is necessary to ensure that organizations meet regulatory requirements, which are mostly shaped by a set of best practices. 
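To illustrate the storage idea from the property-graph entry above, here is a minimal Python/SQLite sketch. The schema is an illustrative assumption, not the thesis's design: nodes and edges live in relational tables, properties are stored as JSON, and a simple Cypher pattern is hand-translated to SQL. It assumes a SQLite build with the JSON1 functions, which is standard in recent Python distributions.

```python
# A minimal sketch (not the MonArch/thesis schema) of property-graph
# storage in an RDBMS: relational node/edge tables, JSON properties,
# and a Cypher pattern hand-translated to SQL.
import sqlite3, json

db = sqlite3.connect(":memory:")
db.executescript("""
CREATE TABLE nodes (id INTEGER PRIMARY KEY, label TEXT, props TEXT);
CREATE TABLE edges (src INTEGER, dst INTEGER, label TEXT);
CREATE INDEX adj ON edges (src, label);   -- adjacency-list style access
""")
db.executemany("INSERT INTO nodes VALUES (?, ?, ?)",
               [(1, "Person", json.dumps({"name": "Ada"})),
                (2, "Person", json.dumps({"name": "Bob"}))])
db.execute("INSERT INTO edges VALUES (1, 2, 'KNOWS')")

# Cypher: MATCH (a:Person)-[:KNOWS]->(b:Person) RETURN b.name
rows = db.execute("""
SELECT json_extract(b.props, '$.name')
FROM nodes a JOIN edges e ON e.src = a.id AND e.label = 'KNOWS'
             JOIN nodes b ON b.id = e.dst
WHERE a.label = 'Person' AND b.label = 'Person'
""").fetchall()
print(rows)  # [('Bob',)]
```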
Nevertheless, it does not necessarily result in a coherent security strategy that considers the different aims and practical constraints of each organization. In this regard, there is an increasingly growing demand for risk-based security management approaches that enable critical infrastructures to focus their efforts on mitigating the risks to which they are exposed. Broadly speaking, security management involves the identification, assessment, and evaluation of long-term (or overall) objectives and interests, as well as the means of achieving them. Due to the critical role of such systems, their decision-makers tend to enhance the system's resilience against very unpleasant outcomes and severe consequences. That is, they seek to avoid decision options associated with likely extreme risks in the first place. Practically speaking, this risk attitude can significantly influence the decision-making process in such critical organizations. Towards incorporating this aversion to extreme risks into security management decisions, this thesis thoroughly investigates the capabilities of a recently emerged theory of games with payoffs that are probability distributions. Unlike traditional optimization techniques, this theory provides an alternative decision technique that is more robust to extreme risks and uncertainty. Furthermore, this thesis proposes a new method that gives a decision-maker more control over the decision-making process by defining loss regions with different importance levels according to people's risk attitudes. In this way, the static decision analysis used in the distribution-valued games is transformed into a dynamic process that can adapt to different subjective risk attitudes or account for future changes in the decision caused by a learning process or other changes in the context. Throughout its different parts, this thesis shows how theoretical models, simulation, and risk assessment models can be combined into practical solutions. In this context, it deals with three facets of security management: allocating limited security resources, prioritizing security actions, and tweaking decision making. Finally, the author discusses experiences and limitations distilled from this research and from investigating the new theory of games, which can be taken into account in future approaches.}, subject = {Spieltheorie}, language = {en} }
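As a closing numeric illustration of why payoffs as probability distributions matter for the risk attitudes discussed in the entry above, consider the following toy Python sketch. The loss data are invented for illustration, and the tail comparison is a simplification, not the cited theory's formal stochastic order over distributions.

```python
# A toy sketch (invented data) of distribution-valued payoffs: two
# defense options with near-identical mean loss can differ sharply in
# extreme-risk (tail) behaviour.
import numpy as np

rng = np.random.default_rng(7)
n = 100_000
loss_a = rng.normal(10.0, 1.0, n)               # steady, light tail
loss_b = np.where(rng.uniform(size=n) < 0.02,
                  rng.normal(50.0, 5.0, n),     # rare catastrophe
                  rng.normal(9.0, 1.0, n))

for name, loss in [("A", loss_a), ("B", loss_b)]:
    print(f"{name}: mean={loss.mean():5.2f}  P(loss > 30)={np.mean(loss > 30):.3f}")
# B even has the slightly lower mean loss, yet it carries a
# non-negligible extreme-loss tail; a decision rule robust to extreme
# risks, as discussed above, would prefer option A.
```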