@phdthesis{Prakash2025, author = {Prakash, Jyoti}, title = {Static Analyses of Interlanguage Interoperations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15736}, school = {Universit{\"a}t Passau}, pages = {ix, 126 Seiten}, year = {2025}, abstract = {Software Developers are moving towards a multilingual development where they combine two languages in a single application to harness the strengths of each language. For example, performance-critical components of a Java application can be implemented in C language. It provides flexibility, at the same time, it becomes difficult to statically analyze these applications. The amalgamation of two languages in a single application may introduce bugs ranging from type-mismatch to security vulnerabilities. Therefore, it is necessary to develop static analysis techniques to aid developers in multilingual development. In this thesis, we develop techniques to study and analyze these applications. In the first part of the thesis, we study the prevalence of security and privacy vulnerabilities in hybrid apps. Hybrid apps are Android apps that combine both Java and Javascript components, where the Android part is secured (on the device), while the JavaScript part is exposed to web. Additionally, some of the Java functions are available to JavaScript component through an interface called as bridge interface. In the pursuit of the goal, we adopt a static backtracking of data dependencies to determine the flow of information from the android component to the web component. Our study revealed the potential sources of unsoundness in the existing static analyses. Static backtracing also induces imprecision in the analysis, i.e., there might be some flows that are not possible during runtime albeit are reported by the analysis. These were mitigated through a manual verification. This work reveals that the android-web hybridization can lead to (potential) vulnerabilities that might impact the confidentiality as well as the integrity properties of these apps. From the communication patterns occurring in Android WebView, we noticed that its is feasible for an attacker to jeopardize the integrity of apps by corrupting some value, say an input on the web through bridge interfaces. Motivated by this, we define a information flow analysis of the bridge interfaces and the associated data flows in hybrid apps. In the first step, we propose a novel threat model where we model the attacker as someone who wants to influence the behavior of android app as an integrity violation. Based on this threat model, we then propose a demand-driven analysis technique to detect confidentiality and integrity violations. Our analysis leverages, a demand-driven technique, where it only analyzes the relevant part of app for the information flow analysis with the help of function summaries --- escaping the need of a whole-program analysis. In the second part of the thesis, we generalize the approach to static analysis of multilingual applications. To this end, we investigate into the question of combining existing single language analyses to analyze multilingual programs. To provide an affirmative answer, we define an analysis to leverage single language analyses for call-graph and pointer analysis of multilingual programs. Our analysis takes two existing unilingual analyses and analyzes the complete multilingual program. It uses a novel summary specialization technique that resolves the information flows at the bridge interfaces by utilizing independent pre-analyses (modulo foreign function interfaces) of each language component. We apply this technique to analyze Android-NDK and GraalVM Java-Python multilingual applications for generating call-graphs. In summary, we have developed novel techniques for information flow and call-graph analysis for multilingual programs. With this, we motivate the need of static analyses for multilingual applications and its applications which includes, vulnerability detection, program understanding, amongst others.}, language = {en} } @phdthesis{Gruber2025, author = {Gruber, Martin}, title = {Tackling Test Flakiness: Understanding the Problem and Providing Practical Mitigations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15549}, school = {Universit{\"a}t Passau}, pages = {127 Seiten}, year = {2025}, abstract = {"Software is eating the world". With this phrase from his 2011 Wall Street Journal interview, Marc Andreessen predicted a decade of disruptive software-based innovations affecting various industries. Today, over ten years later, many of his predictions have come true: six of the seven most valuable companies worldwide are computer technology firms, and more than half of the world's population has access to the internet and owns a smartphone, with numbers still growing rapidly. The increasing importance of software has also changed software development. To ensure product quality despite high complexity and fast product cycles, software developers started to adopt continuous integration and regression testing practices: each change to an existing system is automatically tested and reverted in case it breaks any existing functionality. As a result, large software projects are conducting millions of test executions each day. One obstacle to such extensive testing are non-deterministic tests that can pass and fail without any changes to the underlying system or the test itself. These tests are commonly referred to as flaky tests. Flaky tests break regression testing, as they cause test failures that are unrelated to the changes that are being tested. Developers are forced to investigate these intermittent failures, wasting their time and decreasing their trust in testing. This thesis presents our research that aims at understanding and mitigating test flakiness. To comprehend the nature of flaky tests, we conducted both code-based studies on open-source projects, as well as a developer survey. All our investigations confirmed that flakiness is a frequently occurring and severe issue. The causes of flakiness, however, depend on the domain of the project and the source of the test: while asynchronous waiting and concurrency are overall the most prevalent causes aside from test order dependencies, Python projects tend to experience more flakiness caused by networking and randomness. Flaky tests that were not written by developers but generated automatically tend to be more often caused by randomness or unspecified behavior. To avoid test flakiness in generated tests, developers can use existing flakiness suppression mechanisms of test generation frameworks, which we found to be effective. In general, however, most developers currently address the issue of test flakiness by rerunning failing tests. Nevertheless, they would like more support when dealing with test flakiness, namely better visualizations, automated detection and debugging techniques, and education on the topic. In response to this feedback, we developed and evaluated a generic flakiness prediction approach, as well as an automated flakiness debugging technique. Our flakiness prediction method is easy to use and widely applicable. In contrast to previous techniques, it avoids any form of static or dynamic analysis. Instead, it relies solely on a test's execution result history and version control information, two commonly available artifacts. Additionally, it aims to classify real-world failures as either caused by flakiness or a regression. Previous techniques mainly focused on identifying potential flaky test cases in test suites, a related but less actionable question. An evaluation on a large-scale automotive software project yielded positive results. Our approach showed a strong predictive performance (95.5\% F1-score), outperforming the previously used heuristic. We also introduced Spectrum-based Flaky Fault Localization (SFFL), an automated debugging technique that aims to pinpoint the specific lines in the source code that cause a flaky test's non-deterministic behavior. SFFL extends traditional Spectrum-based Fault Localization (SFL) by considering multiple coverage behaviors of the same test case, a highly common phenomenon among flaky tests. Our evaluation on 101 flaky Python tests showed that SFFL outperforms traditional SFL and was able to narrow down the flaky fault's location to 3.5\% of a project's code base on average.}, subject = {Softwareentwicklung}, language = {en} } @phdthesis{NorbertoSales2022, author = {Norberto Sales, Juliano Efson}, title = {An Explainable Semantic Parser for End-User Development}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10718}, school = {Universit{\"a}t Passau}, pages = {xvi, 165 Seiten}, year = {2022}, abstract = {Programming is a key skill in a world where businesses are driven by digital transformations. Although many of the programming demand can be addressed by a simple set of instructions composing libraries and services available in the web, non-technical professionals, such as domain experts and analysts, are still unable to construct their own programs due to the intrinsic complexity of coding. Among other types of end-user development, natural language programming has emerged to allow users to program without the formalism of traditional programming languages, where a tailored semantic parser can translate a natural language utterance to a formal command representation able to be processed by a computational machine. Currently, semantic parsers are typically built on the top of a learning method that defines its behaviours based on the patterns behind a large training data, whose production frequently are costly and time-consuming. Our research is devoted to study and propose a semantic parser for natural language commands targeting a scenario with low availability of training data. Our proposed semantic parser follows a multi-component architecture, composed of a specialised shallow parser that associates natural language commands to predicate-argument structures, integrated to a distributional ranking model that matches the command to a function signature available from an API knowledge base. Systems developed with statistical learning models and complex linguistics resources, as the proposed semantic parser, do not provide natively an easy way to associate a single feature from the input data to the impact in system behaviour. In this scenario, end-user explanations for intelligent systems has become a strong requirement to increase user confidence and system literacy. Thus, our research designed an explanation model for the proposed semantic parser that fits the heterogeneity of its multi-component architecture. The explanation model explores a hierarchical representation in an increasing degree of technical depth, providing higher-level explanations in the initial layers, going gradually to those that demand technical knowledge, applying different explanation strategies to better express the approach behind each component. With the support of a user-centred experiment, we compared the utility of different types of explanations and the impact of background knowledge in their preferences.}, language = {en} } @phdthesis{Sui2018, author = {Sui, Zhiyuan}, title = {Security and Privacy Schemes for Demand Response in Smart Grids}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5809}, school = {Universit{\"a}t Passau}, pages = {xx, 153 Seiten}, year = {2018}, abstract = {Smart Grids integrate currently isolated power and communications networks, while introducing several new technologies on the hardware and software sides. One of the most important ingredients is the potential for demand-response programs, which offer the possibility of sending instructions to consumers to adapt their power consumption over a certain period of time. However, high-frequency data collection exposes consumers' usage behaviors, leading to security and privacy challenges for Smart Grids. In this thesis, three cryptographic schemes are constructed for different demand-response programs. In the mandatory incentive-based demand-response program, privacy preservation depends on the power consumption of consumers. An anonymous authentication scheme is constructed for overload auditing and privacy preservation. Consumers' identities are anonymous during normal operation. The operation center defines an acceptable consumption threshold at times of power shortage. Consumers must follow the instruction and curtail their power consumption to meet the threshold. If they do so, the consumers keep their anonymity, while disobedient consumers, whose power consumption exceeds the threshold, can be identified. Security analysis demonstrates that the constructed anonymous authentication scheme is secure in a random oracle model. In the voluntary incentivebased demand-response program, consumers are categorized as either obedient or disobedient consumers according to their consumption curtailment. Consumers utilize a homomorphic encryption algorithm to encrypt their usage and report the ciphertexts to the operation center periodically. At a time of grid instability, the obedient consumers reduce their consumption and prove their curtailment by using a range proof. Both the usage reports and the proofs from obedient consumers concerning their consumption are reported without leaking private information. In order to achieve the real-time requirement, a security model is proposed and a batch verification algorithm is constructed, which is proved to be secure in the defined oracle model. Apart from reward and penalty detection in demand-response programs, theft detection is also an important requirement in Smart Grids. In order to achieve theft detection, this thesis employs the dynamic k-times anonymous authentication and blind signatures to create an efficient theft detection mechanism in the prepaid card system, where consumers pay for their consumption in advance and obtain credentials. A consumer sends the credentials anonymously and obtains corresponding credentials during times of consumption. If a thief tries to send reused credentials to steal electricity, his anonymity will be revoked. Finally, this thesis proves that the proposed mechanism finds the real identities of power thieves, without sacrificing the privacy of honest consumers under the random oracle model.}, subject = {Intelligentes Stromnetz}, language = {en} } @phdthesis{Walsh2024, author = {Walsh, Florian}, title = {Computing the Binomial Part of Polynomial Ideals}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15096}, school = {Universit{\"a}t Passau}, pages = {vi, 131 Seiten}, year = {2024}, abstract = {Given an ideal in a polynomial ring over a field, we present a complete algorithm to compute its binomial part.}, language = {en} } @phdthesis{Paler2014, author = {Paler, Alexandru}, title = {Design Methods for Reliable Quantum Circuits}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2852}, school = {Universit{\"a}t Passau}, pages = {199 S.}, year = {2014}, abstract = {Quantum computing is an emerging technology that has the potential to change the perspectives and applications of computing in general. A wide range of applications are enabled: from faster algorithmic solutions of classically still difficult problems to theoretically more secure communication protocols. A quantum computer uses the quantum mechanical effects of particles or particle-like systems, and a major similarity between quantum and classical computers consists of both being abstracted as information processing machines. Whereas a classical computer operates on classical digital information, the quantum computer processes quantum information, which shares similarities with analog signals. One of the central differences between the two types of information is that classical information is more fault-tolerant when compared to its quantum counterpart. Faults are the result of the quantum systems being interfered by external noise, but during the last decades quantum error correction codes (QECC) were proposed as methods to reduce the effect of noise. Reliable quantum circuits are the result of designing circuits that operate directly on encoded quantum information, but the circuit's reliability is also increased by supplemental redundancies, such as sub-circuit repetitions. Reliable quantum circuits have not been widely used, and one of the major obstacles is their vast associated resource overhead, but recent quantum computing architectures show promising scalabilities. Consequently the number of particles used for computing can be more easily increased, and that the classical control hardware (inherent for quantum computation) is also more reliable. Reliable quantum circuits haev been investigated for almost as long as general quantum computing, but their limited adoption (until recently) has not generated enough interest into their systematic design. The continuously increasing practical relevance of reliability motivates the present thesis to investigate some of the first answers to questions related to the background and the methods forming a reliable quantum circuit design stack. The specifics of quantum circuits are analysed from two perspectives: their probabilistic behaviour and their topological properties when a particular class of QECCs are used. The quantum phenomena, such as entanglement and superposition, are the computational resources used for designing quantum circuits. The discrete nature of classical information is missing for quantum information. An arbitrary quantum system can be in an infinite number of states, which are linear combinations of an exponential number of basis states. Any nontrivial linear combination of more than one basis states is called a state superposition. The effect of superpositions becomes evident when the state of the system is inferred (measured), as measurements are probabilistic with respect to their output: a nontrivial state superposition will collapse to one of the component basis states, and the measurement result is known exactly only after the measurement. A quantum system is, in general, composed from identical subsystems, meaning that a quantum computer (the complete system) operates on multiple similar particles (subsystems). Entanglement expresses the impossibility of separating the state of the subsystems from the state of the complete system: the nontrivial interactions between the subsystems result into a single indivisible state. Entanglement is an additional source of probabilistic behaviour: by measuring the state of a subsystem, the states of the unmeasured subsystems will probabilistically collapse to states from a well defined set of possible states. Superposition and entanglement are the building blocks of quantum information teleportation protocols, which in turn are used in state-of-the-art fault-tolerant quantum computing architectures. Information teleportation implies that the state of a subsystem is moved to a second subsystem without copying any information during the process. The probabilistic approach towards the design of quantum circuits is initiated by the extension of classical test and diagnosis methods. Quantum circuits are modelled similarly to classical circuits by defining gate-lists, and missing quantum gates are modelled by the single missing gate fault. The probabilistic approaches towards quantum circuits are facilitated by comparing these to stochastic circuits, which are a particular type of classical digital circuits. Stochastic circuits can be considered an emulation of analogue computing using digital components. A first proposed design method, based on the direct comparison, is the simulation of quantum circuits using stochastic circuits by mapping each quantum gate to a stochastic computing sub-circuit. The resulting stochastic circuit is compiled and simulated on FPGAs. The obtained results are encouraging and illustrate the capabilities of the proposed simulation technique. However, the exponential number of possible quantum basis states was translated into an exponential number of stochastic computing elements. A second contribution of the thesis is the proposal of test and diagnosis methods for both stochastic and quantum circuits. Existing verification (tomographic) methods of quantum circuits were targeting the reconstruction of the gate-lists. The repeated execution of the quantum circuit was followed by different but specific measurement at the circuit outputs. The similarities between stochastic and quantum circuits motivated the proposal of test and diagnosis methods that use a restricted set of measurement types, which minimise the number of circuit executions. The obtained simulation results show that the proposed validation methods improve the feasibility of quantum circuit tomography for small and medium size circuits. A third contribution of the thesis is the algorithmic formalisation of a problem encountered in teleportation-based quantum computing architectures. The teleportation results are probabilistic and require corrections represented as quantum gates from a particular set. However, there are known commutation properties of these gates with the gates used in the circuit. The corrections are not applied as dynamic gate insertions (during the circuit's execution) into the gate-lists, but their effect is tracked through the circuit, and the corrections are applied only at circuit outputs. The simulation results show that the algorithmic solution is applicable for very large quantum circuits. Topological quantum computing (TQC) is based on a class of fault-tolerant quantum circuits that use the surface code as the underlying QECC. Quantum information is encoded in lattice-like structures and error protection is enabled by the topological properties of the lattice. The 3D structure of the lattice allows TQC computations to be visualised similarly to knot diagrams. Logical information is abstracted as strands and strand interactions (braids) represent logical quantum gates. Therefore, TQC circuits are abstracted using a geometrical description, which allows circuit input-output transformations (correlations) to be represented as geometric sub-structures. TQC design methods were not investigated prior to this work, and the thesis introduces the topological computational model by first analysing the necessary concepts. The proposed TQC design stack follows a top-down approach: an arbitrary quantum circuit is decomposed into the TQC supported gate set; the resulting circuit is mapped to a lattice of appropriate dimensions; relevant resulting topological properties are extracted and expressed using graphs and Boolean formulas. Both circuit representations are novel and applicable to TQC circuit synthesis and validation. Moreover, the Boolean formalism is broadened into a formal mechanism for proving circuit correctness. The thesis introduces TQC circuit synthesis, which is based on a novel logical gate geometric description, whose formal correctness is demonstrated. Two synthesis methods are designed, and both use a general planar representation of the circuit. Initial simulation results demonstrate the practicality and performance of the methods. An additional group of proposed design methods solves the problem of automatic correlation construction. The methods use validity criteria which were introduced and analysed beforehand in the thesis. Input-output correlations existing in the circuit are inferred using both the graph and the Boolean representation. The thesis extends the TQC state-of-the-art by recognising the importance of correlations in the validation process: correlation construction is used as a sub-routine for TQC circuit validation. The presented cross-layer validation procedure is useful when investigating both the QECC and the circuit, while a second proposed method is QECC-independent. Both methods are scalable and applicable even to very large circuits. The thesis completes with the analysis of TQC circuit identities, where the developed Boolean formalism is used. The proofs of former known circuit identities were either missing or complex, and the presented approach reduces the length of the proofs and represents a first step towards standardising them. A new identity is developed and detailed during the process of illustrating the known circuit identities. Reliable quantum circuits are a necessity for quantum computing to become reality, and specialised design methods are required to support the quest for scalable quantum computers. This thesis used a twofold approach towards this target: firstly by focusing on the probabilistic behaviour of quantum circuits, and secondly by considering the requirements of a promising quantum computing architecture, namely TQC. Both approaches resulted in a set of design methods enabling the investigation of reliable quantum circuits. The thesis contributes with the proposal of a new quantum simulation technique, novel and practical test and diagnosis methods for general quantum circuits, the proposal of the TQC design stack and the set of design methods that form the stack. The mapping, synthesis and validation of TQC circuits were developed and evaluated based on a novel and promising formalism that enabled checking circuit correctness. Future work will focus on improving the understanding of TQC circuit identities as it is hoped that these are the key for circuit compaction and optimisation. Improvements to the stochastic circuit simulation technique have the potential of spawning new insights about quantum circuits in general.}, subject = {Quantencomputer}, language = {en} } @phdthesis{Liebig2015, author = {Liebig, J{\"o}rg}, title = {Analysis and Transformation of Configurable Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2996}, school = {Universit{\"a}t Passau}, pages = {160}, year = {2015}, abstract = {Static analysis tools and transformation engines for source code belong to the standard equipment of a software developer. Their use simplifies a developer's everyday work of maintaining and evolving software systems significantly and, hence, accounts for much of a developer's programming efficiency and programming productivity. This is also beneficial from a financial point of view, as programming errors are early detected and avoided in the the development process, thus the use of static analysis tools reduces the overall software-development costs considerably. In practice, software systems are often developed as configurable systems to account for different requirements of application scenarios and use cases. To implement configurable systems, developers often use compile-time implementation techniques, such as preprocessors, by using \#ifdef directives. Configuration options control the inclusion and exclusion of \#ifdef-annotated source code and their selection/deselection serve as an input for generating tailor-made system variants on demand. Existing configurable systems, such as the linux kernel, often provide thousands of configuration options, forming a huge configuration space with billions of system variants. Unfortunately, existing tool support cannot handle the myriads of system variants that can typically be derived from a configurable system. Analysis and transformation tools are not prepared for variability in source code, and, hence, they may process it incorrectly with the result of an incomplete and often broken tool support. We challenge the way configurable systems are analyzed and transformed by introducing variability-aware static analysis tools and a variability-aware transformation engine for configurable systems' development. The main idea of such tool support is to exploit commonalities between system variants, reducing the effort of analyzing and transforming a configurable system. In particular, we develop novel analysis approaches for analyzing the myriads of system variants and compare them to state-of-the-art analysis approaches (namely sampling). The comparison shows that variability-aware analysis is complete (with respect to covering the whole configuration space), efficient (it outperforms some of the sampling heuristics), and scales even to large software systems. We demonstrate that variability-aware analysis is even practical when using it with non-trivial case studies, such as the linux kernel. On top of variability-aware analysis, we develop a transformation engine for C, which respects variability induced by the preprocessor. The engine provides three common refactorings (rename identifier, extract function, and inline function) and overcomes shortcomings (completeness, use of heuristics, and scalability issues) of existing engines, while still being semantics-preserving with respect to all variants and being fast, providing an instantaneous user experience. To validate semantics preservation, we extend a standard testing approach for refactoring engines with variability and show in real-world case studies the effectiveness and scalability of our engine. In the end, our analysis and transformation techniques show that configurable systems can efficiently be analyzed and transformed (even for large-scale systems), providing the same guarantees for configurable systems as for standard systems in terms of detecting and avoiding programming errors.}, subject = {Refactoring}, language = {en} } @phdthesis{Rosenthal2014, author = {Rosenthal, Kristina}, title = {Die Tits-Alternative f{\"u}r eine relevante Klasse endlich pr{\"a}sentierter Gruppen unter besonderer Ber{\"u}cksichtigung computeralgebraischer Aspekte}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2865}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {Eine Gruppe erf{\"u}llt die Tits-Alternative, wenn sie entweder eine nicht-abelsche freie Untergruppe vom Rang 2 enth{\"a}lt oder virtuell aufl{\"o}sbar ist, d. h. eine aufl{\"o}sbare Untergruppe von endlichem Index enth{\"a}lt. Diese Eigenschaft geht auf J. Tits zur{\"u}ck, der sie f{\"u}r endlich erzeugte lineare Gruppen nachwies. Es wird eine relevante Klasse endlich pr{\"a}sentierter Gruppen in Bezug auf die Tits-Alternative untersucht. Die betrachteten Gruppen stellen eine Verallgemeinerung von Pride-Gruppen und der von Vinberg betrachteten Gruppen erzeugt von periodisch gepaarten Relationen f{\"u}r drei Erzeugende dar. Zus{\"a}tzlich treten diese Gruppen als Fundamentalgruppen hyperbolischer Orbifaltigkeiten auf. Es wird der Nachweis der Tits-Alternative unter bestimmten Voraussetzungen an die Pr{\"a}sentierungen der betrachteten Gruppen erbracht. F{\"u}r diesen Nachweis werden verschiedene Methoden angewandt: Es werden zum einen homomorphe Bilder der Gruppen betrachtet. Zum anderen wird die Existenz wesentlicher Darstellungen in eine lineare Gruppe nachgewiesen. Basierend auf diesen Darstellungen kann in vielen F{\"a}llen der Nachweis der Existenz nicht-abelscher freier Untergruppen erbracht werden. Zus{\"a}tzlich wird f{\"u}r den Nachweis der Endlichkeit und damit der Tits-Alternative einiger Gruppen eine Methode angewandt, die auf Berechnungen von Gr{\"o}bner-Basen in nicht-kommutativen Polynomringen basiert. Es wird dabei die Dimension der Gruppenringe betrachtet als Vektorr{\"a}ume berechnet. F{\"u}r die betrachtete Klasse von Gruppen wird f{\"u}r Relationen der Blockl{\"a}nge 1 die Tits-Alternative vollst{\"a}ndig nachgewiesen. Als Folgerung ergibt sich eine Klassifikation der endlichen unter diesen Gruppen.}, subject = {Kombinatorische Gruppentheorie}, language = {de} } @phdthesis{Limam2014, author = {Limam, Lyes}, title = {Usage-Driven Unified Model for User Profile and Data Source Profile Extraction}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2936}, school = {Universit{\"a}t Passau}, pages = {160}, year = {2014}, abstract = {This thesis addresses a problem related to usage analysis in information retrieval systems. Indeed, we exploit the history of search queries as support of analysis to extract a profile model. The objective is to characterize the user and the data source that interact in a system to allow different types of comparison (user-to-user, sourceto- source, user-to-source). According to the study we conducted on the work done on profile model, we concluded that the large majority of the contributions are strongly related to the applications within they are proposed. As a result, the proposed profile models are not reusable and suffer from several weaknesses. For instance, these models do not consider the data source, they lack of semantic mechanisms and they do not deal with scalability (in terms of complexity). Therefore, we propose a generic model of user and data source profiles. The characteristics of this model are the following. First, it is generic, being able to represent both the user and the data source. Second, it enables to construct the profiles in an implicit way based on histories of search queries. Third, it defines the profile as a set of topics of interest, each topic corresponding to a semantic cluster of keywords extracted by a specific clustering algorithm. Finally, the profile is represented according to the vector space model. The model is composed of several components organized in the form of a framework, in which we assessed the complexity of each component. The main components of the framework are: • a method for keyword queries disambiguation • a method for semantically representing search query logs in the form of a taxonomy; • a clustering algorithm that allows fast and efficient identification of topics of interest as semantic clusters of keywords; • a method to identify user and data source profiles according to the generic model. This framework enables in particular to perform various tasks related to usage-based structuration of a distributed environment. As an example of application, the framework is used to the discovery of user communities, and the categorization of data sources. To validate the proposed framework, we conduct a series of experiments on real logs from the search engine AOL search, which demonstrate the efficiency of the disambiguation method in short queries, and show the relation between the quality based clustering and the structure based clustering.}, subject = {Information Retrieval}, language = {en} } @phdthesis{Sonnleitner2022, author = {Sonnleitner, Mathias}, title = {The power of random information for numerical approximation and integration}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11305}, school = {Universit{\"a}t Passau}, pages = {x, 165 Seiten}, year = {2022}, abstract = {This thesis investigates the quality of randomly collected data by employing a framework built on information-based complexity, a field related to the numerical analysis of abstract problems. The quality or power of gathered information is measured by its radius which is the uniform error obtainable by the best possible algorithm using it. The main aim is to present progress towards understanding the power of random information for approximation and integration problems. In the first problem considered, information given by linear functionals is used to recover vectors, in particular from generalized ellipsoids. This is related to the approximation of diagonal operators which are important objects of study in the theory of function spaces. We obtain upper bounds on the radius of random information both in a convex and a quasi-normed setting, which extend and, in some cases, improve existing results. We conjecture and partially establish that the power of random information is subject to a dichotomy determined by the decay of the length of the semiaxes of the generalized ellipsoid. Second, we study multivariate approximation and integration using information given by function values at sampling point sets. We obtain an asymptotic characterization of the radius of information in terms of a geometric measure of equidistribution, the distortion, which is well known in the theory of quantization of measures. This holds for isotropic Sobolev as well as H{\"o}lder and Triebel-Lizorkin spaces on bounded convex domains. We obtain that for these spaces, depending on the parameters involved, typical point sets are either asymptotically optimal or worse by a logarithmic factor, again extending and improving existing results. Further, we study isotropic discrepancy which is related to numerical integration using linear algorithms with equal weights. In particular, we analyze the quality of lattice point sets with respect to this criterion and obtain that they are suboptimal compared to uniform random points. This is in contrast to the approximation of Sobolev functions and resolves an open question raised in the context of a possible low discrepancy construction on the two-dimensional sphere.}, subject = {Komplexit{\"a}t / Algorithmus}, language = {en} } @phdthesis{Jovanovic2015, author = {Jovanovic, Philipp}, title = {Analysis and Design of Symmetric Cryptographic Algorithms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3319}, school = {Universit{\"a}t Passau}, pages = {216}, year = {2015}, abstract = {This doctoral thesis is dedicated to the analysis and the design of symmetric cryptographic algorithms. In the first part of the dissertation, we deal with fault-based attacks on cryptographic circuits which belong to the field of active implementation attacks and aim to retrieve secret keys stored on such chips. Our main focus lies on the cryptanalytic aspects of those attacks. In particular, we target block ciphers with a lightweight and (often) non-bijective key schedule where the derived subkeys are (almost) independent from each other. An attacker who is able to reconstruct one of the subkeys is thus not necessarily able to directly retrieve other subkeys or even the secret master key by simply reversing the key schedule. We introduce a framework based on differential fault analysis that allows to attack block ciphers with an arbitrary number of independent subkeys and which rely on a substitution-permutation network. These methods are then applied to the lightweight block ciphers LED and PRINCE and we show in both cases how to recover the secret master key requiring only a small number of fault injections. Moreover, we investigate approaches that utilize algebraic instead of differential techniques for the fault analysis and discuss advantages and drawbacks. At the end of the first part of the dissertation, we explore fault-based attacks on the block cipher Bel-T which also has a lightweight key schedule but is not based on a substitution-permutation network but instead on the so-called Lai-Massey scheme. The framework mentioned above is thus not usable against Bel-T. Nevertheless, we also present techniques for the case of Bel-T that enable full recovery of the secret key in a very efficient way using differential fault analysis. In the second part of the thesis, we focus on authenticated encryption schemes. While regular ciphers only protect privacy of processed data, authenticated encryption schemes also secure its authenticity and integrity. Many of these ciphers are additionally able to protect authenticity and integrity of so-called associated data. This type of data is transmitted unencrypted but nevertheless must be protected from being tampered with during transmission. Authenticated encryption is nowadays the standard technique to protect in-transit data. However, most of the currently deployed schemes have deficits and there are many leverage points for improvements. With NORX we introduce a novel authenticated encryption scheme supporting associated data. This algorithm was designed with high security, efficiency in both hardware and software, simplicity, and robustness against side-channel attacks in mind. Next to its specification, we present special features, security goals, implementation details, extensive performance measurements and discuss advantages over currently deployed standards. Finally, we describe our preliminary security analysis where we investigate differential and rotational properties of NORX. Noteworthy are in particular the newly developed techniques for differential cryptanalysis of NORX which exploit the power of SAT- and SMT-solvers and have the potential to be easily adaptable to other encryption schemes as well.}, subject = {Kryptologie}, language = {en} } @phdthesis{Le2015, author = {Le, Ngoc Long}, title = {Various Differents for 0-Dimensional Schemes and Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3386}, school = {Universit{\"a}t Passau}, pages = {205}, year = {2015}, abstract = {This thesis attempts to investigate the Noether, Dedekind, and K{\"a}hler differents for a 0-dimensional scheme X in the projective n-space P^n_K over an arbitrary field K. In particular, we focus on studying the relations between the algebraic structure of these differents and geometric properties of the scheme X. In Chapter 1 we give an outline to the problems this thesis is concerned with, a brief literature review for each problem, and the main results regarding these problems. Chapter 2 contains background results that we will need in the subsequent chapters. We introduce the concept of maximal p_j-subschemes of a 0-dimensional scheme X and give some descriptions of them and their Hilbert functions. Furthermore, we generalize the notion of a separator of a subscheme of X of degree deg(X)-1 to a set of separators of a maximal p_j-subscheme of X. In Chapter 3 we explore the Noether, Dedekind, and K{\"a}hler differents for 0-dimensional schemes X. First we define these differents for X, and take a look at how to compute these differents and examine their relations. Then we give an answer to the question "What are the Hilbert functions of these differents?" in some cases. In Chapter 4 we use the differents to investigate the Cayley-Bacharach property of 0-dimensional schemes over an arbitrary field K. The principal results of this chapter are characterizations of CB-schemes and of arithmetically Gorenstein schemes in terms of their Dedekind differents and a criterion for a 0-dimensional smooth scheme to be a complete intersection. We also generalize some results such as Dedekind's formula and the characterization of the Cayley-Bacharach property by using Liaison theory. In addition, several propositions on the uniformities are proven. In Chapter 5 we are interested in studying the Noether, Dedekind, and K{\"a}hler differents for finite special classes of schemes and finding out some applications of these differents. First, we investigate these differents for reduced 0-dimensional almost complete intersections X in P^n_K over a perfect field K. Then we investigate the relationships between these differents and the i-th Fitting ideals of the module of K{\"a}hler differentials of the homogeneous coordinate ring of X. Finally, we look more closely at the Hilbert functions and the regularity indices of these differents for fat point schemes.}, subject = {Kommutativer Ring}, language = {en} } @phdthesis{vonRhein2016, author = {von Rhein, Alexander}, title = {Analysis Strategies for Configurable Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3682}, school = {Universit{\"a}t Passau}, pages = {xiii, 229 Seiten}, year = {2016}, abstract = {A configurable system enables users to derive individual system variants based on a selection of configuration options. To cope with the often huge number of possible configurations, several analysis approaches (e.g., for verification of configurable systems) implement different strategies to account for configurability. One popular strategy—often applied in practice—is to use sampling (i.e.,analyzing only a subset of all system variants). While sampling reduces the analysis effort significantly, the information obtained is necessarily incomplete as some variants are not analyzed. A second strategy is to identify the common parts and the variable parts of a configurable system and analyze each part separately (called feature-based strategy). As a third strategy, researchers have begun to develop family-based analyses. Family-based approaches analyze the code base of a configurable system as a whole, rather than the individual variants or parts of the system, this way exploiting similarities among individual variants to reduce analysis effort. Each of these three strategies has advantages and disadvantages, which might even prevent its application (e.g., the family-based strategy typically needs much main memory). The goal of this thesis is to enable the efficient analysis of configuable systems, even if existing strategies fail (e.g., the family-based strategy, because of memory limitations). To this end, we designed a framework that models the key aspects of configurable-system analysis strategies, independent of their implementation and of the analyses techniques (e.g., type checking or model checking). Guided by our model, we developed a number of analysis strategies for configurable systems. To learn about advantages and disadvantages of individual strategies, we compared these in a series of empirical studies. In particular, we developed and evaluated a model-checking analysis and a data-flow analysis for configurable systems. One of our key findings is that family-based analysis outperforms most sampling heuristics with respect to analysis time, while being able to make definite statements about all variants of a configurable system. Furthermore, we identified advantages and disadvantages of analysis strategies and how to mitigate them by combining strategies. In our endeavor, we identified two key problems that are common to configurable-system analyses, and we developed supporting techniques to solve them. These techniques are general and are applicable beyond our research. In particular, we developed presence-condition simplification and variability encoding. Presence-condition simplification provides a simple method to reduce the size of the output or the internal data structure of configurable-systemanalyses. Variability encoding provides a means for transforming compile-time variability to run-time variability, which enables many family-based analyses. Our key contributions are the model of analysis strategies for configurable systems and the corresponding empirical comparisons of strategies. Our findings are backed by empirical studies, which helped broaden the community knowledge on analyses of configurable systems (indicated by citations). For these evaluations, we prepared several subject systems, which have also been used already by other researchers. Furthermore, we developed several analysis tools and demonstrated their feasibility in practical application scenarios based on code from, for example, the Linux kernel. Our tools are based on variability-aware optimizations that enable levels of scalability on configurable systems that were not possible with other tools before.}, subject = {Software Engineering}, language = {en} } @phdthesis{Lehner2016, author = {Lehner, Sabrina}, title = {The Asymptotic Behaviour of the Riemann Mapping Function at Analytic Cusps}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3587}, school = {Universit{\"a}t Passau}, pages = {97}, year = {2016}, abstract = {The well-known Riemann Mapping Theorem states the existence of a conformal map of a simply connected proper domain of the complex plane onto the upper half plane. One of the main topics in geometric function theory is to investigate the behaviour of the mapping functions at the boundary of such domains. In this work, we always assume that a piecewise analytic boundary is given. Hereby, we have to distinguish regular and singular boundary points. While the asymptotic behaviour for regular boundary points can be investigated by using the Schwarz Reflection at analytic arcs, the situation for singular boundary points is far more complicated. In the latter scenario two cases have to be differentiated: analytic corners and analytic cusps. The first part of the thesis deals with the asymptotic behaviour at analytic corners where the opening angle is greater than 0. The results of Lichtenstein and Warschawski on the asymptotic behaviour of the Riemann map and its derivatives at an analytic corner are presented as well as the much stronger result of Lehman that the mapping function can be developed in a certain generalised power series which in turn enables to examine the o-minimal content of the Riemann Mapping Theorem. To obtain a similar statement for domains with analytic cusps, it is necessary to investigate the asymptotic behaviour of a Riemann map at the cusp and based on this result to determine the asymptotic power series expansion. Therefore, the aim of the second part of this work is to investigate the asymptotic behaviour of a Riemann map at an analytic cusp. A simply connected domain has an analytic cusp if the boundary is locally given by two analytic arcs such that the interior angle vanishes. Besides the asymptotic behaviour of the mapping function, the behaviour of its derivatives, its inverse, and the derivatives of the inverse are analysed. Finally, we present a conjecture on the asymptotic power series expansion of the mapping function at an analytic cusp.}, subject = {Geometrische Funktionentheorie}, language = {en} } @unpublished{ZwicklbauerSeifertGranitzer2016, author = {Zwicklbauer, Stefan and Seifert, Christin and Granitzer, Michael}, title = {DoSeR - A Knowledge-Base-Agnostic Framework for Entity Disambiguation Using Semantic Embeddings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3670}, year = {2016}, abstract = {Entity disambiguation is the task of mapping ambiguous terms in natural-language text to its entities in a knowledge base. It finds its application in the extraction of structured data in RDF (Resource Description Framework) from textual documents, but equally so in facilitating artificial intelligence applications, such as Semantic Search, Reasoning and Question \& Answering. In this work, we propose DoSeR (Disambiguation of Semantic Resources), a (named) entity disambiguation framework that is knowledge-base-agnostic in terms of RDF (e.g. DBpedia) and entity-annotated document knowledge bases (e.g. Wikipedia). Initially, our framework automatically generates semantic entity embeddings given one or multiple knowledge bases. In the following, DoSeR accepts documents with a given set of surface forms as input and collectively links them to an entity in a knowledge base with a graph-based approach. We evaluate DoSeR on seven different data sets against publicly available, state-of-the-art (named) entity disambiguation frameworks. Our approach outperforms the state-of-the-art approaches that make use of RDF knowledge bases and/or entity-annotated document knowledge bases by up to 10\% F1 measure.}, language = {de} } @phdthesis{Poehls2018, author = {P{\"o}hls, Henrich C.}, title = {Increasing the Legal Probative Value of Cryptographically Private Malleable Signatures}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5823}, school = {Universit{\"a}t Passau}, pages = {540, LXXVIII Seiten}, year = {2018}, abstract = {Die Arbeit befasst sich mit der Erarbeitung von technischen Vorgaben und deren Umsetzung in kryptographisch sichere Verfahren von datenschutzfreundlichen, veränderbaren digitalen Signaturverfahren (private malleable signature schemes oder MSS) zur Erlangung möglichst hoher rechtlicher Evidenz. Im Recht werden bestimmte kryptographische Algorithmen, Schl{\"u}ssellängen und deren korrekte organisatorische Anwendungen zur Erzeugung elektronisch signierter Dokumente als rechtssicher eingestuft. Dies kann zu einer Beweiserleichterung mithilfe signierter Dokumente f{\"u}hren. So gelten nach Verordnung (EU) Nr. 910/2014 (eIDAS) qualifiziert signierte elektronische Dokumente entweder als Anscheinsbeweis der Echtheit oder ihnen wird gar eine gesetzliche Vermutung der Echtheit zuteil. Gesetzlich anerkannte technische Verfahren, die einen solch erhöhten Beweiswert erreichen, erf{\"u}llen mithilfe von Kryptographie im wesentlichen zwei Eigenschaften: Integritätsschutz (integrity), also die Erkennung der Abwesenheit von unerw{\"u}nschten Änderungen und die Zurechenbarkeit des unveränderten Dokumentes zum Signaturersteller (accountability). Hingegen ist der größte Vorteil veränderbarer digitaler Signaturverfahren (MSS) die „privacy" genannte Eigenschaft: Eine autorisierte Änderung verbirgt den vorherigen Inhalt. Des Weiteren bleibt die Signatur solange valide wie ausschliesslich autorisierte Änderungen vorgenommen werden. Wird diese Eigenschaft kryptographisch nachweislich sicher erf{\"u}llt, so spricht man von einem private malleable signature scheme. In der Arbeit werden zwei verbreitete Formen, die sogenannten redactable signature schemes (RSS) und die sanitizable signature schemes (SSS), eingehend betrachtet. Diese erlauben vielfältige Einsatzmöglichkeiten, zum Beispiel eine autorisierte spätere Veränderung zur Wahrung von Geschäftsgeheimnissen oder zum Datenschutz: Der Unterzeichner delegiert so beispielsweise {\"u}ber ein private redactable signature scheme nur das nachträgliche Schwärzen (redaction). Dies schränkt die Veränderbarkeit auf das Entfernen von Informationen ein, erlaubt aber wirksam die Wahrung des Datenschutzes oder den Schutz von (Geschäfts)geheimnissen indem diese Informationen irreversibel f{\"u}r Angreifer entfernt werden. Die kryptographische privacy Eigenschaft besagt, dass es nun nicht mehr effizient möglich ist, aus dem geschwärzten Dokument Wissen {\"u}ber die geschwärzten Informationen zu erlangen, auch und gerade nicht f{\"u}r den Signaturpr{\"u}fer. Die Arbeit geht im Kern der Frage nach, ob ein MSS sowohl die kryptographische Eigenschaft „privacy" als auch gleichzeitig die Eigenschaften „integrity" und „accountability" mit ausreichend hohen Sicherheitsniveaus erf{\"u}llen kann. Das Ziel ist es, dass ein MSS gleichzeitig ein solch ausreichend hohen Grad an Sicherheit erf{\"u}llt, dass (1) die autorisierten nachträglichen Änderungen zum Schutze von Geschäftsgeheimnissen oder personenbezogenen Daten eingesetzt werden können, und dass (2) dem Dokument, welches mit dem speziellen Signaturverfahren signiert wurde, ein erhöhter Beweiswert beigemessen werden kann. In Bezug auf letzteres stellt die Arbeit sowohl die technischen Vorgaben, welche f{\"u}r qualifizierte elektronische Signaturen (nach Verordnung (EU) Nr. 910/2014) gelten, in Bezug auf die nachträgliche Änderbarkeit dar, als auch konkrete kryptographische Eigenschaften und Verfahren um diese Vorgaben kryptographisch beweisbar zu erreichen. Insbesondere weisen veränderbare Signaturen (MSS) einen anderen Integritätsschutz als traditionelle digitale Signaturen auf: Eine signierte Nachricht darf nachträglich durch eine definierte dritte Partei in einer definierten Art modifiziert werden. Diese sogenannte autorisierte Änderung (authorized modification) kann auch ohne Kenntnis des geheimen Signaturschl{\"u}ssels des Unterzeichners durchgef{\"u}hrt werden. Bei der Verifikation der digitalen Signatur durch den Signaturpr{\"u}fer bleibt der urspr{\"u}ngliche Signierende und dessen Einwilligung zur autorisierten Änderung kryptographisch verifizierbar, auch wenn autorisierte Änderungen vorgenommen wurden. Die Arbeit umfasst folgende Bereiche: 1. Analyse der Rechtsvorgaben zur Ermittlung der rechtlich relevanten technischen Anforderungen hinsichtlich des geforderten Integritätsschutzes (integrity protection) und hinsichtlich des Schutzes von personenbezogenen Daten und (Geschäfts)geheimnissen (privacy protection), 2. Definition eines geeigneten Integritäts-Begriffes zur Beschreibung der Schutzfunktion von existierenden malleable signatures und bereits rechtlich anerkannten Signaturverfahren, 3. Harmonisierung und Analyse der kryptographischen Eigenschaften existierender malleable signature Verfahren in Hinblick auf die rechtlichen Anforderungen, 4. Entwicklung neuer und beweisbar sicherer kryptographischer Verfahren, 5. abschließende Bewertung des rechtlichen Beweiswertes (probative value) und des Datenschutzniveaus anhand der technischen Umsetzung der rechtlichen Anforderungen. Die Arbeit kommt zu dem Ergebnis, dass zunächst einmal jedwede (autorisierte wie auch unautorisierte) Änderung von einem kryptographisch sicheren malleable signature Verfahren (MSS) ebenfalls erkannt werden muss um Konformität mit Verordnung (EU) Nr. 910/2014 (eIDAS) zu erlangen. Eine solche Änderungserkennung durch die der Signaturpr{\"u}fer, ohne Zuhilfe weiterer Parteien oder Geheimnisse, die Abwesenheit von autorisierten und unautorisierten Änderungen erkennt wurde im Rahmen der Arbeit entwickelt (non-interactive public accountability (PUB)). Diese neue kryptographische Eigenschaft wurde veröffentlicht und bereits von Arbeiten Anderer aufgegriffen. Des Weiteren werden neue kryptographische Eigenschaften und redactable signature und sanitizable signature Verfahren vorgestellt, welche zusätzlich zu dieser Änderungerkennung einen starken Schutz gegen die Aufdeckung des Orginals ermöglichen. Werden geeignete Eigenschaften erf{\"u}llt so wird f{\"u}r bestimmte Fälle ein technisches Schutzniveau erzielt, welches mit klassischen Signaturen vergleichbar ist. Damit lässt sich die Kernfrage positiv beantworten: Private MSS können ein Integritätsschutzniveau erreichen, welches dem rechtlich anerkannter digitaler Signaturen technisch entspricht, aber dennoch nachträgliche Änderungen autorisieren kann, welche einen starken Schutz gegen die Wiederherstellung des Orginals ermöglichen.}, subject = {Integrit{\"a}t}, language = {en} } @phdthesis{Woelfl2018, author = {W{\"o}lfl, Andreas}, title = {Data Management in Certified Avionics Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5758}, school = {Universit{\"a}t Passau}, pages = {xi, 177 Seiten}, year = {2018}, abstract = {Data management is a cornerstone for any kind of information system - including the aerospace and aviation sector. In contrast to conventional domains, software development in the avionics domain must adhere to a legally binding certification process, called qualification. The success of the process depends on compliance with international standards, such as DO-178: Software Considerations in Airborne Systems and Equipment Certification. From a software developer's perspective, challenges arise in terms of methods and tools. Techniques that have a potential impact on the deterministic and predictable execution of avionics software are prohibited. The objective of this thesis' research is to develop a scalable method to realize data-management for multi-variant avionics software under the restrictions and constraints of the domain. Since avionics software faces very long-term life-cycles (up to 75 years), a particular focus is being placed on maintenance and evolution. Based on the insights gained in a semi-structured interview at Airbus Helicopters, industrial established approaches to implement qualified avionics software are assessed at first and compared with respect to strengths and weaknesses for data-management afterwards. As a result, a novel development approach is proposed, combining model-based techniques and product-line technology to derive the source code of highly specific data-management variants, as well as the majority of assets required for the qualification process, from a declarative system specification. In order to demonstrate the practicability of the approach in industry, a framework is presented that is deployed and applied at Airbus Helicopters to generate qualifiable data-management components for the variants of the NH90 helicopter. The maintainability is shown by means of a domain-specific optimization, in which the model-based and generative approach is used to establish safe memory overlays at compile-time. Key findings reveal a substantially reduced memory footprint (29,1\% in case of a real-world scenario), as well as an significantly facilitated implementation process, which would not be accomplishable using conventional methods for software development in the avionics domain.}, subject = {Avionik}, language = {en} } @phdthesis{Stenzer2018, author = {Stenzer, Alexander}, title = {Ein Ansatz zur semantik-basierten Anfragerelaxation f{\"u}r hierarchische Strukturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5746}, school = {Universit{\"a}t Passau}, pages = {vii, 225 Seiten}, year = {2018}, abstract = {F{\"u}r Monumentalbauten als Teil unseres Kulturgutes im Speziellen als auch f{\"u}r Geb{\"a}ude im Allgemeinen, wurden im Rahmen des MonArch- rojektes verschiedene Methoden zur digitalen Speicherung von Informationen {\"u}ber Monumentalbauten erforscht. Das daraus entstandene MonArch-System ist f{\"u}r die Dokumentation von Monumentalbauten verwendbar und speichert das digitale Modell des Bauwerks in einer relationalen Datenbank. Das digitale Modell des Bauwerks entsteht durch eine Segmentierung in Geb{\"a}udeteile, die dann in einer Strukturhierarchie zusammengefasst werden k{\"o}nnen. Als Strukturhierarchie versteht man in diesem Zusammenhang eine Hierarchie von Geb{\"a}udeteilen, die in einer Teil-von-Beziehung stehen. Die Strukturhierarchie erlaubt es Informationen z.B. Dokumente mit einem r{\"a}umlichen Bezug auszuzeichnen. Zus{\"a}tzlich wird eine Themenhierarchie unterst{\"u}tzt, die es erlaubt Informationen thematisch mit Begriffen zu beschreiben. Betrachtet man r{\"a}umliche und thematische Anfragen in vernetzten MonArch-Systemen, in denen sich mehrere Geb{\"a}udearchive zusammenschließen, ist diese starke Bindung der Information an die einzigartige Struktur jedes Geb{\"a}udes ein Hindernis f{\"u}r ein einfaches Verfahren zur r{\"a}umlichen Suche. Da sich jedes Geb{\"a}ude in seinem speziellen strukturellen und r{\"a}umlichen Aufbau unterscheidet, liefert eine r{\"a}umliche Anfrage, die speziell auf diese Eigenheiten eines Geb{\"a}udes ausgerichtet ist, f{\"u}r andere Geb{\"a}ude keine Suchergebnisse. F{\"u}r thematische Anfragen stellen nicht kompatible Themenhierarchien ein Hindernis dar, die eine {\"u}bergreifende thematische Anfrage verhindern. Die gr{\"o}ßte Herausforderung ist es, Struktur- und Themenhierarchien aufeinander abzubilden. Zur L{\"o}sung des geschilderten Problems wird in vernetzten Informationssystemen auf eine geeignete Transformation der urspr{\"u}nglichen Anfrage zur{\"u}ckgegriffen, um den Anfragefokus zu erweitern (Relaxation) oder eine Anpassung an die Gegebenheiten des entfernten Informationssystems zu erreichen (Transformation). Das Anfragetransformations- und -relaxationsverfahren, das in dieser Arbeit vorgestellt wird, nutzt eine Generalisierungsbeziehung aus, um ausgehend von einer Anfrage an eine spezielle Struktur- und Themenhierarchie eine automatische Transformation der Anfrage durchzuf{\"u}hren. Bei Themenhierarchien sind gemeinsame Oberthemen ein Ansatzpunkt. Bei Strukturhierarchien k{\"o}nnen Typinformationen zu Geb{\"a}udeteilen die Generalisierungsbeziehung darstellen. Die transformierte und dadurch relaxierte Anfrage kann dann an ein Netzwerk von MonArch-Systemen gestellt werden, ohne dass eine manuelle Auswahl der Geb{\"a}udeteile in anderen Strukturhierarchien oder eine angepasste Themenauswahl erfolgen muss. Dazu muss die Strukturhierarchie der anderen Geb{\"a}ude im Netzwerk von MonArch-Systemen nicht bekannt sein. Im Rahmen der vorliegenden Arbeit werden verschiedene Relaxationsverfahren, z.B. ein angepasstes Spreading-Activation-Verfahren, zur automatischen Anfragetransformation von r{\"a}umlichen und thematischen Anfragen vorgestellt, mit dem Ziel eine vollst{\"a}ndige Abbildung zwischen den Strukturhierarchien von Geb{\"a}uden und Themenhierarchien zu vermeiden. Erreicht wird das Ziel durch eine Erweiterung des MonArch-Datenmodells und eine Verallgemeinerung der MonArch-Anfragen, die eine Anfragetransformation zum Anfragezeitpunkt erlauben.}, subject = {Abfragesprache}, language = {de} } @phdthesis{LoperaGonzalez2018, author = {Lopera Gonzalez, Luis Ignacio}, title = {Mining Functional and Structural Relationships of Context Variables in Smart-Buildings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5737}, school = {Universit{\"a}t Passau}, pages = {viii, 99 Seiten}, year = {2018}, abstract = {The Internet of Things (IoT) is a network of computational services, devices, and people, which share information with each other. In IoT, inter-system communication is possible and human interaction is not required. IoT devices are penetrating the home and office building environments. According to current estimates, about 35 billion IoT devices will be connected by the year 20212. In the IoT business model, value comes from integrating devices into applications, e.g., home and office automation. In general, an IoT application associates different information sources with actions which can modify the environment, e.g., change the room's temperature, inform a person, e.g., send an e-mail, or activate other services, e.g., buy milk on-line. In this thesis, we focus on the commissioning and verification processes of IoT devices used in building automation applications. Within a building's lifespan, new devices are added, interior spaces are refurbished, and faulty devices are replaced. All of these changes are currently made manually. Furthermore, consider that a context-aware Building Management System (BMS) is an IoT application, which measures direct-context from the building's sensors to characterize environmental conditions, user locations, and state. Additionally, a BMS combines sensor information to derive inferred-context, such as user activity. Similar to IoT devices, inferred-context instances have to be created manually. As the number of devices and inferred-context instances increases, keeping track of all associations becomes a time-consuming and error-prone task. The hypothesis of the thesis is that users who interact with the building create use-patterns in the data, which describe functional relations between devices and inferred-context instances, e.g., which desk-movement sensor is used to infer desk-presence and controls which overhead light; additionally, use-patterns can also provide structural relations, e.g., the relative position of spatial sensors. To test the hypothesis, this thesis presents an extension to the new IoT class rule programming paradigm, which simplifies rule creation based on classes. The proposed extension uses a semantic compiler to simplify the device and inferred-context associations. Using direct-context information and template classes, the compiler creates all possible inferredcontext instances. Buildings using context-aware BMSs will have a dynamic response to user behaviour, e.g., required illumination for computer-work is provided by adjusting blinds or increasing the dim setting of overhead ceiling lamps. We propose a rule mining framework to extract use-patterns and find the functional and structural relationships between devices. The rule mining framework uses three stages: (1) event extraction, (2) rule mining, (3) structure creation. The event extraction combines the building's data into a time-series of device events. Then, in the rule mining stage, rules are mined from the time series, where we use the established algorithm temporal interval tree association rule learner. Additionally, we proposed a rule extraction algorithm for spatial sensor's data. The algorithm is based on statistical analysis of user transition times between adjacent sensors. We also introduce a new rule extraction algorithm based on increasing belief. In the last stage, structure creation uses the extracted rules to produce device association groups, hierarchical representation of the building, or the relative location of spatial sensors. The proposed algorithms were tested using a year-long installation in a living-lab consisting of a four-person office, a 12-person open office, and a meeting room. For the spatial sensors, four locations within public buildings were used: a meeting room, a hallway, T-crossing, and a foyer. The recording times range from two weeks to two months depending on scenario complexity. We found that user-generated patterns appear in building data. The rule mining framework produced structures that represent functional and spatial relationships of building's devices and provide sufficient information to automate maintenance tasks, e.g., automatic device naming. Furthermore, we found that environmental changes are also a source of device data patterns, which provide additional associations. For example, using the framework we found the fa{\c{c}}ade group for exterior light sensors. The fa{\c{c}}ade group can be used to automatically find an alternative signal source to replace broken outdoor light sensors. Finally, the rule mining framework successfully retrieved the relative location of spatial sensors in all locations but the foyer.}, subject = {Internet der Dinge}, language = {en} } @article{KronawitterLengauer2018, author = {Kronawitter, Stefan and Lengauer, Christian}, title = {Polyhedral Search Space Exploration in the ExaStencils Code Generator}, series = {ACM Transactions on Architecture and Code Optimization}, volume = {15}, journal = {ACM Transactions on Architecture and Code Optimization}, number = {4}, issn = {1544-3973}, doi = {10.1145/3274653}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5778}, year = {2018}, abstract = {Performance optimization of stencil codes requires data locality improvements. The polyhedron model for loop transformation is well suited for such optimizations with established techniques, such as the PLuTo algorithm and diamond tiling. However, in the domain of our project ExaStencils, stencil codes, it fails to yield optimal results. As an alternative, we propose a new, optimized, multi-dimensional polyhedral search space exploration and demonstrate its effectiveness: we obtain better results than existing approaches in several cases. We also propose how to specialize the search for the domain of stencil codes, which dramatically reduces the exploration effort without significantly impairing performance.}, language = {en} } @inproceedings{ParraRodriguezPosegga2018, author = {Parra Rodriguez, Juan D. and Posegga, Joachim}, title = {RAPID: Resource and API-Based Detection Against In-Browser Miners}, series = {Proceedings of the 34th Annual Computer Security Applications Conference}, booktitle = {Proceedings of the 34th Annual Computer Security Applications Conference}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-6569-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6550}, pages = {[14] Seiten}, year = {2018}, abstract = {Direct access to the system's resources such as the GPU, persistent storage and networking has enabled in-browser crypto-mining. Thus, there has been a massive response by rogue actors who abuse browsers for mining without the user's consent. This trend has grown steadily for the last months until this practice, i.e., CryptoJacking, has been acknowledged as the number one security threat by several antivirus companies. Considering this, and the fact that these attacks do not behave as JavaScript malware or other Web attacks, we propose and evaluate several approaches to detect in-browser mining. To this end, we collect information from the top 330.500 Alexa sites. Mainly, we used real-life browsers to visit sites while monitoring resource-related API calls and the browser's resource consumption, e.g., CPU. Our detection mechanisms are based on dynamic monitoring, so they are resistant to JavaScript obfuscation. Furthermore, our detection techniques can generalize well and classify previously unseen samples with up to 99.99\\% precision and recall for the benign class and up to 96\\% precision and recall for the mining class. These results demonstrate the applicability of detection mechanisms as a server-side approach, e.g., to support the enhancement of existing blacklists. Last but not least, we evaluated the feasibility of deploying prototypical implementations of some detection mechanisms directly on the browser. Specifically, we measured the impact of in-browser API monitoring on page-loading time and performed micro-benchmarks for the execution of some classifiers directly within the browser. In this regard, we ascertain that, even though there are engineering challenges to overcome, it is feasible and beneficial for users to bring the mining detection to the browser.}, language = {en} } @inproceedings{ParraRodriguezSchrecklingPosegga2017, author = {Parra Rodriguez, Juan D. and Schreckling, Daniel and Posegga, Joachim}, title = {Addressing Data-Centric Security Requirements for IoT-Based Systems}, series = {2016 International Workshop on Secure Internet of Things (SIoT)}, booktitle = {2016 International Workshop on Secure Internet of Things (SIoT)}, publisher = {IEEE Xplore}, address = {Heraklion, Greece}, isbn = {978-1-5090-5091-8}, doi = {10.1109/SIoT.2016.007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6546}, pages = {[10] Seiten}, year = {2017}, abstract = {Allowing users to control access to their data is paramount for the success of the Internet of Things; therefore, it is imperative to ensure it, even when data has left the users' control, e.g. shared with cloud infrastructure. Consequently, we propose several state of the art mechanisms from the security and privacy research fields to cope with this requirement. To illustrate how each mechanism can be applied, we derive a data-centric architecture providing access control and privacy guaranties for the users of IoT-based applications. Moreover, we discuss the limitations and challenges related to applying the selected mechanisms to ensure access control remotely. Also, we validate our architecture by showing how it empowers users to control access to their health data in a quantified self use case.}, language = {en} } @inproceedings{ParraRodriguezPosegga2018, author = {Parra Rodriguez, Juan D. and Posegga, Joachim}, title = {Local Storage on Steroids: Abusing Web Browsers for Hidden Content Storage and Distribution}, series = {International Conference on Security and Privacy in Communication Systems}, booktitle = {International Conference on Security and Privacy in Communication Systems}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-01704-0}, doi = {10.1007/978-3-030-01704-0_19}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6572}, pages = {20 Seiten}, year = {2018}, abstract = {Analysing security assumptions taken for the WebRTC and postMessage APIs led us to find a novel attack abusing the browsers' persistent storage capabilities. The presented attack can be executed without the website's visitor knowledge, and it requires neither browser vulnerabilities nor additional software on the browser's side. To exemplify this, we study how can an attacker use browsers to create a network for persistent storage and distribution of arbitrary data. In our proof of concept, the total storage of the network, and therefore the space used within each browser, grows linearly with the number of origins delivering the malicious JavaScript code. Further, data transfers between browsers are not restricted by the Same Origin Policy, which allows for a unified cross-origin browser network, regardless of the origin from which the script executing the functionality is loaded from. In the course of our work, we assess the feasibility of a real-life deployment of the network by running experiments using Linux containers and browser automation tools. Moreover, we show how security mechanisms against third-party tracking, cross-site scripting and click-jacking can diminish the attack's impact, or even prevent it.}, language = {en} } @inproceedings{ParraRodriguezPosegga2017, author = {Parra Rodriguez, Juan D. and Posegga, Joachim}, title = {CSP \& Co. Can Save Us from a Rogue Cross-Origin Storage Browser Network! But for How Long?}, series = {Proceedings of the Eighth ACM Conference on Data and Application Security and Privacy}, booktitle = {Proceedings of the Eighth ACM Conference on Data and Application Security and Privacy}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-5632-9}, doi = {10.1145/3176258.3176951}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6561}, pages = {3 Seiten}, year = {2017}, abstract = {We introduce a new browser abuse scenario where an attacker uses local storage capabilities without the website's visitor knowledge to create a network of browsers for persistent storage and distribution of arbitrary data. We describe how security-aware users can use mechanisms such as the Content Security Policy (CSP), sandboxing, and third-party tracking protection, i.e., CSP \& Company, to limit the network's effectiveness. From another point of view, we also show that the upcoming Suborigin standard can inadvertently thwart existing countermeasures, if it is adopted.}, language = {en} } @phdthesis{Ganser2019, author = {Ganser, Stefan}, title = {Iterative Schedule Optimization for Parallelization in the Polyhedron Model}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7936}, school = {Universit{\"a}t Passau}, pages = {xvii, 176 Seiten}, year = {2019}, abstract = {In high-performance computing, one primary objective is to exploit the performance that the given target hardware can deliver to the fullest. Compilers that have the ability to automatically optimize programs for a specific target hardware can be highly useful in this context. Iterative (or search-based) compilation requires little or no prior knowledge and can adapt more easily to concrete programs and target hardware than static cost models and heuristics. Thereby, iterative compilation helps in situations in which static heuristics do not reflect the combination of input program and target hardware well. Moreover, iterative compilation may enable the derivation of more accurate cost models and heuristics for optimizing compilers. In this context, the polyhedron model is of help as it provides not only a mathematical representation of programs but, more importantly, a uniform representation of complex sequences of program transformations by schedule functions. The latter facilitates the systematic exploration of the set of legal transformations of a given program. Early approaches to purely iterative schedule optimization in the polyhedron model do not limit their search to schedules that preserve program semantics and, thereby, suffer from the need to explore numbers of illegal schedules. More recent research ensures the legality of program transformations but presumes a sequential rather than a parallel execution of the transformed program. Other approaches do not perform a purely iterative optimization. We propose an approach to iterative schedule optimization for parallelization and tiling in the polyhedron model. Our approach targets loop programs that profit from data locality optimization and coarse-grained loop parallelization. The schedule search space can be explored either randomly or by means of a genetic algorithm. To determine a schedule's profitability, we rely primarily on measuring the transformed code's execution time. While benchmarking is accurate, it increases the time and resource consumption of program optimization tremendously and can even make it impractical. We address this limitation by proposing to learn surrogate models from schedules generated and evaluated in previous runs of the iterative optimization and to replace benchmarking by performance prediction to the extent possible. Our evaluation on the PolyBench 4.1 benchmark set reveals that, in a given setting, iterative schedule optimization yields significantly higher speedups in the execution of the program to be optimized. Surrogate performance models learned from training data that was generated during previous iterative optimizations can reduce the benchmarking effort without strongly impairing the optimization result. A prerequisite for this approach is a sufficient similarity between the training programs and the program to be optimized.}, subject = {Parallelrechner}, language = {en} } @phdthesis{Hatzesberger2020, author = {Hatzesberger, Simon}, title = {Strongly Asymptotically Optimal Methods for the Pathwise Global Approximation of Stochastic Differential Equations with Coefficients of Super-linear Growth}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8100}, school = {Universit{\"a}t Passau}, pages = {ii, 116 Seiten}, year = {2020}, abstract = {Our subject of study is strong approximation of stochastic differential equations (SDEs) with respect to the supremum and the L_p error criteria, and we seek approximations that are strongly asymptotically optimal in specific classes of approximations. For the supremum error, we prove strong asymptotic optimality for specific tamed Euler schemes relating to certain adaptive and to equidistant time discretizations. For the L_p error, we prove strong asymptotic optimality for specific tamed Milstein schemes relating to certain adaptive and to equidistant time discretizations. To illustrate our findings, we numerically analyze the SDE associated with the Heston-3/2-model originating from mathematical finance.}, subject = {Stochastische Differentialgleichung}, language = {en} } @phdthesis{Stahlbauer2019, author = {Stahlbauer, Andreas}, title = {Abstract Transducers for Software Analysis and Verification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8468}, school = {Universit{\"a}t Passau}, pages = {xv, 187 Seiten}, year = {2019}, abstract = {Whenever software faults can endanger human life, property, or the environment, the absence of faults must be ensured with utmost care and the best technologies available. Evidence is needed showing that all requirements are satisfied and that the risk of faults is reduced. One technique to conduct such a verification task—composed of the software to verify, the specification to check, and a model of the environment—is software model checking. To conduct a verification task with a model checker, different models of the task are constructed. We distinguish between two types of task models: syntactic task models and semantic task models, which define the respective syntactic structure (control flow) and semantic structure (state transitions, invariants) of the verification task. When constructing such models, we can observe that similar structures and substructures reappear within and among different verification tasks. For example, the same assertions to check can appear in different functions, or the same predicate can be part of different invariants to describe sets of program states. Similarities that appear during the model construction process can be the result of solving similar reasoning problems, often solved using computationally expensive procedures (as typical for model checking), over and over again. Not reusing results of solving similar problems, not having a means for conducting repeated efforts automatically, or not trying to reduce the number of similar reasoning efforts, is a waste of precious resources. To address these problems, we present a common conceptual and technical foundation for sharing syntactic and semantic task artifacts for reuse, within and among verification runs. Both the syntactic construction of a verification task and the construction of its semantic model—which describes all possible behaviors and states—are covered. We study how commonalities and regularities in the task models can be taken into account to facilitate the process of sharing task artifacts for reuse, and to make the overall verification process more efficient and effective. We introduce abstract transducers as the theoretical foundation of this thesis: a type of finite-state transducers with an inherent notion of abstraction for states, the input alphabet, and its output alphabet. Abstracting these transducers allows us to widen both the set of input words for that they produce output and the sets of output words. Abstract transducers are instantiated as task artifact transducers to map from program structures to task artifacts to share. We show that the notion of abstraction provides a means for increasing the scope for that task artifacts are shared for reuse. We present two instances of task artifact transducers: Yarn transducers and precision transducers. We use Yarn transducers for providing code to weave into the control-flow structure of a computer program, and present the Loom analysis as a means for orchestrating the weaving process. Precision transducers provide a means for sharing abstraction precisions for reuse, thus aid in defining the level of abstraction of a semantic task model. For both types of transducers, we provide empirical evidence on their practical applicability, for example, to verify Linux kernel modules, and show that they can help in increasing the verification performance.}, subject = {Transduktor}, language = {en} } @phdthesis{Planche2020, author = {Planche, Benjamin}, title = {Bridging the Realism Gap for CAD-Based Visual Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8361}, school = {Universit{\"a}t Passau}, pages = {xx, 152}, year = {2020}, abstract = {Computer vision aims at developing algorithms to extract high-level information from images and videos. In the industry, for instance, such algorithms are applied to guide manufacturing robots, to visually monitor plants, or to assist human operators in recognizing specific components. Recent progress in computer vision has been dominated by deep artificial neural network, i.e., machine learning methods simulating the way that information flows in our biological brains, and the way that our neural networks adapt and learn from experience. For these methods to learn how to accurately perform complex visual tasks, large amounts of annotated images are needed. Collecting and labeling such domain-relevant training datasets is, however, a tedious—sometimes impossible—task. Therefore, it has become common practice to leverage pre-available three-dimensional (3D) models instead, to generate synthetic images for the recognition algorithms to be trained on. However, methods optimized over synthetic data usually suffer a significant performance drop when applied to real target images. This is due to the realism gap, i.e., the discrepancies between synthetic and real images (in terms of noise, clutter, etc.). In my work, three main directions were explored to bridge this gap. First, an innovative end-to-end framework is proposed to render realistic depth images from 3D models, as a growing number of solutions (especially in the industry) are utilizing low-cost depth cameras (e.g., Microsoft Kinect and Intel RealSense) for recognition tasks. Based on a thorough study of these devices and the different types of noise impairing them, the proposed framework simulates their inner mechanisms, comprehensively modeling vital factors such as sensor noise, material reflectance, surface geometry, etc. Able to simulate a wide panel of depth sensors and to quickly generate large datasets, this framework is used to train algorithms for various recognition tasks, consistently and significantly enhancing their performance compared to other state-of-the-art simulation tools. In some cases, however, relevant 2D or 3D object representations to generate synthetic samples are not available. Considering this different case of data scarcity, a solution is then proposed to incrementally build a representation of visual scenes from partial observations. Provided observations are localized from one to another based on their content and registered in a global memory with spatial properties. Simultaneously, this memory can be queried to render novel views of the scene. Furthermore, unobserved regions can be hallucinated in memory, in consistence with previous observations, hallucinations, and global priors. The efficacy of the proposed mnemonic and generative system, trainable end-to-end, is demonstrated on various 2D and 3D use-cases. Finally, an advanced convolutional neural network pipeline is introduced, tackling the realism gap from a novel angle. While most methods addressing this problem focus on bringing synthetic samples—or the knowledge acquired from them—closer to the real target domain, the proposed solution performs the opposite process, mapping unseen target images into controlled synthetic domains. The pre-processed samples can then be handed to downstream recognition methods, themselves purely trained on similar synthetic data, to greatly improve their accuracy. For each approach, a variety of qualitative and quantitative studies are detailed, providing successful comparisons to state-of-the-art methods. By proposing solutions to bridge the realism gap from either side, as well as a pipeline to improve the acquisition and generation of new visual content, this thesis provides a unique perspective on the challenges of data scarcity when building robust recognition systems.}, language = {en} } @phdthesis{Keren2020, author = {Keren, Gil}, title = {Neural Network Supervision: Notes on Loss Functions, Labels and Confidence Estimation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8223}, school = {Universit{\"a}t Passau}, pages = {ix, 98 Seiten}, year = {2020}, abstract = {We consider a number of enhancements to the standard neural network training paradigm. First, we show that carefully designed parameter update rules may replace the need for a loss function and its gradient. We introduce a parameter update rule that generalises the standard cross-entropy gradient, and allows directly controlling the relative effect of easy and hard examples on the training process. We show that the proposed update rule cannot be derived by using a loss function and yields better classification accuracy compared to training with the standard cross-entropy loss. In addition, we study the effect of the loss function choice on the learnt representations. We introduce the Single Logit Classification (SLC) task: classifying whether a given class is the correct class for a given example, in a computationally efficient manner, based on the appropriate class logit alone. A natural principle is proposed, the Principle of Logit Separation (PoLS), as a guideline for choosing and designing loss functions suitable for the SLC task. We mathematically analyse the alignment of eleven existing and novel loss functions with this principle. Experiment results show that using loss functions that are aligned with this principle results in a representation in the logits layer in which each logit is more informative of its class correctness, leading to a considerably better SLC accuracy. Further, we attempt to alleviate the dependency of standard neural network models on large amounts of quality labels. The task of weakly supervised one-shot detection is considered, in which at training time the model is trained without any localisation labels, and at test time it needs to identify and localise instances of unseen classes. We propose the attention similarity networks (ASN) for this task. ASN use a Siamese neural network to compute a similarity score between an exemplar and different locations in a target example. Then, an attention mechanism performs localisation by learning to attend to the correct locations. The ASN model outperforms the relevant baselines for weakly supervised one-shot detection tasks in the audio and computer vision domains. Finally, we consider the problem of quantifying prediction confidence in the regression setting. We propose two novel algorithms for emitting calibrated prediction intervals for neural network regressors, at any given confidence level. The two algorithms require binning of the output space and training the neural network regressor as a classifier. Then, the calibration algorithms choose the intervals in the output space, making sure they contain the amount of posterior probability mass that results in the desired confidence level.}, subject = {Neuronales Netz}, language = {en} } @phdthesis{Koop2021, author = {Koop, Martin}, title = {Preventing the Leakage of Privacy Sensitive User Data on the Web}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8717}, school = {Universit{\"a}t Passau}, pages = {137 Seiten}, year = {2021}, abstract = {Das Aufzeichnen der Internetaktivit{\"a}t ist mit der Verkn{\"u}pfung pers{\"o}nlicher Daten zu einer Schl{\"u}sselressource f{\"u}r viele kostenpflichtige und kostenfreie Dienste im Web geworden. Diese Dienste sind zum einen Webanwendungen, wie beispielsweise die von Google bereitgestellten Karten/Navigation oder Websuche, die t{\"a}glich kostenlos verwendet werden. Zum anderen sind es alle Webseiten, die meist kostenlos Nachrichten oder allgemeine Informationen zu verschiedenen Themen bereitstellen. Durch das Aufrufen und die Nutzung dieser Webdienste werden alle Informationen, die im Webdienst verarbeitet werden, an den Dienstanbieter weitergeben. Dies umfasst nicht nur die im Benutzerkonto des Webdienstes gespeicherte Profildaten wie Name oder Adresse, sondern auch die Aktivit{\"a}t mit dem Webdienst wie das anklicken von Links oder die Verweildauer. Dar{\"u}ber hinaus gibt es jedoch auch unz{\"a}hlige Drittparteien, welche zumeist im Hintergrund in die Webdienste eingebunden sind und das Benutzerverhalten der kompletten Webaktivit{\"a}t - Webseiten {\"u}bergreifend - mitspeichern sowie auswerten. Der Einsatz verschiedener, in der Regel f{\"u}r den Benutzer verborgener Techniken, dient dazu das Online-Verhalten der Benutzer genau zu verfolgen und viele sensible Daten zu sammeln. Dieses Verhalten wird als Web-Tracking bezeichnet und wird haupts{\"a}chlich von Werbeunternehmen genutzt. Die gesammelten Daten sind oft personenbezogen und eine wertvolle Ressourcen der Unternehmen, um Beispielsweise passend zum Benutzerprofil personalisierte Werbung schalten zu k{\"o}nnen. Mit der Nutzung dieser personenbezogenen Daten entstehen aber auch weitreichendere Auswirkungen, welche sich unter anderem in Preisanpassungen f{\"u}r Benutzer mit speziellen Profilattributen, wie der Nutzung von teuren Endger{\"a}ten, widerspiegeln. Ziel dieser Arbeit ist es die Privatsph{\"a}re der Nutzer im Internet zu steigern und die Nutzerverfolgung von Web-Tracking signifikant zu reduzieren. Dabei stellen sich vier Herausforderungen, die jeweils einen Forschungsschwerpunkt dieser Arbeit bilden: (1) Systematische Analyse und Einordnung eingesetzter Tracking-Techniken, (2) Untersuchung vorhandener Schutzmechanismen und deren Schwachstellen,(3) Konzeption einer Referenzarchitektur zum Schutz vor Web-Tracking und (4) Entwurf einer automatisierten Testumgebungen unter Realbedingungen, um die Reduzierung von Web-Tracking in den entwickelten Schutzmaßnahmen zu untersuchen. Jeder dieser Forschungsschwerpunkte stellt neue Beitr{\"a}ge bereit, um einheitlich das {\"u}bergeordnete Ziel zu erreichen: der Entwicklung von Schutzmaßnahmen gegen die Preisgabe sensibler Benutzerdaten im Internet. Der erste wissenschaftliche Beitrag dieser Dissertation ist eine umfassende Evaluation eingesetzter Web-Tracking Techniken und Methoden, sowie deren Gefahren, Risiken und Implikationen f{\"u}r die Privatsph{\"a}re der Internetnutzer. Die Evaluation beinhaltet zus{\"a}tzlich die Untersuchung vorhandener Tracking-Schutzmechanismen und deren Schwachstellen. Die gewonnenen Erkenntnisse sind maßgeblich f{\"u}r die in dieser Arbeit neu entwickelten Ans{\"a}tze und verbessern den bisherigen nicht hinreichend gew{\"a}hrleisteten Schutz vor Web-Tracking. Der zweite wissenschaftliche Beitrag ist die Entwicklung einer robusten Klassifizierung von Web-Tracking, der Entwurf einer effizienten Architektur zur Langzeituntersuchung von Web-Tracking sowie einer interaktiven Visualisierung des Auftreten von Web-Tracking im Internet. Dabei basiert der neue Klassifizierungsansatz, um Tracking zu identifizieren, auf der Entropie Messung des Informationsgehalts von Cookies. Die Resultate der Web-Tracking Langzeitstudien sind unter anderem 1.209 identifizierte Tracking-Domains auf den meistbesuchten Webseiten in Deutschland. Hierbei wurden innerhalb der Top 25 Webseiten im Durchschnitt 45 Tracking-Elemente pro Webseite gefunden. Der Tracker mit dem h{\"o}chsten Potenzial zum Erstellen eines Benutzerprofils war doubleclick.com, da er 90\% der Webseiten {\"u}berwacht. Die Auswertung des untersuchten Tracking-Netzwerks ergab weiterhin einen detaillierten Einblick in die Tracking-Technik mithilfe von Weiterleitungslinks. Dabei haben wir 1,2 Millionen HTTP-Traces von monatelangen Crawls der 50.000 international meistbesuchten Webseiten analysiert. Die Ergebnisse zeigen, dass 11,6\% dieser Webseiten HTTP-Redirects, verborgen in Webseiten-Links, zum Tracken verwenden. Dies wird eingesetzt, um den Webseitenverlauf des Benutzers nach dem Klick durch eine Kette von (Tracking-)Servern umzuleiten, welche in der Regel nicht sichtbar sind, bevor das beabsichtigte Link-Ziel geladen wird. In diesem Szenario erfasst der Tracker wertvolle Verbindungs-Metadaten zu Inhalt, Thema oder Benutzerinteressen der Website. Die Visualisierung des Tracking {\"O}kosystem stellen wir in einem interaktiven Open-Source Web-Tool bereit. Der dritte wissenschaftliche Beitrag dieser Dissertation ist die Konzeption von zwei neuartigen Schutzmechanismen gegen Web-Tracking und der Aufbau einer automatisierten Simulationsumgebung unter Realbedingungen, um die Effektivit{\"a}t der Umsetzungen zu verifizieren. Der Fokus liegt auf den beiden meist verwendeten Tracking-Verfahren: Cookies (hierbei wird eine eindeutigen ID auf dem Ger{\"a}t des Benutzers gespeichert), sowie Browser-Fingerprinting. Letzteres beschreibt eine Methode zum Sammeln einer Vielzahl an Ger{\"a}teeigenschaften, um den Benutzer eindeutig zu (re- )identifizieren, ohne eine eindeutige ID auf dem Ger{\"a}t zu speichern. Um die Effektivit{\"a}t der in dieser Arbeit entwickelten Schutzmechanismen vor Web-Tracking zu untersuchen, implementierten und evaluierten wir die Schutzkonzepte direkt im Chromium Browser. Das Ergebnis zeigt eine erfolgreiche Reduzierung von Web-Tracking um 44\%. Zus{\"a}tzlich verbessert das in dieser Arbeit entwickelte Konzept "Site Isolation" den Datenschutz des privaten Browsing-Modus, erm{\"o}glicht das Setzen eines manuellen Speicher-Zeitlimits von Cookies und sch{\"u}tzt den Browser gegen verschiedene Bedrohungen wie CSRF (Cross-Site Request Forgery) oder CORS (Cross-Origin Ressource Sharing). Site Isolation speichert dabei den Status der lokalen Website in separaten Containern und kann dadurch diverse Tracking-Methoden wie Cookies, lokalStorage oder redirect tracking verhindern. Bei der Auswertung von 1,6 Millionen Webseiten haben wir gezeigt, dass der Tracker doubleclick.com das h{\"o}chste Potenzial besitzt, den Nutzer zu verfolgen und auf 25\% der 40.000 international meistbesuchten Webseiten vertreten ist. Schließlich demonstrieren wir in unserem erweiterten Chromium-Browser einen robusten Browser-Fingerprinting-Schutz. Der Test unseres Prototyps mittels 70.000 Browsersitzungen zeigt, dass unser Browser den Nutzer vor sogenanntem Browser-Fingerprinting Tracking sch{\"u}tzt. Im Vergleich zu f{\"u}nf anderen Browser-Fingerprint-Tools erzielte unser Prototyp die besten Ergebnisse und ist der erste Schutzmechanismus gegen Flash sowie Canvas Fingerprinting.}, subject = {Datenschutz}, language = {en} } @phdthesis{Kasinathan2021, author = {Kasinathan, Prabhakaran}, title = {Workflow-aware access control for the Internet of Things}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8915}, school = {Universit{\"a}t Passau}, pages = {xxiii, 214 Seiten}, year = {2021}, abstract = {IoT is defined as a paradigm where "things" have sensing, actuating, communicating, and self-configuring abilities, and are connected to each other and to the Internet. Recent advancements in the manufacturing industry have helped to produce embedded devices with various sensors and actuators in mass numbers at a reduced cost. As part of the IoT revolution, everyday devices such as television, refrigerator, cars, even industrial machines are now connected IoT devices. Recent studies have predicted that by 2025 there will be over 75 billion of such IoT devices connected to the Internet. The providers of IoT based services want to integrate their services to satisfy customer requirements. For example, in the mobility scenario, different mobility solution providers want to offer a multi-modal ticket to their customers jointly. In such a distributed and loosely coupled environment, each owner and stakeholder wants to secure his/her own integrity, confidentiality, and functionality goals. This means that distributed rules and conditions defined by the individual owners must be enforced on the participating entities (e.g., customers or partners using their services). The owners and stakeholders may not necessarily trust each other's actions. Therefore, a mechanism is required that guarantees the rules and conditions specified by the different owners. Attacks on IoT devices and similar computing systems are increasing and getting more advanced. IoT devices are often constrained, i.e., they have limited processing power, memory, and energy. Security mechanisms designed for traditional computing systems, e.g., computers, servers, or mobile computing devices such as smartphones, may not fit in those constrained IoT devices. Weak security mechanisms and unenforced security measures were one of the main reasons for recent successful attacks on IoT devices and services. As IoT is now used in many sensitive places, including critical infrastructures, securing them becomes more critical than ever. This thesis focuses on developing mechanisms that secure IoT devices and services and enforcing the rules and conditions specified by the owners on entities that want to access owners' resources. In classical computer systems, security automata are used for specifying security policies and monitoring mechanisms are used for enforcing such policies. For instance, a reference monitor observes and stops the execution when the security policies are about to be violated, thus, the security policies are enforced. To restrict the adversary from using protected IoT devices or services for malicious purposes, it is required to ensure that a workflow must be followed to access the protected resource. In distributed IoT systems where the policies are governed by different owners, each owner would like to specify their rules and conditions in their workflows. The workflows contain tasks that must be performed in a particular order. The goal of this thesis is to develop mechanisms to specify and enforce these workflows in the distributed IoT environment. This thesis introduces a distributed WFAC framework that restricts the entities to do only what they are allowed to do in a collaborative environment. To gain access to a service protected by the WFAC framework, every workflow participant must prove that he/she is in a particular state of an authorized workflow. Authorized means two things: (a) the owner has authorized the workflow to be executed; (b) the workflow participant is authorized to execute it. This restricts the adversary's access to the devices and its services. The security policies defined by different owners are modeled as workflows and specified using Petri Nets. The policies are then enforced with the help of the WFAC framework which supports error-handling, accountability, integration of practitioner-friendly tools, and interoperability with existing security mechanisms such as OAuth. Thus, the WFAC guarantees the integrity of workflows in a distributed environment.}, language = {en} } @phdthesis{ParraRodriguez2019, author = {Parra Rodriguez, Juan David}, title = {Computational Resource Abuse in Web Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7706}, school = {Universit{\"a}t Passau}, pages = {xi, 158 Seiten}, year = {2019}, abstract = {Internet browsers include Application Programming Interfaces (APIs) to support Web applications that require complex functionality, e.g., to let end users watch videos, make phone calls, and play video games. Meanwhile, many Web applications employ the browser APIs to rely on the user's hardware to execute intensive computation, access the Graphics Processing Unit (GPU), use persistent storage, and establish network connections. However, providing access to the system's computational resources, i.e., processing, storage, and networking, through the browser creates an opportunity for attackers to abuse resources. Principally, the problem occurs when an attacker compromises a Web site and includes malicious code to abuse its visitor's computational resources. For example, an attacker can abuse the user's system networking capabilities to perform a Denial of Service (DoS) attack against third parties. What is more, computational resource abuse has not received widespread attention from the Web security community because most of the current specifications are focused on content and session properties such as isolation, confidentiality, and integrity. Our primary goal is to study computational resource abuse and to advance the state of the art by providing a general attacker model, multiple case studies, a thorough analysis of available security mechanisms, and a new detection mechanism. To this end, we implemented and evaluated three scenarios where attackers use multiple browser APIs to abuse networking, local storage, and computation. Further, depending on the scenario, an attacker can use browsers to perform Denial of Service against third-party Web sites, create a network of browsers to store and distribute arbitrary data, or use browsers to establish anonymous connections similarly to The Onion Router (Tor). Our analysis also includes a real-life resource abuse case found in the wild, i.e., CryptoJacking, where thousands of Web sites forced their visitors to perform crypto-currency mining without their consent. In the general case, attacks presented in this thesis share the attacker model and two key characteristics: 1) the browser's end user remains oblivious to the attack, and 2) an attacker has to invest little resources in comparison to the resources he obtains. In addition to the attack's analysis, we present how existing, and upcoming, security enforcement mechanisms from Web security can hinder an attacker and their drawbacks. Moreover, we propose a novel detection approach based on browser API usage patterns. Finally, we evaluate the accuracy of our detection model, after training it with the real-life crypto-mining scenario, through a large scale analysis of the most popular Web sites.}, subject = {Computersicherheit}, language = {en} } @phdthesis{Horaček2020, author = {Hor{\´a}ček, Jan}, title = {Algebraic and Logic Solving Methods for Cryptanalysis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7731}, school = {Universit{\"a}t Passau}, pages = {v, 154 Seiten}, year = {2020}, abstract = {Algebraic solving of polynomial systems and satisfiability of propositional logic formulas are not two completely separate research areas, as it may appear at first sight. In fact, many problems coming from cryptanalysis, such as algebraic fault attacks, can be rephrased as solving a set of Boolean polynomials or as deciding the satisfiability of a propositional logic formula. Thus one can analyze the security of cryptosystems by applying standard solving methods from computer algebra and SAT solving. This doctoral thesis is dedicated to studying solvers that are based on logic and algebra separately as well as integrating them into one such that the combined solvers become more powerful tools for cryptanalysis. This disseration is divided into three parts. In this first part, we recall some theory and basic techniques for algebraic and logic solving. We focus mainly on DPLL-based SAT solving and techniques that are related to border bases and Gr{\"o}bner bases. In particular, we describe in detail the Border Basis Algorithm and discuss its specialized version for Boolean polynomials called the Boolean Border Basis Algorithm. In the second part of the thesis, we deal with connecting solvers based on algebra and logic. The ultimate goal is to combine the strength of different solvers into one. Namely, we fuse the XOR reasoning from algebraic solvers with the light, efficient design of SAT solvers. As a first step in this direction, we design various conversions from sets of clauses to sets of Boolean polynomials, and vice versa, such that solutions and models are preserved via the conversions. In particular, based on a block-building mechanism, we design a new blockwise algorithm for the CNF to ANF conversion which is geared towards producing fewer and lower degree polynomials. The above conversions allow usto integrate both solvers via a communication interface. To reach an even tighter integration, we consider proof systems that combine resolution and polynomial calculus, i.e. the two most used proof systems in logic and algebraic solving. Based on such a proof system, which we call SRES, we introduce new types of solving algorithms that demostrate the synergy between Gr{\"o}bner-like and DPLL-like solving. At the end of the second part of the dissertation, we provide some experiments based on a new benchmark which illustrate that the our new method based on DPLL has the potential to outperform CDCL SAT solvers. In the third part of the thesis, we focus on practical attacks on various cryptograhic primitives. For instance, we apply SAT solvers in the case of algebraic fault attacks on the symmetric ciphers LED and derivatives of the block cipher AES. The main goal there is to derive so-called fault equations automatically from the hardware description of the cryptosystem and thus automatizate the attack. To give some extra power to a SAT solver that inverts the hash functions SHA-1 and SHA-2, we describe how to tweak the SAT solver using a programmatic interface such that the propagation of the solver and thus the attack itself is improved.}, subject = {Kryptoanalyse}, language = {en} } @phdthesis{Fink2019, author = {Fink, Thomas}, title = {Curvature Detection by Integral Transforms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7684}, school = {Universit{\"a}t Passau}, pages = {viii, 194 Seiten}, year = {2019}, abstract = {In various fields of image analysis, determining the precise geometry of occurrent edges, e.g. the contour of an object, is a crucial task. Especially the curvature of an edge is of great practical relevance. In this thesis, we develop different methods to detect a variety of edge features, among them the curvature. We first examine the properties of the parabolic Radon transform and show that it can be used to detect the edge curvature, as the smoothness of the parabolic Radon transform changes when the parabola is tangential to an edge and also, when additionally the curvature of the parabola coincides with the edge curvature. By subsequently introducing a parabolic Fourier transform and establishing a precise relation between the smoothness of a certain class of functions and the decay of the Fourier transform, we show that the smoothness result for the parabolic Radon transform can be translated into a change of the decay rate of the parabolic Fourier transform. Furthermore, we introduce an extension of the continuous shearlet transform which additionally utilizes shears of higher order. This extension, called the Taylorlet transform, allows for a detection of the position and orientation, as well as the curvature and other higher order geometric information of edges. We introduce novel vanishing moment conditions which enable a more robust detection of the geometric edge features and examine two different constructions for Taylorlets. Lastly, we translate the results of the Taylorlet transform in R^2 into R^3 and thereby allow for the analysis of the geometry of object surfaces.}, subject = {Kr{\"u}mmung}, language = {en} } @phdthesis{Lucas2019, author = {Lucas, Yvan}, title = {Credit card fraud detection using machine learning with integration of contextual knowledge}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7713}, school = {Universit{\"a}t Passau}, pages = {xxi, 125 Seiten}, year = {2019}, abstract = {We have proposed a strategy for the creation of attributes based on hidden Markov models (HMM) characterizing the transaction from different points of view. This strategy makes it possible to integrate a broad spectrum of sequential information into the attributes of transactions. In fact, we model the authentic and fraudulent behavior of merchants and card holders according to two univariate characteristics: the date and the amount of transactions. In addition, attributes based on HMMs are created in a supervised manner, thereby reducing the need for expert knowledge for the creation of the fraud detection system. Ultimately, our HMM-based multi-perspective approach allows automated data pre-processing to model time correlations to complement and eventually replace transaction aggregation strategies to improve detection efficiency. Experiments carried out on a large set of credit card transaction data from the real world (46 million transactions carried out by Belgian card holders between March and May 2015) have shown that the strategy proposed for data preprocessing based on HMM can detect more fraudulent transactions when combined with the strategy of preprocessing reference data based on expert knowledge for the detection of credit card fraud.}, subject = {Kreditkartenmissbrauch}, language = {en} } @phdthesis{Reislhuber2017, author = {Reislhuber, Josef}, title = {Optical Graph Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5159}, school = {Universit{\"a}t Passau}, pages = {270 Seiten}, year = {2017}, abstract = {Graphs are an important model for the representation of structural information between objects. One identifies objects and nodes as well as a binary relation between objects and edges. Graphs have many uses, e. g., in social sciences, life sciences and engineering. There are two primary representations: abstract and visual. The abstract representation is well suited for processing graphs by computers and is given by an adjacency list, an adjacency matrix or any abstract data structure. A visual representation is used by human users who prefer a picture. Common terms are diagram, scheme, plan, or network. The objective of Graph Drawing is to transform a graph into a visual representation called the drawing of a graph. The goal is a "nice" drawing. In this thesis we introduce Optical Graph Recognition. Optical Graph Recognition (OGR) reverses Graph Drawing and transforms a digital image of a graph into an abstract representation. Our approach consists of four phases: Preprocessing where we determine which pixels of an image are part of the graph, Segmentation where we recognize the nodes, Topology Recognition where we detect the edges and Postprocessing where we enrich the recognized graph with additional information. We apply established digital image processing methods and make use of the special property that the image contains nodes that are connected by edges. We have focused on developing algorithms that need as little parameters as possible or to automatically calibrate the parameters. Most false recognition results are caused by crossing edges as this makes tracing the edges difficult and can lead to other recognition errors. We have evaluated hand-drawn and computer-drawn graphs. Our algorithms have a very high recognition rate for computer-drawn graphs, e. g., from a set of 100000 computer-drawn graphs over 90\% were correctly recognized. Most false recognition results where observed for hand-drawn graphs as they can include drawing errors and inaccuracies. For universal usability we have implemented a prototype called OGRup for mobile devices like smartphones or tablet computers. With our software it is possible to directly take a picture of a graph via a built in camera, recognize the graph, and then use the result for further processing. Furthermore, in order to gain more insight into the way a person draws a graph by hand, we have conducted a field study.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Lorenz2018, author = {Lorenz, Florian}, title = {Analyse und Erzeugung von glatten Fl{\"a}chen{\"u}berg{\"a}ngen f{\"u}r das CNC-Fr{\"a}sen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5166}, school = {Universit{\"a}t Passau}, pages = {i, 149 Seiten}, year = {2018}, abstract = {In dieser Arbeit werden numerisch stabile Methoden zur Pr{\"u}fung von Stetigkeiten an Fl{\"a}chen{\"u}berg{\"a}ngen vorgestellt und Algorithmen zur Erzeugung von G^2-stetigen Fl{\"a}chen{\"u}berg{\"a}ngen hergeleitet.}, subject = {Differentialgeometrie}, language = {de} } @phdthesis{Hanauer2018, author = {Hanauer, Kathrin}, title = {Linear Orderings of Sparse Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5524}, school = {Universit{\"a}t Passau}, pages = {vii, 282 Seiten}, year = {2018}, abstract = {The Linear Ordering problem consists in finding a total ordering of the vertices of a directed graph such that the number of backward arcs, i.e., arcs whose heads precede their tails in the ordering, is minimized. A minimum set of backward arcs corresponds to an optimal solution to the equivalent Feedback Arc Set problem and forms a minimum Cycle Cover. Linear Ordering and Feedback Arc Set are classic NP-hard optimization problems and have a wide range of applications. Whereas both problems have been studied intensively on dense graphs and tournaments, not much is known about their structure and properties on sparser graphs. There are also only few approximative algorithms that give performance guarantees especially for graphs with bounded vertex degree. This thesis fills this gap in multiple respects: We establish necessary conditions for a linear ordering (and thereby also for a feedback arc set) to be optimal, which provide new and fine-grained insights into the combinatorial structure of the problem. From these, we derive a framework for polynomial-time algorithms that construct linear orderings which adhere to one or more of these conditions. The analysis of the linear orderings produced by these algorithms is especially tailored to graphs with bounded vertex degrees of three and four and improves on previously known upper bounds. Furthermore, the set of necessary conditions is used to implement exact and fast algorithms for the Linear Ordering problem on sparse graphs. In an experimental evaluation, we finally show that the property-enforcing algorithms produce linear orderings that are very close to the optimum and that the exact representative delivers solutions in a timely manner also in practice. As an additional benefit, our results can be applied to the Acyclic Subgraph problem, which is the complementary problem to Feedback Arc Set, and provide insights into the dual problem of Feedback Arc Set, the Arc-Disjoint Cycles problem.}, subject = {Graphentheorie}, language = {en} } @phdthesis{Niedermeier2020, author = {Niedermeier, Michael}, title = {Towards High Performability in Advanced Metering Infrastructures}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8597}, school = {Universit{\"a}t Passau}, pages = {xvii, 198 Seiten}, year = {2020}, abstract = {The current movement towards a smart grid serves as a solution to present power grid challenges by introducing numerous monitoring and communication technologies. A dependable, yet timely exchange of data is on the one hand an existential prerequisite to enable Advanced Metering Infrastructure (AMI) services, yet on the other a challenging endeavor, because the increasing complexity of the grid fostered by the combination of Information and Communications Technology (ICT) and utility networks inherently leads to dependability challenges. To be able to counter this dependability degradation, current approaches based on high-reliability hardware or physical redundancy are no longer feasible, as they lead to increased hardware costs or maintenance, if not both. The flexibility of these approaches regarding vendor and regulatory interoperability is also limited. However, a suitable solution to the AMI dependability challenges is also required to maintain certain regulatory-set performance and Quality of Service (QoS) levels. While a part of the challenge is the introduction of ICT into the power grid, it also serves as part of the solution. In this thesis a Network Functions Virtualization (NFV) based approach is proposed, which employs virtualized ICT components serving as a replacement for physical devices. By using virtualization techniques, it is possible to enhance the performability in contrast to hardware based solutions through the usage of virtual replacements of processes that would otherwise require dedicated hardware. This approach offers higher flexibility compared to hardware redundancy, as a broad variety of virtual components can be spawned, adapted and replaced in a short time. Also, as no additional hardware is necessary, the incurred costs decrease significantly. In addition to that, most of the virtualized components are deployed on Commercial-Off-The-Shelf (COTS) hardware solutions, further increasing the monetary benefit. The approach is developed by first reviewing currently suggested solutions for AMIs and related services. Using this information, virtualization technologies are investigated for their performance influences, before a virtualized service infrastructure is devised, which replaces selected components by virtualized counterparts. Next, a novel model, which allows the separation of services and hosting substrates is developed, allowing the introduction of virtualization technologies to abstract from the underlying architecture. Third, the performability as well as monetary savings are investigated by evaluating the developed approach in several scenarios using analytical and simulative model analysis as well as proof-of-concept approaches. Last, the practical applicability and possible regulatory challenges of the approach are identified and discussed. Results confirm that—under certain assumptions—the developed virtualized AMI is superior to the currently suggested architecture. The availability of services can be severely increased and network delays can be minimized through centralized hosting. The availability can be increased from 96.82\% to 98.66\% in the given scenarios, while decreasing the costs by over 60\% in comparison to the currently suggested AMI architecture. Lastly, the performability analysis of a virtualized service prototype employing performance analysis and a Musa-Okumoto approach reveals that the AMI requirements are fulfilled.}, subject = {Energieversorgung}, language = {en} } @phdthesis{Schloetterer2020, author = {Schl{\"o}tterer, J{\"o}rg}, title = {Supporting the Discovery of Long-Tail Resources on the Web}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8539}, school = {Universit{\"a}t Passau}, pages = {xi, 218 Seiten}, year = {2020}, abstract = {A plethora of resources made available via retrieval systems in digital libraries remains untapped in the so called long tail of the Web. These long-tail websites get considerably less visits than major Web hubs. Zero-effort queries ease the discovery of long-tail resources by proactively retrieving and presenting information based on a user's context. However, zero-effort queries over existing digital library structures are challenging, since the underlying retrieval system is only accessible via an API. The information need must be expressed by a query, instead of optimizing the ranking between context and resources in the retrieval system directly. We address three research questions that arise from replacing the user information seeking process by zero-effort queries. Our first question addresses the transformation of a user query to an automatic query, derived from the context. We present means to 1) identify the relevant context on different levels of granularity, 2) derive an information need from the context via keyword extraction and personalization and 3) express this information need in a query scheme that avoids over- or under-specified queries. We address the cold start problem with an approach to bootstrap user profiles from social media, even for passive users. With the second question, we address the presentation of resources in zero-effort query scenarios, presenting guidelines for presentation interfaces in the browser and a visualization of the triadic relationship between context, query and results. QueryCrumbs, a compact query history visualization supports recalling information found in the past and exploratory search by visualizing qualitative and quantitative query similarity. Our last question addresses the gap between (simple) keyword queries and the representation of resources by rich and complex meta-data. We investigate and extend feature representation learning techniques centered around the skip-gram model with negative sampling. Finally, we present an approach to learn representations from network and text jointly that can cope with the partial absence of one modality. Experimental results show close to human performance of our zero-effort query and user profile generation approach and visualizations to be helpful in terms of transparency, efficiency and support for exploratory search. These results indicate that the proposed zero-effort query approach indeed eases the discovery of long-tail resources and the accompanying visualizations further facilitate this process. The joint representation model provides a first step to bridge the gap between query and resource representation and we plan to follow and investigate this route further in the future.}, subject = {Data Sience}, language = {en} } @phdthesis{Garchery2020, author = {Garchery, Mathieu}, title = {User-centered intrusion detection using heterogeneous data}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8704}, school = {Universit{\"a}t Passau}, pages = {vii, 119 Seiten}, year = {2020}, abstract = {With the frequency and impact of data breaches raising, it has become essential for organizations to automate intrusion detection via machine learning solutions. This generally comes with numerous challenges, among others high class imbalance, changing target concepts and difficulties to conduct sound evaluation. In this thesis, we adopt a user-centered anomaly detection perspective to address selected challenges of intrusion detection, through a real-world use case in the identity and access management (IAM) domain. In addition to the previous challenges, salient properties of this particular problem are high relevance of categorical data, limited feature availability and total absence of ground truth. First, we ask how to apply anomaly detection to IAM audit logs containing a restricted set of mixed (i.e. numeric and categorical) attributes. Then, we inquire how anomalous user behavior can be separated from normality, and this separation evaluated without ground truth. Finally, we examine how the lack of audit data can be alleviated in two complementary settings. On the one hand, we ask how to cope with users without relevant activity history ("cold start" problem). On the other hand, we seek how to extend audit data collection with heterogeneous attributes (i.e. categorical, graph and text) to improve insider threat detection. After aggregating IAM audit data into sessions, we introduce and compare general anomaly detection methods for mixed data to a user identification approach, designed to learn the distinction between normal and malicious user behavior. We find that user identification outperforms general anomaly detection and is effective against masquerades. An additional clustering step allows to reduce false positives among similar users. However, user identification is not effective against insider threats. Furthermore, results suggest that the current scope of our audit data collection should be extended. In order to tackle the "cold start" problem, we adopt a zero-shot learning approach. Focusing on the CERT insider threat use case, we extend an intrusion detection system by integrating user relations to organizational entities (like assignments to projects or teams) in order to better estimate user behavior and improve intrusion detection performance. Results show that this approach is effective in two realistic scenarios. Finally, to support additional sources of audit data for insider threat detection, we propose a method representing audit events as graph edges with heterogeneous attributes. By performing detection at fine-grained level, this approach advantageously improves anomaly traceability while reducing the need for aggregation and feature engineering. Our results show that this method is effective to find intrusions in authentication and email logs. Overall, our work suggests that masquerades and insider threats call for different detection methods. For masquerades, user identification is a promising approach. To find malicious insiders, graph features representing user context and relations to other entities can be informative. This opens the door for tighter coupling of intrusion detection with user identities, roles and privileges used in IAM solutions.}, subject = {Anomalie}, language = {en} } @article{MandarawiRottmeierRezaeighaleetal.2020, author = {Mandarawi, Waseem and Rottmeier, J{\"u}rgen and Rezaeighale, Milad and de Meer, Hermann}, title = {Policy-Based Composition and Embedding of Extended Virtual Networks and SFCs for IIoT}, series = {Algorithms}, volume = {13}, journal = {Algorithms}, number = {9}, publisher = {MDPI}, issn = {1999-4893}, doi = {10.3390/a13090240}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8488}, year = {2020}, abstract = {The autonomic composition of Virtual Networks (VNs) and Service Function Chains (SFCs)based on application requirements is significant for complex environments. In this paper, we use graph transformation in order to compose an Extended Virtual Network (EVN) that is based on different requirements, such as locations, low latency, redundancy, and security functions. The EVN can represent physical environment devices and virtual application and network functions. We build a generic Virtual Network Embedding (VNE) framework for transforming an Application Request (AR) to an EVN. Subsequently, we define a set of transformations that reflect preliminary topological, performance, reliability, and security policies. These transformations update the entities and demands of the VN and add SFCs that include the required Virtual Network Functions (VNFs). Additionally, we propose a greedy proactive heuristic for path-independent embedding of the composed SFCs. This heuristic is appropriate for real complex environments, such as industrial networks. Furthermore, we present an Industrail Internet of Things (IIoT) use case that was inspired by Industry 4.0 concepts,in which EVNs for remote asset management are deployed over three levels; manufacturing halls and edge and cloud computing. We also implement the developed methods in Alevin and show exemplary mapping results from our use case. Finally, we evaluate the chain embedding heuristic while using a random topology that is typical for such a use case, and show that it can improve the admission ratio and resource utilization with minimal overhead.}, language = {en} } @phdthesis{Gerl2019, author = {Gerl, Armin}, title = {Modelling of a Privacy Language and Efficient Policy-based De-identification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7674}, school = {Universit{\"a}t Passau}, pages = {xviii, 257 Seiten}, year = {2019}, abstract = {The processing of personal information is omnipresent in our data-driven society enabling personalized services, which are regulated by privacy policies. Although privacy policies are strictly defined by the General Data Protection Regulation (GDPR), no systematic mechanism is in place to enforce them. Especially if data is merged from several sources into a data-set with different privacy policies associated, the management and compliance to all privacy requirements is challenging during the processing of the data-set. Privacy policies can vary hereby due to different policies for each source or personalization of privacy policies by individual users. Thus, the risk for negligent or malicious processing of personal data due to defiance of privacy policies exists. To tackle this challenge, a privacy-preserving framework is proposed. Within this framework privacy policies are expressed in the proposed Layered Privacy Language (LPL) which allows to specify legal privacy policies and privacy-preserving de-identification methods. The policies are enforced by a Policy-based De-identification (PD) process. The PD process enables efficient compliance to various privacy policies simultaneously while applying pseudonymization, personal privacy anonymization and privacy models for de-identification of the data-set. Thus, the privacy requirements of each individual privacy policy are enforced filling the gap between legal privacy policies and their technical enforcement.}, subject = {Datenschutz}, language = {en} } @phdthesis{Jurgovsky2019, author = {Jurgovsky, Johannes}, title = {Context-Aware Credit Card Fraud Detection}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7622}, school = {Universit{\"a}t Passau}, pages = {xvii, 160 Seiten}, year = {2019}, abstract = {Credit card fraud has emerged as major problem in the electronic payment sector. In this thesis, we study data-driven fraud detection and address several of its intricate challenges by means of machine learning methods with the goal to identify fraudulent transactions that have been issued illegitimately on behalf of the rightful card owner. In particular, we explore several means to leverage contextual information beyond a transaction's basic attributes on the transaction level, sequence level and user level. On the transaction level, we aim to identify fraudulent transactions which, in terms of their attribute values, are globally distinguishable from genuine transactions. We provide an empirical study of the influence of class imbalance and forecasting horizons on the classification performance of a random forest classifier. We augment transactions with additional features extracted from external knowledge sources and show that external information about countries and calendar events improves classification performance most noticeably on card-not-present transactions. On the sequence level, we aim to detect frauds that are inconspicuous in the background of all transactions but peculiar with respect to the short-term sequence they appear in. We use a Long Short-term Memory network (LSTM) for modeling the sequential succession of transactions. Our results suggest that LSTM-based modeling is a promising strategy for characterizing sequences of card-present transactions but it is not adequate for card-not-present transactions. On the user level, we elaborate on feature aggregations and propose a flexible concept allowing us define numerous features by means of a simple syntax. We provide a CUDA-based implementation for the computationally expensive extraction with a speed-up of two orders of magnitude over a single-core implementation. Our feature selection study reveals that aggregates extracted from users' transaction sequences are more useful than those extracted from merchant sequences. Moreover, we discover multiple sets of candidate features with equivalent performance as manually engineered aggregates while being structurally different. Regarding future work, we motivate the usage of simple and transparent machine learning methods for credit card fraud detection and we sketch a simple user-focused modeling approach.}, subject = {Kreditkartenmissbrauch}, language = {en} } @phdthesis{Klaus2019, author = {Klaus, Tina}, title = {Complexity Analysis of Quantizations of Multidimensional Stochastic Differential Equations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7665}, school = {Universit{\"a}t Passau}, pages = {iv, 166 Seiten}, year = {2019}, abstract = {The dissertation is located in the field of quantizations of certain stochastic processes, namely a solution X of a multidimensional stochastic differential equation (SDE). The quantization problem for X consists in approximating X by a a random element which takes only finitely many values. Our main interest lies in the investigation of the asymptotic behavior of the Nth minimal quantization error of X as N tends to infinity, which incorporates the determination of both the sharp rate of convergence and explicit asymptotic constants. Especially explicit asymptotic constants have been so far unknown in the context of multidimensional SDEs. Furthermore, as part of our analysis, we provide a method which yields a strongly asymptotically optimal sequence of N-quantization of X. In certain special cases our method is fully constructive and the algorithm is easy to implement.}, subject = {Stochastische Differentialgleichung}, language = {en} } @phdthesis{Charpenay2019, author = {Charpenay, Victor}, title = {Semantics for the Web of Things: Modeling the Physical World as a Collection of Things and Reasoning with their Descriptions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7578}, school = {Universit{\"a}t Passau}, pages = {xiii, 127 Seiten}, year = {2019}, abstract = {The main research question of this thesis is to develop a theory that would provide foundations for the development of Web of Things (WoT) systems. A theory for WoT shall provide a model of the 'things' WoT agents relate to such that these relations determine what interactions take place between these agents. This thesis presents a knowledge-based approach in which the semantics of WoT systems is given by a transformation (an homomorphism) between a graph representing agent interactions and a knowledge graph describing 'things'. It focuses on three aspects of knowledge graphs in particular: the vocabulary with which assertions can be made, the rules that can be defined over this vocabulary and its serialization to efficiently exchange pieces of a knowledge graph. Each aspect is developed in a dedicated chapter, with specific contributions to the state-of-the-art. The need for a unified vocabulary to describe 'things' in WoT and the Internet of Things (IoT) has been identified early on in the literature. Many proposals have been consequently published, in the form of Web ontologies. In Ch. 2, a systematic review of these proposals is being developed, as well as a comparison with the data models of the principal IoT frameworks and protocols. The contribution of the thesis in that respect is an alignment between the Thing Description (TD) model and the Semantic Sensor Network (SSN) ontology, two standards of the World Wide Web Consortium (W3C). The scope of this thesis is generally limited to Web standards, especially those defined by the Resource Description framework (RDF). Web ontologies do not only expose a vocabulary but also rules to extend a knowledge graph by means of reasoning. Starting from a set of TD documents, new relations between 'things' can be "discovered" this way, indicating possible interactions between the servients that relate to them. The experiments presented in Ch. 3 were done on the basis of this semantic discovery framework on two use cases: a building automation use case provided by Intel Labs and an industrial control use case developed internally at Siemens. The relations to discover often involve anonymous nodes in the knowledge graph: the chapter also introduces a novel skolemization algorithm to correctly process these nodes on a well-defined fragment of the Web Ontology Language (OWL). Finally, because this semantic discovery framework relies on the exchange of TD documents, Ch. 4 introduces a binary format for RDF that proves efficient in serializing TD assertions such that even the smallest WoT agents, i.e. micro-controllers, can store and process them. A formalization for the semantics-preserving compaction and querying of TD documents is also introduced in this chapter, at the basis of an embedded RDF store called the µRDF store. The ability of all WoT agents to query logical assertions about themselves and their environment, as found in TD documents, is a first step towards knowledge-based intelligent systems that can operate autonomously and dynamically in a decentralized way. The µRDF store is an attempt to illustrate the practical outcomes of the theory of WoT developed throughout this thesis.}, subject = {Semantic Web}, language = {en} } @phdthesis{Wahl2019, author = {Wahl, Florian}, title = {Methods for monitoring the human circadian rhythm in free-living}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7607}, school = {Universit{\"a}t Passau}, pages = {vi, 123 Seiten}, year = {2019}, abstract = {Our internal clock, the circadian clock, determines at which time we have our best cognitive abilities, are physically strongest, and when we are tired. Circadian clock phase is influenced primarily through exposure to light. A direct pathway from the eyes to the suprachiasmatic nucleus, where the circadian clock resides, is used to synchronise the circadian clock to external light-dark cycles. In modern society, with the ability to work anywhere at anytime and a full social agenda, many struggle to keep internal and external clocks synchronised. Living against our circadian clock makes us less efficient and poses serious health impact, especially when exercised over a long period of time, e.g. in shift workers. Assessing circadian clock phase is a cumbersome and uncomfortable task. A common method, dim light melatonin onset testing, requires a series of eight saliva samples taken in hourly intervals while the subject stays in dim light condition from 5 hours before until 2 hours past their habitual bedtime. At the same time, sensor-rich smartphones have become widely available and wearable computing is on the rise. The hypothesis of this thesis is that smartphones and wearables can be used to record sensor data to monitor human circadian rhythms in free-living. To test this hypothesis, we conducted research on specialised wearable hardware and smartphones to record relevant data, and developed algorithms to monitor circadian clock phase in free-living. We first introduce our smart eyeglasses concept, which can be personalised to the wearers head and 3D-printed. Furthermore, hardware was integrated into the eyewear to recognise typical activities of daily living (ADLs). A light sensor integrated into the eyeglasses bridge was used to detect screen use. In addition to wearables, we also investigate if sleep-wake patterns can be revealed from smartphone context information. We introduce novel methods to detect sleep opportunity, which incorporate expert knowledge to filter and fuse classifier outputs. Furthermore, we estimate light exposure from smartphone sensor and weather in- formation. We applied the Kronauer model to compare the phase shift resulting from head light measurements, wrist measurements, and smartphone estimations. We found it was possible to monitor circadian phase shift from light estimation based on smartphone sensor and weather information with a weekly error of 32±17min, which outperformed wrist measurements in 11 out of 12 participants. Sleep could be detected from smartphone use with an onset error of 40±48 min and wake error of 42±57 min. Screen use could be detected smart eyeglasses with 0.9 ROC AUC for ambient light intensities below 200lux. Nine clusters of ADLs were distinguished using Gaussian mixture models with an average accuracy of 77\%. In conclusion, a combination of the proposed smartphones and smart eyeglasses applications could support users in synchronising their circadian clock to the external clocks, thus living a healthier lifestyle.}, subject = {Tagesrhythmus}, language = {en} } @phdthesis{Kronawitter2019, author = {Kronawitter, Stefan}, title = {Automatic Performance Optimization of Stencil Codes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7618}, school = {Universit{\"a}t Passau}, pages = {xiii, 130 Seiten}, year = {2019}, abstract = {A widely used class of codes are stencil codes. Their general structure is very simple: data points in a large grid are repeatedly recomputed from neighboring values. This predefined neighborhood is the so-called stencil. Despite their very simple structure, stencil codes are hard to optimize since only few computations are performed while a comparatively large number of values have to be accessed, i.e., stencil codes usually have a very low computational intensity. Moreover, the set of optimizations and their parameters also depend on the hardware on which the code is executed. To cut a long story short, current production compilers are not able to fully optimize this class of codes and optimizing each application by hand is not practical. As a remedy, we propose a set of optimizations and describe how they can be applied automatically by a code generator for the domain of stencil codes. A combination of a space and time tiling is able to increase the data locality, which significantly reduces the memory-bandwidth requirements: a standard three-dimensional 7-point Jacobi stencil can be accelerated by a factor of 3. This optimization can target basically any stencil code, while others are more specialized. E.g., support for arbitrary linear data layout transformations is especially beneficial for colored kernels, such as a Red-Black Gauss-Seidel smoother. On the one hand, an optimized data layout for such kernels reduces the bandwidth requirements while, on the other hand, it simplifies an explicit vectorization. Other noticeable optimizations described in detail are redundancy elimination techniques to eliminate common subexpressions both in a sequence of statements and across loop boundaries, arithmetic simplifications and normalizations, and the vectorization mentioned previously. In combination, these optimizations are able to increase the performance not only of the model problem given by Poisson's equation, but also of real-world applications: an optical flow simulation and the simulation of a non-isothermal and non-Newtonian fluid flow.}, subject = {Optimierung}, language = {en} } @phdthesis{Stadler2015, author = {Stadler, Thomas}, title = {Eine Anwendung der Invariantentheorie auf das Korrespondenzproblem lokaler Bildmerkmale}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4026}, school = {Universit{\"a}t Passau}, pages = {XII, 324 S.}, year = {2015}, abstract = {Als sich in der ersten H{\"a}lfte des 19. Jahrhunderts zunehmend mehr bedeutende Mathematiker mit der Suche nach Invarianten besch{\"a}ftigten, konnte nat{\"u}rlich noch niemand vorhersehen, dass die Invariantentheorie mit Beginn des Computerzeitalters in der Bildverarbeitung bzw. dem Rechnersehen ein {\"a}ußerst fruchtbares Anwendungsgebiet finden wird. In dieser Arbeit wird eine neue Anwendungsm{\"o}glichkeit der Invariantentheorie in der Bildverarbeitung vorgestellt. Dazu werden lokale Bildmerkmale betrachtet. Dabei handelt es sich um die Koordinaten einer Polynomfunktion bzgl. einer geeigneten Orthonormalbasis von P_n(R^2,R), die die zeitintegrierte Sensorinputfunktion auf lokalen Pixelfenstern bestm{\"o}glich approximiert. Diese Bildmerkmale werden in vielen Anwendungen eingesetzt, um Objekte in Bildern zu erkennen und zu lokalisieren. Beispiele hierf{\"u}r sind die Detektion von Werkst{\"u}cken an einem Fließband oder die Verfolgung von Fahrbahnmarkierungen in Fahrerassistenzsystemen. Modellieren l{\"a}sst sich die Suche nach einem Muster in einem Suchbild als Paar von Stereobildern, auf denen lokal die affin-lineare Gruppe AGL(R) operiert. Will man also feststellen, ob zwei lokale Pixelfenster in etwa Bilder eines bestimmten dreidimensionalen Oberfl{\"a}chenausschnitts sind, ist zu kl{\"a}ren, ob die Bildausschnitte durch eine Operation der Gruppe AGL(R) n{\"a}herungsweise ineinander {\"u}bergef{\"u}hrt werden k{\"o}nnen. Je nach Anwendung gen{\"u}gt es bereits, passende Untergruppen G von AGL(R) zu betrachten. Dank der lokalen Approximation durch Polynomfunktionen induziert die Operation einer Untergruppe G eine Operation auf dem reellen Vektorraum P_n(R^2,R). Damit l{\"a}sst sich das Korrespondenzproblem auf die Frage reduzieren, ob es eine Transformation T in G gibt so, dass p ungef{\"a}hr mit der Komposition von q und T f{\"u}r die zugeh{\"o}rigen Approximationspolynome p,q in P_n(R^2,R) gilt. Mit anderen Worten, es ist zu kl{\"a}ren, ob sich p und q n{\"a}herungsweise in einer G-Bahn befinden, eine typische Fragestellung der Invariantentheorie. Da nur lokale Bildausschnitte betrachtet werden, gen{\"u}gt es weiter, Untergruppen G von GL_2(R) zu betrachten. Dann erh{\"a}lt man sofort auch die Antwort f{\"u}r das semidirekte Produkt von R^2 mit G. Besonders interessant f{\"u}r Anwendungen ist hierbei die spezielle orthogonale Gruppe G=SO_2(R) und damit insgesamt die eigentliche Euklidische Gruppe. F{\"u}r diese Gruppe und spezielle Pixelfenster ist das Korrespondenzproblem bereits gel{\"o}st. In dieser Arbeit wird das Problem in eben dieser Konstellation ebenfalls gel{\"o}st, allerdings auf elegante Weise mit Methoden der Invariantentheorie. Der Ansatz, der hier vorgestellt wird, ist aber nicht auf diese Gruppe und spezielle Pixelfenster begrenzt, sondern leicht auf weitere F{\"a}lle erweiterbar. Dazu ist insbesondere zu kl{\"a}ren, wie sich sogenannte fundamentale Invarianten von lokalen Bildmerkmalen, also letztendlich Invarianten von Polynomfunktionen, berechnen lassen, d.h. Erzeugendensysteme der entsprechenden Invariantenringe. Mit deren Hilfe l{\"a}sst sich die Zugeh{\"o}rigkeit einer Polynomfunktion zur Bahn einer anderen Funktion auf einfache Weise untersuchen. Neben der Vorstellung des Verfahrens zur Korrespondenzfindung und der daf{\"u}r notwendigen Theorie werden in dieser Arbeit Erzeugendensysteme von Invariantenringen untersucht, die besonders "sch{\"o}ne" Eigenschaften besitzen. Diese sch{\"o}nen Erzeugendensysteme von Unteralgebren werden, analog zu Gr{\"o}bner-Basen als Erzeugendensysteme von Idealen, SAGBI-Basen genannt ("Subalgebra Analogs to Gr{\"o}bner Bases for Ideals"). SAGBI-Basen werden hier insbesondere aus algorithmischer Sicht behandelt, d.h. die Berechnung von SAGBI-Basen steht im Vordergrund. Dazu werden verschiedene Algorithmen erarbeitet, deren Korrektheit bewiesen und implementiert. Daraus resultiert ein Software-Paket zu SAGBI-Basen f{\"u}r das Computeralgebrasystem ApCoCoA, dessen Funktionalit{\"a}t in diesem Umfang in keinem Computeralgebrasystem zu finden sein wird. Im Zuge der Umsetzung der einzelnen Algorithmen konnte außerdem die Theorie der SAGBI-Basen an zahlreichen Stellen erweitert werden.}, language = {de} } @phdthesis{dePonteMueller2016, author = {de Ponte M{\"u}ller, Fabian}, title = {Cooperative Relative Positioning for Vehicular Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5411}, school = {Universit{\"a}t Passau}, pages = {vii, 247 Seiten}, year = {2016}, abstract = {Fahrerassistenzsysteme sind ein wesentlicher Baustein zur Steigerung der Sicherheit im Straßenverkehr. Vor allem sicherheitsrelevante Applikationen ben{\"o}tigen eine genaue Information {\"u}ber den Ort und der Geschwindigkeit der Fahrzeuge in der unmittelbaren Umgebung, um m{\"o}gliche Gefahrensituationen vorherzusehen, den Fahrer zu warnen oder eigenst{\"a}ndig einzugreifen. Repr{\"a}sentative Beispiele f{\"u}r Assistenzsysteme, die auf eine genaue, kontinuierliche und zuverl{\"a}ssige Relativpositionierung anderer Verkehrsteilnehmer angewiesen sind, sind Notbremsassitenten, Spurwechselassitenten und Abstandsregeltempomate. Moderne L{\"o}sungsans{\"a}tze benutzen Umfeldsensorik wie zum Beispiel Radar, Laser Scanner oder Kameras, um die Position benachbarter Fahrzeuge zu sch{\"a}tzen. Dieser Sensorsysteme gemeinsame Nachteile sind deren limitierte Erfassungsreichweite und die Notwendigkeit einer direkten und nicht blockierten Sichtlinie zum Nachbarfahrzeug. Kooperative L{\"o}sungen basierend auf einer Fahrzeug-zu-Fahrzeug Kommunikation k{\"o}nnen die eigene Wahrnehmungsreichweite erh{\"o}hen, in dem Positionsinformationen zwischen den Verkehrsteilnehmern ausgetauscht werden. In dieser Dissertation soll die M{\"o}glichkeit der kooperativen Relativpositionierung von Straßenfahrzeugen mittels Fahrzeug-zu-Fahrzeug Kommunikation auf ihre Genauigkeit, Kontinuit{\"a}t und Robustheit untersucht werden. Anstatt die in jedem Fahrzeug unabh{\"a}ngig ermittelte Position zu {\"u}bertragen, werden in einem neuartigem Ansatz GNSS-Rohdaten, wie Pseudoranges und Doppler-Messungen, ausgetauscht. Dies hat den Vorteil, dass sich korrelierte Fehler in beiden Fahrzeugen potentiell herausk{\"u}rzen. Dies wird in dieser Dissertation mathematisch untersucht, simulativ modelliert und experimentell verifiziert. Um die Zuverl{\"a}ssigkeit und Kontinuit{\"a}t auch in "gest{\"o}rten" Umgebungen zu erh{\"o}hen, werden in einem Bayesischen Filter die GNSS-Rohdaten mit Inertialsensormessungen aus zwei Fahrzeugen fusioniert. Die Validierung des Sensorfusionsansatzes wurde im Rahmen dieser Dissertation in einem Verkehrs- sowie in einem GNSS-Simulator durchgef{\"u}hrt. Zur experimentellen Untersuchung wurden zwei Testfahrzeuge mit den verschiedenen Sensoren ausgestattet und Messungen in diversen Umgebungen gefahren. In dieser Arbeit wird gezeigt, dass auf Autobahnen, die Relativposition eines anderen Fahrzeugs mit einer Genauigkeit von unter einem Meter kontinuierlich gesch{\"a}tzt werden kann. Eine hohe Zuverl{\"a}ssigkeit in der longitudinalen und lateralen Richtung k{\"o}nnen erzielt werden und das System erweist 90\% der Zeit eine Unsicherheit unter 2.5m. In l{\"a}ndlichen Umgebungen w{\"a}chst die Unsicherheit in der relativen Position. Mit Hilfe der on-board Sensoren k{\"o}nnen Fehler bei der Fahrt durch W{\"a}lder und D{\"o}rfer korrekt gest{\"u}tzt werden. In st{\"a}dtischen Umgebungen werden die Limitierungen des Systems deutlich. Durch die erschwerte Sch{\"a}tzung der Fahrtrichtung des Ego-Fahrzeugs ist vor Allem die longitudinale Komponente der Relativen Position in st{\"a}dtischen Umgebungen stark verf{\"a}lscht.}, subject = {Fahrerassistenzsystem}, language = {en} } @phdthesis{Fischer2017, author = {Fischer, Andreas}, title = {An Evaluation Methodology for Virtual Network Embedding}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4793}, school = {Universit{\"a}t Passau}, pages = {XVII, 179 S.}, year = {2017}, abstract = {The increasing scale and complexity of computer networks imposes a need for highly flexible management mechanisms. The concept of network virtualization promises to provide this flexibility. Multiple arbitrary virtual networks can be constructed on top of a single substrate network. This allows network operators and service providers to tailor their network topologies to the specific needs of any offered service. However, the assignment of resources proves to be a problem. Each newly defined virtual network must be realized by assigning appropriate physical resources. For a given set of virtual networks, two questions arise: Can all virtual networks be accommodated in the given substrate network? And how should the respective resources be assigned? The underlying problem is commonly known as the Virtual Network Embedding problem. A multitude of algorithms has already been proposed, aiming to provide solutions to that problem under various constraints. For the evaluation of these algorithms typically an empirical approach is adopted, using artificially created random problem instances. However, due to complex effects of random problem generation the obtained results can be hard to interpret correctly. A structured evaluation methodology that can avoid these effects is currently missing. This thesis aims to fill that gap. Based on a thorough understanding of the problem itself, the effects of random problem generation are highlighted. A new simulation architecture is defined, increasing the flexibility for experimentation with embedding algorithms. A novel way of generating embedding problems is presented which migitates the effects of conventional problem generation approaches. An evaluation using these newly defined concepts demonstrates how new insights on algorithm behavior can be gained. The proposed concepts support experimenters in obtaining more precise and tangible evaluation data for embedding algorithms.}, subject = {Virtuelles Netz}, language = {en} } @phdthesis{Loewe2017, author = {L{\"o}we, Stefan}, title = {Effective Approaches to Abstraction Refinement for Automatic Software Verification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4815}, school = {Universit{\"a}t Passau}, pages = {XXI, 155 S.}, year = {2017}, abstract = {This thesis presents various techniques that aim at enabling more effective and more efficient approaches for automatic software verification. After a brief motivation why automatic software verification is getting ever more relevant, we continue with detailing the formalism used in this thesis and on the concepts it is built on. We then describe the design and implementation of the value analysis, an analysis for automatic software verification that tracks state information concretely. From a thorough evaluation based on well over 4 000 verification tasks from the latest edition of the International Competition on Software Verification (SV-COMP), we learn that this plain value analysis leads to an efficient verification process for many verification tasks, but at the same time, fails to solve other verification tasks due to state-space explosion. From this insight we infer that some form of abstraction technique must be added to the value analysis in order to also allow the successful verification of large and complex verification tasks. As a solution, we propose to incorporate counterexample-guided abstraction refinement (CEGAR) and interpolation into the value domain. To this end, we design a novel interpolation procedure, that extracts from infeasible counterexamples interpolants for the value domain, allowing to form a precision strong enough to exclude these infeasible counterexamples, and to make progress in the CEGAR loop. We then describe several optimizations and extensions to these concepts, such that the value analysis with CEGAR becomes competitive for automatic software verification. As the next step, we combine the value analysis with CEGAR with a predicate analysis, to obtain a more precise and efficient composite analysis based on CEGAR. This composite analysis is indeed on a par with the world's leading software verification tools, as witnessed by the results of SV-COMP'13 where this approach achieved the 2 nd place in the overall ranking. After having available competitive CEGAR-based analyses for the value domain, the predicate domain, and the combination thereof, we then turn our attention to techniques that have the goal to make all these CEGAR-based approaches more successful. Our first novel idea in this regard is based on the concept of infeasible sliced prefixes, which allow the computation of different precisions from a single infeasible counterexample. This adds choice to the CEGAR loop, while without this enhancement, no choice for a specific precision, i. e., a specific refinement, is possible. In our evaluation we show, for both the value analysis and the predicate analysis, that choosing different infeasible sliced prefixes during the refinement step leads to major differences in verification effectiveness and verification efficiency. Extending on the concept of infeasible sliced prefixes, we define several heuristics in order to precisely select a single refinement from a set of possible refinements. We make this new concept, which we refer to as guided refinement selection, available to both the value and predicate analysis, and in a large-scale evaluation we try to answer the question which selection technique leads to well suited abstractions and thus, to a more effective verification process. Additionally, we present the idea of inter-analysis refinement selection, where the refinement component of a composite analysis may decide which of its component analyses is best to be refined, and in yet another evaluation we highlight the positive effects of this technique. Finally, we present the results of SV-COMP'16, where the verifier we contributed and which is based on the concepts and ideas presented in this thesis achieved the 1 st place in the category DeviceDriversLinux64.}, subject = {Programmverifikation}, language = {en} }