@phdthesis{NorbertoSales2022, author = {Norberto Sales, Juliano Efson}, title = {An Explainable Semantic Parser for End-User Development}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10718}, school = {Universit{\"a}t Passau}, pages = {xvi, 165 Seiten}, year = {2022}, abstract = {Programming is a key skill in a world where businesses are driven by digital transformations. Although much of the programming demand can be addressed by a simple set of instructions composing libraries and services available on the web, non-technical professionals, such as domain experts and analysts, are still unable to construct their own programs due to the intrinsic complexity of coding. Among other types of end-user development, natural language programming has emerged to allow users to program without the formalism of traditional programming languages, where a tailored semantic parser can translate a natural language utterance into a formal command representation that can be processed by a computational machine. Currently, semantic parsers are typically built on top of a learning method that defines its behaviour based on the patterns in a large body of training data, whose production is frequently costly and time-consuming. Our research is devoted to studying and proposing a semantic parser for natural language commands targeting a scenario with low availability of training data. Our proposed semantic parser follows a multi-component architecture, composed of a specialised shallow parser that associates natural language commands with predicate-argument structures, integrated with a distributional ranking model that matches the command to a function signature available from an API knowledge base. Systems developed with statistical learning models and complex linguistic resources, such as the proposed semantic parser, do not natively provide an easy way to associate a single feature of the input data with its impact on system behaviour. In this scenario, end-user explanations for intelligent systems have become a strong requirement to increase user confidence and system literacy. Thus, our research designed an explanation model for the proposed semantic parser that fits the heterogeneity of its multi-component architecture. The explanation model explores a hierarchical representation with an increasing degree of technical depth, providing higher-level explanations in the initial layers and moving gradually to those that demand technical knowledge, applying different explanation strategies to better express the approach behind each component. With the support of a user-centred experiment, we compared the utility of different types of explanations and the impact of background knowledge on their preferences.}, language = {en} } @phdthesis{Sui2018, author = {Sui, Zhiyuan}, title = {Security and Privacy Schemes for Demand Response in Smart Grids}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5809}, school = {Universit{\"a}t Passau}, pages = {xx, 153 Seiten}, year = {2018}, abstract = {Smart Grids integrate currently isolated power and communications networks, while introducing several new technologies on the hardware and software sides. One of the most important ingredients is the potential for demand-response programs, which offer the possibility of sending instructions to consumers to adapt their power consumption over a certain period of time. However, high-frequency data collection exposes consumers' usage behaviors, leading to security and privacy challenges for Smart Grids.
In this thesis, three cryptographic schemes are constructed for different demand-response programs. In the mandatory incentive-based demand-response program, privacy preservation depends on the power consumption of consumers. An anonymous authentication scheme is constructed for overload auditing and privacy preservation. Consumers' identities are anonymous during normal operation. The operation center defines an acceptable consumption threshold at times of power shortage. Consumers must follow the instruction and curtail their power consumption to meet the threshold. If they do so, the consumers keep their anonymity, while disobedient consumers, whose power consumption exceeds the threshold, can be identified. Security analysis demonstrates that the constructed anonymous authentication scheme is secure in a random oracle model. In the voluntary incentive-based demand-response program, consumers are categorized as either obedient or disobedient consumers according to their consumption curtailment. Consumers utilize a homomorphic encryption algorithm to encrypt their usage and report the ciphertexts to the operation center periodically. At a time of grid instability, the obedient consumers reduce their consumption and prove their curtailment by using a range proof. Both the usage reports and the proofs from obedient consumers concerning their consumption are reported without leaking private information. In order to achieve the real-time requirement, a security model is proposed and a batch verification algorithm is constructed, which is proved to be secure in the defined oracle model. Apart from reward and penalty detection in demand-response programs, theft detection is also an important requirement in Smart Grids. In order to achieve theft detection, this thesis employs dynamic k-times anonymous authentication and blind signatures to create an efficient theft detection mechanism in the prepaid card system, where consumers pay for their consumption in advance and obtain credentials. A consumer sends the credentials anonymously and obtains corresponding credentials during times of consumption. If a thief tries to send reused credentials to steal electricity, his anonymity will be revoked. Finally, this thesis proves that the proposed mechanism finds the real identities of power thieves, without sacrificing the privacy of honest consumers under the random oracle model.}, subject = {Intelligentes Stromnetz}, language = {en} } @phdthesis{Hintz2009, author = {Hintz, Martin}, title = {Micro-Impact: Deconstructing the complex impact process of a simple microinsurance product in Indonesia}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-20389}, school = {Universit{\"a}t Passau}, year = {2009}, abstract = {This thesis analyses the social impact of Payung Keluarga, an obligatory enhanced credit life microinsurance product launched by Allianz in Indonesia in 2006. Payung Keluarga automatically insures micro-borrowers who take out microcredits from microfinance institutions. In case of death, the outstanding credit balance is canceled and the beneficiary receives twice the original loan as additional payout. Payung Keluarga was conceived to ameliorate the assumed post-mortem financial crisis of low-asset families. Through qualitative-explorative field research from 2006 until 2008, I investigated whether this developmental intention was realized. It is the first impact analysis of microinsurance in Indonesia. In the research process, I took the position of an observing participant.
As operational project leader for Allianz in Indonesia, I was virtually doing research on my own work. The resulting challenge to research neutrality is primarily mitigated by the sobering to discerning social impact which was eventually revealed. The majority of the insured were married female Muslim petty traders in urban and semi-urban areas around Jakarta. Socio-economically, these women stand at the upper end of the low-asset stratum. Their husbands were generally the main breadwinners of the family, and it was mostly they who received the insurance payouts. It could therefore be said that Payung Keluarga benefited the main breadwinner instead of insuring him. The study found that norms of a moral economy still exert significant clout on the insured. The moral economy aims at providing "subsistence insurance" for all community members through an intricate collective system of balanced exchanges. The corresponding "premium" is a denouncement of self-interested material asset accumulation. Besides structural reasons, it was this moral restriction that saw the businesses of the women stagnate at low and socially inconspicuous levels. Payung Keluarga did not help to overcome the assumed post-mortem financial crisis. In reality, such a crisis did not exist since community and family support among low-asset Muslim Indonesians is normally strong enough to largely provide for the bereaved family. This support is driven by the perception of death as a collective risk in the light of the moral economy and hinges on principles of balanced reciprocity. For cultural and religious reasons, the beneficiaries used most of the insurance payouts for funeral ceremonies and repayment of informal debt. With the advent of Payung Keluarga, familial post-mortem assistance has been reduced. Funeral costs also seem to have been inflated by the product. It has thereby promoted a long-term societal shift from equality-seeking balanced reciprocity towards status-seeking and socially diversifying general reciprocity. In effect, Payung Keluarga has attacked cooperative social cohesion head-on where it is still strongest in a rapidly modernizing Indonesian society. This discerning and unintended impact of Payung Keluarga is hardly offset by a positive increase in financial literacy among the insured. Furthermore, the effect on the insured's "peace of mind" is ambivalent: while most of the insured stated that they felt safer, some declared that they felt less secure with their obligatory coverage for fear of interference with divine predetermination. Its overall developmental impact can literally be described as "micro". Instead of protecting the status quo of the family, Payung Keluarga has assumed the role of an actor of social change. Not only because it has changed the funeral pattern of the beneficiaries, but also because it promotes a far-reaching conceptual paradigm shift from balanced reciprocity, which forms a core pillar of the insured's social structure, towards general reciprocity. The thesis hypothesizes that, with sufficient insurance coverage provided, the insured will increasingly opt out of the coercively egalitarian "subsistence insurance" system. Such an opt-out will allow the insured to pursue a more aggressive economic asset accumulation strategy, particularly in combination with micro-credit. For the individual, this can be seen as a "liberating fortune" that would induce more women to grow their businesses to significant sizes. In parallel, it would deal a blow to cooperative social cohesion.
I propose to call this the "double fortune / double blow" dilemma of microfinance. Although this thesis is exemplary, some of its findings can be generalized: The impact of microinsurance is highly dependent on cultural, religious and socio-demographic context. Any microinsurance intervention concerned with social impact should be preceded by a thick contextualization going beyond the usual demand assessments. In turn, microinsurance likewise impacts context as an actor of ambivalent social change. The complex influence of context and the role of microinsurance as an actor of social change have so far hardly been discussed in the development discourse.}, subject = {Versicherung}, language = {en} } @phdthesis{Roetzer2003, author = {R{\"o}tzer, Andreas}, title = {Die Einteilung der Wissenschaften - Analyse und Typologisierung von Wissenschaftsklassifikationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-707}, school = {Universit{\"a}t Passau}, year = {2003}, abstract = {The thesis discusses the manifold implications of classifications of the sciences and puts a typology up for discussion with whose help it becomes possible to analyse knowledge classifications with respect to a multitude of aspects. Concrete examples from the more than 2000-year history of the classification of the sciences illustrate the results of the investigation. They serve as illustrative material for examining, by means of their respective characteristic solutions, the presuppositions underlying them. The focus of the investigation, however, lies on the implicit and explicit presuppositions of concrete classifications of the sciences. The intended systematisation makes it necessary to forgo a historical account. The systematically ordered investigation, in combination with the chronologically ordered appendix, enables the reader to gain insights into the emergence and disappearance of dominant epistemological ideas and into the state of development of the sciences in a given epoch, as well as to recognise the underlying structures. Although a linear historical account has thus been dispensed with, the reader is nevertheless presented with a small, representative overview of their history, by means of which the major lines of development become apparent.}, subject = {Systematische Aufstellung}, language = {de} } @phdthesis{vonRhein2016, author = {von Rhein, Alexander}, title = {Analysis Strategies for Configurable Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3682}, school = {Universit{\"a}t Passau}, pages = {xiii, 229 Seiten}, year = {2016}, abstract = {A configurable system enables users to derive individual system variants based on a selection of configuration options. To cope with the often huge number of possible configurations, several analysis approaches (e.g., for verification of configurable systems) implement different strategies to account for configurability. One popular strategy—often applied in practice—is to use sampling (i.e., analyzing only a subset of all system variants). While sampling reduces the analysis effort significantly, the information obtained is necessarily incomplete as some variants are not analyzed. A second strategy is to identify the common parts and the variable parts of a configurable system and analyze each part separately (called the feature-based strategy).
As a third strategy, researchers have begun to develop family-based analyses. Family-based approaches analyze the code base of a configurable system as a whole, rather than the individual variants or parts of the system, thereby exploiting similarities among individual variants to reduce analysis effort. Each of these three strategies has advantages and disadvantages, which might even prevent its application (e.g., the family-based strategy typically needs much main memory). The goal of this thesis is to enable the efficient analysis of configurable systems, even if existing strategies fail (e.g., the family-based strategy, because of memory limitations). To this end, we designed a framework that models the key aspects of configurable-system analysis strategies, independent of their implementation and of the analysis techniques (e.g., type checking or model checking). Guided by our model, we developed a number of analysis strategies for configurable systems. To learn about advantages and disadvantages of individual strategies, we compared these in a series of empirical studies. In particular, we developed and evaluated a model-checking analysis and a data-flow analysis for configurable systems. One of our key findings is that family-based analysis outperforms most sampling heuristics with respect to analysis time, while being able to make definite statements about all variants of a configurable system. Furthermore, we identified advantages and disadvantages of analysis strategies and how to mitigate them by combining strategies. In our endeavor, we identified two key problems that are common to configurable-system analyses, and we developed supporting techniques to solve them. These techniques are general and are applicable beyond our research. In particular, we developed presence-condition simplification and variability encoding. Presence-condition simplification provides a simple method to reduce the size of the output or the internal data structure of configurable-system analyses. Variability encoding provides a means for transforming compile-time variability to run-time variability, which enables many family-based analyses. Our key contributions are the model of analysis strategies for configurable systems and the corresponding empirical comparisons of strategies. Our findings are backed by empirical studies, which helped broaden the community's knowledge on analyses of configurable systems (indicated by citations). For these evaluations, we prepared several subject systems, which have also been used already by other researchers. Furthermore, we developed several analysis tools and demonstrated their feasibility in practical application scenarios based on code from, for example, the Linux kernel. Our tools are based on variability-aware optimizations that enable levels of scalability on configurable systems that were not possible with other tools before.}, subject = {Software Engineering}, language = {en} } @phdthesis{Woelfl2018, author = {W{\"o}lfl, Andreas}, title = {Data Management in Certified Avionics Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5758}, school = {Universit{\"a}t Passau}, pages = {xi, 177 Seiten}, year = {2018}, abstract = {Data management is a cornerstone for any kind of information system - including the aerospace and aviation sector. In contrast to conventional domains, software development in the avionics domain must adhere to a legally binding certification process, called qualification.
The success of the process depends on compliance with international standards, such as DO-178: Software Considerations in Airborne Systems and Equipment Certification. From a software developer's perspective, challenges arise in terms of methods and tools. Techniques that have a potential impact on the deterministic and predictable execution of avionics software are prohibited. The objective of this thesis' research is to develop a scalable method to realize data management for multi-variant avionics software under the restrictions and constraints of the domain. Since avionics software faces very long life-cycles (up to 75 years), a particular focus is placed on maintenance and evolution. Based on the insights gained in a semi-structured interview at Airbus Helicopters, industrially established approaches to implementing qualified avionics software are first assessed and then compared with respect to their strengths and weaknesses for data management. As a result, a novel development approach is proposed, combining model-based techniques and product-line technology to derive the source code of highly specific data-management variants, as well as the majority of assets required for the qualification process, from a declarative system specification. In order to demonstrate the practicability of the approach in industry, a framework is presented that is deployed and applied at Airbus Helicopters to generate qualifiable data-management components for the variants of the NH90 helicopter. The maintainability is shown by means of a domain-specific optimization, in which the model-based and generative approach is used to establish safe memory overlays at compile-time. Key findings reveal a substantially reduced memory footprint (29.1\% in the case of a real-world scenario), as well as a significantly facilitated implementation process, which would not be achievable using conventional methods for software development in the avionics domain.}, subject = {Avionik}, language = {en} } @phdthesis{Stenzer2018, author = {Stenzer, Alexander}, title = {Ein Ansatz zur semantik-basierten Anfragerelaxation f{\"u}r hierarchische Strukturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5746}, school = {Universit{\"a}t Passau}, pages = {vii, 225 Seiten}, year = {2018}, abstract = {For monumental buildings, as part of our cultural heritage, in particular, as well as for buildings in general, various methods for the digital storage of information about monumental buildings were investigated within the MonArch project. The resulting MonArch system can be used for the documentation of monumental buildings and stores the digital model of the building in a relational database. The digital model of the building is created through a segmentation into building parts, which can then be combined into a structure hierarchy. In this context, a structure hierarchy is understood as a hierarchy of building parts that stand in a part-of relationship. The structure hierarchy makes it possible to tag information, e.g. documents, with a spatial reference. In addition, a topic hierarchy is supported, which allows information to be described thematically with terms.
Betrachtet man r{\"a}umliche und thematische Anfragen in vernetzten MonArch-Systemen, in denen sich mehrere Geb{\"a}udearchive zusammenschließen, ist diese starke Bindung der Information an die einzigartige Struktur jedes Geb{\"a}udes ein Hindernis f{\"u}r ein einfaches Verfahren zur r{\"a}umlichen Suche. Da sich jedes Geb{\"a}ude in seinem speziellen strukturellen und r{\"a}umlichen Aufbau unterscheidet, liefert eine r{\"a}umliche Anfrage, die speziell auf diese Eigenheiten eines Geb{\"a}udes ausgerichtet ist, f{\"u}r andere Geb{\"a}ude keine Suchergebnisse. F{\"u}r thematische Anfragen stellen nicht kompatible Themenhierarchien ein Hindernis dar, die eine {\"u}bergreifende thematische Anfrage verhindern. Die gr{\"o}ßte Herausforderung ist es, Struktur- und Themenhierarchien aufeinander abzubilden. Zur L{\"o}sung des geschilderten Problems wird in vernetzten Informationssystemen auf eine geeignete Transformation der urspr{\"u}nglichen Anfrage zur{\"u}ckgegriffen, um den Anfragefokus zu erweitern (Relaxation) oder eine Anpassung an die Gegebenheiten des entfernten Informationssystems zu erreichen (Transformation). Das Anfragetransformations- und -relaxationsverfahren, das in dieser Arbeit vorgestellt wird, nutzt eine Generalisierungsbeziehung aus, um ausgehend von einer Anfrage an eine spezielle Struktur- und Themenhierarchie eine automatische Transformation der Anfrage durchzuf{\"u}hren. Bei Themenhierarchien sind gemeinsame Oberthemen ein Ansatzpunkt. Bei Strukturhierarchien k{\"o}nnen Typinformationen zu Geb{\"a}udeteilen die Generalisierungsbeziehung darstellen. Die transformierte und dadurch relaxierte Anfrage kann dann an ein Netzwerk von MonArch-Systemen gestellt werden, ohne dass eine manuelle Auswahl der Geb{\"a}udeteile in anderen Strukturhierarchien oder eine angepasste Themenauswahl erfolgen muss. Dazu muss die Strukturhierarchie der anderen Geb{\"a}ude im Netzwerk von MonArch-Systemen nicht bekannt sein. Im Rahmen der vorliegenden Arbeit werden verschiedene Relaxationsverfahren, z.B. ein angepasstes Spreading-Activation-Verfahren, zur automatischen Anfragetransformation von r{\"a}umlichen und thematischen Anfragen vorgestellt, mit dem Ziel eine vollst{\"a}ndige Abbildung zwischen den Strukturhierarchien von Geb{\"a}uden und Themenhierarchien zu vermeiden. Erreicht wird das Ziel durch eine Erweiterung des MonArch-Datenmodells und eine Verallgemeinerung der MonArch-Anfragen, die eine Anfragetransformation zum Anfragezeitpunkt erlauben.}, subject = {Abfragesprache}, language = {de} } @phdthesis{LoperaGonzalez2018, author = {Lopera Gonzalez, Luis Ignacio}, title = {Mining Functional and Structural Relationships of Context Variables in Smart-Buildings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5737}, school = {Universit{\"a}t Passau}, pages = {viii, 99 Seiten}, year = {2018}, abstract = {The Internet of Things (IoT) is a network of computational services, devices, and people, which share information with each other. In IoT, inter-system communication is possible and human interaction is not required. IoT devices are penetrating the home and office building environments. According to current estimates, about 35 billion IoT devices will be connected by the year 20212. In the IoT business model, value comes from integrating devices into applications, e.g., home and office automation. 
In general, an IoT application associates different information sources with actions which can modify the environment, e.g., change the room's temperature, inform a person, e.g., send an e-mail, or activate other services, e.g., buy milk on-line. In this thesis, we focus on the commissioning and verification processes of IoT devices used in building automation applications. Within a building's lifespan, new devices are added, interior spaces are refurbished, and faulty devices are replaced. All of these changes are currently made manually. Furthermore, consider that a context-aware Building Management System (BMS) is an IoT application, which measures direct-context from the building's sensors to characterize environmental conditions, user locations, and state. Additionally, a BMS combines sensor information to derive inferred-context, such as user activity. Similar to IoT devices, inferred-context instances have to be created manually. As the number of devices and inferred-context instances increases, keeping track of all associations becomes a time-consuming and error-prone task. The hypothesis of the thesis is that users who interact with the building create use-patterns in the data, which describe functional relations between devices and inferred-context instances, e.g., which desk-movement sensor is used to infer desk-presence and controls which overhead light; additionally, use-patterns can also provide structural relations, e.g., the relative position of spatial sensors. To test the hypothesis, this thesis presents an extension to the new IoT class rule programming paradigm, which simplifies rule creation based on classes. The proposed extension uses a semantic compiler to simplify the device and inferred-context associations. Using direct-context information and template classes, the compiler creates all possible inferred-context instances. Buildings using context-aware BMSs will have a dynamic response to user behaviour, e.g., required illumination for computer work is provided by adjusting blinds or increasing the dim setting of overhead ceiling lamps. We propose a rule mining framework to extract use-patterns and find the functional and structural relationships between devices. The rule mining framework uses three stages: (1) event extraction, (2) rule mining, (3) structure creation. The event extraction combines the building's data into a time series of device events. Then, in the rule mining stage, rules are mined from the time series, where we use the established temporal interval tree association rule learner algorithm. Additionally, we propose a rule extraction algorithm for spatial sensor data. The algorithm is based on a statistical analysis of user transition times between adjacent sensors. We also introduce a new rule extraction algorithm based on increasing belief. In the last stage, structure creation uses the extracted rules to produce device association groups, a hierarchical representation of the building, or the relative location of spatial sensors. The proposed algorithms were tested using a year-long installation in a living lab consisting of a four-person office, a 12-person open office, and a meeting room. For the spatial sensors, four locations within public buildings were used: a meeting room, a hallway, a T-crossing, and a foyer. The recording times range from two weeks to two months depending on scenario complexity. We found that user-generated patterns appear in building data.
The rule mining framework produced structures that represent functional and spatial relationships of the building's devices and provide sufficient information to automate maintenance tasks, e.g., automatic device naming. Furthermore, we found that environmental changes are also a source of device data patterns, which provide additional associations. For example, using the framework we found the fa{\c{c}}ade group for exterior light sensors. The fa{\c{c}}ade group can be used to automatically find an alternative signal source to replace broken outdoor light sensors. Finally, the rule mining framework successfully retrieved the relative location of spatial sensors in all locations but the foyer.}, subject = {Internet der Dinge}, language = {en} } @article{KronawitterLengauer2018, author = {Kronawitter, Stefan and Lengauer, Christian}, title = {Polyhedral Search Space Exploration in the ExaStencils Code Generator}, series = {ACM Transactions on Architecture and Code Optimization}, volume = {15}, journal = {ACM Transactions on Architecture and Code Optimization}, number = {4}, issn = {1544-3973}, doi = {10.1145/3274653}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5778}, year = {2018}, abstract = {Performance optimization of stencil codes requires data locality improvements. The polyhedron model for loop transformation is well suited for such optimizations with established techniques, such as the PLuTo algorithm and diamond tiling. However, for stencil codes, the domain of our project ExaStencils, it fails to yield optimal results. As an alternative, we propose a new, optimized, multi-dimensional polyhedral search space exploration and demonstrate its effectiveness: we obtain better results than existing approaches in several cases. We also propose how to specialize the search for the domain of stencil codes, which dramatically reduces the exploration effort without significantly impairing performance.}, language = {en} } @inproceedings{ParraRodriguezPosegga2018, author = {Parra Rodriguez, Juan D. and Posegga, Joachim}, title = {Local Storage on Steroids: Abusing Web Browsers for Hidden Content Storage and Distribution}, series = {International Conference on Security and Privacy in Communication Systems}, booktitle = {International Conference on Security and Privacy in Communication Systems}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-01704-0}, doi = {10.1007/978-3-030-01704-0_19}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6572}, pages = {20 Seiten}, year = {2018}, abstract = {Analysing security assumptions made for the WebRTC and postMessage APIs led us to find a novel attack abusing the browsers' persistent storage capabilities. The presented attack can be executed without the website visitor's knowledge, and it requires neither browser vulnerabilities nor additional software on the browser's side. To exemplify this, we study how an attacker can use browsers to create a network for persistent storage and distribution of arbitrary data. In our proof of concept, the total storage of the network, and therefore the space used within each browser, grows linearly with the number of origins delivering the malicious JavaScript code. Further, data transfers between browsers are not restricted by the Same Origin Policy, which allows for a unified cross-origin browser network, regardless of the origin from which the script executing the functionality is loaded.
In the course of our work, we assess the feasibility of a real-life deployment of the network by running experiments using Linux containers and browser automation tools. Moreover, we show how security mechanisms against third-party tracking, cross-site scripting and click-jacking can diminish the attack's impact, or even prevent it.}, language = {en} } @phdthesis{Kasinathan2021, author = {Kasinathan, Prabhakaran}, title = {Workflow-aware access control for the Internet of Things}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8915}, school = {Universit{\"a}t Passau}, pages = {xxiii, 214 Seiten}, year = {2021}, abstract = {IoT is defined as a paradigm where "things" have sensing, actuating, communicating, and self-configuring abilities, and are connected to each other and to the Internet. Recent advancements in the manufacturing industry have helped to produce embedded devices with various sensors and actuators in large numbers at a reduced cost. As part of the IoT revolution, everyday devices such as televisions, refrigerators, cars, and even industrial machines are now connected IoT devices. Recent studies have predicted that by 2025 there will be over 75 billion such IoT devices connected to the Internet. The providers of IoT-based services want to integrate their services to satisfy customer requirements. For example, in the mobility scenario, different mobility solution providers want to jointly offer a multi-modal ticket to their customers. In such a distributed and loosely coupled environment, each owner and stakeholder wants to secure his/her own integrity, confidentiality, and functionality goals. This means that distributed rules and conditions defined by the individual owners must be enforced on the participating entities (e.g., customers or partners using their services). The owners and stakeholders may not necessarily trust each other's actions. Therefore, a mechanism is required that guarantees the rules and conditions specified by the different owners. Attacks on IoT devices and similar computing systems are increasing and getting more advanced. IoT devices are often constrained, i.e., they have limited processing power, memory, and energy. Security mechanisms designed for traditional computing systems, e.g., computers, servers, or mobile computing devices such as smartphones, may not fit in those constrained IoT devices. Weak security mechanisms and unenforced security measures were among the main reasons for recent successful attacks on IoT devices and services. As IoT is now used in many sensitive places, including critical infrastructures, securing these devices and services becomes more critical than ever. This thesis focuses on developing mechanisms that secure IoT devices and services and on enforcing the rules and conditions specified by the owners on entities that want to access owners' resources. In classical computer systems, security automata are used for specifying security policies and monitoring mechanisms are used for enforcing such policies. For instance, a reference monitor observes and stops the execution when the security policies are about to be violated; thus, the security policies are enforced. To restrict the adversary from using protected IoT devices or services for malicious purposes, it must be ensured that a workflow is followed in order to access the protected resource. In distributed IoT systems where the policies are governed by different owners, each owner would like to specify their rules and conditions in their workflows.
The workflows contain tasks that must be performed in a particular order. The goal of this thesis is to develop mechanisms to specify and enforce these workflows in the distributed IoT environment. This thesis introduces a distributed Workflow-aware Access Control (WFAC) framework that restricts the entities to do only what they are allowed to do in a collaborative environment. To gain access to a service protected by the WFAC framework, every workflow participant must prove that he/she is in a particular state of an authorized workflow. Authorized means two things: (a) the owner has authorized the workflow to be executed; (b) the workflow participant is authorized to execute it. This restricts the adversary's access to the devices and their services. The security policies defined by different owners are modeled as workflows and specified using Petri Nets. The policies are then enforced with the help of the WFAC framework, which supports error-handling, accountability, integration of practitioner-friendly tools, and interoperability with existing security mechanisms such as OAuth. Thus, the WFAC framework guarantees the integrity of workflows in a distributed environment.}, language = {en} } @phdthesis{Reislhuber2017, author = {Reislhuber, Josef}, title = {Optical Graph Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5159}, school = {Universit{\"a}t Passau}, pages = {270 Seiten}, year = {2017}, abstract = {Graphs are an important model for the representation of structural information between objects. One identifies objects with nodes and a binary relation between objects with edges. Graphs have many uses, e. g., in social sciences, life sciences and engineering. There are two primary representations: abstract and visual. The abstract representation is well suited for processing graphs by computers and is given by an adjacency list, an adjacency matrix or any abstract data structure. A visual representation is used by human users who prefer a picture. Common terms are diagram, scheme, plan, or network. The objective of Graph Drawing is to transform a graph into a visual representation called the drawing of a graph. The goal is a "nice" drawing. In this thesis we introduce Optical Graph Recognition. Optical Graph Recognition (OGR) reverses Graph Drawing and transforms a digital image of a graph into an abstract representation. Our approach consists of four phases: Preprocessing, where we determine which pixels of an image are part of the graph; Segmentation, where we recognize the nodes; Topology Recognition, where we detect the edges; and Postprocessing, where we enrich the recognized graph with additional information. We apply established digital image processing methods and make use of the special property that the image contains nodes that are connected by edges. We have focused on developing algorithms that need as few parameters as possible or that calibrate the parameters automatically. Most false recognition results are caused by crossing edges, as this makes tracing the edges difficult and can lead to other recognition errors. We have evaluated hand-drawn and computer-drawn graphs. Our algorithms have a very high recognition rate for computer-drawn graphs, e. g., from a set of 100000 computer-drawn graphs over 90\% were correctly recognized. Most false recognition results were observed for hand-drawn graphs as they can include drawing errors and inaccuracies. For universal usability we have implemented a prototype called OGRup for mobile devices like smartphones or tablet computers.
With our software it is possible to directly take a picture of a graph via a built-in camera, recognize the graph, and then use the result for further processing. Furthermore, in order to gain more insight into the way a person draws a graph by hand, we have conducted a field study.}, subject = {Bildverarbeitung}, language = {en} } @article{MandarawiRottmeierRezaeighaleetal.2020, author = {Mandarawi, Waseem and Rottmeier, J{\"u}rgen and Rezaeighale, Milad and de Meer, Hermann}, title = {Policy-Based Composition and Embedding of Extended Virtual Networks and SFCs for IIoT}, series = {Algorithms}, volume = {13}, journal = {Algorithms}, number = {9}, publisher = {MDPI}, issn = {1999-4893}, doi = {10.3390/a13090240}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8488}, year = {2020}, abstract = {The autonomic composition of Virtual Networks (VNs) and Service Function Chains (SFCs) based on application requirements is significant for complex environments. In this paper, we use graph transformation in order to compose an Extended Virtual Network (EVN) that is based on different requirements, such as locations, low latency, redundancy, and security functions. The EVN can represent physical environment devices and virtual application and network functions. We build a generic Virtual Network Embedding (VNE) framework for transforming an Application Request (AR) into an EVN. Subsequently, we define a set of transformations that reflect preliminary topological, performance, reliability, and security policies. These transformations update the entities and demands of the VN and add SFCs that include the required Virtual Network Functions (VNFs). Additionally, we propose a greedy proactive heuristic for path-independent embedding of the composed SFCs. This heuristic is appropriate for real complex environments, such as industrial networks. Furthermore, we present an Industrial Internet of Things (IIoT) use case that was inspired by Industry 4.0 concepts, in which EVNs for remote asset management are deployed over three levels: manufacturing halls, edge computing, and cloud computing. We also implement the developed methods in Alevin and show exemplary mapping results from our use case. Finally, we evaluate the chain embedding heuristic using a random topology that is typical for such a use case, and show that it can improve the admission ratio and resource utilization with minimal overhead.}, language = {en} } @phdthesis{Loewe2017, author = {L{\"o}we, Stefan}, title = {Effective Approaches to Abstraction Refinement for Automatic Software Verification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4815}, school = {Universit{\"a}t Passau}, pages = {XXI, 155 S.}, year = {2017}, abstract = {This thesis presents various techniques that aim at enabling more effective and more efficient approaches for automatic software verification. After a brief motivation of why automatic software verification is becoming ever more relevant, we continue by detailing the formalism used in this thesis and the concepts it is built on. We then describe the design and implementation of the value analysis, an analysis for automatic software verification that tracks state information concretely.
From a thorough evaluation based on well over 4 000 verification tasks from the latest edition of the International Competition on Software Verification (SV-COMP), we learn that this plain value analysis leads to an efficient verification process for many verification tasks, but at the same time fails to solve other verification tasks due to state-space explosion. From this insight we infer that some form of abstraction technique must be added to the value analysis in order to also allow the successful verification of large and complex verification tasks. As a solution, we propose to incorporate counterexample-guided abstraction refinement (CEGAR) and interpolation into the value domain. To this end, we design a novel interpolation procedure that extracts interpolants for the value domain from infeasible counterexamples, allowing us to form a precision strong enough to exclude these infeasible counterexamples, and to make progress in the CEGAR loop. We then describe several optimizations and extensions to these concepts, such that the value analysis with CEGAR becomes competitive for automatic software verification. As the next step, we combine the value analysis with CEGAR with a predicate analysis, to obtain a more precise and efficient composite analysis based on CEGAR. This composite analysis is indeed on a par with the world's leading software verification tools, as witnessed by the results of SV-COMP'13, where this approach achieved the 2nd place in the overall ranking. After having available competitive CEGAR-based analyses for the value domain, the predicate domain, and the combination thereof, we then turn our attention to techniques that aim to make all these CEGAR-based approaches more successful. Our first novel idea in this regard is based on the concept of infeasible sliced prefixes, which allow the computation of different precisions from a single infeasible counterexample. This adds choice to the CEGAR loop, while without this enhancement, no choice for a specific precision, i. e., a specific refinement, is possible. In our evaluation we show, for both the value analysis and the predicate analysis, that choosing different infeasible sliced prefixes during the refinement step leads to major differences in verification effectiveness and verification efficiency. Extending the concept of infeasible sliced prefixes, we define several heuristics in order to precisely select a single refinement from a set of possible refinements. We make this new concept, which we refer to as guided refinement selection, available to both the value and predicate analysis, and in a large-scale evaluation we try to answer the question of which selection technique leads to well-suited abstractions and thus to a more effective verification process. Additionally, we present the idea of inter-analysis refinement selection, where the refinement component of a composite analysis may decide which of its component analyses is best to be refined, and in yet another evaluation we highlight the positive effects of this technique.
Finally, we present the results of SV-COMP'16, where the verifier we contributed, which is based on the concepts and ideas presented in this thesis, achieved the 1st place in the category DeviceDriversLinux64.}, subject = {Programmverifikation}, language = {en} } @phdthesis{Petit2017, author = {Petit, Albin}, title = {Introducing Privacy in Current Web Search Engines}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4652}, school = {Universit{\"a}t Passau}, pages = {XVI, 153 S.}, year = {2017}, abstract = {During the last few years, the technological progress in collecting, storing and processing a large quantity of data for a reasonable cost has raised serious privacy issues. Privacy concerns many areas, but is especially important in frequently used services like search engines (e.g., Google, Bing, Yahoo!). These services allow users to retrieve relevant content on the Internet by exploiting their personal data. In this context, developing solutions to enable users to use these services in a privacy-preserving way is becoming increasingly important. In this thesis, we introduce SimAttack, an attack against existing protection mechanisms for querying search engines in a privacy-preserving way. This attack aims at retrieving the original user query. We show with this attack that three representative state-of-the-art solutions do not protect user privacy in a satisfactory manner. We therefore develop PEAS, a new protection mechanism that better protects user privacy. This solution leverages two types of protection: hiding the user's identity (with a succession of two nodes) and masking users' queries (by combining them with several fake queries). To generate realistic fake queries, PEAS exploits previous queries sent by the users in the system. Finally, we present mechanisms to identify sensitive queries. Our goal is to adapt existing protection mechanisms to protect sensitive queries only, and thus save user resources (e.g., CPU, RAM). We design two modules to identify sensitive queries. By deploying these modules on real protection mechanisms, we establish empirically that they dramatically improve the performance of the protection mechanisms.}, subject = {Suchmaschine}, language = {en} } @phdthesis{Kinseher2018, author = {Kinseher, Josef}, title = {New Methods for Improving Embedded Memory Manufacturing Tests}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6017}, school = {Universit{\"a}t Passau}, pages = {119 Seiten}, year = {2018}, abstract = {Due to the need for fast and energy-efficient accesses to growing amounts of data, the share and number of embedded memories inside modern microchips have been continuously increasing over the last years. Since embedded memories have the highest integration density of a fabrication technology, they pose special test challenges due to complex manufacturing defects as well as strong transistor aging phenomena. This necessitates efficient methods for detecting more subtle defects while keeping test costs low. This work presents novel methods and techniques for improving the efficiency of embedded memory manufacturing tests.
The proposed methods are demonstrated in an industrial setting based on production-proven transistor, memory, and chip models, and their benefits over the current state of the art are worked out.}, subject = {Speicher }, language = {en} } @phdthesis{Alshawish2021, author = {Alshawish, Ali}, title = {Risk-based Security Management in Critical Infrastructure Organizations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10026}, school = {Universit{\"a}t Passau}, pages = {xii, 181 Seiten}, year = {2021}, abstract = {Critical infrastructure and contemporary business organizations are experiencing an ongoing paradigm shift of business towards more collaboration and agility. On the one hand, this shift seeks to enhance business efficiency, coordinate large-scale distribution operations, and manage complex supply chains. But, on the other hand, it makes traditional security practices such as firewalls and other perimeter defenses insufficient. Therefore, concerns over risks like terrorism, crime, and business revenue loss increasingly impose the need for enhancing and managing security within the boundaries of these systems so that unwanted incidents (e.g., potential intrusions) can still be detected with higher probabilities. To this end, critical infrastructure organizations step up their efforts to investigate new possibilities for actively engaging in situational awareness practices to ensure a high level of persistent monitoring as well as on-site observation. Compliance with security standards is necessary to ensure that organizations meet regulatory requirements mostly shaped by a set of best practices. Nevertheless, it does not necessarily result in a coherent security strategy that considers the different aims and practical constraints of each organization. In this regard, there is a growing demand for risk-based security management approaches that enable critical infrastructures to focus their efforts on mitigating the risks to which they are exposed. Broadly speaking, security management involves the identification, assessment, and evaluation of long-term (or overall) objectives and interests as well as the means of achieving them. Due to the critical role of such systems, their decision-makers tend to enhance the system resilience against very unpleasant outcomes and severe consequences. That is, they seek to avoid decision options associated with likely extreme risks in the first place. Practically speaking, this risk attitude can significantly influence the decision-making process in such critical organizations. Towards incorporating the aversion to extreme risks into security management decisions, this thesis thoroughly investigates the capabilities of a recently emerged theory of games with payoffs that are probability distributions. Unlike traditional optimization techniques, this theory provides an alternative decision technique that is more robust to extreme risks and uncertainty. Furthermore, this thesis proposes a new method that gives a decision maker more control over the decision-making process through defining loss regions with different importance levels according to people's risk attitudes. In this way, the static decision analysis used in the distribution-valued games is transformed into a dynamic process to adapt to different subjective risk attitudes or account for future changes in the decision caused by a learning process or other changes in the context.
Throughout its different parts, this thesis shows how theoretical models, simulation, and risk assessment models can be combined into practical solutions. In this context, it deals with three facets of security management: allocating limited security resources, prioritizing security actions, and tweaking decision making. Finally, the author discusses experiences and limitations distilled from this research and from investigating the new theory of games, which can be taken into account in future approaches.}, subject = {Spieltheorie}, language = {en} } @phdthesis{Silva2022, author = {Silva, Vivian dos Santos}, title = {A Composite Syntactic-Semantic Interpretable Text Entailment Approach Exploring Commonsense Knowledge Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10706}, school = {Universit{\"a}t Passau}, pages = {xiv, 229 Seiten}, year = {2022}, abstract = {Natural Language Processing has an important role in Artificial Intelligence for easing human-machine interaction. Processing human language, though, poses many challenges, among which is the semantics-related phenomenon known as language variability, the fact that the same thing can be said in several ways. NLP applications' inputs and outputs can be expressed in different forms, whose equivalence can be verified through inference. The textual entailment paradigm was established to enable the creation of a unifying framework for applied inference, providing a means of delivering other NLP tasks from handling inference issues in an ad-hoc manner, using instead the outputs of an inference-dedicated mechanism. Text entailment, the task of determining whether a piece of text logically follows from another piece of text, involves different scenarios, which can range from a simple syntactic variation to more complex semantic relationships between sentences. However, most approaches try a one-size-fits-all solution that usually favors some scenario to the detriment of another. The commonsense world knowledge necessary to support more complex inferences is also usually employed in a limited way, with most approaches sticking to shallow semantic information, leaving more elaborate semantic relationships aside. Furthermore, most systems still work as a "black box", providing a yes/no answer that does not explain the underlying reasoning process. This thesis aims at addressing these issues by proposing a composite interpretable approach for recognizing text entailment where the entailment pair is analyzed so that the most relevant phenomenon is detected and a suitable method can be used to solve it. Syntactic variations are dealt with through the analysis of the sentences' syntactic structures, and semantic relationships are detected with the aid of a knowledge graph built from natural language dictionary definitions. Also, if a semantic matching is involved, the answer is made interpretable through the generation of natural language justifications that explain the semantic relationship between the pieces of text.
The result is XTE - Explainable Text Entailment - a system that outperforms well-established tools based on single-technique entailment algorithms and also takes an important step towards Explainable AI, allowing the interpretation of the inference model and making the semantic reasoning process explicit and understandable.}, language = {en} } @phdthesis{Opris2022, author = {Opris, Andre}, title = {Holomorphic Extensions in the Structure R_{an,exp}}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10691}, school = {Universit{\"a}t Passau}, pages = {233 Seiten}, year = {2022}, abstract = {In this thesis we consider real analytic functions, i.e. functions which can be described locally as convergent power series, and ask the following: Which real analytic functions definable in R_{an,exp} have a holomorphic extension which is again definable in R_{an,exp}? Finding a holomorphic extension is of course not difficult simply by power series expansion. The difficulty is to construct it in a definable way. We will not answer the question above completely, but introduce a large non-trivial class of definable functions in R_{an,exp} which contains, for example, functions that are iterated compositions from either side of globally subanalytic functions and the global logarithm. We call them restricted log-exp-analytic. After giving some preliminary results, like preparation theorems and Tamm's Theorem for this class of functions, we are able to show that real analytic restricted log-exp-analytic functions have a holomorphic extension which is again restricted log-exp-analytic.}, subject = {O-Minimalit{\"a}t}, language = {en} } @phdthesis{Schmid2022, author = {Schmid, Josef}, title = {Learning-Based Quality of Service Prediction in Cellular Vehicle Communication}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10772}, school = {Universit{\"a}t Passau}, pages = {xvi, 147 Seiten}, year = {2022}, abstract = {Network communication has become a part of everyday life, and the interconnection among devices and people will increase even more in the future. A new area where this development is on the rise is the field of connected vehicles. It is especially useful for automated vehicles in order to connect the vehicles with other road users or cloud services. In particular for the latter, it is beneficial to establish a mobile network connection, as it is already widely used and no additional infrastructure is needed. With the use of network communication, certain requirements come along. One of them is the reliability of the connection. Certain Quality of Service (QoS) parameters need to be met. In case of degraded QoS, according to the SAE level specification, a downgrade of the automated system can be required, which may lead to a takeover maneuver, in which control is returned to the driver. Since such a handover takes time, prediction is necessary to forecast the network quality for the next few seconds. Prediction of QoS parameters, especially in terms of Throughput (TP) and Latency (LA), is still a challenging task, as the wireless transmission properties of a moving mobile network connection are subject to fluctuation. In this thesis, a new approach for predicting Network Quality Parameters (NQPs) on Transmission Control Protocol (TCP) level is presented. It combines knowledge of the environment with the low-level parameters of the mobile network.
The aim of this work is to perform a comprehensive study of various models, including both Location Smoothing (LS) grid maps and Learning-Based (LB) regression models. Moreover, the possibility of exploiting the location independence of a model, as well as its suitability for automated driving, is evaluated.}, language = {en} }