@phdthesis{Silva2022, author = {Silva, Vivian dos Santos}, title = {A Composite Syntactic-Semantic Interpretable Text Entailment Approach Exploring Commonsense Knowledge Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10706}, school = {Universit{\"a}t Passau}, pages = {xiv, 229 Seiten}, year = {2022}, abstract = {Natural Language Processing has an important role in Artificial Intelligence for easing human-machine interaction. Processing human language, though, poses many challenges, among which is the semantics-related phenomenon known as language variability, the fact that the same thing can be said in several ways. NLP applications' inputs and outputs can be expressed in different forms, whose equivalence can be verified through inference. The textual entailment paradigm was established to enable the creation of a unifying framework for applied inference, providing a means of relieving other NLP tasks from handling inference issues in an ad-hoc manner, using instead the outputs of an inference-dedicated mechanism. Text entailment, the task of determining whether a piece of text logically follows from another piece of text, involves different scenarios, which can range from a simple syntactic variation to more complex semantic relationships between sentences. However, most approaches try a one-size-fits-all solution that usually favors some scenario to the detriment of another. The commonsense world knowledge necessary to support more complex inferences is also usually employed in a limited way, with most approaches sticking to shallow semantic information, leaving more elaborate semantic relationships aside. Furthermore, most systems still work as a "black box", providing a yes/no answer that does not explain the underlying reasoning process. This thesis aims at addressing these issues by proposing a composite interpretable approach for recognizing text entailment in which the entailment pair is analyzed so that the most relevant phenomenon is detected and a suitable method can be used to solve it. Syntactic variations are dealt with through the analysis of the sentences' syntactic structures, and semantic relationships are detected with the aid of a knowledge graph built from natural language dictionary definitions. Also, if semantic matching is involved, the answer is made interpretable through the generation of natural language justifications that explain the semantic relationship between the pieces of text. The result is XTE - Explainable Text Entailment - a system that outperforms well-established tools based on single-technique entailment algorithms, and that also takes an important step towards Explainable AI, allowing for the interpretation of the inference model and making the semantic reasoning process explicit and understandable.}, language = {en} } @phdthesis{Schmid2022, author = {Schmid, Josef}, title = {Learning-Based Quality of Service Prediction in Cellular Vehicle Communication}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10772}, school = {Universit{\"a}t Passau}, pages = {xvi, 147 Seiten}, year = {2022}, abstract = {Network communication has become a part of everyday life, and the interconnection among devices and people will increase even more in the future. A new area where this development is on the rise is the field of connected vehicles. It is especially useful for automated vehicles, connecting them with other road users or cloud services. 
In particular for the latter, it is beneficial to establish a mobile network connection, as it is already widely used and no additional infrastructure is needed. The use of network communication, however, comes with certain requirements. One of them is the reliability of the connection. Certain Quality of Service (QoS) parameters need to be met. In case of degraded QoS, according to the SAE level specification, a downgrade of the automated system can be required, which may lead to a takeover maneuver, in which control is returned to the driver. Since such a handover takes time, prediction is necessary to forecast the network quality for the next few seconds. Prediction of QoS parameters, especially in terms of Throughput (TP) and Latency (LA), is still a challenging task, as the wireless transmission properties of a moving mobile network connection are subject to fluctuation. In this thesis, a new approach for predicting Network Quality Parameters (NQPs) at the Transmission Control Protocol (TCP) level is presented. It combines knowledge of the environment with the low-level parameters of the mobile network. The aim of this work is to perform a comprehensive study of various models, including both Location Smoothing (LS) grid maps and Learning-Based (LB) regression models. Moreover, the location independence of a model as well as its suitability for automated driving are evaluated.}, language = {en} } @techreport{EckhardtFreilingHerrmannetal.2023, author = {Eckhardt, Dennis and Freiling, Felix and Herrmann, Dominik and Katzenbeisser, Stefan and P{\"o}hls, Henrich C.}, title = {Sicherheit in der Digitalisierung des Alltags: Definition eines ethnografisch-informatischen Forschungsfeldes f{\"u}r die L{\"o}sung allt{\"a}glicher Sicherheitsprobleme}, doi = {10.15475/sidial.2023}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13721}, pages = {18 Seiten}, year = {2023}, abstract = {In den vergangenen Jahrzehnten hat es un{\"u}bersehbar zahlreiche Fortschritte im Bereich der IT-Sicherheitsforschung gegeben, etwa in den Bereichen Systemsicherheit und Kryptographie. Es ist jedoch genauso un{\"u}bersehbar, dass IT-Sicherheitsprobleme im Alltag der Menschen fortbestehen. Mutmaßlich liegt dies an der Komplexit{\"a}t von Alltagssituationen, in denen Sicherheitsmechanismen und Ger{\"a}tefunktionalit{\"a}t sowie deren Heterogenit{\"a}t in schwer antizipierbarer Weise mit menschlichem Verst{\"a}ndnis und Alltagsgebrauch interagieren. Um die wissenschaftliche Forschung besser auf Menschen und deren IT-Sicherheitsbed{\"u}rfnisse auszurichten, m{\"u}ssen wir daher den Alltag der Menschen besser verstehen. Das Verst{\"a}ndnis von Alltag ist in der Informatik jedoch noch unterentwickelt. Dieser Beitrag m{\"o}chte das Forschungsfeld "Sicherheit in der Digitalisierung des Alltags" definieren, um Forschenden die Gelegenheit zu geben, ihre Anstrengungen in diesem Bereich zu b{\"u}ndeln. Wir machen dabei einerseits Vorschl{\"a}ge zur inhaltlichen Eingrenzung der informatischen Forschung. Andererseits m{\"o}chten wir durch die Einbeziehung von Forschungsmethoden aus der Ethnografie, die Erkenntnisse aus der durchaus subjektiven Beobachtung des "Alltags" vieler einzelner Individuen zieht, zur methodischen Weiterentwicklung interdisziplin{\"a}rer Forschung in diesem Feld beitragen. 
Die IT-Sicherheitsforschung kann dann Bestehendes gezielt f{\"u}r eine richtige Alltagstauglichkeit optimieren und neue grundlegende Sicherheitsfunktionalit{\"a}ten f{\"u}r die konkreten Herausforderungen im Alltag entwickeln.}, language = {de} } @phdthesis{Hintz2009, author = {Hintz, Martin}, title = {Micro-Impact: Deconstructing the complex impact process of a simple microinsurance product in Indonesia}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-20389}, school = {Universit{\"a}t Passau}, year = {2009}, abstract = {This thesis analyses the social impact of Payung Keluarga, an obligatory enhanced credit life microinsurance product launched by Allianz in Indonesia in 2006. Payung Keluarga automatically insures micro-borrowers who take out microcredits from microfinance institutions. In case of death, the outstanding credit balance is canceled and the beneficiary receives twice the original loan as an additional payout. Payung Keluarga was conceived to ameliorate the assumed post-mortem financial crisis of low-asset families. Through qualitative-explorative field research from 2006 to 2008, I investigated whether this developmental intention was realized. It is the first impact analysis on microinsurance in Indonesia. In the research process, I took the position of an observing participant. As operational project leader for Allianz in Indonesia, I was virtually doing research on my own work. The resulting challenge to research neutrality is primarily mitigated by the sobering to discerning social impact which was eventually revealed. The majority of the insured were married female Muslim petty traders in urban and semi-urban areas around Jakarta. Socio-economically, these women stand at the upper end of the low-asset stratum. Their husbands were generally the main breadwinners of the family, and it was mostly them who received the insurance payouts. It could therefore be said that Payung Keluarga benefited the main breadwinner instead of insuring him. The study found that norms of a moral economy still exert significant clout on the insured. The moral economy aims at providing "subsistence insurance" for all community members through an intricate collective system of balanced exchanges. The corresponding "premium" is a denouncement of self-interested material asset accumulation. Next to structural reasons, it was this moral restriction that saw the businesses of the women stagnate at low and socially inconspicuous levels. Payung Keluarga did not help to overcome the assumed post-mortem financial crisis. In reality, such a crisis did not exist, since community and family support among low-asset Muslim Indonesians is normally strong enough to largely provide for the bereaved family. This support is driven by the perception of death as a collective risk in the light of the moral economy and hinges on principles of balanced reciprocity. For cultural and religious reasons, the beneficiaries used most of the insurance payouts for funeral ceremonies and repayment of informal debt. With the advent of Payung Keluarga, familial post-mortem assistance has been reduced. Funeral costs also seem to have been inflated by the product. It has thereby promoted a long-term societal shift from equality-seeking balanced reciprocity towards status-seeking and socially diversifying general reciprocity. In effect, Payung Keluarga has attacked cooperative social cohesion head-on where it is still strongest in a rapidly modernizing Indonesian society. 
This discerning and unintended impact of Payung Keluarga is hardly offset by a positive increase in financial literacy among the insured. Furthermore, the effect on the insured's "peace of mind" is ambivalent: while most insured stated that they felt safer, some declared that they felt less secure with their obligatory coverage, for fear of interference with divine predetermination. Its overall developmental impact can be literally described as "micro". Instead of protecting the status quo of the family, Payung Keluarga has assumed the role of an actor of social change: not only because it has changed the funeral pattern of the beneficiaries, but also because it promotes a far-reaching conceptual paradigm shift from balanced reciprocity, which forms a core pillar of the insured's social structure, towards general reciprocity. The thesis hypothesizes that, with sufficient insurance coverage provided, the insured will increasingly opt out of the coercively egalitarian "subsistence insurance" system. Such an opt-out will allow the insured to pursue a more aggressive economic asset accumulation strategy, particularly in combination with micro-credit. For the individual, this can be seen as a "liberating fortune" that would induce more women to grow their businesses to significant sizes. In parallel, it would deal a blow to cooperative social cohesion. I propose to call this the "double fortune / double blow" dilemma of microfinance. Although this thesis presents an exemplary case, some of its findings can be generalized: The impact of microinsurance is highly dependent on cultural, religious, and socio-demographic context. Any microinsurance intervention concerned with social impact should be preceded by a thick contextualization going beyond the usual demand assessments. In turn, microinsurance likewise impacts context as an actor of ambivalent social change. The complex influence of context and the role of microinsurance as an actor of social change have so far hardly been discussed in the development discourse.}, subject = {Versicherung}, language = {en} } @phdthesis{Roetzer2003, author = {R{\"o}tzer, Andreas}, title = {Die Einteilung der Wissenschaften - Analyse und Typologisierung von Wissenschaftsklassifikationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-707}, school = {Universit{\"a}t Passau}, year = {2003}, abstract = {Die Arbeit diskutiert die vielf{\"a}ltigen Implikationen von Wissenschaftsklassifikationen und stellt eine Typologie zur Diskussion, mit deren Hilfe es m{\"o}glich wird, Wissensklassifikationen hinsichtlich einer Vielzahl von Aspekten zu analysieren. Konkrete Beispiele aus der mehr als 2000 Jahre zur{\"u}ckreichenden Geschichte der Wissenschaftsklassifikation illustrieren dabei die Ergebnisse der Untersuchung. Sie dienen als Anschauungsmaterial, um mit ihren jeweils charakteristischen Probleml{\"o}sungen die ihnen zugrunde liegenden Voraussetzungen zu untersuchen. Im Zentrum der Untersuchung standen dabei aber die impliziten und expliziten Voraussetzungen konkreter Wissenschaftsklassifikationen. Die angestrebte Systematisierung macht den Verzicht auf eine historische Darstellung notwendig. Die systematisch geordnete Untersuchung in Verbindung mit dem chronologisch geordneten Anhang erm{\"o}glicht es dem Leser, Aufschl{\"u}sse {\"u}ber das Aufkommen und Verschwinden beherrschender wissenstheoretischer Ideen und {\"u}ber den Stand der Entwicklung der Wissenschaften zu einer gegebenen Epoche zu gewinnen sowie die darunter liegenden Strukturen zu erkennen. 
Obwohl also auf eine lineare historische Darstellung verzichtet wurde, stellt sich dem Leser dadurch ein kleiner, repr{\"a}sentativer {\"U}berblick {\"u}ber ihre Geschichte dar, mit dessen Hilfe sich ihm die großen Entwicklungslinien erschließen.}, subject = {Systematische Aufstellung}, language = {de} } @phdthesis{vonRhein2016, author = {von Rhein, Alexander}, title = {Analysis Strategies for Configurable Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3682}, school = {Universit{\"a}t Passau}, pages = {xiii, 229 Seiten}, year = {2016}, abstract = {A configurable system enables users to derive individual system variants based on a selection of configuration options. To cope with the often huge number of possible configurations, several analysis approaches (e.g., for verification of configurable systems) implement different strategies to account for configurability. One popular strategy—often applied in practice—is to use sampling (i.e., analyzing only a subset of all system variants). While sampling reduces the analysis effort significantly, the information obtained is necessarily incomplete, as some variants are not analyzed. A second strategy is to identify the common parts and the variable parts of a configurable system and analyze each part separately (called the feature-based strategy). As a third strategy, researchers have begun to develop family-based analyses. Family-based approaches analyze the code base of a configurable system as a whole, rather than the individual variants or parts of the system, this way exploiting similarities among individual variants to reduce analysis effort. Each of these three strategies has advantages and disadvantages, which might even prevent its application (e.g., the family-based strategy typically needs much main memory). The goal of this thesis is to enable the efficient analysis of configurable systems, even if existing strategies fail (e.g., the family-based strategy, because of memory limitations). To this end, we designed a framework that models the key aspects of configurable-system analysis strategies, independent of their implementation and of the analysis techniques (e.g., type checking or model checking). Guided by our model, we developed a number of analysis strategies for configurable systems. To learn about the advantages and disadvantages of individual strategies, we compared these in a series of empirical studies. In particular, we developed and evaluated a model-checking analysis and a data-flow analysis for configurable systems. One of our key findings is that family-based analysis outperforms most sampling heuristics with respect to analysis time, while being able to make definite statements about all variants of a configurable system. Furthermore, we identified advantages and disadvantages of analysis strategies and how to mitigate them by combining strategies. In our endeavor, we identified two key problems that are common to configurable-system analyses, and we developed supporting techniques to solve them. These techniques are general and are applicable beyond our research. In particular, we developed presence-condition simplification and variability encoding. Presence-condition simplification provides a simple method to reduce the size of the output or the internal data structure of configurable-system analyses. Variability encoding provides a means for transforming compile-time variability to run-time variability, which enables many family-based analyses. 
Our key contributions are the model of analysis strategies for configurable systems and the corresponding empirical comparisons of strategies. Our findings are backed by empirical studies, which helped broaden the community's knowledge on analyses of configurable systems (as indicated by citations). For these evaluations, we prepared several subject systems, which have already been used by other researchers. Furthermore, we developed several analysis tools and demonstrated their feasibility in practical application scenarios based on code from, for example, the Linux kernel. Our tools are based on variability-aware optimizations that enable levels of scalability on configurable systems that were not possible with other tools before.}, subject = {Software Engineering}, language = {en} } @phdthesis{Sui2018, author = {Sui, Zhiyuan}, title = {Security and Privacy Schemes for Demand Response in Smart Grids}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5809}, school = {Universit{\"a}t Passau}, pages = {xx, 153 Seiten}, year = {2018}, abstract = {Smart Grids integrate currently isolated power and communications networks, while introducing several new technologies on the hardware and software sides. One of the most important ingredients is the potential for demand-response programs, which offer the possibility of sending instructions to consumers to adapt their power consumption over a certain period of time. However, high-frequency data collection exposes consumers' usage behaviors, leading to security and privacy challenges for Smart Grids. In this thesis, three cryptographic schemes are constructed for different demand-response programs. In the mandatory incentive-based demand-response program, privacy preservation depends on the power consumption of consumers. An anonymous authentication scheme is constructed for overload auditing and privacy preservation. Consumers' identities are anonymous during normal operation. The operation center defines an acceptable consumption threshold at times of power shortage. Consumers must follow the instruction and curtail their power consumption to meet the threshold. If they do so, the consumers keep their anonymity, while disobedient consumers, whose power consumption exceeds the threshold, can be identified. Security analysis demonstrates that the constructed anonymous authentication scheme is secure in a random oracle model. In the voluntary incentive-based demand-response program, consumers are categorized as either obedient or disobedient according to their consumption curtailment. Consumers utilize a homomorphic encryption algorithm to encrypt their usage and report the ciphertexts to the operation center periodically. At a time of grid instability, the obedient consumers reduce their consumption and prove their curtailment by using a range proof. Both the usage reports and the proofs from obedient consumers concerning their consumption are reported without leaking private information. In order to achieve the real-time requirement, a security model is proposed and a batch verification algorithm is constructed, which is proved to be secure in the defined oracle model. Apart from reward and penalty detection in demand-response programs, theft detection is also an important requirement in Smart Grids. 
In order to achieve theft detection, this thesis employs dynamic k-times anonymous authentication and blind signatures to create an efficient theft detection mechanism in the prepaid card system, where consumers pay for their consumption in advance and obtain credentials. A consumer sends the credentials anonymously and obtains corresponding credentials during times of consumption. If a thief tries to send reused credentials to steal electricity, his anonymity will be revoked. Finally, this thesis proves that the proposed mechanism finds the real identities of power thieves without sacrificing the privacy of honest consumers under the random oracle model.}, subject = {Intelligentes Stromnetz}, language = {en} } @phdthesis{Kinseher2018, author = {Kinseher, Josef}, title = {New Methods for Improving Embedded Memory Manufacturing Tests}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6017}, school = {Universit{\"a}t Passau}, pages = {119 Seiten}, year = {2018}, abstract = {Due to the need for fast and energy-efficient access to growing amounts of data, the share and number of embedded memories inside modern microchips have been continuously increasing over the last years. Since embedded memories have the highest integration density of a fabrication technology, they pose special test challenges due to complex manufacturing defects as well as strong transistor aging phenomena. This necessitates efficient methods for detecting more subtle defects while keeping test costs low. This work presents novel methods and techniques for improving the efficiency of embedded memory manufacturing tests. The proposed methods are demonstrated in an industrial setting based on production-proven transistor, memory, and chip models, and their benefits over the current state of the art are worked out.}, subject = {Speicher}, language = {en} } @phdthesis{Stenzer2018, author = {Stenzer, Alexander}, title = {Ein Ansatz zur semantik-basierten Anfragerelaxation f{\"u}r hierarchische Strukturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5746}, school = {Universit{\"a}t Passau}, pages = {vii, 225 Seiten}, year = {2018}, abstract = {Sowohl f{\"u}r Monumentalbauten als Teil unseres Kulturgutes im Speziellen als auch f{\"u}r Geb{\"a}ude im Allgemeinen wurden im Rahmen des MonArch-Projektes verschiedene Methoden zur digitalen Speicherung von Informationen {\"u}ber Monumentalbauten erforscht. Das daraus entstandene MonArch-System ist f{\"u}r die Dokumentation von Monumentalbauten verwendbar und speichert das digitale Modell des Bauwerks in einer relationalen Datenbank. Das digitale Modell des Bauwerks entsteht durch eine Segmentierung in Geb{\"a}udeteile, die dann in einer Strukturhierarchie zusammengefasst werden k{\"o}nnen. Als Strukturhierarchie versteht man in diesem Zusammenhang eine Hierarchie von Geb{\"a}udeteilen, die in einer Teil-von-Beziehung stehen. Die Strukturhierarchie erlaubt es, Informationen, z.B. Dokumente, mit einem r{\"a}umlichen Bezug auszuzeichnen. Zus{\"a}tzlich wird eine Themenhierarchie unterst{\"u}tzt, die es erlaubt, Informationen thematisch mit Begriffen zu beschreiben. Betrachtet man r{\"a}umliche und thematische Anfragen in vernetzten MonArch-Systemen, in denen sich mehrere Geb{\"a}udearchive zusammenschließen, ist diese starke Bindung der Information an die einzigartige Struktur jedes Geb{\"a}udes ein Hindernis f{\"u}r ein einfaches Verfahren zur r{\"a}umlichen Suche. 
Da sich jedes Geb{\"a}ude in seinem speziellen strukturellen und r{\"a}umlichen Aufbau unterscheidet, liefert eine r{\"a}umliche Anfrage, die speziell auf diese Eigenheiten eines Geb{\"a}udes ausgerichtet ist, f{\"u}r andere Geb{\"a}ude keine Suchergebnisse. F{\"u}r thematische Anfragen stellen nicht kompatible Themenhierarchien ein Hindernis dar, die eine {\"u}bergreifende thematische Anfrage verhindern. Die gr{\"o}ßte Herausforderung ist es, Struktur- und Themenhierarchien aufeinander abzubilden. Zur L{\"o}sung des geschilderten Problems wird in vernetzten Informationssystemen auf eine geeignete Transformation der urspr{\"u}nglichen Anfrage zur{\"u}ckgegriffen, um den Anfragefokus zu erweitern (Relaxation) oder eine Anpassung an die Gegebenheiten des entfernten Informationssystems zu erreichen (Transformation). Das Anfragetransformations- und -relaxationsverfahren, das in dieser Arbeit vorgestellt wird, nutzt eine Generalisierungsbeziehung aus, um ausgehend von einer Anfrage an eine spezielle Struktur- und Themenhierarchie eine automatische Transformation der Anfrage durchzuf{\"u}hren. Bei Themenhierarchien sind gemeinsame Oberthemen ein Ansatzpunkt. Bei Strukturhierarchien k{\"o}nnen Typinformationen zu Geb{\"a}udeteilen die Generalisierungsbeziehung darstellen. Die transformierte und dadurch relaxierte Anfrage kann dann an ein Netzwerk von MonArch-Systemen gestellt werden, ohne dass eine manuelle Auswahl der Geb{\"a}udeteile in anderen Strukturhierarchien oder eine angepasste Themenauswahl erfolgen muss. Dazu muss die Strukturhierarchie der anderen Geb{\"a}ude im Netzwerk von MonArch-Systemen nicht bekannt sein. Im Rahmen der vorliegenden Arbeit werden verschiedene Relaxationsverfahren, z.B. ein angepasstes Spreading-Activation-Verfahren, zur automatischen Anfragetransformation von r{\"a}umlichen und thematischen Anfragen vorgestellt, mit dem Ziel eine vollst{\"a}ndige Abbildung zwischen den Strukturhierarchien von Geb{\"a}uden und Themenhierarchien zu vermeiden. Erreicht wird das Ziel durch eine Erweiterung des MonArch-Datenmodells und eine Verallgemeinerung der MonArch-Anfragen, die eine Anfragetransformation zum Anfragezeitpunkt erlauben.}, subject = {Abfragesprache}, language = {de} } @phdthesis{LoperaGonzalez2018, author = {Lopera Gonzalez, Luis Ignacio}, title = {Mining Functional and Structural Relationships of Context Variables in Smart-Buildings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5737}, school = {Universit{\"a}t Passau}, pages = {viii, 99 Seiten}, year = {2018}, abstract = {The Internet of Things (IoT) is a network of computational services, devices, and people, which share information with each other. In IoT, inter-system communication is possible and human interaction is not required. IoT devices are penetrating the home and office building environments. According to current estimates, about 35 billion IoT devices will be connected by the year 20212. In the IoT business model, value comes from integrating devices into applications, e.g., home and office automation. In general, an IoT application associates different information sources with actions which can modify the environment, e.g., change the room's temperature, inform a person, e.g., send an e-mail, or activate other services, e.g., buy milk on-line. In this thesis, we focus on the commissioning and verification processes of IoT devices used in building automation applications. Within a building's lifespan, new devices are added, interior spaces are refurbished, and faulty devices are replaced. 
All of these changes are currently made manually. Furthermore, consider that a context-aware Building Management System (BMS) is an IoT application which measures direct-context from the building's sensors to characterize environmental conditions, user locations, and state. Additionally, a BMS combines sensor information to derive inferred-context, such as user activity. Similar to IoT devices, inferred-context instances have to be created manually. As the number of devices and inferred-context instances increases, keeping track of all associations becomes a time-consuming and error-prone task. The hypothesis of the thesis is that users who interact with the building create use-patterns in the data, which describe functional relations between devices and inferred-context instances, e.g., which desk-movement sensor is used to infer desk-presence and controls which overhead light; additionally, use-patterns can also provide structural relations, e.g., the relative position of spatial sensors. To test the hypothesis, this thesis presents an extension to the new IoT class rule programming paradigm, which simplifies rule creation based on classes. The proposed extension uses a semantic compiler to simplify the device and inferred-context associations. Using direct-context information and template classes, the compiler creates all possible inferred-context instances. Buildings using context-aware BMSs will have a dynamic response to user behaviour, e.g., the illumination required for computer work is provided by adjusting blinds or increasing the dim setting of overhead ceiling lamps. We propose a rule mining framework to extract use-patterns and find the functional and structural relationships between devices. The rule mining framework uses three stages: (1) event extraction, (2) rule mining, and (3) structure creation. The event extraction stage combines the building's data into a time series of device events. Then, in the rule mining stage, rules are mined from the time series, where we use the established temporal interval tree association rule learner algorithm. Additionally, we propose a rule extraction algorithm for spatial sensor data. The algorithm is based on a statistical analysis of user transition times between adjacent sensors. We also introduce a new rule extraction algorithm based on increasing belief. In the last stage, structure creation uses the extracted rules to produce device association groups, a hierarchical representation of the building, or the relative location of spatial sensors. The proposed algorithms were tested using a year-long installation in a living lab consisting of a four-person office, a 12-person open office, and a meeting room. For the spatial sensors, four locations within public buildings were used: a meeting room, a hallway, a T-crossing, and a foyer. The recording times range from two weeks to two months, depending on scenario complexity. We found that user-generated patterns appear in building data. The rule mining framework produced structures that represent functional and spatial relationships of the building's devices and provide sufficient information to automate maintenance tasks, e.g., automatic device naming. Furthermore, we found that environmental changes are also a source of device data patterns, which provide additional associations. For example, using the framework we found the fa{\c{c}}ade group for exterior light sensors. The fa{\c{c}}ade group can be used to automatically find an alternative signal source to replace broken outdoor light sensors. 
Finally, the rule mining framework successfully retrieved the relative location of spatial sensors in all locations but the foyer.}, subject = {Internet der Dinge}, language = {en} } @phdthesis{Opris2022, author = {Opris, Andre}, title = {Holomorphic Extensions in the Structure R_{an,exp}}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10691}, school = {Universit{\"a}t Passau}, pages = {233 Seiten}, year = {2022}, abstract = {In this thesis, we consider real analytic functions, i.e., functions which can be described locally as convergent power series, and ask the following: Which real analytic functions definable in R_{an,exp} have a holomorphic extension which is again definable in R_{an,exp}? Finding a holomorphic extension is of course not difficult; it can be obtained simply by power series expansion. The difficulty is to construct it in a definable way. We do not answer the question above completely, but introduce a large, non-trivial class of definable functions in R_{an,exp} which contains, for example, functions that are iterated compositions from either side of globally subanalytic functions and the global logarithm. We call these functions restricted log-exp-analytic. After giving some preliminary results, like preparation theorems and Tamm's Theorem for this class of functions, we are able to show that real analytic restricted log-exp-analytic functions have a holomorphic extension which is again restricted log-exp-analytic.}, subject = {O-Minimalit{\"a}t}, language = {en} } @phdthesis{NorbertoSales2022, author = {Norberto Sales, Juliano Efson}, title = {An Explainable Semantic Parser for End-User Development}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10718}, school = {Universit{\"a}t Passau}, pages = {xvi, 165 Seiten}, year = {2022}, abstract = {Programming is a key skill in a world where businesses are driven by digital transformations. Although much of the programming demand can be addressed by a simple set of instructions composing libraries and services available on the web, non-technical professionals, such as domain experts and analysts, are still unable to construct their own programs due to the intrinsic complexity of coding. Among other types of end-user development, natural language programming has emerged to allow users to program without the formalism of traditional programming languages, where a tailored semantic parser can translate a natural language utterance into a formal command representation that can be processed by a computational machine. Currently, semantic parsers are typically built on top of a learning method that defines its behaviour based on the patterns behind a large set of training data, whose production is frequently costly and time-consuming. Our research is devoted to studying and proposing a semantic parser for natural language commands targeting a scenario with low availability of training data. The proposed semantic parser follows a multi-component architecture, composed of a specialised shallow parser that associates natural language commands with predicate-argument structures, integrated with a distributional ranking model that matches the command to a function signature available from an API knowledge base. Systems developed with statistical learning models and complex linguistic resources, like the proposed semantic parser, do not natively provide an easy way to associate a single feature of the input data with its impact on system behaviour. 
In this scenario, end-user explanations for intelligent systems have become a strong requirement to increase user confidence and system literacy. Thus, we designed an explanation model for the proposed semantic parser that fits the heterogeneity of its multi-component architecture. The explanation model explores a hierarchical representation with an increasing degree of technical depth, providing higher-level explanations in the initial layers and gradually moving to those that demand technical knowledge, applying different explanation strategies to better express the approach behind each component. With the support of a user-centred experiment, we compared the utility of different types of explanations and the impact of background knowledge on user preferences.}, language = {en} } @inproceedings{ParraRodriguezPosegga2018, author = {Parra Rodriguez, Juan D. and Posegga, Joachim}, title = {Local Storage on Steroids: Abusing Web Browsers for Hidden Content Storage and Distribution}, series = {International Conference on Security and Privacy in Communication Systems}, booktitle = {International Conference on Security and Privacy in Communication Systems}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-01704-0}, doi = {10.1007/978-3-030-01704-0_19}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6572}, pages = {20 Seiten}, year = {2018}, abstract = {Analysing the security assumptions taken for the WebRTC and postMessage APIs led us to find a novel attack abusing the browsers' persistent storage capabilities. The presented attack can be executed without the website visitor's knowledge, and it requires neither browser vulnerabilities nor additional software on the browser's side. To exemplify this, we study how an attacker can use browsers to create a network for persistent storage and distribution of arbitrary data. In our proof of concept, the total storage of the network, and therefore the space used within each browser, grows linearly with the number of origins delivering the malicious JavaScript code. Further, data transfers between browsers are not restricted by the Same Origin Policy, which allows for a unified cross-origin browser network, regardless of the origin from which the script executing the functionality is loaded. In the course of our work, we assess the feasibility of a real-life deployment of the network by running experiments using Linux containers and browser automation tools. Moreover, we show how security mechanisms against third-party tracking, cross-site scripting, and click-jacking can diminish the attack's impact, or even prevent it.}, language = {en} } @article{HassenBenAhmed, author = {Hassen, Wiem Fekih and Ben Ahmed, Mariem}, title = {Optimization of a Redox-Flow Battery Simulation Model Based on a Deep Reinforcement Learning Approach}, series = {Batteries}, volume = {10}, journal = {Batteries}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/batteries10010008}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13994}, pages = {20 Seiten}, abstract = {Vanadium redox-flow batteries (VRFBs) have played a significant role in hybrid energy storage systems (HESSs) over the last few decades owing to their unique characteristics and advantages. Hence, the accurate estimation of the VRFB model holds significant importance in large-scale storage applications, as such models are indispensable for incorporating the distinctive features of energy storage systems and control algorithms within embedded energy architectures. 
In this work, we propose a novel approach that combines model-based and data-driven techniques to predict battery state variables, i.e., the state of charge (SoC), voltage, and current. Our proposal leverages enhanced deep reinforcement learning techniques, specifically deep Q-learning (DQN), by combining Q-learning with neural networks to optimize the VRFB-specific parameters, ensuring a robust fit between the real and simulated data. Our proposed method outperforms the existing approach in voltage prediction. Subsequently, we enhance the proposed approach by incorporating a second deep RL algorithm—dueling DQN—which is an improvement of DQN, resulting in a 10\% improvement in the results, especially in terms of voltage prediction. The proposed approach results in an accurate VRFB model that can be generalized to several types of redox-flow batteries.}, language = {en} } @article{HassenImenAzzouz, author = {Hassen, Wiem Fekih and Azzouz, Imen}, title = {Optimization of Electric Vehicles Charging Scheduling Based on Deep Reinforcement Learning: A Decentralized Approach}, series = {Energies}, volume = {16}, journal = {Energies}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en16248102}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13985}, pages = {18 Seiten}, abstract = {The worldwide adoption of Electric Vehicles (EVs) has brought promising advancements toward a sustainable transportation system. However, the effective charging scheduling of EVs is not a trivial task due to the increase in load demand at the Charging Stations (CSs) and the fluctuation of electricity prices. Moreover, other issues that raise concern among EV drivers are the long waiting time and the inability to charge the battery to the desired State of Charge (SOC). In order to alleviate the range anxiety of users, we apply a Deep Reinforcement Learning (DRL) approach that provides the optimal charging time slots for an EV based on the photovoltaic power prices, the current EV SOC, the charging connector type, and the history of load demand profiles collected in different locations. Our implemented approach maximizes the EV profit while giving the EV drivers a margin of liberty to select the preferred CS and the best charging time (i.e., morning, afternoon, evening, or night). The analysis of the results demonstrates the effectiveness of the DRL model in reducing the charging costs of the EV by up to 60\%, providing a full charging experience to the EV with a waiting time of less than or equal to 30 min.}, language = {en} } @phdthesis{Kasinathan2021, author = {Kasinathan, Prabhakaran}, title = {Workflow-aware access control for the Internet of Things}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8915}, school = {Universit{\"a}t Passau}, pages = {xxiii, 214 Seiten}, year = {2021}, abstract = {The IoT is defined as a paradigm where "things" have sensing, actuating, communicating, and self-configuring abilities, and are connected to each other and to the Internet. Recent advancements in the manufacturing industry have helped to produce embedded devices with various sensors and actuators in mass numbers at a reduced cost. As part of the IoT revolution, everyday devices such as televisions, refrigerators, and cars, and even industrial machines, are now connected IoT devices. Recent studies have predicted that by 2025 there will be over 75 billion such IoT devices connected to the Internet. The providers of IoT-based services want to integrate their services to satisfy customer requirements. 
For example, in the mobility scenario, different mobility solution providers want to jointly offer a multi-modal ticket to their customers. In such a distributed and loosely coupled environment, each owner and stakeholder wants to secure his/her own integrity, confidentiality, and functionality goals. This means that distributed rules and conditions defined by the individual owners must be enforced on the participating entities (e.g., customers or partners using their services). The owners and stakeholders may not necessarily trust each other's actions. Therefore, a mechanism is required that guarantees the rules and conditions specified by the different owners. Attacks on IoT devices and similar computing systems are increasing and getting more advanced. IoT devices are often constrained, i.e., they have limited processing power, memory, and energy. Security mechanisms designed for traditional computing systems, e.g., computers, servers, or mobile computing devices such as smartphones, may not fit such constrained IoT devices. Weak security mechanisms and unenforced security measures were among the main reasons for recent successful attacks on IoT devices and services. As the IoT is now used in many sensitive places, including critical infrastructures, securing these devices becomes more critical than ever. This thesis focuses on developing mechanisms that secure IoT devices and services and enforce the rules and conditions specified by the owners on entities that want to access the owners' resources. In classical computer systems, security automata are used for specifying security policies, and monitoring mechanisms are used for enforcing such policies. For instance, a reference monitor observes and stops the execution when the security policies are about to be violated; thus, the security policies are enforced. To restrict the adversary from using protected IoT devices or services for malicious purposes, it must be ensured that a workflow is followed to access the protected resource. In distributed IoT systems where the policies are governed by different owners, each owner would like to specify his/her rules and conditions in his/her workflows. The workflows contain tasks that must be performed in a particular order. The goal of this thesis is to develop mechanisms to specify and enforce these workflows in the distributed IoT environment. This thesis introduces a distributed Workflow-aware Access Control (WFAC) framework that restricts entities to doing only what they are allowed to do in a collaborative environment. To gain access to a service protected by the WFAC framework, every workflow participant must prove that he/she is in a particular state of an authorized workflow. Authorized means two things: (a) the owner has authorized the workflow to be executed; (b) the workflow participant is authorized to execute it. This restricts the adversary's access to the devices and their services. The security policies defined by different owners are modeled as workflows and specified using Petri Nets. The policies are then enforced with the help of the WFAC framework, which supports error handling, accountability, integration of practitioner-friendly tools, and interoperability with existing security mechanisms such as OAuth. 
Thus, the WFAC framework guarantees the integrity of workflows in a distributed environment.}, language = {en} } @article{KronawitterLengauer2018, author = {Kronawitter, Stefan and Lengauer, Christian}, title = {Polyhedral Search Space Exploration in the ExaStencils Code Generator}, series = {ACM Transactions on Architecture and Code Optimization}, volume = {15}, journal = {ACM Transactions on Architecture and Code Optimization}, number = {4}, issn = {1544-3973}, doi = {10.1145/3274653}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5778}, year = {2018}, abstract = {Performance optimization of stencil codes requires data locality improvements. The polyhedron model for loop transformation is well suited for such optimizations with established techniques, such as the PLuTo algorithm and diamond tiling. However, in the domain of our project ExaStencils (stencil codes), it fails to yield optimal results. As an alternative, we propose a new, optimized, multi-dimensional polyhedral search space exploration and demonstrate its effectiveness: we obtain better results than existing approaches in several cases. We also propose how to specialize the search for the domain of stencil codes, which dramatically reduces the exploration effort without significantly impairing performance.}, language = {en} } @article{Basmadjian2019, author = {Basmadjian, Robert}, title = {Flexibility-Based Energy and Demand Management in Data Centers}, series = {Energies}, volume = {2019}, journal = {Energies}, number = {12}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en12173301}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-9251}, pages = {1 -- 22}, year = {2019}, abstract = {The power demand (kW) and energy consumption (kWh) of data centers have increased drastically due to the growing communication and computation needs of IT services. Leveraging demand and energy management within data centers is a necessity. Thanks to the automated ICT infrastructure empowered by IoT technology, such types of management are becoming more feasible than ever. In this paper, we look at management from two different perspectives: (1) minimization of the overall energy consumption and (2) reduction of peak power demand during demand-response periods. Both perspectives have a positive impact on the total cost of ownership for data centers. We exhaustively reviewed the potential mechanisms in data centers that provide flexibilities, together with flexible contracts such as green service level and supply-demand agreements. We extended the state of the art by introducing the methodological building blocks and foundations of management systems for the above-mentioned two perspectives. We validated our results by conducting experiments on a lab-grade scale cloud computing data center at the premises of HPE in Milano. The obtained results support the theoretical model by highlighting the excellent potential of flexible service level agreements in Green IT: 33\% of overall energy savings and 50\% of power demand reduction during demand-response periods in the case of data center federation.}, language = {en} } @phdthesis{Lachat2024, author = {Lachat, Paul}, title = {Detecting Inference Attacks Involving Sensor Data}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14149}, school = {Universit{\"a}t Passau}, pages = {xiii, 141 Seiten}, year = {2024}, abstract = {The collection of personal information by organizations has become increasingly essential for social interactions. 
Nevertheless, according to the GDPR (General Data Protection Regulation), organizations have to protect the collected data. Access Control (AC) mechanisms are traditionally used to secure information systems against unauthorized access to sensitive data. The increased availability of personal sensor data, thanks to IoT-oriented applications, motivates new services to offer insights about individuals. Consequently, data mining algorithms have been proposed to infer personal insights from collected sensor data. Although they can be used for genuine purposes, attackers can leverage those outcomes, combining them with other types of data, to further breach individuals' privacy. Thus, bypassing AC mechanisms thanks to such insights is a concrete problem. We propose an inference detection system based on the analysis of queries issued on a sensor database. The knowledge obtained through these queries, and the inference channels corresponding to the use of data mining algorithms on sensor data to infer individual information, are described using the Raw sensor data based Inference ChannEl Model (RICE-M). The detection is carried out by the RICE-M based inference detection System (RICE-Sy). RICE-Sy considers, at the time of a query, the knowledge that a user obtains via the new query and has obtained via his query history, and determines whether this is sufficient to allow that user to operate a channel. Thus, privacy protection systems can take advantage of the inferences detected by RICE-Sy, taking into account individuals' information obtained by attackers via a database of sensors, to further protect these individuals.}, language = {en} } @article{AnagnostopoulosTeymuriSeratietal.2023, author = {Anagnostopoulos, Nikolaos Athanasios and Teymuri, Benyamin and Serati, Reza and Rasti, Mehdi}, title = {LP-MAB: Improving the Energy Efficiency of LoRaWAN Using a Reinforcement-Learning-Based Adaptive Configuration Algorithm}, series = {Sensors}, volume = {23}, journal = {Sensors}, number = {4}, editor = {Xie, Bin and Wang, Ning and Gu, Yi and Stefanidis, Angelos}, publisher = {MDPI}, address = {Basel, Switzerland}, issn = {1424-8220}, doi = {10.3390/s23042363}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11853}, pages = {22 Seiten}, year = {2023}, abstract = {In the Internet of Things (IoT), Low-Power Wide-Area Networks (LPWANs) are designed to provide low energy consumption while maintaining a long communication range for End Devices (EDs). LoRa is a communication protocol that can cover a wide range with low energy consumption. To evaluate the efficiency of the LoRa Wide-Area Network (LoRaWAN), three criteria can be considered, namely the Packet Delivery Rate (PDR), Energy Consumption (EC), and coverage area. A set of transmission parameters has to be configured to establish a communication link. These parameters can affect the data rate, noise resistance, receiver sensitivity, and EC. The Adaptive Data Rate (ADR) algorithm is a mechanism to configure the transmission parameters of EDs, aiming to improve the PDR. Therefore, we introduce a new algorithm using the Multi-Armed Bandit (MAB) technique to configure the EDs' transmission parameters in a centralized manner on the Network Server (NS) side, while also improving the EC. The performance of the proposed algorithm, the Low-Power Multi-Armed Bandit (LP-MAB), is evaluated through simulation results and is compared with other approaches in different scenarios. 
The simulation results indicate that LP-MAB achieves a lower EC than other algorithms while maintaining a relatively high PDR in various circumstances.}, language = {en} }