@phdthesis{McLarren2021, author = {McLarren, Katharina}, title = {Religion and International Society - Approaches to Including Religion in the International Relations Research Agenda}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-12365}, school = {Universit{\"a}t Passau}, pages = {143 Seiten}, year = {2021}, abstract = {Religion can unite and divide; it can lead to a strengthening or a weakening of identity and legitimacy. Religion can stoke conflicts, but it can also pacify them - within societies and in international politics. Religion endures: it can exist independently of states, it can constitute them, and it can provide new forms of states, societies, and empires. Arguably, religion shapes or even constitutes the international society of states, an aspect so far neglected in the field of International Relations. The dissertation provides a new definition of religion for International Relations and the English School in particular. Based upon this understanding of religion, the five publications presented in the dissertation provide new analytical and theoretical concepts and approaches to fill the research gap. Religion is integrated into the theoretical framework of the English School in the form of a "prime institution" and with the help of the "quilt model". While the former expands the theoretical framework, the latter adds an analytical layer. Based upon this definition, religion is also introduced as a concept ("hybrid actorness") in Foreign Policy Analysis, opening it up to become less state-centric and more transnationally oriented, thereby boosting its relevance in view of the evolving international (global) society. In another step, the Securitization framework of analysis is expanded to include (freedom of) religion. By revisiting the publications, the dissertation is able to identify next steps in terms of avenues of research. Finally, the dissertation reveals areas of study which contribute to increasing the pertinence of IR, particularly of the English School.}, language = {en} } @phdthesis{Gerl2019, author = {Gerl, Armin}, title = {Modelling of a Privacy Language and Efficient Policy-based De-identification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7674}, school = {Universit{\"a}t Passau}, pages = {xviii, 257 Seiten}, year = {2019}, abstract = {The processing of personal information is omnipresent in our data-driven society, enabling personalized services, which are regulated by privacy policies. Although privacy policies are strictly defined by the General Data Protection Regulation (GDPR), no systematic mechanism is in place to enforce them. Especially if data is merged from several sources into a data-set with different associated privacy policies, managing and complying with all privacy requirements is challenging during the processing of the data-set. Privacy policies can vary due to different policies for each source or the personalization of privacy policies by individual users. Thus, there is a risk of negligent or malicious processing of personal data in defiance of privacy policies. To tackle this challenge, a privacy-preserving framework is proposed. Within this framework, privacy policies are expressed in the proposed Layered Privacy Language (LPL), which allows legal privacy policies and privacy-preserving de-identification methods to be specified. The policies are enforced by a Policy-based De-identification (PD) process.
The PD process enables efficient compliance with various privacy policies simultaneously while applying pseudonymization, personal privacy anonymization, and privacy models for the de-identification of the data-set. Thus, the privacy requirements of each individual privacy policy are enforced, filling the gap between legal privacy policies and their technical enforcement.}, subject = {Datenschutz}, language = {en} } @phdthesis{Niedermeier2020, author = {Niedermeier, Florian}, title = {Power-Adaptive Computing in Future Energy Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-9993}, school = {Universit{\"a}t Passau}, pages = {xvi, 204 Seiten}, year = {2020}, abstract = {The current electricity grid is undergoing major changes. There is increasing pressure to move away from power generation from fossil fuels, due both to ecological concerns and to fear of dependence on scarce natural resources. Increasing the share of decentralized generation from renewable sources is a widely accepted way to a more sustainable power infrastructure. However, this comes at the price of new challenges: generation from solar or wind power is not controllable and can be forecast only with limited accuracy. To compensate for the increasing volatility in power generation, exerting control on the demand side is a promising approach. By providing flexibility on the demand side, imbalances between power generation and demand may be mitigated. This work is concerned with developing methods to provide grid support on the demand side while limiting the associated costs. This is done in four major steps: first, the target power curve to follow is derived, taking into account both the goals of a grid authority and the costs of the respective load. In the following, the special case of data centers, as an instance of significant loads inside a power grid, is examined more closely. Data center services are adapted in such a way as to achieve the previously derived power curve. By means of hardware power demand models, the required adaptation of hardware utilization can be derived. The possibilities of adapting software services are investigated for the special use case of live video encoding. A method to minimize quality-of-experience loss while reducing power demand is presented. Finally, the possibility of applying probabilistic model checking to a continuous demand-response scenario is demonstrated.}, subject = {Energieversorgung}, language = {en} } @phdthesis{Bongers2020, author = {Bongers, Franziska Maria}, title = {Three essays on digital and non-digital transformations in business-to-business markets}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-9149}, school = {Universit{\"a}t Passau}, pages = {XVI, 231 Seiten}, year = {2020}, abstract = {Fundamental changes in business-to-business (B2B) buying behavior confront B2B supplier firms with unprecedented challenges. On the one hand, a rising share of industrial buyers demands digitalized offerings and processes from suppliers. Consequently, suppliers are urged to implement digital transformations by expanding the range of both digital offerings and processes. On the other hand, B2B buyers increasingly expect suppliers to provide individually tailored solutions to their idiosyncratic needs. Hence, suppliers are also required to implement non-digital transformations by providing offerings and processes that are customized to each customer's specific requirements. The rise of these digital and non-digital transformations calls established knowledge into question.
Thus, B2B marketing research and practice are urged to create a comprehensive understanding of digital and non-digital transformations by means of novel and empirically grounded insights and to derive actionable response strategies. In response, my dissertation addresses the overall research question of how B2B supplier firms can successfully implement both digital and non-digital transformations in three individual essays. In Essay 1, I offer a broader perspective on both digital and non-digital transformations by investigating digital service customization (i.e., the tailoring of digital B2B services to customers' individual needs). Through a systematic literature review and bibliometric analysis, I outline a comprehensive set of factors that favor the application of distinct digital service customization strategies. Essay 2 represents a deep dive into digital transformations of sales processes. By making use of two rich sets of qualitative interview material from supplier and buyer firms, I identify the challenges resulting for B2B salespeople from the introduction of digital sales channels into personal selling. Moreover, I uncover facilitating mechanisms that sales managers can employ to support salespeople in coping with digital sales channels. Finally, Essay 3 constitutes a deep dive into non-digital transformations. Based on qualitative interview material and survey data from matched sales manager-salesperson dyads, the essay explores how configurations of individual salespeople's personal and procedural competencies facilitate success at selling customer solutions (i.e., highly customized, performance-oriented offerings comprising products and/or services). The essay shows that successfully selling customized offerings like solutions hinges on salespeople's unique configurations of present and absent competencies. In a nutshell, these essays provide three major insights into how B2B suppliers can successfully implement digital and non-digital transformations. First, they underscore that a comprehensive understanding of the origins and spillover effects of transformations is a key prerequisite to successfully implementing them. Second, they unveil that digital and non-digital transformations have an impact on multiple organizational levels. Third, they point out important resources and capabilities that help suppliers to successfully implement transformations, be they digital or non-digital. With this dissertation, I make substantial contributions to the broader literature on digital and non-digital transformations in B2B contexts. At the same time, my dissertation provides hands-on implications for managers in B2B supplier firms that are facing fundamental transformations in the marketplace—both digital and non-digital in nature.}, language = {en} } @phdthesis{SalehiRizi2021, author = {Salehi Rizi, Fatemeh}, title = {Graph Representation Learning for Social Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-9211}, school = {Universit{\"a}t Passau}, pages = {ix, 130 Seiten}, year = {2021}, abstract = {Online social networks provide a rich source of information about millions of users worldwide. However, due to their sparsity and complex structure, analyzing these networks is quite challenging and expensive. Recently, graph embedding has emerged as a way to map networked data into low-dimensional representations, i.e., vector embeddings. These representations are fed into off-the-shelf machine learning algorithms to simplify and speed up graph analytic tasks.
Given the immense importance of social network analysis, in this thesis, we aim to study graph embedding for social networks in three directions. Firstly, we focus on social networks at the microscopic level, primarily to encode the structural characteristics of users' personal networks, so-called ego networks. These representations are utilized in evaluation tasks whose performance depends on relational information from direct neighbors. For example, social circle prediction and event attendance inference both need structural information from neighbors in social networks. Secondly, we explore how to assess the content of vector embeddings in terms of topological properties. We propose two approaches: 1) a learning-to-rank algorithm in which the model weights reveal the importance of properties at the subgraph level (ego networks), and 2) a regression model for the direct approximation of network statistical properties at the vertex level. Thirdly, we propose extensions of graph embedding to capture sign information or additional content of social networks. Users in social media often express their feelings and attitudes towards others, which forms sentiment links besides social links. We design a joint objective function whose terms capture the semantics of both social and sentiment links simultaneously. We also propose a multi-task learning framework for networks with attributes and labels by stacking autoencoders. The weights of the learning tasks are automatically assigned via an adaptive loss weighting layer.}, language = {en} } @article{FrankBoettgerMexisetal.2023, author = {Frank, Florian and B{\"o}ttger, Simon and Mexis, Nico and Anagnostopoulos, Nikolaos Athanasios and Mohamed, Ali and Hartmann, Martin and Kuhn, Harald and Helke, Christian and Arul, Tolga and Katzenbeisser, Stefan and Hermann, Sascha}, title = {CNT-PUFs: highly robust and heat-tolerant carbon-nanotube-based physical unclonable functions}, volume = {13}, number = {22}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/nano13222930}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14011}, pages = {24 Seiten}, year = {2023}, abstract = {In this work, we explored a highly robust and unique Physical Unclonable Function (PUF) based on the stochastic assembly of single-walled Carbon NanoTubes (CNTs) integrated within a wafer-level technology. Our work demonstrated that the proposed CNT-based PUFs are exceptionally robust, with an average fractional intra-device Hamming distance well below 0.01, both at room temperature and under varying temperatures in the range from 23 °C to 120 °C. We attributed the excellent heat tolerance to comparatively low activation energies of less than 40 meV extracted from an Arrhenius plot. As the number of unstable bits in the examined implementation is extremely low, our devices allow for lightweight and simple error correction, simply by selecting stable cells, thereby diminishing the need for complex error-correction schemes.
Through a significant number of tests, we demonstrated the capability of novel nanomaterial devices to serve as highly efficient hardware security primitives.}, language = {en} } @article{HassenBenAhmed, author = {Hassen, Wiem Fekih and Ben Ahmed, Mariem}, title = {Optimization of a Redox-Flow Battery Simulation Model Based on a Deep Reinforcement Learning Approach}, series = {Batteries}, volume = {10}, journal = {Batteries}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/batteries10010008}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13994}, pages = {20 Seiten}, abstract = {Vanadium redox-flow batteries (VRFBs) have played a significant role in hybrid energy storage systems (HESSs) over the last few decades owing to their unique characteristics and advantages. Hence, the accurate estimation of the VRFB model holds significant importance in large-scale storage applications, as such models are indispensable for incorporating the distinctive features of energy storage systems and control algorithms within embedded energy architectures. In this work, we propose a novel approach that combines model-based and data-driven techniques to predict battery state variables, i.e., the state of charge (SoC), voltage, and current. Our proposal leverages enhanced deep reinforcement learning techniques, specifically deep Q-learning (DQN), by combining Q-learning with neural networks to optimize the VRFB-specific parameters, ensuring a robust fit between the real and simulated data. Our proposed method outperforms the existing approach in voltage prediction. Subsequently, we enhance the proposed approach by incorporating a second deep RL algorithm—dueling DQN—which is an improvement of DQN, resulting in a 10\% improvement in the results, especially in terms of voltage prediction. The proposed approach results in an accurate VRFB model that can be generalized to several types of redox-flow batteries.}, language = {en} } @article{RowedderWilcoxBrandtstaedter, author = {Rowedder, Simon and Wilcox, Phill and Brandtst{\"a}dter, Susanne}, title = {Negotiating Chinese Infrastructures of Modern Mobilities: Insights from Southeast Asia}, series = {Advances in Southeast Asian Studies (ASEAS)}, volume = {16}, journal = {Advances in Southeast Asian Studies (ASEAS)}, number = {2}, doi = {10.14764/10.ASEAS-0103}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13973}, pages = {175 -- 188}, abstract = {Since the launch of the Belt and Road Initiative (BRI), particular modes of movement have become integral to its vision of what it means to be a modern world citizen. Nowhere is this more apparent than in Southeast Asia, where China-backed infrastructure projects expand, and at great speed. Such infrastructure projects are carriers of particular versions of modernity, promising rapid mobility to populations better connected than ever before. Yet, until now, little attention has been paid to how mobility and promises of mobility intersect with local understandings of development. In the introduction to this special issue, we argue that it is essential to think about the role infrastructure plays in forms of development that place connectivity at the center. We suggest that considering development, mobility and modernity together is enlightening because it interrogates the connections between these interlocking themes.
Through an introduction to five ethnographically grounded papers and two commentaries, all of which engage with infrastructures in different contexts throughout Southeast Asia, we demonstrate that there are significant gaps between official policy and lived experience. This makes the need to interrogate what infrastructure, mobilities, and global China really mean all the more pressing.}, language = {en} } @article{HassenImenAzzouz, author = {Hassen, Wiem Fekih and Azzouz, Imen}, title = {Optimization of Electric Vehicles Charging Scheduling Based on Deep Reinforcement Learning: A Decentralized Approach}, series = {Energies}, volume = {16}, journal = {Energies}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en16248102}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-13985}, pages = {18 Seiten}, abstract = {The worldwide adoption of Electric Vehicles (EVs) has brought promising advancements toward a sustainable transportation system. However, the effective charging scheduling of EVs is not a trivial task due to the increase in the load demand at the Charging Stations (CSs) and the fluctuation of electricity prices. Moreover, other issues that raise concern among EV drivers are the long waiting time and the inability to charge the battery to the desired State of Charge (SOC). In order to alleviate the range anxiety of users, we develop a Deep Reinforcement Learning (DRL) approach that provides the optimal charging time slots for an EV based on photovoltaic power prices, the current EV SOC, the charging connector type, and the history of load demand profiles collected in different locations. Our implemented approach maximizes the EV profit while leaving EV drivers a margin of liberty to select the preferred CS and the best charging time (i.e., morning, afternoon, evening, or night). The analysis of the results demonstrates the effectiveness of the DRL model in reducing the charging costs of the EV by up to 60\%, while providing a full charging experience with a waiting time of at most 30 min.}, language = {en} } @phdthesis{Dengel2020, author = {Dengel, Andreas}, title = {Effects of Immersion and Presence on Learning Outcomes in Immersive Educational Virtual Environments for Computer Science Education}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8413}, school = {Universit{\"a}t Passau}, pages = {ix, 239 Seiten}, year = {2020}, abstract = {Abstract concepts and ideas from Computer Science Education can benefit from immersive visualizations that can be provided in virtual environments. This thesis explores the effects of the key characteristics of virtual environments, immersion and presence, on learning outcomes in Educational Virtual Environments for learning Computer Science. Immersion is a quantifiable description of the technology that immerses the user in the virtual environment; presence describes the subjective feeling of 'being there'. While technological immersion can be seen as a strong predictor for presence, motivational traits, cognition, and the emotional state of the user also influence presence. A possible localization of these technological and person-specific variables in Helmke's pedagogical supply-use framework is introduced as the Educational Framework for Immersive Learning (EFiL). Presence is emphasized as a central criterion influencing immersive learning processes.
The EFiL provides an educational understanding of immersive learning as learning activities initiated by a mediated or medially enriched environment that evokes a sense of presence. The idea of Computer Science Unplugged is pursued by using Virtual Reality technology in order to provide interactive virtual learning experiences whose depictions can be accurate, schematizing, substantiating, or metaphorical. For exploring the effects of virtual environment characteristics on learning, the idea of Computer Science Replugged focuses on 'hands-on' activities and combines them with immersive technology. By providing a perception of non-mediation, Computer Science Replugged might enable experiences that can contribute additional possibilities to the real activity or enable new activities for teaching Computer Science. Three game-based Educational Virtual Environments were developed as treatments: 'Bill's Computer Workshop' introduces the components of a computer; 'Fluxi's Cryptic Potions' uses a metaphor to teach asymmetric encryption; 'Pengu's Treasure Hunt' is an immersive visualization of finite state machines. A first study with 23 middle school students was conducted to test the instruments in terms of selectivity, the devices' induced levels of presence, and adequacy of the selected learning objectives. The second study with 78 middle school students playing the environments on different devices (laptop, Mobile Virtual Reality, or head-mounted display) assessed motivational, cognitive, and emotional factors, as well as presence and learning outcomes. An overall analysis showed that pre-test performance, presence, and the previous scholastic performance in Maths and German predict the learning outcomes in the virtual environments. Presence could be predicted by the students' positive emotions and by the technological immersion. The level of immersion had no significant effect on learning outcomes. While a good-fitting path analysis model indicated that the assumed relations deriving from the EFiL are largely correct for 'Bill's Computer Workshop' and 'Fluxi's Cryptic Potions', not all results of the overall path analysis were significant for the analyses of the particular environments. Presence seems to have a small effect on learning outcomes while being influenced by technological and emotional factors. Even though the level of immersion can be used to predict the level of presence, it is not an appropriate predictor for learning outcomes. For future studies, the questionnaires have to be revised, as some of them suffered from poor scale reliabilities. While the second study could provide indications that the localization of presence and immersion in an existing educational supply-use framework seems to be appropriate, many factors had to be blanked out. The thesis contributes to existing research as it adds factors that are crucial for learning processes to the discussion on immersive learning from an educational perspective and assesses these factors in hands-on activities in Educational Virtual Environments for Computer Science Education.}, language = {en} } @phdthesis{Calmels2020, author = {Calmels, Dorothea}, title = {Job Sequencing and Tool Switching Problems with a Generalisation to Non-Identical Parallel Machines}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8436}, school = {Universit{\"a}t Passau}, pages = {v, 103, lxxiv Seiten}, year = {2020}, abstract = {Manufacturing tools have been dominating the manufacturing process since the 1960s.
The job sequencing and tool switching problem is an NP-hard combinatorial optimization problem that was first introduced in the context of flexible manufacturing systems in the late 1980s. Since then, production systems have undisputedly changed and improved, but manufacturing tools still dominate manufacturing processes. Production and system operation processes are continuously adjusted and optimised to changing customer requirements. If the product variety requires an increasing number of tools for processing that exceeds the local tool magazine capacity of the manufacturing system, tool switches become necessary. Although tool changing times within a manufacturing centre or cell may nowadays be very small due to the high degree of automation, tool switching within a dynamic production environment is still a time-consuming process that must be avoided. In order to minimize the total tool setup time to enhance productivity, the objectives of the basic job sequencing and tool switching problem are to sequence a set of jobs and simultaneously to determine the best tool loading. Therefore, job sequencing and tool switching problems are gaining considerable attention. Several solution approaches to the standard problem and related versions of the problem exist. The first part of this dissertation assesses the current state of the art of the job sequencing and tool switching problem and provides a classification scheme for literature on the job sequencing and tool switching problem and its variations. Only a few authors consider generalisations of the problem because the level of complexity of extended problems is high. A general version of the job sequencing and tool switching problem with non-identical parallel machines and sequence-dependent setup times is described in this dissertation. A novel mathematical model based on time periods, which can be adapted to different objective functions, is presented and analysed. The last part of this dissertation is a quantitative evaluation of fast and effective construction heuristics as well as of an iterated local search algorithm tested on a new set of benchmark instances. As such, this dissertation provides a broad basis for future evaluations of solution approaches to the job sequencing and tool switching problem with non-identical parallel machines and sequence-dependent setup times as well as a basis for further generalisations of the problem, such as tool availability constraints or tool-size-dependent variations.}, language = {en} } @phdthesis{Schloetterer2020, author = {Schl{\"o}tterer, J{\"o}rg}, title = {Supporting the Discovery of Long-Tail Resources on the Web}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8539}, school = {Universit{\"a}t Passau}, pages = {xi, 218 Seiten}, year = {2020}, abstract = {A plethora of resources made available via retrieval systems in digital libraries remains untapped in the so-called long tail of the Web. These long-tail websites get considerably fewer visits than major Web hubs. Zero-effort queries ease the discovery of long-tail resources by proactively retrieving and presenting information based on a user's context. However, zero-effort queries over existing digital library structures are challenging, since the underlying retrieval system is only accessible via an API. The information need must be expressed by a query, instead of optimizing the ranking between context and resources in the retrieval system directly.
We address three research questions that arise from replacing the user's information-seeking process with zero-effort queries. Our first question addresses the transformation of a user query to an automatic query, derived from the context. We present means to 1) identify the relevant context on different levels of granularity, 2) derive an information need from the context via keyword extraction and personalization, and 3) express this information need in a query scheme that avoids over- or under-specified queries. We address the cold start problem with an approach to bootstrap user profiles from social media, even for passive users. With the second question, we address the presentation of resources in zero-effort query scenarios, presenting guidelines for presentation interfaces in the browser and a visualization of the triadic relationship between context, query and results. QueryCrumbs, a compact query history visualization, supports recalling information found in the past and supports exploratory search by visualizing qualitative and quantitative query similarity. Our last question addresses the gap between (simple) keyword queries and the representation of resources by rich and complex meta-data. We investigate and extend feature representation learning techniques centered around the skip-gram model with negative sampling. Finally, we present an approach to learn representations from network and text jointly that can cope with the partial absence of one modality. Experimental results show close-to-human performance of our zero-effort query and user profile generation approach, and they show the visualizations to be helpful in terms of transparency, efficiency and support for exploratory search. These results indicate that the proposed zero-effort query approach indeed eases the discovery of long-tail resources and the accompanying visualizations further facilitate this process. The joint representation model provides a first step to bridge the gap between query and resource representation, and we plan to follow and investigate this route further in the future.}, subject = {Data Science}, language = {en} } @phdthesis{Ganser2019, author = {Ganser, Stefan}, title = {Iterative Schedule Optimization for Parallelization in the Polyhedron Model}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7936}, school = {Universit{\"a}t Passau}, pages = {xvii, 176 Seiten}, year = {2019}, abstract = {In high-performance computing, one primary objective is to exploit the performance that the given target hardware can deliver to the fullest. Compilers that have the ability to automatically optimize programs for a specific target hardware can be highly useful in this context. Iterative (or search-based) compilation requires little or no prior knowledge and can adapt more easily to concrete programs and target hardware than static cost models and heuristics. Thereby, iterative compilation helps in situations in which static heuristics do not reflect the combination of input program and target hardware well. Moreover, iterative compilation may enable the derivation of more accurate cost models and heuristics for optimizing compilers. In this context, the polyhedron model is of help as it provides not only a mathematical representation of programs but, more importantly, a uniform representation of complex sequences of program transformations by schedule functions. The latter facilitates the systematic exploration of the set of legal transformations of a given program.
Early approaches to purely iterative schedule optimization in the polyhedron model do not limit their search to schedules that preserve program semantics and thereby suffer from the need to explore large numbers of illegal schedules. More recent research ensures the legality of program transformations but presumes a sequential rather than a parallel execution of the transformed program. Other approaches do not perform a purely iterative optimization. We propose an approach to iterative schedule optimization for parallelization and tiling in the polyhedron model. Our approach targets loop programs that profit from data locality optimization and coarse-grained loop parallelization. The schedule search space can be explored either randomly or by means of a genetic algorithm. To determine a schedule's profitability, we rely primarily on measuring the transformed code's execution time. While benchmarking is accurate, it increases the time and resource consumption of program optimization tremendously and can even make it impractical. We address this limitation by proposing to learn surrogate models from schedules generated and evaluated in previous runs of the iterative optimization and to replace benchmarking by performance prediction to the extent possible. Our evaluation on the PolyBench 4.1 benchmark set reveals that, in a given setting, iterative schedule optimization yields significantly higher speedups in the execution of the program to be optimized. Surrogate performance models learned from training data that was generated during previous iterative optimizations can reduce the benchmarking effort without strongly impairing the optimization result. A prerequisite for this approach is a sufficient similarity between the training programs and the program to be optimized.}, subject = {Parallelrechner}, language = {en} } @phdthesis{Kasinathan2021, author = {Kasinathan, Prabhakaran}, title = {Workflow-aware access control for the Internet of Things}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8915}, school = {Universit{\"a}t Passau}, pages = {xxiii, 214 Seiten}, year = {2021}, abstract = {IoT is defined as a paradigm where "things" have sensing, actuating, communicating, and self-configuring abilities, and are connected to each other and to the Internet. Recent advancements in the manufacturing industry have helped to produce embedded devices with various sensors and actuators in large numbers at reduced cost. As part of the IoT revolution, everyday devices such as televisions, refrigerators, cars, and even industrial machines are now connected IoT devices. Recent studies have predicted that by 2025 there will be over 75 billion such IoT devices connected to the Internet. The providers of IoT-based services want to integrate their services to satisfy customer requirements. For example, in the mobility scenario, different mobility solution providers want to offer a multi-modal ticket to their customers jointly. In such a distributed and loosely coupled environment, each owner and stakeholder wants to secure his/her own integrity, confidentiality, and functionality goals. This means that distributed rules and conditions defined by the individual owners must be enforced on the participating entities (e.g., customers or partners using their services). The owners and stakeholders may not necessarily trust each other's actions. Therefore, a mechanism is required that guarantees the rules and conditions specified by the different owners.
Attacks on IoT devices and similar computing systems are increasing and getting more advanced. IoT devices are often constrained, i.e., they have limited processing power, memory, and energy. Security mechanisms designed for traditional computing systems, e.g., computers, servers, or mobile computing devices such as smartphones, may not fit such constrained IoT devices. Weak security mechanisms and unenforced security measures were among the main reasons for recent successful attacks on IoT devices and services. As IoT is now used in many sensitive places, including critical infrastructures, securing these systems becomes more critical than ever. This thesis focuses on developing mechanisms that secure IoT devices and services and on enforcing the rules and conditions specified by the owners on entities that want to access the owners' resources. In classical computer systems, security automata are used for specifying security policies, and monitoring mechanisms are used for enforcing such policies. For instance, a reference monitor observes and stops the execution when the security policies are about to be violated; thus, the security policies are enforced. To prevent an adversary from using protected IoT devices or services for malicious purposes, it must be ensured that a workflow is followed to access the protected resource. In distributed IoT systems where the policies are governed by different owners, each owner would like to specify their rules and conditions in their workflows. The workflows contain tasks that must be performed in a particular order. The goal of this thesis is to develop mechanisms to specify and enforce these workflows in the distributed IoT environment. This thesis introduces a distributed Workflow-aware Access Control (WFAC) framework that restricts the entities to do only what they are allowed to do in a collaborative environment. To gain access to a service protected by the WFAC framework, every workflow participant must prove that he/she is in a particular state of an authorized workflow. Authorized means two things: (a) the owner has authorized the workflow to be executed; (b) the workflow participant is authorized to execute it. This restricts the adversary's access to the devices and their services. The security policies defined by different owners are modeled as workflows and specified using Petri Nets. The policies are then enforced with the help of the WFAC framework, which supports error handling, accountability, integration of practitioner-friendly tools, and interoperability with existing security mechanisms such as OAuth. Thus, the WFAC framework guarantees the integrity of workflows in a distributed environment.}, language = {en} } @phdthesis{Tueno2020, author = {Tueno, Anselme}, title = {Multiparty Protocols for Tree Classifiers}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8251}, school = {Universit{\"a}t Passau}, pages = {xvii, 171 Seiten}, year = {2020}, abstract = {Cryptography is the scientific study of techniques for securing information and communication against adversaries. It is about designing and analyzing encryption schemes and protocols that protect data from unauthorized reading. However, in our modern information-driven society with highly complex and interconnected information systems, encryption alone is no longer enough as it makes the data unintelligible, preventing any meaningful computation without decryption. On the one hand, data owners want to maintain control over their sensitive data.
On the other hand, there is a high business incentive for collaborating with an untrusted external party. Modern cryptography encompasses different techniques, such as secure multiparty computation, homomorphic encryption or order-preserving encryption, that enable cloud users to encrypt their data before outsourcing it to the cloud while still being able to process and search on the outsourced and encrypted data without decrypting it. In this thesis, we rely on these cryptographic techniques for computing on encrypted data to propose efficient multiparty protocols for order-preserving encryption, decision tree evaluation and kth-ranked element computation. We start with order-preserving encryption (OPE), which allows data to be encrypted while still enabling efficient range queries on the encrypted data. However, OPE is symmetric, limiting the use case to one client and one server. Imagine a scenario where a Data Owner (DO) outsources encrypted data to the Cloud Service Provider (CSP) and a Data Analyst (DA) wants to execute private range queries on this data. Then either the DO must reveal its encryption key or the DA must reveal the private queries. We overcome this limitation by allowing the equivalent of a public-key OPE. Decision trees are common and very popular classifiers because they are explainable. The problem of evaluating a private decision tree on private data consists of a server holding a private decision tree and a client holding a private attribute vector. The goal is to classify the client's input using the server's model such that the client learns only the result of the classification, and the server learns nothing. In a first approach, we represent the tree as an array and execute only d interactive comparisons (instead of 2^d as in existing solutions), where d denotes the depth of the tree. In a second approach, we delegate the complete tree evaluation to the server using somewhat or fully homomorphic encryption, where the ciphertexts are encrypted under the client's public key. A generalization of a decision tree is a random forest, which consists of many decision trees. A classification with a random forest evaluates each decision tree in the forest and outputs the classification label that occurs most often. Hence, the classification labels are ranked by their number of occurrences and the final result is the best-ranked one. The best-ranked element is a special case of the kth-ranked element. In this thesis, we consider the secure computation of the kth-ranked element in a distributed setting with applications in benchmarking and auctions. We propose different approaches for privately computing the kth-ranked element in a star network, using either garbled circuits or threshold homomorphic encryption.}, subject = {Mathematik}, language = {en} } @phdthesis{Schmid2020, author = {Schmid, Angelika}, title = {Geographic and Social Space in Latent Factor Models - Four Essays}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7947}, school = {Universit{\"a}t Passau}, pages = {XLVI, 187 Seiten}, year = {2020}, abstract = {Geography, social context, time, and cultural mindset are four (out of many) cornerstones of human interaction. When building statistical models, their consideration is vital: They all cause dependency between individual observations, violating assumptions of independence and exchangeability. While this can be problematic and inhibit the unbiased inference of parameters, it can also be a fruitful source of insights and enhance prediction performance.
One class of models that serves to manage or profit from the presence of dependence is the class of latent variable models. This class of models assumes that the presence of non-explicit, unobserved causes of continuous or discrete nature can explain the observed correlations. Latent variable models explicitly take account of dependency, for example, by modeling an unobserved local source of pollution as a continuous spatial variable. Through their widespread use for information filtering, link prediction, and statistical inference, latent variable models have come to have a substantial impact on our daily lives and the way we consume information. The four articles in this thesis shed light on assumptions, usage, and potential drawbacks of latent variable models in various contexts that involve geographic and interaction data. We model unobserved sources of pollution in geophysical data, explore individual taste and mindsets in cross-cultural contexts, and predict the evolution of social relationships in software development projects. This combination of various perspectives contributes to the interdisciplinary exchange of methodological knowledge on the modeling of dependent data.}, language = {en} } @phdthesis{ParraRodriguez2019, author = {Parra Rodriguez, Juan David}, title = {Computational Resource Abuse in Web Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7706}, school = {Universit{\"a}t Passau}, pages = {xi, 158 Seiten}, year = {2019}, abstract = {Internet browsers include Application Programming Interfaces (APIs) to support Web applications that require complex functionality, e.g., to let end users watch videos, make phone calls, and play video games. Meanwhile, many Web applications employ the browser APIs to rely on the user's hardware to execute intensive computation, access the Graphics Processing Unit (GPU), use persistent storage, and establish network connections. However, providing access to the system's computational resources, i.e., processing, storage, and networking, through the browser creates an opportunity for attackers to abuse resources. Principally, the problem occurs when an attacker compromises a Web site and includes malicious code to abuse its visitors' computational resources. For example, an attacker can abuse the user's system networking capabilities to perform a Denial of Service (DoS) attack against third parties. What is more, computational resource abuse has not received widespread attention from the Web security community because most of the current specifications are focused on content and session properties such as isolation, confidentiality, and integrity. Our primary goal is to study computational resource abuse and to advance the state of the art by providing a general attacker model, multiple case studies, a thorough analysis of available security mechanisms, and a new detection mechanism. To this end, we implemented and evaluated three scenarios where attackers use multiple browser APIs to abuse networking, local storage, and computation. Further, depending on the scenario, an attacker can use browsers to perform Denial of Service against third-party Web sites, create a network of browsers to store and distribute arbitrary data, or use browsers to establish anonymous connections similar to The Onion Router (Tor). Our analysis also includes a real-life resource abuse case found in the wild, i.e., CryptoJacking, where thousands of Web sites forced their visitors to perform crypto-currency mining without their consent.
In the general case, the attacks presented in this thesis share the attacker model and two key characteristics: 1) the browser's end user remains oblivious to the attack, and 2) an attacker has to invest few resources in comparison to the resources he obtains. In addition to the analysis of the attacks, we show how existing and upcoming Web security enforcement mechanisms can hinder an attacker, and we discuss their drawbacks. Moreover, we propose a novel detection approach based on browser API usage patterns. Finally, we evaluate the accuracy of our detection model, after training it with the real-life crypto-mining scenario, through a large-scale analysis of the most popular Web sites.}, subject = {Computersicherheit}, language = {en} } @phdthesis{Jurgovsky2019, author = {Jurgovsky, Johannes}, title = {Context-Aware Credit Card Fraud Detection}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7622}, school = {Universit{\"a}t Passau}, pages = {xvii, 160 Seiten}, year = {2019}, abstract = {Credit card fraud has emerged as a major problem in the electronic payment sector. In this thesis, we study data-driven fraud detection and address several of its intricate challenges by means of machine learning methods, with the goal of identifying fraudulent transactions that have been issued illegitimately on behalf of the rightful card owner. In particular, we explore several means to leverage contextual information beyond a transaction's basic attributes on the transaction level, sequence level and user level. On the transaction level, we aim to identify fraudulent transactions which, in terms of their attribute values, are globally distinguishable from genuine transactions. We provide an empirical study of the influence of class imbalance and forecasting horizons on the classification performance of a random forest classifier. We augment transactions with additional features extracted from external knowledge sources and show that external information about countries and calendar events improves classification performance most noticeably on card-not-present transactions. On the sequence level, we aim to detect frauds that are inconspicuous in the background of all transactions but peculiar with respect to the short-term sequence they appear in. We use a Long Short-term Memory network (LSTM) for modeling the sequential succession of transactions. Our results suggest that LSTM-based modeling is a promising strategy for characterizing sequences of card-present transactions, but it is not adequate for card-not-present transactions. On the user level, we elaborate on feature aggregations and propose a flexible concept allowing us to define numerous features by means of a simple syntax. We provide a CUDA-based implementation for the computationally expensive extraction with a speed-up of two orders of magnitude over a single-core implementation. Our feature selection study reveals that aggregates extracted from users' transaction sequences are more useful than those extracted from merchant sequences. Moreover, we discover multiple sets of candidate features with performance equivalent to that of manually engineered aggregates while being structurally different.
Regarding future work, we motivate the usage of simple and transparent machine learning methods for credit card fraud detection, and we sketch a simple user-focused modeling approach.}, subject = {Kreditkartenmissbrauch}, language = {en} } @phdthesis{Taubmann2020, author = {Taubmann, Benjamin}, title = {Improving Digital Forensics and Incident Analysis in Production Environments by Using Virtual Machine Introspection}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8319}, school = {Universit{\"a}t Passau}, pages = {ix, 153 Seiten}, year = {2020}, abstract = {Main memory forensics and its special form, virtual machine introspection (VMI), are powerful tools for digital forensics and can be used to improve the security of computer-based systems. However, their use in production systems is often not possible. This work identifies the causes and offers practical solutions to apply these techniques in cloud computing and on mobile devices to improve digital forensics and incident analysis. Four key challenges must be tackled. The first challenge is that many existing solutions are not reproducible, for example, because the corresponding software components are unavailable, obsolete, or incompatible. The use of these tools is also often complex, and incorrect use can lead to a crash of the system to be monitored. To solve this problem, this thesis describes the design and implementation of Libvmtrace, which is a framework for the introspection of Linux-based virtual machines. The focus of the developed design is to implement frequently used methods in encapsulated modules so that they are easy for developers to use, optimize and test. The second challenge is that many production systems do not provide an interface for main memory forensics and virtual machine introspection. To address this problem, this thesis describes possible solutions for how such an interface can be implemented on mobile devices and in cloud environments designed to protect main memory from unprivileged access. We discuss how cold boot attacks, the ARM TrustZone, and the hypervisor of cloud servers can be used to acquire data from storage. The third challenge is how to reconstruct information from main memory efficiently. This thesis describes how this question can be addressed by means of two practical examples. The first example involves extracting the keys of encrypted TLS connections from the main memory of applications to decrypt network traffic without affecting the performance of the monitored application. The TLSKex and DroidKex architectures describe two approaches to localize the keys efficiently with the help of semantic knowledge about the main memory of applications. The second example discusses how to monitor and document SSH sessions of potential attackers from outside a virtual machine. It is important that the monitoring routines are not noticed by an attacker. To achieve this, we evaluate how to optimize the performance of the monitoring mechanism. The fourth challenge is how to deal with the performance degradation caused by introspection in production systems. This thesis discusses how this can be achieved using the example of a SIEM system. To reduce the performance overhead, we describe how to configure the monitoring routine to collect only the information needed to detect incidents.
Also, we describe two approaches that permit the monitoring routine to be dynamically adjusted at runtime to extract more information if necessary, so that incidents can be better analyzed.}, subject = {Computerforensik}, language = {en} } @phdthesis{Horaček2020, author = {Hor{\´a}ček, Jan}, title = {Algebraic and Logic Solving Methods for Cryptanalysis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7731}, school = {Universit{\"a}t Passau}, pages = {v, 154 Seiten}, year = {2020}, abstract = {Algebraic solving of polynomial systems and satisfiability of propositional logic formulas are not two completely separate research areas, as it may appear at first sight. In fact, many problems coming from cryptanalysis, such as algebraic fault attacks, can be rephrased as solving a set of Boolean polynomials or as deciding the satisfiability of a propositional logic formula. Thus, one can analyze the security of cryptosystems by applying standard solving methods from computer algebra and SAT solving. This doctoral thesis is dedicated to studying solvers based on logic and algebra separately, as well as to integrating them into one, such that the combined solvers become more powerful tools for cryptanalysis. This dissertation is divided into three parts. In the first part, we recall some theory and basic techniques for algebraic and logic solving. We focus mainly on DPLL-based SAT solving and techniques that are related to border bases and Gr{\"o}bner bases. In particular, we describe in detail the Border Basis Algorithm and discuss its specialized version for Boolean polynomials, called the Boolean Border Basis Algorithm. In the second part of the thesis, we deal with connecting solvers based on algebra and logic. The ultimate goal is to combine the strength of different solvers into one. Namely, we fuse the XOR reasoning from algebraic solvers with the lightweight, efficient design of SAT solvers. As a first step in this direction, we design various conversions from sets of clauses to sets of Boolean polynomials, and vice versa, such that solutions and models are preserved via the conversions. In particular, based on a block-building mechanism, we design a new blockwise algorithm for the CNF-to-ANF conversion, which is geared towards producing fewer and lower-degree polynomials. The above conversions allow us to integrate both solvers via a communication interface. To reach an even tighter integration, we consider proof systems that combine resolution and polynomial calculus, i.e., the two most widely used proof systems in logic and algebraic solving. Based on such a proof system, which we call SRES, we introduce new types of solving algorithms that demonstrate the synergy between Gr{\"o}bner-like and DPLL-like solving. At the end of the second part of the dissertation, we provide some experiments based on a new benchmark, which illustrate that our new method based on DPLL has the potential to outperform CDCL SAT solvers. In the third part of the thesis, we focus on practical attacks on various cryptographic primitives. For instance, we apply SAT solvers in the case of algebraic fault attacks on the symmetric cipher LED and on derivatives of the block cipher AES. The main goal there is to derive so-called fault equations automatically from the hardware description of the cryptosystem and thus to automate the attack.
To give some extra power to a SAT solver that inverts the hash functions SHA-1 and SHA-2, we describe how to tweak the SAT solver using a programmatic interface such that the propagation of the solver, and thus the attack itself, is improved.}, subject = {Kryptoanalyse}, language = {en} }
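The CNF-to-ANF conversion mentioned in the Hor{\´a}ček entry above rests on a standard encoding: a clause is falsified only when all of its literals are false, so over GF(2) a clause l_1 OR ... OR l_k becomes the polynomial equation prod_{positive x}(x + 1) * prod_{negated x}(x) = 0, whose solutions are exactly the satisfying assignments. The following Python sketch illustrates that textbook encoding for a single clause; it is a minimal illustration, not code from the thesis (whose blockwise algorithm is more involved), and the helper name clause_to_anf is hypothetical.

def clause_to_anf(clause):
    """Encode a CNF clause as an ANF polynomial p with p = 0 iff satisfied.

    A literal is an int: v stands for x_v, -v for NOT x_v. A polynomial
    is a set of monomials over GF(2); a monomial is a frozenset of
    variable indices, with frozenset() denoting the constant 1.
    """
    poly = {frozenset()}  # start with the constant polynomial 1
    for lit in clause:
        v = abs(lit)
        # "literal is false" indicator: x_v + 1 for x_v, x_v for NOT x_v
        factor = {frozenset([v]), frozenset()} if lit > 0 else {frozenset([v])}
        product = set()
        for m1 in poly:
            for m2 in factor:
                # multiply monomials (union suffices, since x^2 = x) and
                # add coefficients mod 2 via symmetric difference
                product ^= {m1 | m2}
        poly = product
    return poly  # product of all indicators; evaluates to 1 iff clause falsified

# Example: x1 OR NOT x2 yields x1*x2 + x2 = 0
print(clause_to_anf([1, -2]))  # {frozenset({1, 2}), frozenset({2})}

Because monomial multiplication uses set union, the idempotency x^2 = x of Boolean polynomials holds automatically, and the symmetric difference cancels coefficients mod 2, so a tautological clause such as [1, -1] collapses to the zero polynomial, which is satisfied by every assignment.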