@phdthesis{Maerz2016, author = {M{\"a}rz, Armin}, title = {Three Essays on Understanding Mobile Consumer Behavior: Business Models, Perceptions, and Features}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3948}, school = {Universit{\"a}t Passau}, pages = {145 S.}, year = {2016}, abstract = {For about a decade, consumers have been carrying the Internet in their pockets. The rapid penetration of modern smartphones has meant that more than two-thirds of the people in the West can access and use online resources, anytime and anywhere. Consumers can also communicate and share their consumption experiences instantaneously. Platforms reach users for time-critical events through highly personal communication channels, in the sense that smartphones serve as constant companions. Many mobile applications and their basic services and contents are also available for free. The digital and mobile worlds are thus changing the very means of communication, highlighting the pressing need for marketing research and practice to find the opportunities and meet the challenges of the mobile Internet. In particular, scientific investigations are required to describe new business models in the free e-service industry and the consumer behavior affected by mobile features. This thesis examines these topics in three essays. Study 1 considers business models that offer their services without charge. Offering services for free is symptomatic not only of mobile apps (90\% of all apps are available for free) but of the digital economy in general. For companies offering free e-services, this situation raises several important questions: Without any access device restrictions, how do customers of free e-services contribute value without paying? What are the nature and dynamics of nonmonetary value contributions by nonpaying customers? With a literature review and interviews with senior executives of free e-service providers, Study 1 presents a comprehensive overview of nonmonetary value contributions in the free e-service sector, including word of mouth, co-production, and network effects. Moreover, adding attention and data to this framework reveals two further aspects that have not been addressed in prior customer value research. By putting the findings in the context of the existing literature on customer value and customer engagement, this study sheds light on the complex processes of value creation in the emerging e-service sector, while advancing marketing and service research in general. Study 2 deepens the findings from the first study; specifically, the focus is on the way that mobile users co-produce content and how this contribution is perceived by recipients in the network. With field data and a scenario experiment, this study demonstrates that recipients evaluate mobile-generated customer reviews fundamentally differently from other reviews. In particular, they discount the helpfulness of mobile reviews, due to their text-specific content and style particularities. The very fact that a review has been identified as written on a mobile device also lowers recipients' perceptions of its value. Recipients use information about the device as a source cue to assess their compatibility with the review contribution channel. If they perceive themselves as compatible with the method used to generate the review (mobile or non-mobile), recipients regard the review as more helpful, because they attribute the review to the quality of the reviewed subject.
If they perceive it as incompatible, though, recipients assume that the review reflects the personal dispositions of the reviewer and discount its helpfulness. Finally, Study 3 takes up the attention and cross-market network effects in a mobile setting; these were two nonmonetary dimensions identified by Study 1. Platform providers should develop measures to draw the attention of nonpaying customers to the offers of their paying customers. One attention-grabbing mobile-specific feature is push notifications to the device, which provide information about temporally or spatially relevant events. More concretely, Study 3 investigates how mobile push notifications remind users of upcoming deadlines in online auctions and therefore improve late bidding success. Late bidding is a prevalent strategy, in which bidders submit their bids at the very end of an online auction. This research uses field data from an online auction platform to demonstrate that late bidders use these mobile push notifications more frequently than do bidders with different bidding patterns. Within the group of late bidders, the chance of winning an auction increases with the use of push notifications. After a mobile push notification, late bidders submit bids not only through mobile devices but also through non-mobile channels. Less experienced late bidders also benefit from push notifications, which increase their chances of success. In summary, this dissertation contributes to an enhanced understanding of mobile consumer behavior by using various methods, including qualitative interviews, field observations, and online experiments. From a theoretical perspective, it contributes to current knowledge about nonmonetary customer value contributions in general and their role in mobile settings in particular. This thesis highlights the role of mobile devices in co-production and perceptions of co-produced content. It also reveals how mobile-specific interactive features, like push notifications, affect late bidding efficiency. Therefore, it specifies the role of mobile devices in cross-market effects, in that they enable the platform to direct the relationship between buyers and sellers. The insights presented herein encourage managers to reevaluate their current practices, think about whether they should label co-produced content as generated through a mobile channel or not, and contemplate whether to develop mobile push notifications as helpful features for users (not as intrusive marketing messages).}, language = {en} } @phdthesis{Alsarem2016, author = {Alsarem, Mazen}, title = {Semantic Snippets via Query-Biased Ranking of Linked Data Entities}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3959}, school = {Universit{\"a}t Passau}, pages = {148 S.}, year = {2016}, abstract = {In our knowledge-driven society, the acquisition and the transfer of knowledge play a principal role. Web search engines are, in a sense, tools for knowledge acquisition and transfer from the web to the user. The search engine results page (SERP) consists mainly of a list of links and snippets (excerpts from the results). The snippets are used to express, as efficiently as possible, the way a web page may be relevant to the query. As an extension of the existing web, the semantic web or "web 3.0" is designed to convert the presently available web of unstructured documents into a web of data consumable by both humans and machines.
The resulting web of data and the current web of documents coexist and interconnect via multiple mechanisms, such as the embedded structured data, or the automatic annotation. In this thesis, we introduce a new interactive artifact for the SERP: the "Semantic Snippet". Semantic Snippets rely on the coexistence of the two webs to facilitate the transfer of knowledge to the user thanks to a semantic contextualization of the user's information need. It makes apparent the relationships between the information need and the most relevant entities present in the web page. The generation of semantic snippets is mainly based on the automatic annotation of LOD entities in web pages. The annotated entities have different levels of importance, usefulness and relevance. Even with state-of-the-art solutions for the automatic annotation of LOD entities within web pages, there is still a lot of noise in the form of erroneous or off-topic annotations. Therefore, we propose a query-biased algorithm (LDRANK) for the ranking of these entities. LDRANK adopts a strategy based on the linear consensual combination of several sources of prior knowledge (any form of contextual knowledge, like the textual descriptions for the nodes of the graph) to modify a PageRank-like algorithm. For generating semantic snippets, we use LDRANK to find the most relevant entities in the web page. Then, we use a supervised learning algorithm to link each selected entity to excerpts from the web page that highlight the relationship between the entity and the original information need. In order to evaluate our semantic snippets, we integrate them in ENsEN (Enhanced Search Engine), a software system that enhances the SERP with semantic snippets. Finally, we use crowdsourcing to evaluate the usefulness and the efficiency of ENsEN.}, subject = {World Wide Web 3.0}, language = {en} } @phdthesis{Tomashevich2016, author = {Tomashevich, Victor}, title = {Fault Tolerance Aspects of Virtual Massive MIMO Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4047}, school = {Universit{\"a}t Passau}, pages = {183}, year = {2016}, abstract = {Employment of a very large number of antennas is seen as the key technology to provide future users with very high data rates. At the same time, the implementation complexity will rise due to the large memories required and the sophisticated signal processing algorithms employed. Continuous technology downscaling allows implementation of such complex digital designs. At the same time, its inherent variability and vulnerability to physical disturbances violate the assumption of perfectly reliable hardware operation. This work considers Unique Word OFDM, which represents an alternative to standard Cyclic Prefix OFDM and provides superior detection quality. The generalization of Unique Word OFDM to a MIMO system is performed, which allows interpretation as a virtual massive MIMO system with only a few physical antennas. Detection methods for the introduced generalization are discussed and their performance is quantified. Because of the large memory size required, linear detection represents the cost- and performance-effective solution. The possible memory errors due to radiation effects or voltage scaling are addressed, and a nonlinear MMSE detection algorithm is proposed. This algorithm keeps track of the memory errors and is able to significantly mitigate their effect on the quality of the estimated data.
Apart from memory issues, reliability of the actual computational hardware which constitutes the receiver is of concern in this work. Our own implementation of the MMSE Sorted Givens Rotations algorithm is subjected to transient fault injection. The impact of faults in various parts of the implemented circuit on the detection performance is quantified. The most vulnerable components of the implemented circuit in terms of reliability are identified. Security is another major focus of this work, since most current implementations include cryptographic devices. Fault-based attacks on such systems are known to be able to extract the secret key in feasible time. The remaining part of this work addresses such fault injection-based malicious attacks. Countermeasures based on a combination of information and hardware redundancy are considered. Recently introduced robust codes target such attacks by providing guaranteed detection capability. The performance of these codes is assessed by application to actual cryptographic and general purpose circuits. The work introduces metrics that help to identify fault locations in the circuit which could escape detection with high probability. These locations are targeted by transistor resizing that renders fault injection infeasible.}, subject = {MIMO}, language = {en} } @phdthesis{Woelfl2018, author = {W{\"o}lfl, Andreas}, title = {Data Management in Certified Avionics Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5758}, school = {Universit{\"a}t Passau}, pages = {xi, 177 Seiten}, year = {2018}, abstract = {Data management is a cornerstone for any kind of information system - including the aerospace and aviation sector. In contrast to conventional domains, software development in the avionics domain must adhere to a legally binding certification process, called qualification. The success of the process depends on compliance with international standards, such as DO-178: Software Considerations in Airborne Systems and Equipment Certification. From a software developer's perspective, challenges arise in terms of methods and tools. Techniques that have a potential impact on the deterministic and predictable execution of avionics software are prohibited. The objective of this thesis is to develop a scalable method to realize data-management for multi-variant avionics software under the restrictions and constraints of the domain. Since avionics software faces very long-term life-cycles (up to 75 years), a particular focus is placed on maintenance and evolution. Based on the insights gained in a semi-structured interview at Airbus Helicopters, industrially established approaches to implement qualified avionics software are first assessed and then compared with respect to their strengths and weaknesses for data-management. As a result, a novel development approach is proposed, combining model-based techniques and product-line technology to derive the source code of highly specific data-management variants, as well as the majority of assets required for the qualification process, from a declarative system specification. In order to demonstrate the practicability of the approach in industry, a framework is presented that is deployed and applied at Airbus Helicopters to generate qualifiable data-management components for the variants of the NH90 helicopter. The maintainability is shown by means of a domain-specific optimization, in which the model-based and generative approach is used to establish safe memory overlays at compile-time.
Key findings reveal a substantially reduced memory footprint (29.1\% in the case of a real-world scenario), as well as a significantly facilitated implementation process, which would not be achievable using conventional methods for software development in the avionics domain.}, subject = {Avionik}, language = {en} } @phdthesis{Jovanovic2015, author = {Jovanovic, Philipp}, title = {Analysis and Design of Symmetric Cryptographic Algorithms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3319}, school = {Universit{\"a}t Passau}, pages = {216}, year = {2015}, abstract = {This doctoral thesis is dedicated to the analysis and the design of symmetric cryptographic algorithms. In the first part of the dissertation, we deal with fault-based attacks on cryptographic circuits, which belong to the field of active implementation attacks and aim to retrieve secret keys stored on such chips. Our main focus lies on the cryptanalytic aspects of those attacks. In particular, we target block ciphers with a lightweight and (often) non-bijective key schedule where the derived subkeys are (almost) independent from each other. An attacker who is able to reconstruct one of the subkeys is thus not necessarily able to directly retrieve other subkeys or even the secret master key by simply reversing the key schedule. We introduce a framework based on differential fault analysis that allows attacking block ciphers that have an arbitrary number of independent subkeys and rely on a substitution-permutation network. These methods are then applied to the lightweight block ciphers LED and PRINCE, and we show in both cases how to recover the secret master key with only a small number of fault injections. Moreover, we investigate approaches that utilize algebraic instead of differential techniques for the fault analysis and discuss advantages and drawbacks. At the end of the first part of the dissertation, we explore fault-based attacks on the block cipher Bel-T, which also has a lightweight key schedule but is based not on a substitution-permutation network but on the so-called Lai-Massey scheme. The framework mentioned above is thus not usable against Bel-T. Nevertheless, we also present techniques for the case of Bel-T that enable full recovery of the secret key in a very efficient way using differential fault analysis. In the second part of the thesis, we focus on authenticated encryption schemes. While regular ciphers only protect privacy of processed data, authenticated encryption schemes also secure its authenticity and integrity. Many of these ciphers are additionally able to protect authenticity and integrity of so-called associated data. This type of data is transmitted unencrypted but nevertheless must be protected from being tampered with during transmission. Authenticated encryption is nowadays the standard technique to protect in-transit data. However, most of the currently deployed schemes have deficits and there are many leverage points for improvements. With NORX we introduce a novel authenticated encryption scheme supporting associated data. This algorithm was designed with high security, efficiency in both hardware and software, simplicity, and robustness against side-channel attacks in mind. In addition to its specification, we present special features, security goals, implementation details, extensive performance measurements and discuss advantages over currently deployed standards.
Finally, we describe our preliminary security analysis, in which we investigate differential and rotational properties of NORX. Particularly noteworthy are the newly developed techniques for differential cryptanalysis of NORX, which exploit the power of SAT and SMT solvers and have the potential to be easily adapted to other encryption schemes as well.}, subject = {Kryptologie}, language = {en} } @phdthesis{Ruppert2017, author = {Ruppert, Julia}, title = {Asymptotic Expansion for the Time Evolution of the Probability Distribution Given by the Brownian Motion on Semialgebraic Sets}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5069}, school = {Universit{\"a}t Passau}, pages = {vi, 174 S.}, year = {2017}, abstract = {In this thesis, we examine whether the probability distribution given by the Brownian Motion on a semialgebraic set is definable in an o-minimal structure, and we establish asymptotic expansions for the time evolution. We study the probability distribution as an example of the occurrence of special parameterized integrals of a globally subanalytic function and the exponential function of a globally subanalytic function. This work is motivated by the work of Comte, Lion and Rolin, which considered parameterized integrals of globally subanalytic functions, of Cluckers and Miller, which examined parameterized integrals of constructible functions, and by the work of Cluckers, Comte, Miller, Rolin and Servi, which treated oscillatory integrals of globally subanalytic functions. In the one-dimensional case, we show that the probability distribution on a family of sets which are definable in an o-minimal structure is definable in the Pfaffian closure. In the two-dimensional case, we investigate asymptotic expansions for the time evolution. As time t approaches zero, we show that the integrals behave like a Puiseux series, which is not necessarily convergent. As t tends towards infinity, we show that the probability distribution is definable in the expansion of the real ordered field by all restricted analytic functions if the semialgebraic set is bounded. For this purpose, we apply results for parameterized integrals of globally subanalytic functions of Lion and Rolin. By establishing the asymptotic expansion of the integrals over an unbounded set, we demonstrate that this expansion has the form of a convergent Puiseux series with negative exponents and their logarithms. Subsequently, we get that the asymptotic expansion is definable in an o-minimal structure. Finally, we study the three-dimensional case and prove that the probability distribution given by the Brownian Motion behaves like a Puiseux series as time t tends towards zero. As t approaches infinity and the semialgebraic set is bounded, it can be ascertained that the probability distribution has the form of a constructible function by results of Cluckers and Miller and therefore it is definable in an o-minimal structure. If the semialgebraic set is unbounded, we establish the asymptotic expansions and prove that the probability distribution given by the Brownian Motion on unbounded sets has an asymptotic expansion of the form of a constructible function.
As a consequence, the asymptotic expansion is definable in an o-minimal structure.}, subject = {Brownsche Bewegung}, language = {en} } @phdthesis{Boshe2017, author = {Boshe, Patricia}, title = {Data Protection Legal Reforms in Africa}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5147}, school = {Universit{\"a}t Passau}, pages = {xxii, 141 Seiten}, year = {2017}, abstract = {This work illustrates reform approaches in Africa using an international comparative legal approach. The research uses Tanzania and Senegal as the primary case studies and France, the United Kingdom and Germany as secondary case studies to illustrate how Europe reformed data protection regimes through the transposition of the EU Data Protection Directive of 1995. Chapter one introduces the work, explaining the forces driving data protection regulations and their basis. Chapter two provides a 'back-to-back' comparison of three countries (France, Germany and the United Kingdom) against the 1995 Data Protection Directive. The idea behind this chapter is to show how legal culture and pre-existing notions of the right to privacy inform data protection legal reforms and determine the nature, contents, context and interpretation of the adopted data protection regime. Ultimately, all these aspects affect the nature and extent of protection offered regardless of the substance of the law adopted. Chapter three gives a narrative explanation of the nature and perceptions of the right to privacy in Africa and how these may affect data protection reforms in Africa. In the same vein, African customary legal systems and practices are explained, providing the reader with a picture of the overall nature of the African systems that make up an African legal culture. The overview of African privacy perceptions and legal systems is necessary for assessing the workability of any data protection regime to be adopted in Africa, which in effect answers the first research question. The chapter draws its rationale from chapter two. By understanding African perceptions of privacy and the African legal culture, one can predict the content and context of the reforms and perhaps how the judiciary might interpret the laws based on local perceptions and supporting systems. An overview of the African data protection architecture, or rather the human rights architecture, is provided in chapter four, ideally to give the reader a picture of the enforcement systems in Africa as a continent. This is followed by chapter five, discussing the two major legal systems in Africa: the civil law and the common law system. The chapter also illustrates the position of the African landscape in relation to legal harmonization/unification. This aspect is considered necessary because data protection regimes are strongly focused on legal harmonization, and hence the question of how well, or to what extent, Africa as a continent can bring about harmonization in law becomes inevitable. Finally, the chapter offers a comparative mirror analysis of the primary case studies, i.e. Senegal and Tanzania. The analysis covers the reform approach taken, the motivation behind the reforms, and the regime erected (through textual analysis of the law and the draft bill, respectively). Chapter six concludes the work by answering the research questions based on the findings and scrutiny from each chapter.
It is concluded that there is a very slim chance for the African States to cling to the cultural defence against the adoption of the Western frameworks for data protection. It is also concluded that, unless Africa becomes an active participant in the global process that informs data protection challenges and regulations, it faces the danger of becoming a puppet of foreign data protection regulation, which may or may not fit African legal culture. The chapter also illustrates how Africa as a continent and the African States individually have taken up data protection reforms blindly. The motivations for the reforms are vaguely stated and unclear. In the majority of legal instruments, the reforms are not taken as a move towards securing and protecting individual rights but rather as a purely political move influenced by economic motivations. The reforms are, to a large extent, a mere gesture of alignment with global data protection regimes and hence lack the political will to enforce the laws.}, subject = {African Privacy}, language = {en} } @phdthesis{Reislhuber2017, author = {Reislhuber, Josef}, title = {Optical Graph Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5159}, school = {Universit{\"a}t Passau}, pages = {270 Seiten}, year = {2017}, abstract = {Graphs are an important model for the representation of structural information between objects. One identifies objects with nodes and binary relations between objects with edges. Graphs have many uses, e. g., in social sciences, life sciences and engineering. There are two primary representations: abstract and visual. The abstract representation is well suited for processing graphs by computers and is given by an adjacency list, an adjacency matrix or any abstract data structure. A visual representation is used by human users who prefer a picture. Common terms are diagram, scheme, plan, or network. The objective of Graph Drawing is to transform a graph into a visual representation called the drawing of a graph. The goal is a "nice" drawing. In this thesis we introduce Optical Graph Recognition. Optical Graph Recognition (OGR) reverses Graph Drawing and transforms a digital image of a graph into an abstract representation. Our approach consists of four phases: Preprocessing, where we determine which pixels of an image are part of the graph; Segmentation, where we recognize the nodes; Topology Recognition, where we detect the edges; and Postprocessing, where we enrich the recognized graph with additional information. We apply established digital image processing methods and make use of the special property that the image contains nodes that are connected by edges. We have focused on developing algorithms that need as few parameters as possible or that calibrate their parameters automatically. Most false recognition results are caused by crossing edges, as this makes tracing the edges difficult and can lead to other recognition errors. We have evaluated hand-drawn and computer-drawn graphs. Our algorithms have a very high recognition rate for computer-drawn graphs, e. g., from a set of 100000 computer-drawn graphs over 90\% were correctly recognized. Most false recognition results were observed for hand-drawn graphs as they can include drawing errors and inaccuracies. For universal usability we have implemented a prototype called OGRup for mobile devices like smartphones or tablet computers.
With our software it is possible to directly take a picture of a graph via a built-in camera, recognize the graph, and then use the result for further processing. Furthermore, in order to gain more insight into the way a person draws a graph by hand, we have conducted a field study.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{dePonteMueller2016, author = {de Ponte M{\"u}ller, Fabian}, title = {Cooperative Relative Positioning for Vehicular Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5411}, school = {Universit{\"a}t Passau}, pages = {vii, 247 Seiten}, year = {2016}, abstract = {Driver assistance systems are an essential building block for increasing road traffic safety. Safety-relevant applications in particular require accurate information about the position and speed of the vehicles in the immediate vicinity in order to anticipate possible hazardous situations, warn the driver, or intervene autonomously. Representative examples of assistance systems that rely on accurate, continuous and reliable relative positioning of other road users are emergency braking assistants, lane change assistants and adaptive cruise control. Modern approaches use environmental sensors such as radar, laser scanners or cameras to estimate the position of neighboring vehicles. Common drawbacks of these sensor systems are their limited detection range and the need for a direct, unobstructed line of sight to the neighboring vehicle. Cooperative solutions based on vehicle-to-vehicle communication can extend the perception range by exchanging position information between road users. This dissertation investigates the feasibility of cooperative relative positioning of road vehicles via vehicle-to-vehicle communication with respect to its accuracy, continuity and robustness. Instead of transmitting the position determined independently in each vehicle, a novel approach exchanges raw GNSS data, such as pseudoranges and Doppler measurements. This has the advantage that correlated errors in both vehicles potentially cancel out. This is analyzed mathematically, modeled in simulation and verified experimentally in this dissertation. To increase reliability and continuity even in "disturbed" environments, the raw GNSS data are fused with inertial sensor measurements from the two vehicles in a Bayesian filter. As part of this dissertation, the sensor fusion approach was validated in a traffic simulator as well as in a GNSS simulator. For the experimental investigation, two test vehicles were equipped with the various sensors and measurement runs were conducted in diverse environments. This work shows that, on highways, the relative position of another vehicle can be estimated continuously with an accuracy of less than one meter. High reliability can be achieved in the longitudinal and lateral directions, and the system exhibits an uncertainty of less than 2.5 m 90\% of the time. In rural environments, the uncertainty of the relative position grows. With the help of the on-board sensors, errors during drives through forests and villages can be compensated correctly.
In st{\"a}dtischen Umgebungen werden die Limitierungen des Systems deutlich. Durch die erschwerte Sch{\"a}tzung der Fahrtrichtung des Ego-Fahrzeugs ist vor Allem die longitudinale Komponente der Relativen Position in st{\"a}dtischen Umgebungen stark verf{\"a}lscht.}, subject = {Fahrerassistenzsystem}, language = {en} } @phdthesis{Hanauer2018, author = {Hanauer, Kathrin}, title = {Linear Orderings of Sparse Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5524}, school = {Universit{\"a}t Passau}, pages = {vii, 282 Seiten}, year = {2018}, abstract = {The Linear Ordering problem consists in finding a total ordering of the vertices of a directed graph such that the number of backward arcs, i.e., arcs whose heads precede their tails in the ordering, is minimized. A minimum set of backward arcs corresponds to an optimal solution to the equivalent Feedback Arc Set problem and forms a minimum Cycle Cover. Linear Ordering and Feedback Arc Set are classic NP-hard optimization problems and have a wide range of applications. Whereas both problems have been studied intensively on dense graphs and tournaments, not much is known about their structure and properties on sparser graphs. There are also only few approximative algorithms that give performance guarantees especially for graphs with bounded vertex degree. This thesis fills this gap in multiple respects: We establish necessary conditions for a linear ordering (and thereby also for a feedback arc set) to be optimal, which provide new and fine-grained insights into the combinatorial structure of the problem. From these, we derive a framework for polynomial-time algorithms that construct linear orderings which adhere to one or more of these conditions. The analysis of the linear orderings produced by these algorithms is especially tailored to graphs with bounded vertex degrees of three and four and improves on previously known upper bounds. Furthermore, the set of necessary conditions is used to implement exact and fast algorithms for the Linear Ordering problem on sparse graphs. In an experimental evaluation, we finally show that the property-enforcing algorithms produce linear orderings that are very close to the optimum and that the exact representative delivers solutions in a timely manner also in practice. As an additional benefit, our results can be applied to the Acyclic Subgraph problem, which is the complementary problem to Feedback Arc Set, and provide insights into the dual problem of Feedback Arc Set, the Arc-Disjoint Cycles problem.}, subject = {Graphentheorie}, language = {en} } @unpublished{Lucke2016, author = {Lucke, Robin Jan}, title = {How Securitization Theory Can Benefit from Psychology Findings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6216}, pages = {20 Seiten}, year = {2016}, abstract = {Securitization Theory has been applied and advanced continuously since the publication of the seminal work "Security - A New Framework for Analysis" by Buzan et al. in 1998. Various extensions, clarifications and definitions have been added over the years. Ontological and epistemological debates as well as debates about the normativity of the concept have taken place, furthering the approach incrementally and adapting it to new empirical cases. This paper aims at contributing to the improvement of the still useful framework in a more general way by amending it with well-established findings from another discipline: Psychology. 
The exploratory article will point out which elements of Securitization Theory might benefit most from incorporating insights from Psychology and in which ways they might change our understanding of the phenomenon. Some well-studied phenomena in the field of (Social) Psychology, it is argued here, play an important role in the construction and perception of security threats and in the audience's acceptance of granting the executive branch extraordinary measures to counter these threats: the availability heuristic, loss aversion and social identity theory are central psychological concepts that can help us to better understand how securitization works, and in which situations securitizing moves have great or little chances of reverberating. The empirical cases of the 9/11 and Paris terror attacks will serve to illustrate the potential of this approach, allowing for variances in key factors, among them: (point in) time, system of government and ideological orientation. As a hypothesis-generating pilot study, the paper will conclude by discussing further research possibilities in the field of Securitization.}, subject = {Securitization}, language = {en} } @phdthesis{Seidler2018, author = {Seidler, Anna-Raissa}, title = {Changing for the Better? Essays on the Role of Institutional Logics and Information System in Organizational Sustainability Transformations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6330}, school = {Universit{\"a}t Passau}, pages = {XXXV, 207 Seiten}, year = {2018}, abstract = {An increasing number of companies report that eco-sustainable initiatives have a positive impact on firms' economic performance and concurrently allow the combination of social and commercial goals by optimizing environmental and economic decisions simultaneously. These initiatives are considered an integral part of organizational sustainability transformations, which are a special case of multilayered, complex organizational change efforts that relate to environmental, organizational, and individual factors. Institutional logics and information systems (IS) have proven to be two important perspectives from which to explore mechanisms and processes central to organizational sustainability transformations. Institutional logics offer a unique perspective to investigate organizational change for sustainability because they provide a new approach to organizational change that incorporates macro structures, culture, and agency to explain how actions are enabled or constrained. This perspective thus allows for insights into the complex and multifaceted interplay of external and internal determinants that govern organizational transformation processes towards sustainability. By providing insights into institutional changes of practice and behaviors, an institutional logic perspective allows for a detailed analysis of organizational transformations. Within these change processes, IS have proven to be an efficient and pervasive tool to leverage sustainability by integrating human and technological factors. Since IS have become a key resource for the encouragement of organizational sustainability transformations, adopting an IS perspective allows for an understanding of mechanisms and processes that enable IS to foster sustainability in organizations.
Thus, this dissertation draws on four studies that investigate an institutional logic perspective as well as an IS perspective to explore organizational sustainability transformations and to facilitate an in-depth understanding of the organizational, human, and technological factors that encourage sustainability in organizational transformations.}, subject = {Informationssystem}, language = {en} } @phdthesis{Kell2019, author = {Kell, Christian}, title = {A Structure-based Attack on the Linearized Braid Group-based Diffie-Hellman Conjugacy Problem in Combination with an Attack using Polynomial Interpolation and the Chinese Remainder Theorem}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6476}, school = {Universit{\"a}t Passau}, pages = {xi, 209 Seiten}, year = {2019}, abstract = {This doctoral thesis is dedicated to improving a linear algebra attack on the so-called braid group-based Diffie-Hellman conjugacy problem (BDHCP). The general procedure of the attack is to transform a BDHCP into the problem of solving several simultaneous matrix equations. A first improvement is achieved by reducing the solution space of the matrix equations to matrices that have a specific structure, which we call here the left braid structure. Using the left braid structure, the number of matrix equations to be solved reduces to one. Based on the left braid structure, we are further able to formulate a structure-based attack on the BDHCP, that is, to transform the matrix equation into a system of linear equations and to exploit the structure of the corresponding extended coefficient matrix, which is induced by the left braid structure of the solution space. The structure-based attack then has an empirically high probability of solving the BDHCP with significantly fewer arithmetic operations than the original attack. A third improvement of the original linear algebra attack is to use an algorithm that combines Gaussian elimination with integer polynomial interpolation and the Chinese remainder theorem (CRT), instead of fast matrix multiplication as suggested by others. The major idea here is to distribute the task of solving a system of linear equations over a giant finite field to several much smaller finite fields. Based on our empirically measured bounds for the degree of the polynomials to be interpolated and the bit size of the coefficients and integers to be recovered via the CRT, we conclude that the run time complexity of the original algorithm improves by a factor of n^8 bit operations in the best case, and still by n^6 in the worst case.}, subject = {Kryptologie}, language = {en} } @phdthesis{Mosch2022, author = {Mosch, Philipp}, title = {Four Essays on Digital Transformation Strategies from the Perspectives of Capital Markets, Incumbents and Start-ups}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11337}, school = {Universit{\"a}t Passau}, pages = {XVII, 229 Seiten}, year = {2022}, abstract = {This dissertation uses four studies to examine the context-contingent strategic factors that are critical to the success of digital transformation strategies from the perspectives of capital markets, incumbents, and start-ups.
It focuses on a better understanding of (1) digital innovations and their quantitative evaluation, (2) power disruptions in digitally servitized supply chains, (3) strategic measures and dynamics in digital B2B platform markets, and (4) strategizing by data-driven start-ups in digitalized business networks.}, language = {en} } @phdthesis{Groesbrink2022, author = {Gr{\"o}sbrink, Carl-Friederich}, title = {Three Essays on Firm Value and Firm Risk and their Relation to IT-Exposure, Corporate Social Responsibility, and Religiosity}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11558}, school = {Universit{\"a}t Passau}, pages = {IV, 236 Seiten}, year = {2022}, abstract = {1. IT-Exposure and Firm Value: We analyze the joint influence of a firm's information technology (IT)-Exposure and investment behavior on firm value. Estimating a firm's (partial) IT-Exposure allows for distinguishing between firms whose business models are challenged by IT above and below the market average. Hence, we estimate the annual IT-Exposure of a firm using a 3-factor Fama-French model extended by an IT-proxy. Subsequently, we analyze the relationship with Tobin's Q in a panel data context, accounting for the relationship between IT-Exposure and investments proxied by R\&D as well as CapEx. We use more than 48,000 firm-year observations for firms in the Russell 3000 Index covering the period 1990 to 2018. Although IT-Exposure has a negative impact on firm value, this discount can be overcompensated by up to 2.1 times by sufficient investments through R\&D and CapEx, giving a firm with an average Tobin's Q a premium of 14.8\% to 19.2\%, while controlling for endogeneity. 2. Corporate Social Responsibility, Risk, and Firm Value: An Unconditional Quantile Regression Approach: This paper examines the impact of corporate social responsibility (CSR) on firm risk, comprising total risk, idiosyncratic risk, and systematic risk, as well as firm value. We focus on analyzing the interrelationships along the entire distribution of the dependent variables, thus estimating an unconditional quantile regression (UQR). The analysis is based on CSR scores from Refinitiv and MSCI, using up to 12,013 firm-year observations over the period 2002 to 2019 for all U.S. companies listed on NYSE, NASDAQ, and AMEX. UQR reveals strongly heterogeneous effects along the unconditional quantiles of the dependent variables, which are reflected in sign changes and in variations in magnitude and significance. For CSR we find a risk-reducing as well as value-enhancing effect. When applying fixed effects OLS, we can only partly confirm the risk-reducing and value-enhancing effect of CSR shown in the literature. 3. Heterogeneous Effects of Religiosity on Firm Risk and Firm Value: An Unconditional Quantile Regression Approach: This paper examines the impact of religiosity on firm risk, comprising total risk, idiosyncratic risk, and systematic risk, as well as firm value. We focus on analyzing the interrelationships along the entire distribution of the dependent variables, thus estimating an unconditional quantile regression (UQR). The analysis is based on all U.S. companies listed on NYSE, NASDAQ, and AMEX for the period from 1980 through 2020. UQR reveals strongly heterogeneous effects along the unconditional quantiles of the dependent variables, which are reflected in sign changes and in variations in magnitude and significance. Overall, the risk-reducing effect of religiosity is more pronounced in the higher quantiles of the distribution.
We further observe a value-reducing as well as value-enhancing religiosity effect. When applying fixed effects OLS, we can confirm the risk-reducing and non-existing value effect of religiosity shown in the literature. The robustness of our results is underpinned by a battery of additional tests.}, language = {en} } @phdthesis{Seruset2023, author = {Seruset, Marco}, title = {Three Essays on Price Discovery, Stock Liquidity, and Crash Risk}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-11563}, school = {Universit{\"a}t Passau}, pages = {IV, 227 Seiten}, year = {2023}, abstract = {Abstract 1: This paper investigates whether market quality, uncertainty, investor sentiment and attention, and macroeconomic news affect bitcoin price discovery in spot and futures markets. Over the period December 2017 - March 2019, we find significant time variation in the contribution to price discovery of the two markets. Increases in price discovery are mainly driven by relative trading costs and volume, and by uncertainty to a lesser extent. Additionally, medium-sized trades contain most information in terms of price discovery. Finally, higher news-based bitcoin sentiment increases the informational role of the futures market, while attention and macroeconomic news have no impact on price discovery. Abstract 2: We investigate whether local religious norms affect stock liquidity for U.S. listed companies. Over the period 1997-2020, we find that firms located in more religious areas have higher liquidity, as reflected by lower bid-ask spreads. This result persists after the inclusion of additional controls, such as governance metrics, and further sensitivity and endogeneity analyses. Subsample tests indicate that the impact of religiosity on stock liquidity is particularly evident for firms operating in a poor information environment. We further show that firms located in more religious areas have lower price impact of trades and smaller probability of information-based trading. Overall, our findings are consistent with the notion that religiosity, with its antimanipulative ethos, probably fosters trust in corporate actions and information flows, especially when little is known about the firm. Finally, we conjecture an indirect firm value implication of religiosity through the channel of stock liquidity. Abstract 3: This study shows that higher physical distance to institutional shareholders is associated with higher stock price crash risk. Since monitoring costs increase with distance, the results are consistent with the monitoring theory of local institutional investors. Cross-sectional analyses show that the effect of proximity on crash risk is more pronounced for firms with weak internal governance structures. The significant relation between distance and crash risk still holds under the implementation of the Sarbanes-Oxley Act, however, to a lower extent. Also, the existence of the channel of bad news hoarding is confirmed. Finally, I show that there is heterogeneity in distance-induced monitoring activities of different types of institutions.}, language = {en} } @phdthesis{Lehner2016, author = {Lehner, Sabrina}, title = {The Asymptotic Behaviour of the Riemann Mapping Function at Analytic Cusps}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3587}, school = {Universit{\"a}t Passau}, pages = {97}, year = {2016}, abstract = {The well-known Riemann Mapping Theorem states the existence of a conformal map of a simply connected proper domain of the complex plane onto the upper half plane. 
One of the main topics in geometric function theory is to investigate the behaviour of the mapping functions at the boundary of such domains. In this work, we always assume that a piecewise analytic boundary is given. Here, we have to distinguish between regular and singular boundary points. While the asymptotic behaviour at regular boundary points can be investigated by using the Schwarz Reflection Principle at analytic arcs, the situation for singular boundary points is far more complicated. In the latter scenario, two cases have to be differentiated: analytic corners and analytic cusps. The first part of the thesis deals with the asymptotic behaviour at analytic corners where the opening angle is greater than 0. The results of Lichtenstein and Warschawski on the asymptotic behaviour of the Riemann map and its derivatives at an analytic corner are presented, as well as the much stronger result of Lehman that the mapping function can be developed into a certain generalised power series, which in turn enables us to examine the o-minimal content of the Riemann Mapping Theorem. To obtain a similar statement for domains with analytic cusps, it is necessary to investigate the asymptotic behaviour of a Riemann map at the cusp and, based on this result, to determine the asymptotic power series expansion. Therefore, the aim of the second part of this work is to investigate the asymptotic behaviour of a Riemann map at an analytic cusp. A simply connected domain has an analytic cusp if the boundary is locally given by two analytic arcs such that the interior angle vanishes. Besides the asymptotic behaviour of the mapping function, the behaviour of its derivatives, its inverse, and the derivatives of the inverse are analysed. Finally, we present a conjecture on the asymptotic power series expansion of the mapping function at an analytic cusp.}, subject = {Geometrische Funktionentheorie}, language = {en} } @phdthesis{Petit2017, author = {Petit, Albin}, title = {Introducing Privacy in Current Web Search Engines}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4652}, school = {Universit{\"a}t Passau}, pages = {XVI, 153 S.}, year = {2017}, abstract = {During the last few years, the technological progress in collecting, storing and processing a large quantity of data for a reasonable cost has raised serious privacy issues. Privacy concerns many areas, but is especially important in frequently used services like search engines (e.g., Google, Bing, Yahoo!). These services allow users to retrieve relevant content on the Internet by exploiting their personal data. In this context, developing solutions to enable users to use these services in a privacy-preserving way is becoming increasingly important. In this thesis, we introduce SimAttack, an attack against existing protection mechanisms for querying search engines in a privacy-preserving way. This attack aims at retrieving the original user query. We show with this attack that three representative state-of-the-art solutions do not protect user privacy in a satisfactory manner. We therefore develop PEAS, a new protection mechanism that better protects user privacy. This solution leverages two types of protection: hiding the user identity (with a succession of two nodes) and masking users' queries (by combining them with several fake queries). To generate realistic fake queries, PEAS exploits previous queries sent by the users in the system. Finally, we present mechanisms to identify sensitive queries.
Our goal is to adapt existing protection mechanisms to protect sensitive queries only, and thus save user resources (e.g., CPU, RAM). We design two modules to identify sensitive queries. By deploying these modules on real protection mechanisms, we establish empirically that they dramatically improve the performance of the protection mechanisms.}, subject = {Suchmaschine}, language = {en} } @phdthesis{Kriegl2015, author = {Kriegl, Markus}, title = {Generalizations and Applications of Border Bases}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3628}, school = {Universit{\"a}t Passau}, year = {2015}, abstract = {This doctoral thesis is devoted to generalizing border bases to the module setting and applying them in various ways. First, we generalize the theory of border bases to finitely generated modules over a polynomial ring. We characterize these generalized border bases and show that we can compute them. As an application, we are able to characterize subideal border bases in various new ways and give a new algorithm for their computation. Moreover, we prove Schreyer's Theorem for border bases of submodules of free modules of finite rank over a polynomial ring. In the second part of this thesis, we study the effect of homogenization on border bases of zero-dimensional ideals. This yields the new concept of projective border bases of homogeneous one-dimensional ideals. We show that there is a one-to-one correspondence between projective border bases and zero-dimensional closed subschemes of weighted projective spaces that have no point on the hyperplane at infinity. Applying that correspondence, we can characterize uniform zero-dimensional closed subschemes of weighted projective spaces that have rational support over the base field in various ways. Finally, we introduce projective border basis schemes as specific subschemes of border basis schemes. We show that these projective border basis schemes parametrize all zero-dimensional closed subschemes of a weighted projective space whose defining ideals possess a projective border basis. Assuming that the base field is algebraically closed, we are able to prove that the set of all closed points of a projective border basis scheme that correspond to a uniform subscheme is a constructible set with respect to the Zariski topology.}, language = {en} } @phdthesis{Liebig2015, author = {Liebig, J{\"o}rg}, title = {Analysis and Transformation of Configurable Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2996}, school = {Universit{\"a}t Passau}, pages = {160}, year = {2015}, abstract = {Static analysis tools and transformation engines for source code belong to the standard equipment of a software developer. Their use simplifies a developer's everyday work of maintaining and evolving software systems significantly and, hence, accounts for much of a developer's programming efficiency and programming productivity. This is also beneficial from a financial point of view, as programming errors are detected and avoided early in the development process; thus, the use of static analysis tools reduces the overall software-development costs considerably. In practice, software systems are often developed as configurable systems to account for different requirements of application scenarios and use cases. To implement configurable systems, developers often use compile-time implementation techniques, such as preprocessors, by using \#ifdef directives.
Configuration options control the inclusion and exclusion of \#ifdef-annotated source code, and their selection/deselection serves as input for generating tailor-made system variants on demand. Existing configurable systems, such as the Linux kernel, often provide thousands of configuration options, forming a huge configuration space with billions of system variants. Unfortunately, existing tool support cannot handle the myriads of system variants that can typically be derived from a configurable system. Analysis and transformation tools are not prepared for variability in source code, and, hence, they may process it incorrectly, resulting in incomplete and often broken tool support. We challenge the way configurable systems are analyzed and transformed by introducing variability-aware static analysis tools and a variability-aware transformation engine for configurable systems' development. The main idea of such tool support is to exploit commonalities between system variants, reducing the effort of analyzing and transforming a configurable system. In particular, we develop novel analysis approaches for analyzing the myriads of system variants and compare them to state-of-the-art analysis approaches (namely sampling). The comparison shows that variability-aware analysis is complete (with respect to covering the whole configuration space), efficient (it outperforms some of the sampling heuristics), and scalable even to large software systems. We demonstrate that variability-aware analysis is practical even for non-trivial case studies, such as the Linux kernel. On top of variability-aware analysis, we develop a transformation engine for C, which respects variability induced by the preprocessor. The engine provides three common refactorings (rename identifier, extract function, and inline function) and overcomes shortcomings (completeness, use of heuristics, and scalability issues) of existing engines, while still being semantics-preserving with respect to all variants and being fast, providing an instantaneous user experience. To validate semantics preservation, we extend a standard testing approach for refactoring engines with variability and show in real-world case studies the effectiveness and scalability of our engine. In the end, our analysis and transformation techniques show that configurable systems can be analyzed and transformed efficiently (even for large-scale systems), providing the same guarantees for configurable systems as for standard systems in terms of detecting and avoiding programming errors.}, subject = {Refactoring}, language = {en} } @phdthesis{Braun2015, author = {Braun, Bastian}, title = {Web-based Secure Application Control}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3048}, school = {Universit{\"a}t Passau}, year = {2015}, abstract = {The World Wide Web today serves as a distributed application platform. Its origins, however, go back to a simple delivery network for static hypertexts. The legacy from these days can still be observed in the communication protocol used by increasingly sophisticated clients and applications. This thesis identifies the actual security requirements of modern web applications and shows that HTTP does not meet them: user and application authentication, message integrity and confidentiality, control-flow integrity, and application-to-application authorization. We explore the other protocols in the web stack and work out why they cannot fill the gap.
Our analysis shows that the underlying problem is the connectionless property of HTTP. However, history shows that a fresh start with web communication is far from realistic. As a consequence, we come up with approaches that contribute to meeting the identified requirements. We first present impersonation attack vectors that begin before the actual user authentication, i.e. when secure web interaction and authentication seem to be unnecessary. Session fixation attacks exploit a responsibility mismatch between the web developer and the used web application framework. We describe and compare three countermeasures on different implementation levels: on the source code level, on the framework level, and on the network level as a reverse proxy. Then, we explain how the authentication credentials that are transmitted for the user login, i.e. the password, and for session tracking, i.e. the session cookie, can be complemented by browser-stored and user-based secrets, respectively. This way, an attacker cannot hijack user accounts merely by phishing the user's password, because an additional browser-based secret is required for login. Also, the class of well-known session hijacking attacks is mitigated because a secret known only to the user must be provided in order to perform critical actions. In the next step, we explore alternative approaches to static authentication credentials. Our approach implements a trusted UI and a mutually authenticated session using signatures as a means to authenticate requests. This way, it establishes a trusted path between the user and the web application without exchanging reusable authentication credentials. As a downside, this approach requires support on the client side and on the server side in order to provide maximum protection. Another approach avoids client-side support but cannot implement a trusted UI and is thus susceptible to phishing and clickjacking attacks. Our approaches described so far increase the security level of all web communication at all times. This is why we investigate adaptive security policies that fit the actual risk instead of permanently restricting all kinds of communication, including non-critical requests. We develop a smart browser extension that detects when the user is authenticated on a website, meaning that she can be impersonated because all requests carry her identity proof. Non-critical communication, however, is released from restrictions to enable all intended web features. Finally, we focus on attacks targeting a web application's control-flow integrity. We explain them thoroughly, check whether current web application frameworks provide means for protection, and implement two approaches to protect web applications: The first approach is an extension for a web application framework and provides protection based on its configuration by checking all requests for policy conformity. The second approach generates its own policies ad hoc based on the observed web traffic, assuming that regular users only click links and buttons and fill in forms but do not craft requests to protected resources.}, subject = {Computersicherheit}, language = {en} } @phdthesis{Sipal2017, author = {Sipal, Bilge}, title = {Border Basis Schemes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4702}, school = {Universit{\"a}t Passau}, year = {2017}, abstract = {The basic idea of border basis theory is to describe a zero-dimensional ring P/I by an order ideal of terms whose residue classes form a K-vector space basis of P/I.
The O-border basis scheme is a scheme that parametrizes all zero-dimensional ideals that have an O-border basis. In general, the O-border basis scheme is not an affine space. However, in [Huib09] it is proved that if an order ideal with "d" elements is defined in a two-dimensional polynomial ring and is of certain special shapes, then the O-border basis scheme is isomorphic to the affine space of dimension 2d. This thesis is dedicated to finding a more general condition for an O-border basis scheme to be isomorphic to an affine space of dimension "nd", one that is independent of the shape of the order ideal with "d" elements, where "n" is the dimension of the polynomial ring in which the order ideal is defined. We accomplish this in six chapters. In Chapters 2 and 3 we develop the concepts and properties of border basis schemes. In Chapter 4 we transfer the smoothness criterion (see [Huib05]) for the point (0,...,0) in a Hilbert scheme of points to the monomial point of the border basis scheme by employing the tools from border basis theory. In Chapter 5 we explain the trace and Jacobi identity syzygies of the defining equations of an O-border basis scheme and characterize them by the arrow grading. In Chapter 6 we give a criterion for the isomorphism between the 2d-dimensional affine space and the O-border basis scheme, using the results from Chapters 3 and 4. The techniques from the other chapters are applied in Chapter 6.1 to segment border basis schemes and in Chapter 6.2 to O-border basis schemes for which O is of the sawtooth form.}, subject = {Polynomring}, language = {en} } @phdthesis{Fischer2017, author = {Fischer, Andreas}, title = {An Evaluation Methodology for Virtual Network Embedding}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4793}, school = {Universit{\"a}t Passau}, pages = {XVII, 179 S.}, year = {2017}, abstract = {The increasing scale and complexity of computer networks imposes a need for highly flexible management mechanisms. The concept of network virtualization promises to provide this flexibility. Multiple arbitrary virtual networks can be constructed on top of a single substrate network. This allows network operators and service providers to tailor their network topologies to the specific needs of any offered service. However, the assignment of resources proves to be a problem. Each newly defined virtual network must be realized by assigning appropriate physical resources. For a given set of virtual networks, two questions arise: Can all virtual networks be accommodated in the given substrate network? And how should the respective resources be assigned? The underlying problem is commonly known as the Virtual Network Embedding problem. A multitude of algorithms has already been proposed, aiming to provide solutions to that problem under various constraints. For the evaluation of these algorithms, an empirical approach is typically adopted, using artificially created random problem instances. However, due to complex effects of random problem generation, the obtained results can be hard to interpret correctly. A structured evaluation methodology that can avoid these effects is currently missing. This thesis aims to fill that gap. Based on a thorough understanding of the problem itself, the effects of random problem generation are highlighted. A new simulation architecture is defined, increasing the flexibility for experimentation with embedding algorithms.
A novel way of generating embedding problems is presented that mitigates the effects of conventional problem generation approaches. An evaluation using these newly defined concepts demonstrates how new insights into algorithm behavior can be gained. The proposed concepts support experimenters in obtaining more precise and tangible evaluation data for embedding algorithms.}, subject = {Virtuelles Netz}, language = {en} } @phdthesis{Loewe2017, author = {L{\"o}we, Stefan}, title = {Effective Approaches to Abstraction Refinement for Automatic Software Verification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4815}, school = {Universit{\"a}t Passau}, pages = {XXI, 155 S.}, year = {2017}, abstract = {This thesis presents various techniques that aim at enabling more effective and more efficient approaches for automatic software verification. After a brief motivation of why automatic software verification is becoming ever more relevant, we continue by detailing the formalism used in this thesis and the concepts it is built on. We then describe the design and implementation of the value analysis, an analysis for automatic software verification that tracks state information concretely. From a thorough evaluation based on well over 4 000 verification tasks from the latest edition of the International Competition on Software Verification (SV-COMP), we learn that this plain value analysis leads to an efficient verification process for many verification tasks, but, at the same time, fails to solve other verification tasks due to state-space explosion. From this insight we infer that some form of abstraction technique must be added to the value analysis in order to also allow the successful verification of large and complex verification tasks. As a solution, we propose to incorporate counterexample-guided abstraction refinement (CEGAR) and interpolation into the value domain. To this end, we design a novel interpolation procedure that extracts interpolants for the value domain from infeasible counterexamples, allowing us to form a precision strong enough to exclude these infeasible counterexamples and to make progress in the CEGAR loop. We then describe several optimizations and extensions to these concepts, such that the value analysis with CEGAR becomes competitive for automatic software verification. As the next step, we combine the value analysis with CEGAR with a predicate analysis, to obtain a more precise and efficient composite analysis based on CEGAR. This composite analysis is indeed on a par with the world's leading software verification tools, as witnessed by the results of SV-COMP'13, where this approach achieved 2nd place in the overall ranking. After having available competitive CEGAR-based analyses for the value domain, the predicate domain, and the combination thereof, we then turn our attention to techniques that aim to make all these CEGAR-based approaches more successful. Our first novel idea in this regard is based on the concept of infeasible sliced prefixes, which allow the computation of different precisions from a single infeasible counterexample. This adds choice to the CEGAR loop, whereas without this enhancement, no choice of a specific precision, i.e., a specific refinement, is possible. In our evaluation we show, for both the value analysis and the predicate analysis, that choosing different infeasible sliced prefixes during the refinement step leads to major differences in verification effectiveness and verification efficiency.
Extending the concept of infeasible sliced prefixes, we define several heuristics in order to precisely select a single refinement from a set of possible refinements. We make this new concept, which we refer to as guided refinement selection, available to both the value and the predicate analysis, and in a large-scale evaluation we try to answer the question of which selection technique leads to well-suited abstractions and, thus, to a more effective verification process. Additionally, we present the idea of inter-analysis refinement selection, where the refinement component of a composite analysis may decide which of its component analyses is best to be refined, and in yet another evaluation we highlight the positive effects of this technique. Finally, we present the results of SV-COMP'16, where the verifier we contributed, which is based on the concepts and ideas presented in this thesis, achieved 1st place in the category DeviceDriversLinux64.}, subject = {Programmverifikation}, language = {en} } @phdthesis{Nagel2014, author = {Nagel, Volker}, title = {Three Essays on Moral Self-Regulation of Honesty and Impression Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2896}, school = {Universit{\"a}t Passau}, pages = {69}, year = {2014}, abstract = {In Study 1, an introduction to the research on moral self-regulation is provided, along with an explanation of the two manifestations of moral self-regulation: moral licensing and moral cleansing. At the core of the first study is an experiment which was designed to identify moral licensing and cleansing in the domain of honesty. The experiment merges relevant studies from social psychology and experimental economics. It assesses the question of whether moral self-regulation exists within the domain of honesty or, more precisely, whether truths and lies are told in such a way as to balance each other out. After manipulating participants' moral balances (either positively or negatively), rates of truth-telling are compared to a neutral baseline scenario. Since neither moral licensing nor moral cleansing is observed, the results provide no support for the initial hypothesis that moral self-regulation exists within the domain of honesty. Study 2 builds on these results and discusses possible reasons for the absence of moral self-regulation. The research on moral hypocrisy and on self-concept maintenance is presented and discussed as possible explanations. In order to shed more light on participants' behavior, a coding procedure is presented that was used on the dataset from Study 1. This approach makes it possible to quantify participants' handwritten stories that resulted from the moral manipulation in Study 1 and to gain more insight into how truth-telling and lying affect the moral balance. By analyzing (dis)honesty on a more detailed level, the results show that participants tend to act consistently with what they revealed about themselves in their stories. Study 3 links together aspects of moral self-regulation, moral hypocrisy and impression management. The "looting game" is presented, which lets participants loot money from a charity box while being subject to altruistic punishment from observers. For their punishment decision, observers are provided with a history of participants' past actions. This design allows us to assess how misconduct, punishment, and the creation of a favorable impression interact and ultimately impact profits. The results indicate that moral cleansing, and not the desire to trick observers, is the reason for manipulation.
Participants who loot money from the charity box do not expect to receive less punishment; rather, they simply want to present a more favorable picture of themselves. On the other hand, observers fully account for the possibility of manipulation and tend to disregard a manipulated history. The looting game therefore calls into question the hypothesis that impressions are managed and manipulated to increase profits.}, subject = {Selbstevaluation}, language = {en} } @misc{Krah2015, author = {Krah, Hans}, title = {German "volkst{\"u}mliche Musik" of the Early Nineties and "Modern Society": Strategies of De-Individualisation as a Contribution to a Collective Re-Organisation}, doi = {10.15475/skms.2015.1.2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-2968}, pages = {43-64}, year = {2015}, abstract = {"Volkst{\"u}mliche Musik" is a significant phenomenon (at least) of the early 1990s. The analysis of this phenomenon can paradigmatically represent the discursive practices and sub-thought systems of large parts of the population of the Federal Republic of Germany at this time. "Volkst{\"u}mliche Musik" depends on the political situation and indirectly deals with the needs and problems of individuals which result from such a situation and which lie in the deep structure of their mentalities. It thus takes on a cultural function.}, language = {en} } @misc{Rockenberger2016, author = {Rockenberger, Annika}, title = {Materiality and Meaning in Literary Studies}, doi = {10.15475/skms.2016.1.2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-4085}, pages = {39-60}, year = {2016}, abstract = {Recently, non- and paraverbal properties of literary texts at the level of documentary inscription (i.e. materiality), seen individually or as aspects of a so-called 'material text', that is, the union of materiality and verbal sign systems, have received an increasing amount of attention in textual scholarship and literary studies. Here, 'meaning' or at least 'semantic potentiality' has been attributed to both or either, and physical features of texts have been construed as hitherto neglected aspects of literary communication and literary aesthetics. In what follows, I will present a brief conspectus of the current debate and then try to provide a reconstruction of underlying ideas by answering the question 'how does a material text mean?'. Taking a descriptive meta-perspective and focusing on conceptual and methodological clarification, I try to clarify the somewhat blurry expressions 'meaning', 'to mean' and the like by translating them into the distinct terminology of semiotics and transferring them into the theoretical framework of an instrumentalist notion of signs.}, subject = {Behinderter Mensch}, language = {en} } @phdthesis{Kurz2019, author = {Kurz, Thomas}, title = {Adapting Semantic Web Information Retrieval to Multimedia}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8276}, school = {Universit{\"a}t Passau}, pages = {xvi, 206 Seiten}, year = {2019}, abstract = {The amount of audio, video, and image data on the Web is growing immensely, which leads to data management problems due to the hidden character of multimedia data. Therefore, the interlinking of semantic concepts and media data, with the aim of bridging the gap between the Internet of documents and the Web of Data, has become common practice.
However, the value of connecting media to its semantic metadata is limited due to a lack of access methods and the absence of an adapted query language specialized for media assets and fragments. This thesis aims to extend the standard query language for the Semantic Web (SPARQL) with media-specific concepts and functions. The main contributions of the work are an exhaustive survey of multimedia query languages from the last three decades, the SPARQL extension specification itself, and an approach for the efficient evaluation of the new query concepts. Additionally, I elaborate and evaluate a metadata-based media fragment similarity approach, which provides a basis for further language extensions.}, subject = {Semantic Web}, language = {en} } @phdthesis{Wimbauer2020, author = {Wimbauer, Lisa Kristina}, title = {Innovate with Crowds. Co-Creation and Idea Evaluation in Internal and External Crowdsourcing.}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8359}, school = {Universit{\"a}t Passau}, pages = {VIII, 208 Seiten}, year = {2020}, abstract = {Crowdsourcing seems to be a promising approach for organizations to overcome challenges widely discussed in innovation and organizational research. However, the extent to which an organization can leverage the benefits from crowdsourcing is contingent on which type of crowd is addressed and how crowds are used. Based on unique data from crowdsourcing contests, the dissertation provides insights into how to innovate with internal and external crowds in order to utilize their potential for co-creation and idea evaluation.}, language = {en} } @phdthesis{Planche2020, author = {Planche, Benjamin}, title = {Bridging the Realism Gap for CAD-Based Visual Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8361}, school = {Universit{\"a}t Passau}, pages = {xx, 152}, year = {2020}, abstract = {Computer vision aims at developing algorithms to extract high-level information from images and videos. In the industry, for instance, such algorithms are applied to guide manufacturing robots, to visually monitor plants, or to assist human operators in recognizing specific components. Recent progress in computer vision has been dominated by deep artificial neural networks, i.e., machine learning methods simulating the way that information flows in our biological brains, and the way that our neural networks adapt and learn from experience. For these methods to learn how to accurately perform complex visual tasks, large amounts of annotated images are needed. Collecting and labeling such domain-relevant training datasets is, however, a tedious—sometimes impossible—task. Therefore, it has become common practice to leverage pre-available three-dimensional (3D) models instead, to generate synthetic images for the recognition algorithms to be trained on. However, methods optimized over synthetic data usually suffer a significant performance drop when applied to real target images. This is due to the realism gap, i.e., the discrepancies between synthetic and real images (in terms of noise, clutter, etc.). In my work, three main directions were explored to bridge this gap. First, an innovative end-to-end framework is proposed to render realistic depth images from 3D models, as a growing number of solutions (especially in the industry) are utilizing low-cost depth cameras (e.g., Microsoft Kinect and Intel RealSense) for recognition tasks.
Based on a thorough study of these devices and the different types of noise impairing them, the proposed framework simulates their inner mechanisms, comprehensively modeling vital factors such as sensor noise, material reflectance, surface geometry, etc. Able to simulate a wide panel of depth sensors and to quickly generate large datasets, this framework is used to train algorithms for various recognition tasks, consistently and significantly enhancing their performance compared to other state-of-the-art simulation tools. In some cases, however, relevant 2D or 3D object representations to generate synthetic samples are not available. Considering this different case of data scarcity, a solution is then proposed to incrementally build a representation of visual scenes from partial observations. Provided observations are localized from one to another based on their content and registered in a global memory with spatial properties. Simultaneously, this memory can be queried to render novel views of the scene. Furthermore, unobserved regions can be hallucinated in memory, in consistence with previous observations, hallucinations, and global priors. The efficacy of the proposed mnemonic and generative system, trainable end-to-end, is demonstrated on various 2D and 3D use-cases. Finally, an advanced convolutional neural network pipeline is introduced, tackling the realism gap from a novel angle. While most methods addressing this problem focus on bringing synthetic samples—or the knowledge acquired from them—closer to the real target domain, the proposed solution performs the opposite process, mapping unseen target images into controlled synthetic domains. The pre-processed samples can then be handed to downstream recognition methods, themselves purely trained on similar synthetic data, to greatly improve their accuracy. For each approach, a variety of qualitative and quantitative studies are detailed, providing successful comparisons to state-of-the-art methods. By proposing solutions to bridge the realism gap from either side, as well as a pipeline to improve the acquisition and generation of new visual content, this thesis provides a unique perspective on the challenges of data scarcity when building robust recognition systems.}, language = {en} } @phdthesis{Fink2019, author = {Fink, Thomas}, title = {Curvature Detection by Integral Transforms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7684}, school = {Universit{\"a}t Passau}, pages = {viii, 194 Seiten}, year = {2019}, abstract = {In various fields of image analysis, determining the precise geometry of occurrent edges, e.g. the contour of an object, is a crucial task. Especially the curvature of an edge is of great practical relevance. In this thesis, we develop different methods to detect a variety of edge features, among them the curvature. We first examine the properties of the parabolic Radon transform and show that it can be used to detect the edge curvature, as the smoothness of the parabolic Radon transform changes when the parabola is tangential to an edge and also, when additionally the curvature of the parabola coincides with the edge curvature. By subsequently introducing a parabolic Fourier transform and establishing a precise relation between the smoothness of a certain class of functions and the decay of the Fourier transform, we show that the smoothness result for the parabolic Radon transform can be translated into a change of the decay rate of the parabolic Fourier transform. 
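As a purely illustrative sketch (the parametrization and normalization used in the thesis may differ), a parabolic Radon transform of this kind can be written as $Rf(t,s,q) = \int_{\mathbb{R}} f(x,\, t + sx + qx^2)\, dx$, i.e. an integral of $f$ along the parabola with offset $t$, slope $s$, and curvature parameter $q$; heuristically, the smoothness of $Rf$ in $(t,s,q)$ changes where the parabola is tangential to an edge of $f$, and again where $q$ additionally matches the edge curvature.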
Furthermore, we introduce an extension of the continuous shearlet transform which additionally utilizes shears of higher order. This extension, called the Taylorlet transform, allows for a detection of the position and orientation, as well as the curvature and other higher order geometric information of edges. We introduce novel vanishing moment conditions which enable a more robust detection of the geometric edge features and examine two different constructions for Taylorlets. Lastly, we translate the results of the Taylorlet transform in R^2 into R^3 and thereby allow for the analysis of the geometry of object surfaces.}, subject = {Kr{\"u}mmung}, language = {en} } @phdthesis{Lucas2019, author = {Lucas, Yvan}, title = {Credit card fraud detection using machine learning with integration of contextual knowledge}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7713}, school = {Universit{\"a}t Passau}, pages = {xxi, 125 Seiten}, year = {2019}, abstract = {We have proposed a strategy for the creation of attributes based on hidden Markov models (HMM) characterizing the transaction from different points of view. This strategy makes it possible to integrate a broad spectrum of sequential information into the attributes of transactions. In fact, we model the authentic and fraudulent behavior of merchants and card holders according to two univariate characteristics: the date and the amount of transactions. In addition, attributes based on HMMs are created in a supervised manner, thereby reducing the need for expert knowledge for the creation of the fraud detection system. Ultimately, our HMM-based multi-perspective approach allows automated data pre-processing to model time correlations to complement and eventually replace transaction aggregation strategies to improve detection efficiency. Experiments carried out on a large set of credit card transaction data from the real world (46 million transactions carried out by Belgian card holders between March and May 2015) have shown that the strategy proposed for data preprocessing based on HMM can detect more fraudulent transactions when combined with the strategy of preprocessing reference data based on expert knowledge for the detection of credit card fraud.}, subject = {Kreditkartenmissbrauch}, language = {en} } @phdthesis{Ansah2021, author = {Ansah, Frimpong}, title = {Performance and optimization technologies for software defined industrial networks}, publisher = {Universit{\"a}t Passau}, address = {Passau}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-9002}, school = {Universit{\"a}t Passau}, pages = {xxi, 173 Seiten}, year = {2021}, abstract = {The concept of programmable networks is radically changing the way communication infrastructures are designed, integrated, and operated. Currently, the topic is spearheaded by concepts such as software-defined networking, forwarding and control element separation, and network function virtualization. Notably, software-defined networking has attracted significant attention in telecommunication and data centers and thus already in some production-grade networks. Despite the prevalence of software-defined networking in these domains, industrial networks are yet to see its benefits to encourage adoption. However, the misconceptions around the concept itself, the role of virtualization, and algorithms pose a significant obstacle. 
Furthermore, the desire to accommodate new services in the automation industry results in constantly increasing complexity of industrial networks. This is compounded by the requirement to provide stringent deterministic service guarantees for characteristically different applications, which poses a significant challenge for management, configuration, and maintenance, as existing solutions are architecturally inflexible. Therefore, the first contribution of this thesis addresses the misconceptions around software-defined networking by providing a comparative analysis of programmable network concepts, detailing how software-defined networking compares with other concepts and how its principles can be leveraged to evolve industrial networks. Armed with the fundamental principles of programmable networks, the second contribution identifies virtualization technologies and proposes novel algorithms to provide varied quality of service guarantees on converged time-sensitive Ethernet networks using software-defined networking concepts. Finally, a performance analysis of a software-defined hybrid deployment solution for the control and management of time-sensitive Ethernet networks that integrates the proposed algorithms is presented as an industrial use case, enabling industrial operators to harness the full potential of time-sensitive networks.}, language = {en} } @phdthesis{Charpenay2019, author = {Charpenay, Victor}, title = {Semantics for the Web of Things: Modeling the Physical World as a Collection of Things and Reasoning with their Descriptions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7578}, school = {Universit{\"a}t Passau}, pages = {xiii, 127 Seiten}, year = {2019}, abstract = {The main research question of this thesis is to develop a theory that would provide foundations for the development of Web of Things (WoT) systems. A theory for WoT shall provide a model of the 'things' WoT agents relate to, such that these relations determine what interactions take place between these agents. This thesis presents a knowledge-based approach in which the semantics of WoT systems is given by a transformation (a homomorphism) between a graph representing agent interactions and a knowledge graph describing 'things'. It focuses on three aspects of knowledge graphs in particular: the vocabulary with which assertions can be made, the rules that can be defined over this vocabulary, and its serialization for efficiently exchanging pieces of a knowledge graph. Each aspect is developed in a dedicated chapter, with specific contributions to the state of the art. The need for a unified vocabulary to describe 'things' in WoT and the Internet of Things (IoT) has been identified early on in the literature. Many proposals have consequently been published in the form of Web ontologies. In Ch. 2, a systematic review of these proposals is developed, as well as a comparison with the data models of the principal IoT frameworks and protocols. The contribution of the thesis in that respect is an alignment between the Thing Description (TD) model and the Semantic Sensor Network (SSN) ontology, two standards of the World Wide Web Consortium (W3C). The scope of this thesis is generally limited to Web standards, especially those defined by the Resource Description Framework (RDF). Web ontologies expose not only a vocabulary but also rules to extend a knowledge graph by means of reasoning.
Starting from a set of TD documents, new relations between 'things' can be "discovered" this way, indicating possible interactions between the servients that relate to them. The experiments presented in Ch. 3 were conducted on the basis of this semantic discovery framework on two use cases: a building automation use case provided by Intel Labs and an industrial control use case developed internally at Siemens. The relations to discover often involve anonymous nodes in the knowledge graph: the chapter also introduces a novel skolemization algorithm to correctly process these nodes on a well-defined fragment of the Web Ontology Language (OWL). Finally, because this semantic discovery framework relies on the exchange of TD documents, Ch. 4 introduces a binary format for RDF that proves efficient in serializing TD assertions such that even the smallest WoT agents, i.e. micro-controllers, can store and process them. A formalization for the semantics-preserving compaction and querying of TD documents is also introduced in this chapter, forming the basis of an embedded RDF store called the µRDF store. The ability of all WoT agents to query logical assertions about themselves and their environment, as found in TD documents, is a first step towards knowledge-based intelligent systems that can operate autonomously and dynamically in a decentralized way. The µRDF store is an attempt to illustrate the practical outcomes of the theory of WoT developed throughout this thesis.}, subject = {Semantic Web}, language = {en} } @phdthesis{Golovko2019, author = {Golovko, Dimitri}, title = {Three Essays on the influence of company Facebook and traditional channel activities on recruitment success}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-7645}, school = {Universit{\"a}t Passau}, pages = {XII, 134 Seiten}, year = {2019}, abstract = {The appearance of web and online media has created a substantial change in the manner by which employers and applicants interact. The development of web 1.0 applications with one-way communication and the advancement of web 2.0 technologies with interactive components have extended the spectrum of recruitment channels. The new recruitment media channels have turned the selection of channels and the analysis of their impact and their interaction with each other into a new challenge within the academic literature. This dissertation addresses these issues in three separate essays. Study 1 focuses on the impact of Facebook as a social media recruitment channel on recruitment success. Many companies embed Facebook into their recruitment strategy as an additional recruitment channel for reaching potential applicants and motivating them to apply for available positions. Study 1 analyzes these activities and addresses the question of whether different Facebook activities influence recruitment success above and beyond other undertakings on traditional and online media channels. Study 1 concludes that on Facebook, company posts with a general focus and posts containing work or recruitment information both have a positive impact on recruitment success. The results of Study 1 are validated by company interviews with human resources (HR) managers who are responsible for the overall HR strategy of the company. Study 1 is the first academic work within HR and marketing research that analyzes the impact of a company's Facebook activities. Study 2 examines the impact of traditional media recruitment channels on recruitment success.
Many companies employ traditional media channels for their recruitment marketing actions with the aim of achieving recruitment success. Study 2 uses media richness theory as a basis for analyzing the impact of a company's activities within traditional media channels on recruitment success. Study 2 concludes that exhibition fair and online marketing activities influence recruitment success. In connection with brand equity theory, Study 2 also verifies whether the addition of Facebook activities reinforces the impact of traditional media channels on recruitment success. The results indicate that general Facebook activities have a reinforcing impact on exhibition fair and print media recruitment practices. Finally, Study 3 focuses on both the literature overview of traditional and social media recruitment practices and social media influence from the marketing literature. It also summarizes and categorizes previous research on the influence of traditional, online, and social media recruitment practices; the effect of a multichannel mix; and the influence of social media and social networking sites on different business outcomes from the marketing literature. Additionally, Study 3 identifies the research gaps and provides recommendations for future studies. This dissertation uses vector autoregression modelling, including a validation with the help of company interviews and the employment of media richness, signaling, and brand equity theories, combined with a thorough analysis of the research need. The dissertation closes the research gap regarding the analysis of the impact of Facebook, online, and traditional media on recruitment success. It also adds new perspectives to the HR and marketing literature.}, language = {en} } @phdthesis{Berndl2018, author = {Berndl, Emanuel}, title = {Embedding a Multimedia Metadata Model into a Workflow-driven Environment Using Idiomatic Semantic Web Technologies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6708}, school = {Universit{\"a}t Passau}, pages = {xix, 301 Seiten}, year = {2018}, abstract = {The Semantic Web exists for about 20 years by now, but its applicability as well as its presence does not live up to the standards of its original idea. Incorporated Semantic Web Technologies do have an initial barrier to learn and apply, which can discourage many potential users. This leads to less available data overall in addition to decreased data quality. This work solves parts of the aforementioned problem by supporting idiomatic entry to those Semantic Web Technologies, allowing for "easier" accessibility and usability. Anno4j is a Java library that implements a form of Object-Relational Mapping for RDF data. With its application, RDF data can be created via a mapping by simply instantiating Java objects - an object-oriented programming concept the user is familiar with. On the other side, requesting persisted data is supported by a path-based querying possibility, while other features like transactional behaviour, code generation, and automated validation of input contribute to a more effective, comprehensive, and straightforward usage. A use-case is provided by the MICO Platform, a centralized software instance that connects autonomous multimedia extractors in a workflow-driven fashion. This leads to a rich metadata background for the inserted multimedia files, enabling them to be used in diverse scenarios as well as unlocking yet hidden semantics. 
For this task it was necessary to design and implement a metadata model that is able to aggregate and merge the varying extractor results under a common denominator: the MICO Metadata Model. The results of this work allow the use case to incorporate idiomatic Semantic Web Technologies, which are then usable natively by non-Semantic Web experts. Additionally, improvements have been achieved in terms of data integration, synchronisation, integrity, and validity, as well as an overall more comprehensive and richer implementation of the multimedia extractors.}, subject = {Multimedia}, language = {en} } @phdthesis{Kolesnikov2019, author = {Kolesnikov, Sergiy}, title = {Feature Interactions in Configurable Software Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6739}, school = {Universit{\"a}t Passau}, pages = {ix, 140 Seiten}, year = {2019}, abstract = {Software has become an important part of our lives. Therefore, the number of different application scenarios and user requirements of software systems grows rapidly. To satisfy these requirements, software vendors build configurable software systems that can be tailored to diverse needs without rebuilding them from scratch, which reduces costs and development time. Despite considerable advances in software engineering, which allow building high-quality configurable software systems, some challenges remain. One of these challenges is the feature interaction problem that arises when parts (features), from which a configurable system is composed, interact in unexpected ways and inadvertently change the behavior or quality attributes (such as performance) of the system. The goal of this dissertation is to systematically study the nature of feature interactions, their causes, and their influence on the performance of configurable systems, and, based on empirical results, to suggest ways of improving techniques for detecting and predicting feature interactions. More specifically, we compared and evaluated different strategies for the analysis of configurable software systems. The results of our evaluation complement empirical data from previous work about how different analysis strategies for configurable software systems compare with respect to different aspects, such as performance. These results shall be used to develop effective and scalable techniques and tools for the analysis of configurable software, including feature-interaction detection and prediction techniques and tools. Technically, we used a machine-learning technique to quantify the influence of feature interactions on the performance of real-world configurable systems. We studied the characteristics of interactions that have the largest influence on performance and found that interactions among a few features have a higher influence than interactions among many features. With a growing number of interacting features, the influence of the corresponding interactions decreases consistently. This implies that interactions involving many features can be ignored in practice because of their marginal influence on performance. We also investigated the causes of the interactions and were able to identify several patterns that link these interactions to the architecture of the systems: for example, we found that if a data processing system consisted of multiple features that processed the same data in sequence, then these features interacted. The identified patterns can help to anticipate performance interactions already at an early development stage, when a system's architecture is designed.
Furthermore, considering that control-flow interactions (observable at the level of control flow among features) are easier to detect than performance interactions (externally observable through measuring performance of different combinations of features), we conducted a case study on two configurable systems. In this case study, we investigated a possible relation among control-flow feature interactions and performance feature interactions. We also discussed how this relation can be exploited by interaction detection and performance prediction techniques to make them more time efficient and precise. Our case study on two real-world configurable systems revealed that a relation indeed exists, and we were able to show how it can be used to reduce the search space of possibly existing performance interactions. The study can serve as a blueprint for further studies that can rely on our conceptual framework for investigating relations among external and internal interactions. Overall, the contribution of this dissertation consists of scientific and technical insights, practical tool implementations, empirical evaluations, and case studies that advance the current state of research in the area of feature interactions in configurable software systems. In particular, we provide insights into the causes of feature interactions and their influence on performance of real-world configurable systems (e.g., interaction patterns, decreasing influence of interactions with growing number of involved features). Our results also suggest ways of improving techniques for detecting and predicting feature interactions (e.g., ignoring interactions among multiple features, reducing the search space based on relations among interactions).}, subject = {Softwareentwicklung}, language = {en} } @phdthesis{Stahlbauer2019, author = {Stahlbauer, Andreas}, title = {Abstract Transducers for Software Analysis and Verification}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8468}, school = {Universit{\"a}t Passau}, pages = {xv, 187 Seiten}, year = {2019}, abstract = {Whenever software faults can endanger human life, property, or the environment, the absence of faults must be ensured with utmost care and the best technologies available. Evidence is needed showing that all requirements are satisfied and that the risk of faults is reduced. One technique to conduct such a verification task—composed of the software to verify, the specification to check, and a model of the environment—is software model checking. To conduct a verification task with a model checker, different models of the task are constructed. We distinguish between two types of task models: syntactic task models and semantic task models, which define the respective syntactic structure (control flow) and semantic structure (state transitions, invariants) of the verification task. When constructing such models, we can observe that similar structures and substructures reappear within and among different verification tasks. For example, the same assertions to check can appear in different functions, or the same predicate can be part of different invariants to describe sets of program states. Similarities that appear during the model construction process can be the result of solving similar reasoning problems, often solved using computationally expensive procedures (as typical for model checking), over and over again. 
Not reusing results of solving similar problems, not having a means for conducting repeated efforts automatically, or not trying to reduce the number of similar reasoning efforts is a waste of precious resources. To address these problems, we present a common conceptual and technical foundation for sharing syntactic and semantic task artifacts for reuse, within and among verification runs. Both the syntactic construction of a verification task and the construction of its semantic model—which describes all possible behaviors and states—are covered. We study how commonalities and regularities in the task models can be taken into account to facilitate the process of sharing task artifacts for reuse, and to make the overall verification process more efficient and effective. We introduce abstract transducers as the theoretical foundation of this thesis: a type of finite-state transducers with an inherent notion of abstraction for states, the input alphabet, and the output alphabet. Abstracting these transducers allows us to widen both the set of input words for which they produce output and the sets of output words. Abstract transducers are instantiated as task artifact transducers to map from program structures to the task artifacts to share. We show that the notion of abstraction provides a means for increasing the scope within which task artifacts are shared for reuse. We present two instances of task artifact transducers: Yarn transducers and precision transducers. We use Yarn transducers for providing code to weave into the control-flow structure of a computer program, and present the Loom analysis as a means for orchestrating the weaving process. Precision transducers provide a means for sharing abstraction precisions for reuse, thus aiding in defining the level of abstraction of a semantic task model. For both types of transducers, we provide empirical evidence on their practical applicability, for example, to verify Linux kernel modules, and show that they can help in increasing verification performance.}, subject = {Transduktor}, language = {en} } @phdthesis{Ihl2020, author = {Ihl, Andreas}, title = {Four investigations of arising phenomena in contemporary work settings: the cases of mindfulness practices and crowdworking online platforms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8696}, school = {Universit{\"a}t Passau}, pages = {148 Seiten}, year = {2020}, abstract = {Newly arising phenomena in the occupational realm strongly shape contemporary work settings. These developments heavily affect how individuals work within and beyond organizational boundaries. Two phenomena associated with the changing nature of work have been especially prevalent in work settings and intensively discussed in public debates. First, organizations have started to introduce mindfulness practices to their workforce. Rooted in spirituality and formerly used in clinical therapy, mindfulness is applied as a human resource development practice to train employees and managers to cope with increased work intensification. Second, digitization and the importance of individualization have opened up the path for work settings beyond organizational boundaries on crowdworking online platforms. On these online platforms, workers process tasks independently and remotely. Research has only just started to address the implications and meaning of mindfulness practices in organizations and the rise of crowdworking platforms. Several questions remain unanswered.
This dissertation addresses unanswered but pressing questions related to these two phenomena shaping contemporary work settings. The dissertation is structured in four essays. The first two essays address the application and meaning of mindfulness practices. The first essay analyzes the meaning and interpretations of these new practices within organizations. The second essay takes contextual factors of the organizational environment into account and investigates their relevance for the successful implementation of mindfulness practices. The last two essays are dedicated to work attitudes and behavior on crowdworking online platforms. Essay three captures individuals' motivations for working on such platforms and their effects on workers' work performance. The last essay deals with the role of professional crowdworking online communities in the work experience and assesses the effects of social support in these communities on occupational identification, work meaningfulness, and, finally, work engagement. Each essay in this dissertation generates new insights into arising phenomena in contemporary work settings. They address several timely yet unanswered research questions for these rising phenomena and thereby offer a deeper and more nuanced understanding of the role mindfulness practices and crowdworking online platforms play in the context of the future of work.}, subject = {Organisation}, language = {en} } @phdthesis{Garchery2020, author = {Garchery, Mathieu}, title = {User-centered intrusion detection using heterogeneous data}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8704}, school = {Universit{\"a}t Passau}, pages = {vii, 119 Seiten}, year = {2020}, abstract = {With the frequency and impact of data breaches rising, it has become essential for organizations to automate intrusion detection via machine learning solutions. This generally comes with numerous challenges, among others high class imbalance, changing target concepts, and difficulties in conducting sound evaluation. In this thesis, we adopt a user-centered anomaly detection perspective to address selected challenges of intrusion detection, through a real-world use case in the identity and access management (IAM) domain. In addition to the previous challenges, salient properties of this particular problem are the high relevance of categorical data, limited feature availability, and the total absence of ground truth. First, we ask how to apply anomaly detection to IAM audit logs containing a restricted set of mixed (i.e. numeric and categorical) attributes. Then, we inquire how anomalous user behavior can be separated from normality, and how this separation can be evaluated without ground truth. Finally, we examine how the lack of audit data can be alleviated in two complementary settings. On the one hand, we ask how to cope with users without relevant activity history ("cold start" problem). On the other hand, we investigate how to extend audit data collection with heterogeneous attributes (i.e. categorical, graph, and text) to improve insider threat detection. After aggregating IAM audit data into sessions, we introduce and compare general anomaly detection methods for mixed data to a user identification approach, designed to learn the distinction between normal and malicious user behavior. We find that user identification outperforms general anomaly detection and is effective against masquerades. An additional clustering step allows us to reduce false positives among similar users. However, user identification is not effective against insider threats.
Furthermore, results suggest that the current scope of our audit data collection should be extended. In order to tackle the "cold start" problem, we adopt a zero-shot learning approach. Focusing on the CERT insider threat use case, we extend an intrusion detection system by integrating user relations to organizational entities (like assignments to projects or teams) in order to better estimate user behavior and improve intrusion detection performance. Results show that this approach is effective in two realistic scenarios. Finally, to support additional sources of audit data for insider threat detection, we propose a method representing audit events as graph edges with heterogeneous attributes. By performing detection at a fine-grained level, this approach advantageously improves anomaly traceability while reducing the need for aggregation and feature engineering. Our results show that this method is effective in finding intrusions in authentication and email logs. Overall, our work suggests that masquerades and insider threats call for different detection methods. For masquerades, user identification is a promising approach. To find malicious insiders, graph features representing user context and relations to other entities can be informative. This opens the door for tighter coupling of intrusion detection with user identities, roles and privileges used in IAM solutions.}, subject = {Anomalie}, language = {en} } @phdthesis{Koop2021, author = {Koop, Martin}, title = {Preventing the Leakage of Privacy Sensitive User Data on the Web}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8717}, school = {Universit{\"a}t Passau}, pages = {137 Seiten}, year = {2021}, abstract = {With the linking of personal data, the recording of Internet activity has become a key resource for many paid and free services on the Web. These services are, on the one hand, web applications such as the maps/navigation or web search provided by Google, which are used free of charge every day. On the other hand, they comprise all the websites that provide news or general information on various topics, usually free of charge. By visiting and using these web services, all information processed within the web service is passed on to the service provider. This includes not only the profile data stored in the user account of the web service, such as name or address, but also the activity with the web service, such as clicked links or the time spent on a page. Beyond that, there are countless third parties which are usually embedded into web services in the background and which record and analyze user behavior across the complete web activity, spanning multiple websites. The use of various techniques, usually hidden from the user, serves to track users' online behavior precisely and to collect large amounts of sensitive data. This practice is referred to as web tracking and is mainly exploited by advertising companies. The collected data are often personal and constitute a valuable resource for these companies, for example to serve personalized advertising tailored to the user profile. The use of this personal data, however, also has more far-reaching consequences, which are reflected, among other things, in price adjustments for users with particular profile attributes, such as the use of expensive devices.
The goal of this work is to increase users' privacy on the Internet and to significantly reduce the tracking of users through web tracking. Four challenges arise in this regard, each forming a research focus of this work: (1) a systematic analysis and classification of the tracking techniques in use, (2) an investigation of existing protection mechanisms and their weaknesses, (3) the design of a reference architecture for protection against web tracking, and (4) the design of an automated test environment under real-world conditions to examine the reduction of web tracking achieved by the developed protection measures. Each of these research foci provides new contributions towards the common overarching goal: the development of protection measures against the disclosure of sensitive user data on the Internet. The first scientific contribution of this dissertation is a comprehensive evaluation of the web tracking techniques and methods in use, as well as their dangers, risks, and implications for the privacy of Internet users. The evaluation additionally includes an investigation of existing tracking protection mechanisms and their weaknesses. The insights gained are decisive for the approaches newly developed in this work and improve upon the previously insufficient protection against web tracking. The second scientific contribution is the development of a robust classification of web tracking, the design of an efficient architecture for the long-term study of web tracking, and an interactive visualization of the occurrence of web tracking on the Internet. The new classification approach for identifying tracking is based on measuring the entropy of the information content of cookies. The results of the long-term web tracking studies include 1,209 identified tracking domains on the most visited websites in Germany. Within the top 25 websites, an average of 45 tracking elements per website was found. The tracker with the highest potential for creating a user profile was doubleclick.com, as it monitors 90\% of the websites. The evaluation of the examined tracking network further provided detailed insight into the tracking technique based on redirect links. For this, we analyzed 1.2 million HTTP traces from months-long crawls of the 50,000 internationally most visited websites. The results show that 11.6\% of these websites use HTTP redirects, hidden in website links, for tracking. This technique is employed to redirect the user's browsing path after a click through a chain of (tracking) servers, which are usually not visible, before the intended link target is loaded. In this scenario, the tracker captures valuable connection metadata about the content, topic, or user interests of the website. We provide the visualization of this tracking ecosystem in an interactive open-source web tool. The third scientific contribution of this dissertation is the design of two novel protection mechanisms against web tracking and the construction of an automated simulation environment under real-world conditions to verify the effectiveness of their implementations. The focus lies on the two most widely used tracking techniques: cookies (where a unique ID is stored on the user's device) and browser fingerprinting.
The latter describes a method of collecting a multitude of device properties in order to uniquely (re-)identify a user without storing a unique ID on the device. To examine the effectiveness of the web tracking protection mechanisms developed in this thesis, we implemented and evaluated the protection concepts directly in the Chromium browser. The result shows a successful reduction of web tracking by 44\%. In addition, the "Site Isolation" concept developed in this work improves the privacy of the private browsing mode, allows a manual storage time limit for cookies to be set, and protects the browser against various threats such as CSRF (Cross-Site Request Forgery) or CORS (Cross-Origin Resource Sharing). Site Isolation stores the state of the local website in separate containers and can thereby prevent various tracking methods such as cookies, localStorage, or redirect tracking. In an evaluation of 1.6 million websites, we showed that the tracker doubleclick.com has the highest potential to track users and is present on 25\% of the 40,000 internationally most visited websites. Finally, we demonstrate a robust browser fingerprinting protection in our extended Chromium browser. Testing our prototype across 70,000 browser sessions shows that our browser protects users against so-called browser fingerprinting. Compared to five other browser fingerprinting tools, our prototype achieved the best results and is the first protection mechanism against both Flash and Canvas fingerprinting.}, subject = {Datenschutz}, language = {en} } @phdthesis{Alshawish2021, author = {Alshawish, Ali}, title = {Risk-based Security Management in Critical Infrastructure Organizations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10026}, school = {Universit{\"a}t Passau}, pages = {xii, 181 Seiten}, year = {2021}, abstract = {Critical infrastructure and contemporary business organizations are experiencing an ongoing paradigm shift of business towards more collaboration and agility. On the one hand, this shift seeks to enhance business efficiency, coordinate large-scale distribution operations, and manage complex supply chains. But, on the other hand, it makes traditional security practices such as firewalls and other perimeter defenses insufficient. Therefore, concerns over risks like terrorism, crime, and business revenue loss increasingly impose the need for enhancing and managing security within the boundaries of these systems so that unwanted incidents (e.g., potential intrusions) can still be detected with higher probabilities. To this end, critical infrastructure organizations step up their efforts to investigate new possibilities for actively engaging in situational awareness practices to ensure a high level of persistent monitoring as well as on-site observation. Compliance with security standards is necessary to ensure that organizations meet regulatory requirements mostly shaped by a set of best practices. Nevertheless, it does not necessarily result in a coherent security strategy that considers the different aims and practical constraints of each organization. In this regard, there is a growing demand for risk-based security management approaches that enable critical infrastructures to focus their efforts on mitigating the risks to which they are exposed.
Broadly speaking, security management involves the identification, assessment, and evaluation of long-term (or overall) objectives and interests as well as the means of achieving them. Due to the critical role of such systems, their decision-makers tend to enhance the system resilience against very unpleasant outcomes and severe consequences. That is, they seek to avoid decision options associated with likely extreme risks in the first place. Practically speaking, this risk attitude can significantly influence the decision-making process in such critical organizations. Towards incorporating the aversion to extreme risks into security management decisions, this thesis thoroughly investigates the capabilities of a recently emerged theory of games with payoffs that are probability distributions. Unlike traditional optimization techniques, this theory provides an alternative decision technique that is more robust to extreme risks and uncertainty. Furthermore, this thesis proposes a new method that gives a decision maker more control over the decision-making process by defining loss regions with different importance levels according to people's risk attitudes. In this way, the static decision analysis used in the distribution-valued games is transformed into a dynamic process that can adapt to different subjective risk attitudes or account for future changes in the decision caused by a learning process or other changes in the context. Throughout its different parts, this thesis shows how theoretical models, simulation, and risk assessment models can be combined into practical solutions. In this context, it deals with three facets of security management: allocating limited security resources, prioritizing security actions, and tweaking decision making. Finally, the author discusses experiences and limitations distilled from this research and from investigating the new theory of games, which can be taken into account in future approaches.}, subject = {Spieltheorie}, language = {en} } @phdthesis{Lang2021, author = {Lang, Thomas}, title = {AI-Supported Interactive Segmentation of 3D Volumes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-9221}, school = {Universit{\"a}t Passau}, pages = {184 Seiten}, year = {2021}, abstract = {The segmentation of volumetric datasets, i.e., the partitioning of the data into disjoint sub-volumes with the goal of extracting information about these regions, is a difficult problem and has been discussed in medical imaging for decades. Due to the ever-increasing imaging capabilities, in particular in X-ray computed tomography (CT) or magnetic resonance imaging, segmentation in industrial applications also gains interest. Especially in industrial applications, the generated datasets increase in size. Hence, most applications apply well-known techniques in a 2+1-dimensional manner, i.e., they apply image segmentation procedures on each slice separately and track the progress along the axis of the volume along which the slices are stacked. This discards the information on preceding or subsequent slices, which is often assumed to be nearly identical. However, in the industrial context this might prove wrong, since industrial parts might change their appearance significantly over the course of even a few slices. Moreover, artifacts can further distort the content of the slices. Therefore, three-dimensional processing of voxel volumes has to be preferred, which induces constraints upon the segmentation procedures.
For example, they must not consider global information, as it is usually not feasible to compute it efficiently for big scans. Yet another frequent problem is that applications focus on individual parts only and algorithms are tailored to that case. Most prominent medical segmentation procedures do so by applying methods to specifically find the liver and only the liver of a patient, for example. The implication is that the same method then cannot be applied to find other parts of the scan, and such methods have to be designed individually for any object to be segmented. Flexible segmentation methods are also needed, specifically when partitioning unique scans. We define a unique scan to be a voxel dataset for which no comparable volume exists. Classical examples include the use case of cultural heritage, where not only the objects themselves are unique but also the scan parameters are optimized to obtain the best image quality possible for that specific scan. This thesis aims at introducing novel methods for voxelwise classifications based on local geometric features. The latter are computed from local environments around each voxel and extract information in similar ways as humans do, namely by observing their similarity to geometric or textural primitives. These features serve as the foundation for learning the proposed voxelwise classifiers and for discriminating between segmented and unsegmented voxels. On the one hand, the proposed methods perform fully automated clustering of volumes, for which a representative random sample is extracted first. On the other hand, a set of segmenting classifiers can be trained from a few seed voxels, i.e., volume elements for which a domain expert has marked whether they belong to the components that shall be segmented. The interactive selection offers the advantage that no completely labeled voxel volumes are necessary and hence that unique scans of objects can be segmented for which no comparable scans exist. Overall, it will be shown that all proposed segmentation methods are effectively of linear runtime with respect to the number of voxels in the volume. Thus, voxel volumes without size restrictions can be segmented in an efficient linear pass through the volume. Finally, the segmentation performance is evaluated on selected datasets, which shows that the introduced methods can achieve good results on scans from a broad variety of domains for both small and big voxel volumes.}, language = {en} } @phdthesis{Niedermeier2020, author = {Niedermeier, Michael}, title = {Towards High Performability in Advanced Metering Infrastructures}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8597}, school = {Universit{\"a}t Passau}, pages = {xvii, 198 Seiten}, year = {2020}, abstract = {The current movement towards a smart grid serves as a solution to present power grid challenges by introducing numerous monitoring and communication technologies. A dependable, yet timely exchange of data is, on the one hand, an existential prerequisite for enabling Advanced Metering Infrastructure (AMI) services, yet, on the other, a challenging endeavor, because the increasing complexity of the grid fostered by the combination of Information and Communications Technology (ICT) and utility networks inherently leads to dependability challenges. Current approaches to countering this dependability degradation, which are based on high-reliability hardware or physical redundancy, are no longer feasible, as they lead to increased hardware costs or maintenance effort, if not both.
The flexibility of these approaches regarding vendor and regulatory interoperability is also limited. At the same time, a suitable solution to the AMI dependability challenges must maintain certain regulatory-set performance and Quality of Service (QoS) levels. While a part of the challenge is the introduction of ICT into the power grid, it also serves as part of the solution. In this thesis, a Network Functions Virtualization (NFV) based approach is proposed, which employs virtualized ICT components serving as a replacement for physical devices. By using virtualization techniques, it is possible to enhance performability compared to hardware-based solutions, as processes that would otherwise require dedicated hardware are replaced by virtual counterparts. This approach offers higher flexibility compared to hardware redundancy, as a broad variety of virtual components can be spawned, adapted and replaced in a short time. Also, as no additional hardware is necessary, the incurred costs decrease significantly. In addition, most of the virtualized components are deployed on Commercial-Off-The-Shelf (COTS) hardware, further increasing the monetary benefit. The approach is developed by first reviewing currently suggested solutions for AMIs and related services. Using this information, virtualization technologies are investigated for their performance influences, before a virtualized service infrastructure is devised, which replaces selected components with virtualized counterparts. Next, a novel model that allows the separation of services and hosting substrates is developed, allowing virtualization technologies to abstract from the underlying architecture. Third, the performability as well as the monetary savings are investigated by evaluating the developed approach in several scenarios using analytical and simulative model analysis as well as proof-of-concept approaches. Last, the practical applicability and possible regulatory challenges of the approach are identified and discussed. Results confirm that, under certain assumptions, the developed virtualized AMI is superior to the currently suggested architecture. The availability of services can be increased substantially and network delays can be minimized through centralized hosting. The availability can be increased from 96.82\% to 98.66\% in the given scenarios, while decreasing the costs by over 60\% in comparison to the currently suggested AMI architecture. Lastly, the performability evaluation of a virtualized service prototype, employing performance analysis and a Musa-Okumoto approach, reveals that the AMI requirements are fulfilled.}, subject = {Energieversorgung}, language = {en} } @phdthesis{Hatzesberger2020, author = {Hatzesberger, Simon}, title = {Strongly Asymptotically Optimal Methods for the Pathwise Global Approximation of Stochastic Differential Equations with Coefficients of Super-linear Growth}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8100}, school = {Universit{\"a}t Passau}, pages = {ii, 116 Seiten}, year = {2020}, abstract = {Our subject of study is strong approximation of stochastic differential equations (SDEs) with respect to the supremum and the L_p error criteria, and we seek approximations that are strongly asymptotically optimal in specific classes of approximations. For the supremum error, we prove strong asymptotic optimality for specific tamed Euler schemes relating to certain adaptive and to equidistant time discretizations.
For the L_p error, we prove strong asymptotic optimality for specific tamed Milstein schemes relating to certain adaptive and to equidistant time discretizations. To illustrate our findings, we numerically analyze the SDE associated with the Heston-3/2-model originating from mathematical finance.}, subject = {Stochastische Differentialgleichung}, language = {en} } @phdthesis{Nguyen2020, author = {Nguyen, Van Nghia}, title = {Internationale Standards f{\"u}r die Vollstreckung von Zivilurteilen: Aktuelle Situation und m{\"o}gliche L{\"o}sungen f{\"u}r Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8191}, school = {Universit{\"a}t Passau}, pages = {XXVII, 238, xxxi Seiten}, year = {2020}, abstract = {The aim of this dissertation is to gain insights for a possible improvement of Vietnamese enforcement law, in order to achieve an effective and efficient enforcement procedure in Vietnam that is in line with international standards. The first chapter examines the new international standards in the field of the enforcement of civil judgments and discusses them in comparison with important principles of the enforcement of court judgments in Vietnam. The second chapter is devoted to the structure and organization of the enforcement authorities, focusing in particular on the advantages of establishing a professional association of bailiffs that comprises all members of the profession. The third chapter shows, among other things, that effective mechanisms for the enforcement of decisions must comply with the principle of proportionality. The fourth chapter presents the international norms on interim legal protection, which is an indispensable means of ensuring the enforcement of civil judgments. Based on the findings of these four chapters, a number of valuable insights emerge for improving the Vietnamese legal system and for increasing the efficiency of the enforcement of civil court judgments.}, language = {en} } @phdthesis{Kopp2020, author = {Kopp, Katrina}, title = {Essays on Fraud and Forensic Accounting - Research from a German Accounting Perspective}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8264}, school = {Universit{\"a}t Passau}, pages = {190 Seiten}, year = {2020}, abstract = {Whether investment fraud, cybercrime, inconsistencies in health care, or the emissions scandals at car manufacturers: economic crime (fraud) manifests itself in many facets. For Germany, the cases of FlowTex, Comroad, HRE-Bad-Bank, Holzmann, Volkswagen and the current fraud suspicions at Porsche AG are prominent examples with mostly appalling consequences (Ballwieser and Dobler 2003; K{\"o}gler 2015; Meck, Nienhaus, and von Petersdorff 2011; Peem{\"o}ller and Hofmann 2005). Nevertheless, newspapers without reports on fraud have become scarce. Headlines such as "Corruption - the daily business" hardly impress anyone anymore, not least because of their regularity. The cases revealed publicly are, however, only the tip of the iceberg, as reported by renowned experts (Bundeskriminalamt 2018; LKA 2018). Currently, the State Criminal Police Office (Landeskriminalamt (LKA)) of Baden-W{\"u}rttemberg and its department for economic and environmental crime and corruption is concerned with 72 major proceedings (LKA 2018).
However, fraud could be avoided or at least contained by appropriate preventive measures (Bundeskriminalamt 2018; Bussmann 2004; Hlavica, Klapproth, and H{\"u}lsberg 2011). Consequently, the pressure on companies and employees to demonstrate compliant and ethical behavior and to meet the demands of stakeholders at all times within their business activities has grown (Buff 2000). This raises the question of which precautionary measures a company can and must implement (Weick and Sutcliffe 2015). Although corporate awareness of this issue has increased, most in-house detection of fraud is accidental, suggesting that companies still lack appropriately functioning and systematic (early) detection mechanisms (Hlavica et al. 2011). If a company is accused of fraud, this usually has serious repercussions on its corporate reputation. Prior research found that capital market reputation-based penalties for affected companies are on average 7.5 times higher than penalties imposed by the legal system (Karpoff, Lee, and Martin 2008). Furthermore, an accusation of fraud also affects the external auditor's reputation, since failing to detect manipulations in clients' (financial) reports damages public confidence not only in the accuracy of firms' financial statements but also in the reliability of the auditor's report. Therefore, it is not surprising that the demand for greater supervision and control of firms' (financial) reporting as well as for reliable work by statutory auditors continually increases (Herkendell 2007). Although to a lesser extent, this is also the case for the determination of material (accounting) errors within a firm's financial statements, which are often difficult to distinguish from accounting fraud. According to International Accounting Standard (IAS) 8.5, published by the International Accounting Standards Board (IASB), errors are omissions and/or misstatements of items that result from the nonapplication or misapplication of trusted information (IASB 2003). Thus, accounting errors and accounting fraud both result in incorrect information in a firm's financial reports and consequently affect stakeholders' decision-making. One resulting attempt to meet the broad demand for appropriate protective measures was the implementation of a two-stage enforcement system involving the German Financial Reporting Enforcement Panel (Deutsche Pr{\"u}fstelle f{\"u}r Rechnungslegung (DPR)) as part of the Financial Reporting Enforcement Act (Bilanzkontrollgesetz (BilKoG)) adopted in 2004. The primary objective of the Federal Government's implementation of this mechanism was to restore investors' lost confidence in the German capital market, in the information content of financial reporting, and in Germany as a financial center in international competition. In addition, the enforcement system serves as a sanctioning instrument for firms in the event of error detection and subsequent adverse error disclosure via the German federal gazette (elektronischer Bundesanzeiger). This adverse error disclosure not only sanctions denounced firms but also questions the quality of the annual financial statement audit and thus the quality of the responsible audit firm. Hence, the often thin line between firms' unintentional accounting errors, purposive engagement in earnings management, and intentional fraud presents an increasing challenge for the audit profession.
The objective of my cumulative dissertation is to provide a comprehensive overview of fraud and forensic accounting as well as insights into the distinctions among the concepts of errors, earnings management, and fraud from a German accounting perspective. I aim to achieve this objective in three steps: First (1), by providing an overview of discipline-specific education possibilities, existing forensic accounting practices, institutions, and current developments in research. Second (2), by assessing auditors' obligations and responsibilities for the detection of irregularities within the scope of the annual financial statement audit, and whether including forensic services in the service portfolio of audit firms can help increase their audit quality due to spillover effects. Third (3), by examining firms' reputation (re-)building management in response to financial violations and how this process is associated with managing multiple (stakeholder) reputations. This dissertation is composed of three individual papers, each of which considers one of the focus areas outlined above.}, subject = {Wirtschaftskriminalit{\"a}t}, language = {en} } @unpublished{Yakouchyk2018, author = {Yakouchyk, Katsiaryna}, title = {Belarusian State Ideology: A Strategy of Flexible Adaptation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-6028}, pages = {20 Seiten}, year = {2018}, abstract = {While in some Eastern European countries a wave of colored revolutions challenged existing political orders, Belarus has remained largely untouched by mass protests. In Minsk, the diffusion of democratic ideas leading to the mobilization of the population meets a stable authoritarian regime. Nevertheless, the stagnating democratization process cannot be attributed only to strong authoritarian rule and the abuse of power. Indeed, Belarusian president Alexander Lukashenko still enjoys popularity among a large part of the population. Although international observers report that elections in Belarus have never been free and fair, few commentators doubt that Lukashenko would have won even in democratic elections. This evidence suggests that the regime has succeeded in building a strong legitimizing basis, which has not been seriously challenged during the last two decades. This paper explores authoritarian stability in Belarus by looking at the patterns of state ideology. The government has effectively spread state ideology since the early 2000s. Ideology departments have been created in almost all state institutions. The education sector has been affected by the introduction of the compulsory course "The Fundamentals of Belarusian State Ideology" at all universities and by increasing attention to patriotic education at schools. Based on document analysis, I trace the creation of the "ideological vertical" in Belarus and focus on the issue of ideology in the education and youth policy sectors.}, language = {en} } @phdthesis{Baehne2015, author = {B{\"a}hne, Katharina}, title = {The Will to Play. Performance and Construction of Royal Masculinity in Early Modern History Plays}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3329}, school = {Universit{\"a}t Passau}, pages = {415}, year = {2015}, abstract = {This thesis examines concepts of masculinity in the early modern period, with the main focus on the dramatic construction of the figure of the king.
Drawing on ten history plays of the 1590s, it first examines the discursive complexity of royal masculinity in the Renaissance and then, building on this, analyzes its performative representation. The theoretical part discusses masculinity and rule in Elizabethan England with the help of contemporary texts and extends this discussion with the discourse on gender and the performativity of gender. The subsequent methodological part develops a semiotics of royal masculinity from the insights gained, which is then evaluated in the analytical part on the basis of the selected history plays.}, subject = {M{\"a}nnlichkeit}, language = {en} } @phdthesis{Milisavljevic2013, author = {Milisavljevic, Maria}, title = {Looking Behind the Scenes. The History of the Royal Court Theatre Through the Lens of Prominent Productions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5172}, school = {Universit{\"a}t Passau}, pages = {317 Seiten}, year = {2013}, abstract = {An investigative study of newly released archive material representing six decades of Royal Court Theatre history, including analyses of the productions of John Osborne's Look Back in Anger, Edward Bond's Early Morning, Caryl Churchill's Cloud Nine, Jim Allen's Perdition, Sarah Kane's Blasted, and debbie tucker green's Stoning Mary. Sixty years after the first season of the English Stage Company was launched at the Royal Court Theatre, there is no theatre maker or theatre scholar in the world who has not heard of this first writers' theatre in Britain: it famously put the angry young Jimmy Porter on stage, it helped put an end to stage censorship in Britain, and it has through the years been one of the most important engines for new writing in the English-speaking world. The who's who of British playwriting started off, visited, or ended up at "the most important theatre in Europe" (New York Times). But no matter how big the names attached to a theatre are, it is the everyday battles over budgets, politics, and compromises that really make up a theatre's history. Like a detective story, this first independent study of the Royal Court delves deep into the newly opened Royal Court Archives to fully bring to light some of the most controversial decisions, struggles, and compromises that shaped the Royal Court.}, subject = {Royal Court Theatre, London}, language = {en} }