@phdthesis{Linsbauer2019,
  author = {Linsbauer, Michael},
  title = {Musikfestival-Landschaft Nieder{\"o}sterreich - Musikhistorische und kulturpolitische Rahmenbedingungen eines Erfolgsmodells},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8334},
  school = {Universit{\"a}t Passau},
  pages = {382 Seiten},
  year = {2019},
  abstract = {The main aim of this dissertation is, on the one hand, to substantiate that the Lower Austrian "music landscape" constitutes a success model of cultural development and capacity-building work that is relevant far beyond the region and, on the other hand, to investigate which concrete framework conditions and success factors have contributed to this positive finding. The central question in this context is: How was it possible to develop and establish, in the long term, an independent and equally high-quality music scene and festival density in Lower Austria of all places, the federal state surrounding the Austrian capital with its intensive cultural offerings and internationally acclaimed musical life, in the field of tension with Vienna and yet independent of the capital's musical activities?},
  subject = {Musikfestspiel},
  language = {de}
}
@phdthesis{Peboeck2020,
  author = {Peb{\"o}ck, Karl},
  title = {\#relichat - informelles Lernen mit Twitter},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8287},
  school = {Universit{\"a}t Passau},
  pages = {418 Seiten},
  year = {2020},
  abstract = {The \#relichat is the weekly Twitter chat on religious education. In a one-hour discussion structured by questions, topics of religious education are discussed publicly on the Twitter platform using the hashtag \#relichat. From 2017 until the summer of 2020, 89 \#relichats took place, in which about 220 people from the German-speaking countries actively participated. This dissertation examines the \#relichat project hermeneutically and evaluates the participants' experiences of \#relichat as an informal professional development format. Title of the dissertation: \#relichat - informelles Lernen mit Twitter. Religionslehrer*innenfortbildung als sozial-konstruktivistische Vernetzung in Communities of Practice. The hermeneutic part of the thesis discusses aspects of social media, of informal and constructivist learning, of networked learning in communities of practice, of learning in public, and also of the church's self-understanding as an institution in the public sphere. For the evaluation, a mixed-methods design was chosen that examines the project from different positions and thus does justice to the complexity of the undertaking. Alongside the examination of statistical data (access figures, participation, etc.), the analysis of the thematic development of the discussions following the concept of topic constitution, and the analysis of the discussions with the methods of text linguistics, the evaluation of qualitative interviews with \#relichat participants formed the core of the research. With the help of the grounded theory methodology, a theory of networked learning with social media was developed that claims generalizability and is intended to reach beyond the concrete \#relichat project. The results can be summarized as follows: learning in and with \#relichat is constructivist, informal, self-organized and self-responsible learning.
It can be described as professional development insofar as it can bring about a further development of one's own practice of religious education. The social relationships within the community of practice play a special role. The medium Twitter sets the framework conditions for communication. Based on the findings of the \#relichat research project, it can be assumed that there will increasingly be forms of informal learning in communities of practice that make use of the possibilities of social communication in digital media. This applies to lifelong learning in general, but also to the professional development of teachers. Networking will become even more significant as a resource for continuing education. In the future, dialogue and communication will increasingly take place in public, and hierarchical structures will thereby increasingly lose their societal legitimacy.},
  language = {de}
}
@phdthesis{Tueno2020,
  author = {Tueno, Anselme},
  title = {Multiparty Protocols for Tree Classifiers},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8251},
  school = {Universit{\"a}t Passau},
  pages = {xvii, 171 Seiten},
  year = {2020},
  abstract = {Cryptography is the scientific study of techniques for securing information and communication against adversaries. It is about designing and analyzing encryption schemes and protocols that protect data from unauthorized reading. However, in our modern information-driven society with highly complex and interconnected information systems, encryption alone is no longer enough, as it makes the data unintelligible, preventing any meaningful computation without decryption. On the one hand, data owners want to maintain control over their sensitive data. On the other hand, there is a high business incentive for collaborating with an untrusted external party. Modern cryptography encompasses different techniques, such as secure multiparty computation, homomorphic encryption or order-preserving encryption, that enable cloud users to encrypt their data before outsourcing it to the cloud while still being able to process and search the outsourced and encrypted data without decrypting it. In this thesis, we rely on these cryptographic techniques for computing on encrypted data to propose efficient multiparty protocols for order-preserving encryption, decision tree evaluation and kth-ranked element computation. We start with order-preserving encryption (OPE), which allows encrypting data while still enabling efficient range queries on the encrypted data. However, OPE is symmetric, limiting the use case to one client and one server. Imagine a scenario where a Data Owner (DO) outsources encrypted data to the Cloud Service Provider (CSP) and a Data Analyst (DA) wants to execute private range queries on this data. Then either the DO must reveal its encryption key or the DA must reveal the private queries. We overcome this limitation by allowing the equivalent of a public-key OPE. Decision trees are common and very popular classifiers because they are explainable. The problem of evaluating a private decision tree on private data consists of a server holding a private decision tree and a client holding a private attribute vector. The goal is to classify the client's input using the server's model such that the client learns only the result of the classification, and the server learns nothing.
In the first approach, we represent the tree as an array and execute only d interactive comparisons (instead of $2^d$ as in existing solutions), where d denotes the depth of the tree. In the second approach, we delegate the complete tree evaluation to the server using somewhat or fully homomorphic encryption, where the ciphertexts are encrypted under the client's public key. A generalization of a decision tree is a random forest, which consists of many decision trees. A classification with a random forest evaluates each decision tree in the forest and outputs the classification label that occurs most often. Hence, the classification labels are ranked by their number of occurrences, and the final result is the best-ranked one. The best-ranked element is a special case of the kth-ranked element. In this thesis, we consider the secure computation of the kth-ranked element in a distributed setting with applications in benchmarking and auctions. We propose different approaches for privately computing the kth-ranked element in a star network, using either garbled circuits or threshold homomorphic encryption.},
  subject = {Mathematik},
  language = {en}
}
@phdthesis{Taubmann2020,
  author = {Taubmann, Benjamin},
  title = {Improving Digital Forensics and Incident Analysis in Production Environments by Using Virtual Machine Introspection},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8319},
  school = {Universit{\"a}t Passau},
  pages = {ix, 153 Seiten},
  year = {2020},
  abstract = {Main memory forensics and its special form, virtual machine introspection (VMI), are powerful tools for digital forensics and can be used to improve the security of computer-based systems. However, their use in production systems is often not possible. This work identifies the causes and offers practical solutions for applying these techniques in cloud computing and on mobile devices to improve digital forensics and incident analysis. Four key challenges must be tackled. The first challenge is that many existing solutions are not reproducible, for example, because the corresponding software components are not available, obsolete or incompatible. The use of these tools is also often complex and can lead to a crash of the monitored system if used incorrectly. To solve this problem, this thesis describes the design and implementation of Libvmtrace, a framework for the introspection of Linux-based virtual machines. The focus of the developed design is to implement frequently used methods in encapsulated modules so that they are easy for developers to use, optimize and test. The second challenge is that many production systems do not provide an interface for main memory forensics and virtual machine introspection. To address this problem, this thesis describes possible solutions for how such an interface can be implemented on mobile devices and in cloud environments that are designed to protect main memory from unprivileged access. We discuss how cold boot attacks, the ARM TrustZone and the hypervisor of cloud servers can be used to acquire data from main memory. The third challenge is how to reconstruct information from main memory efficiently. This thesis addresses this challenge by means of two practical examples. The first example involves extracting the keys of encrypted TLS connections from the main memory of applications in order to decrypt network traffic without affecting the performance of the monitored application.
The TLSKex and DroidKex architectures describe two approaches to localizing the keys efficiently in the main memory of applications with the help of semantic knowledge. The second example discusses how to monitor and document SSH sessions of potential attackers from outside a virtual machine. It is important that the monitoring routines are not noticed by an attacker. To achieve this, we evaluate how to optimize the performance of the monitoring mechanism. The fourth challenge is how to deal with the performance degradation caused by introspection in production systems. This thesis discusses how this can be done using the example of a SIEM system. To reduce the performance overhead, we describe how to configure the monitoring routine to collect only the information needed to detect incidents. In addition, we describe two approaches that permit the monitoring routine to be dynamically adjusted at runtime to extract more information if necessary, so that incidents can be better analyzed.},
  subject = {Computerforensik},
  language = {en}
}
@phdthesis{Kurz2019,
  author = {Kurz, Thomas},
  title = {Adapting Semantic Web Information Retrieval to Multimedia},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8276},
  school = {Universit{\"a}t Passau},
  pages = {xvi, 206 Seiten},
  year = {2019},
  abstract = {The amount of audio, video and image data on the Web is growing immensely, which leads to data management problems due to the hidden character of multimedia. Therefore, the interlinking of semantic concepts and media data with the aim of bridging the gap between the Internet of documents and the Web of Data has become common practice. However, the value of connecting media to its semantic metadata is limited due to a lack of access methods and the absence of an adapted query language specialized for media assets and fragments. This thesis aims to extend the standard query language for the Semantic Web (SPARQL) with media-specific concepts and functions. The main contributions of the work are an exhaustive survey of multimedia query languages of the last three decades, the SPARQL extension specification itself, and an approach for the efficient evaluation of the new query concepts. Additionally, I elaborate and evaluate a metadata-based media fragment similarity approach, which provides a basis for further language extensions.},
  subject = {Semantic Web},
  language = {en}
}
@phdthesis{Wimbauer2020,
  author = {Wimbauer, Lisa Kristina},
  title = {Innovate with Crowds. Co-Creation and Idea Evaluation in Internal and External Crowdsourcing.},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8359},
  school = {Universit{\"a}t Passau},
  pages = {VIII, 208 Seiten},
  year = {2020},
  abstract = {Crowdsourcing seems to be a promising approach for organizations to overcome challenges widely discussed in innovation and organizational research. However, the extent to which an organization can leverage the benefits of crowdsourcing is contingent on which type of crowd is addressed and how crowds are used.
Based on unique data from crowdsourcing contests, the dissertation provides insights into how to innovate with internal and external crowds in order to utilize their potential for co-creation and idea evaluation.},
  language = {en}
}
@phdthesis{Kopp2020,
  author = {Kopp, Katrina},
  title = {Essays on Fraud and Forensic Accounting - Research from a German Accounting Perspective},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-8264},
  school = {Universit{\"a}t Passau},
  pages = {190 Seiten},
  year = {2020},
  abstract = {Whether investment fraud, cybercrime, inconsistencies in health care or the emission scams at car manufacturers: economic crime (fraud) manifests itself in many facets. For Germany, the cases of FlowTex, Comroad, HRE-Bad-Bank, Holzmann, Volkswagen and the current fraud suspicions at Porsche AG are prominent examples with mostly appalling consequences (Ballwieser and Dobler 2003; K{\"o}gler 2015; Meck, Nienhaus, and von Petersdorff 2011; Peem{\"o}ller and Hofmann 2005). Nevertheless, newspapers without reports on fraud have become scarce. Headlines such as "Corruption - the daily business" hardly impress anyone anymore, not least because of their regularity. The cases revealed publicly are, however, only the tip of the iceberg, as reported by renowned experts (Bundeskriminalamt 2018; LKA 2018). Currently, the State Criminal Police Office (Landeskriminalamt (LKA)) of Baden-W{\"u}rttemberg and its department for economic and environmental crime and corruption is concerned with 72 major proceedings (LKA 2018). However, fraud could be avoided or at least contained by appropriate preventive measures (Bundeskriminalamt 2018; Bussmann 2004; Hlavica, Klapproth, and H{\"u}lsberg 2011). Consequently, the pressure on companies and employees to demonstrate compliant and ethical behavior and to meet the demands of stakeholders at all times within their business activities has grown (Buff 2000). This raises the question of which precautionary measures a company can and must implement (Weick and Sutcliffe 2015). Although corporate awareness of this issue has increased, most in-house detection of fraud is accidental, suggesting that companies still lack appropriately functioning and systematic (early) detection mechanisms (Hlavica et al. 2011). If a company is accused of fraud, this usually has serious repercussions for its corporate reputation. Prior research found that capital market reputation-based penalties for affected companies are on average 7.5 times higher than penalties imposed by the legal system (Karpoff, Lee, and Martin 2008). Furthermore, the accusation of fraud also affects the external auditor's reputation, since failing to detect manipulations in clients' (financial) reports damages public confidence not only in the accuracy of firms' financial statements but also in the reliability of the auditor's report. Therefore, it is not surprising that the demand for greater supervision and control of firms' (financial) reporting as well as for reliable work of statutory auditors continually increases (Herkendell 2007). Although to a lesser extent, this is also the case for the determination of material (accounting) errors within a firm's financial statements, which are often difficult to distinguish from accounting fraud.
According to the International Accounting Standard (IAS) 8.5, published by the International Accounting Standards Board (IASB), errors are omissions and/or misstatements of items that result from the nonapplication or misapplication of trusted information (IASB 2003). Thus, accounting errors and accounting fraud both result in incorrect information in a firm's financial reports and consequently affect stakeholders' decision-making. One response to the broad demand for appropriate protective measures was the implementation of a two-stage enforcement system involving the German Financial Reporting Enforcement Panel (Deutsche Pr{\"u}fstelle f{\"u}r Rechnungslegung (DPR)) as part of the Financial Reporting Enforcement Act (Bilanzkontrollgesetz (BilKoG)) adopted in 2004. The primary objective of the Federal Government's implementation of this mechanism was to restore investors' lost confidence in the German capital market, in the information content of financial reporting, and in Germany as a financial center in international competition. In addition, the enforcement system serves as a sanctioning instrument for firms in the event of error detection and subsequent adverse error disclosure via the German federal registry (elektronischer Bundesanzeiger). This adverse error disclosure not only sanctions the denounced firms but also calls into question the quality of the annual financial statement audit and thus the quality of the responsible audit firm. Hence, the often thin line between firms' unintentional accounting errors, purposive engagement in earnings management, and intentional fraud in particular presents an increasing challenge for the audit profession. The objective of my cumulative dissertation is to provide a comprehensive overview of fraud and forensic accounting as well as insights into the distinct dimensions among the concepts of errors, earnings management and fraud from a German accounting perspective. I aim to achieve this objective in three steps: first (1), by providing an overview of discipline-specific education possibilities, existing forensic accounting practices, institutions, and current developments in research; second (2), by assessing auditors' obligations and responsibilities for the detection of irregularities within the scope of the annual financial statement audit and whether including forensic services in the service portfolio of audit firms can help increase their audit quality due to spillover effects; third (3), by examining firms' reputation (re-)building management in response to financial violations and how this process is associated with managing multiple (stakeholder) reputations. This dissertation is composed of three individual papers, each of which considers one of the focus areas outlined above.},
  subject = {Wirtschaftskriminalit{\"a}t},
  language = {en}
}
@phdthesis{Ehlers2015,
  author = {Ehlers, Christoph},
  title = {Top-k Semantic Caching},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3055},
  school = {Universit{\"a}t Passau},
  pages = {266},
  year = {2015},
  abstract = {The subject of this thesis is the intelligent caching of top-k queries in an environment with high latency and low throughput. In such an environment, caching can be used to reduce network traffic and improve response time. Slow database connections of mobile devices and connections to offshored databases are practical use cases. A semantic cache is a query-based cache that caches query results and maintains their semantic description.
It reuses partial matches of previous query results. Each query that is processed by the semantic cache is split into two disjoint parts: one that can be completely answered with tuples of the cache (probe query), and another that requires tuples to be transferred from the server (remainder query). Existing semantic caches do not support top-k queries, i.e., ordered and limited queries. In this thesis, we present an innovative semantic cache that naturally supports top-k queries. The support of top-k queries in a semantic cache has considerable effects on cache elements, on operations on cache elements -- like creation, difference, intersection, and union -- and on query answering. Hence, we introduce new techniques for cache management and query processing that enable the semantic cache to become a true top-k semantic cache. In addition, we have developed a new algorithm that can estimate the lower bounds of query results of sorted queries using multidimensional histograms. Using this algorithm, our top-k semantic cache is able to pipeline partial query results of top-k queries, which significantly increases query execution performance. We have implemented a prototype of a top-k semantic cache called IQCache (Intelligent Query Cache). An extensive and thorough evaluation with various benchmarks using our prototype demonstrates the applicability and performance of top-k semantic caching in practice. The experiments show that the top-k semantic cache invariably outperforms simple hash-based caching strategies and scales very well.},
  subject = {Semantisches Caching},
  language = {en}
}
@phdthesis{Braun2015,
  author = {Braun, Bastian},
  title = {Web-based Secure Application Control},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3048},
  school = {Universit{\"a}t Passau},
  year = {2015},
  abstract = {The world wide web today serves as a distributed application platform. Its origins, however, go back to a simple delivery network for static hypertexts. The legacy from these days can still be observed in the communication protocol used by increasingly sophisticated clients and applications. This thesis identifies the actual security requirements of modern web applications and shows that HTTP does not meet them: user and application authentication, message integrity and confidentiality, control-flow integrity, and application-to-application authorization. We explore the other protocols in the web stack and work out why they cannot fill the gap. Our analysis shows that the underlying problem is the connectionless property of HTTP. However, history shows that a fresh start with web communication is far from realistic. As a consequence, we come up with approaches that contribute to meeting the identified requirements. We first present impersonation attack vectors that begin before the actual user authentication, i.e. when secure web interaction and authentication seem to be unnecessary. Session fixation attacks exploit a responsibility mismatch between the web developer and the used web application framework. We describe and compare three countermeasures on different implementation levels: on the source code level, on the framework level, and on the network level as a reverse proxy. Then, we explain how the authentication credentials that are transmitted for the user login, i.e. the password, and for session tracking, i.e. the session cookie, can be complemented by browser-stored and user-based secrets, respectively.
This way, an attacker cannot hijack user accounts by phishing the user's password alone, because an additional browser-based secret is required for login. Also, the class of well-known session hijacking attacks is mitigated because a secret known only to the user must be provided in order to perform critical actions. In the next step, we explore alternative approaches to static authentication credentials. Our approach implements a trusted UI and a mutually authenticated session, using signatures as a means to authenticate requests. This way, it establishes a trusted path between the user and the web application without exchanging reusable authentication credentials. As a downside, this approach requires support on the client side and on the server side in order to provide maximum protection. Another approach avoids client-side support but cannot implement a trusted UI and is thus susceptible to phishing and clickjacking attacks. The approaches described so far increase the security level of all web communication at all times. We therefore also investigate adaptive security policies that fit the actual risk instead of permanently restricting all kinds of communication, including non-critical requests. We develop a smart browser extension that detects when the user is authenticated on a website, meaning that she can be impersonated because all requests carry her identity proof. Uncritical communication, however, is released from restrictions to enable all intended web features. Finally, we focus on attacks targeting a web application's control-flow integrity. We explain them thoroughly, check whether current web application frameworks provide means for protection, and implement two approaches to protect web applications: The first approach is an extension for a web application framework and provides protection based on its configuration by checking all requests for policy conformity. The second approach generates its own policies ad hoc based on the observed web traffic, assuming that regular users only click on links and buttons and fill in forms but do not craft requests to protected resources.},
  subject = {Computersicherheit},
  language = {en}
}
@phdthesis{Meier2014,
  author = {Meier, Christian},
  title = {Experimental Studies in Decision Making and Management Control},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3498},
  school = {Universit{\"a}t Passau},
  pages = {196},
  year = {2014},
  abstract = {Management accounting information is used in organizations to facilitate decision making and to influence actions for management control. Decisions and actions include the allocation of resources within a firm, coordination across organizational units, costing, pricing, compensation, and incentives. There are many ways in which management accounting information is provided, including performance measurement, budgeting, capital budgeting, valuation, inventory systems, product-costing systems, and transfer pricing systems.},
  subject = {Entscheidungstheorie},
  language = {mul}
}