@phdthesis{Hanning2002, author = {Hanning, Tobias}, title = {Vektorielle Mehrniveaupassung. Anwendungen in der Bildsegmentierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-295}, school = {Universit{\"a}t Passau}, year = {2002}, abstract = {Die Anwendungen der vektoriellen Mehrniveaupassung in der Bildsegmentierung stehen in unmittelbarer Nachbarschaft zum verbreiteten Ansatz, Bilder mit Methoden der Variationsrechnung {\"u}ber einen Energieterm zu segmentieren. Beiden Verfahren ist gemeinsam, daß sie versuchen, das gegebene Bild durch st{\"u}ckweise stetige Funktionen zu approximieren. Die maximalen Teilmengen des Definitionsbereichs, auf denen die approximierende Funktion stetig ist, bilden dann die Segmente. Im Gegensatz zu dem in der Literatur h{\"a}ufig unter dem Schlagwort Mumford-Shah-Modell bekannten Energieminimierungsverfahren ist der Raum der Funktionen, mit denen das Bild approximiert wird, bei der vektoriellen Mehrniveaupassung ein endlicher Vektorraum. Ein weiterer Unterschied zu diesem Ansatz ist das System der erlaubten Mengen. Es werden nur Segmentierungen erlaubt, deren Segmente aus diesem Mengensystem sind. Die durch diese Einschr{\"a}nkung schlankere Theorie f{\"u}hrt zu einer gesicherten Existenz einer optimalen L{\"o}sung des Segmentierungsproblems, die der g{\"a}ngigen Vorstellung einer Segmentierung gen{\"u}gt. Die Berechnung lokaler Optima ist algorithmisch innerhalb der Theorie umsetzbar.}, subject = {Bildsegmentierung}, language = {de} } @phdthesis{Kunze2011, author = {Kunze, Kai}, title = {Compensating for On-Body Placement Effects in Activity Recognition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-26114}, school = {Universit{\"a}t Passau}, year = {2011}, abstract = {This thesis investigates how placement variations of electronic devices influence the possibility of using sensors integrated in those devices for context recognition. The vast majority of context recognition research assumes well-defined, fixed sensor locations. Although this might be acceptable for some application domains (e.g. in an industrial setting), users, in general, will have a hard time coping with these limitations. If one needs to remember to carry dedicated sensors and to adjust their orientation from time to time, the activity recognition system is more distracting than helpful. How can we deal with device location and orientation changes to make context sensing mainstream? This thesis presents a systematic evaluation of device placement effects in context recognition. We first deal with detecting if a device is carried on the body or placed somewhere in the environment. If the device is placed on the body, it is useful to know on which body part. We also address how to deal with sensors changing their position and their orientation during use. For each of these topics some highlights are given in the following. Regarding environmental placement, we introduce an active sampling approach to infer symbolic object location. This approach requires only simple sensors (acceleration, sound) and no infrastructure setup. The method works for specific placements such as "on the couch", "in the desk drawer" as well as for general location classes, such as "closed wood compartment" or "open iron surface". In the experimental evaluation we reach a recognition accuracy of 90\% and above over a total of over 1200 measurements from 35 specific locations (taken from 3 different rooms) and 12 abstract location classes.
To derive the coarse device placement on the body, we present a method solely based on rotation and acceleration signals from the device. It works independently of the device orientation. The on-body placement recognition rate is around 80\% over 4 min. of unconstrained motion data for the worst scenario and up to 90\% over a 2 min. interval for the best scenario. We use over 30 hours of motion data for the analysis. Two special issues of device placement are orientation and displacement. This thesis proposes a set of heuristics that significantly increase the robustness of motion sensor-based activity recognition with respect to sensor displacement. We show how, within certain limits and with modest quality degradation, motion sensor-based activity recognition can be implemented in a displacement-tolerant way. We evaluate our heuristics first on a set of synthetic lower arm motions which are well suited to illustrate the strengths and limits of our approach, then on an extended modes of locomotion problem (sensors on the upper leg) and finally on a set of exercises performed on various gym machines (sensors placed on the lower arm). In this example our heuristic raises the displaced recognition rate from 24\% for a displaced accelerometer, which had 96\% recognition when not displaced, to 82\%.}, subject = {Kontextbezogenes System}, language = {en} } @phdthesis{Capco2010, author = {Capco, Jose}, title = {Real Closed * Rings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-25915}, school = {Universit{\"a}t Passau}, year = {2010}, abstract = {In this dissertation I examine a definition of real closure of commutative unitary reduced rings. I also give a characterization of rings that are real closed in this context and how one is able to arrive at such a real closure. There are sufficient examples to help the reader get a feel for real closed * rings and the real closure * of commutative unitary rings.}, language = {en} } @unpublished{KreitmeierLinder2011, author = {Kreitmeier, Wolfgang and Linder, Tamas}, title = {Entropy Density and Mismatch in High-Rate Scalar Quantization with R{\'e}nyi Entropy Constraint}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-26132}, year = {2011}, abstract = {Properties of scalar quantization with \$r\$th power distortion and constrained R\'enyi entropy of order \$\alpha\in (0,1)\$ are investigated. For an asymptotically (high-rate) optimal sequence of quantizers, the contribution to the R\'enyi entropy due to source values in a fixed interval is identified in terms of the "entropy density" of the quantizer sequence. This extends results related to the well-known point density concept in optimal fixed-rate quantization. A dual of the entropy density result quantifies the distortion contribution of a given interval to the overall distortion. The distortion loss resulting from a mismatch of source densities in the design of an asymptotically optimal sequence of quantizers is also determined.
This extends Bucklew's fixed-rate (\$\alpha=0\$) and Gray \emph{et al.}'s variable-rate (\$\alpha=1\$) mismatch results to general values of the entropy order parameter \$\alpha\$.}, subject = {Maßtheorie}, language = {en} } @unpublished{Kreitmeier2008, author = {Kreitmeier, Wolfgang}, title = {Optimal quantization for uniform distributions on Cantor-like sets}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-12449}, year = {2008}, abstract = {In this paper, the problem of optimal quantization is solved for uniform distributions on some higher dimensional, not necessarily self-similar \$N\$-adic Cantor-like sets. The optimal codebooks are determined and the optimal quantization error is calculated. The existence of the quantization dimension is characterized and it is shown that the quantization coefficient does not exist. The special case of self-similarity is also discussed. The conditions imposed are a separation property of the distribution and strict monotonicity of the first \$N\$ quantization error differences. Criteria for these conditions are proved and as special examples modified versions of classical fractal distributions are discussed.}, subject = {Maßtheorie}, language = {en} } @unpublished{Kreitmeier2005, author = {Kreitmeier, Wolfgang}, title = {Optimal Quantization for Dyadic Homogeneous Cantor Distributions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-3845}, year = {2005}, abstract = {For a large class of dyadic homogeneous Cantor distributions in \$\mathbb{R}\$, which are not necessarily self-similar, we determine the optimal quantizers, give a characterization for the existence of the quantization dimension, and show the non-existence of the quantization coefficient. The class contains all self-similar dyadic Cantor distributions with contraction factor less than or equal to \$\frac{1}{3}\$. For these distributions we calculate the quantization errors explicitly.}, subject = {Maßtheorie}, language = {en} } @unpublished{Kreitmeier2007, author = {Kreitmeier, Wolfgang}, title = {Asymptotic order of quantization for Cantor distributions in terms of Euler characteristic, Hausdorff and Packing measure}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-7374}, year = {2007}, abstract = {For homogeneous one-dimensional Cantor sets, which are not necessarily self-similar, we show under some restrictions that the Euler exponent equals the quantization dimension of the uniform distribution on these Cantor sets. Moreover for a special sub-class of these sets we present a linkage between the Hausdorff and the Packing measure of these sets and the high-rate asymptotics of the quantization error.}, subject = {Maßtheorie}, language = {en} } @unpublished{Kreitmeier2007a, author = {Kreitmeier, Wolfgang}, title = {Optimal quantization of probabilities concentrated on small balls}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-12010}, year = {2007}, abstract = {We consider probability distributions which are uniformly distributed on a disjoint union of balls with equal radius. For small enough radius the optimal quantization error is calculated explicitly in terms of the ball centroids.
We apply the results to special self-similar measures.}, subject = {Maßtheorie}, language = {en} } @phdthesis{Guppenberger2010, author = {Guppenberger, Michael}, title = {Enhancing Information Systems with Event-Handling - A Non-Invasive Approach}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-22485}, school = {Universit{\"a}t Passau}, year = {2010}, abstract = {Due to the immense advance of widely accessible information systems in industrial applications, science, education and every day use, it becomes more and more difficult for users of those information systems to keep track of new and updated information. An approach to cope with this problem is to go beyond traditional search facilities and instead use the users' profiles to monitor data changes and to actively inform them about these updates - an aspect that has to be explicitly developed and integrated into a variety of information systems. This is traditionally done in an individual way, depending on the application and its platform. In this dissertation, we present a novel approach to model the semantic interrelations that specify which users to inform about which updates, based on the underlying model of the respective information system. For the first time, a meta-model that allows information system designers to tag an arbitrary data model and thus specify the event-handling semantics is presented. A formal specification of how to interpret meta-models to determine the receivers of the events completes the presented concept. For the practical realization of this new concept, model driven architecture (MDA) proves to be an ideal technical means. Using our newly developed UML profile based on data-modelling standards, an implementation of the event-handling specification can automatically be generated for a variety of different target platforms, like e.g. relational databases, using triggers. This meta-approach makes the proposed solution ideal with respect to maintainability and genericity. Our solution significantly reduces the overall development efforts for an event-handling facility. In addition, the enhanced model of the information system can be used to generate an implementation that also fulfils non-functional requirements like high performance and extensibility. The overall framework, consisting of the domain specific language (i.e. the meta-model), formal and technical transformations of how to interpret the enhanced information system model and a cost-based optimizing strategy, constitutes an integrated approach, offering several advantages over traditional implementation techniques: our framework can be applied to new information systems as well as to legacy applications without having to modify existing systems; it offers an extensible, easy-to-use, generic and thus re-usable solution and it can be tailored to and optimized for many use cases, as the practical evaluation presented in this dissertation verifies.}, subject = {Notifikation}, language = {en} } @phdthesis{Berl2011, author = {Berl, Andreas}, title = {Energy Efficiency in Office Computing Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-22516}, school = {Universit{\"a}t Passau}, year = {2011}, abstract = {The increasing cost of energy and the worldwide desire to reduce CO2 emissions have raised concern about the energy efficiency of information and communication technology. Whilst research has recently focused on data centres, this thesis identifies office computing environments as significant consumers of energy.
Office computing environments offer great potential for energy savings: On one hand, such environments consist of a large number of hosts. On the other hand, these hosts often remain turned on 24~hours per day while being underutilised or even idle. This thesis analyzes the energy consumption within office computing environments and suggests an energy-efficient virtualized office environment. The office environment is virtualized to achieve flexible virtualized office resources that enable an energy-based resource management. This resource management stops idle services and idle hosts from consuming resources within the office and consolidates utilised office services on office hosts. This increases the utilisation of some hosts while other hosts are turned off to save energy. The suggested architecture is based on a decentralized approach that can be applied to all kinds of office computing environments, even if no centralized data centre infrastructure is available. The thesis develops the architecture of the virtualized office environment together with an energy consumption model that is able to estimate the energy consumption of hosts and network within office environments. The model enables the energy-related comparison of ordinary and virtualized office environments, considering the energy-efficient management of services. Furthermore, this thesis evaluates energy efficiency and overhead of the suggested approach. First, it theoretically proves the energy efficiency of the virtualized office environment with respect to the energy consumption model. Second, it uses Markov processes to evaluate the impact of user behaviour on the suggested architecture. Finally, the thesis develops a discrete-event simulation that enables the simulation and evaluation of office computing environments with respect to varying virtualization approaches, resource management parameters, user behaviour, and office equipment. The evaluation shows that the virtualized office environment saves more than half of the energy consumption within office computing environments, depending on user behaviour and office equipment.}, subject = {Energieeffizienz}, language = {en} } @unpublished{Kreitmeier2011, author = {Kreitmeier, Wolfgang}, title = {Optimal vector quantization in terms of Wasserstein distance}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-22502}, year = {2011}, abstract = {The optimal quantizer in memory-size constrained vector quantization induces a quantization error which is equal to a Wasserstein distortion. However, for the optimal (Shannon-)entropy constrained quantization error a proof for a similar identity is still missing. Relying on principal results of the optimal mass transportation theory, we will prove that the optimal quantization error is equal to a Wasserstein distance. Since we will state the quantization problem in a very general setting, our approach includes the R\'enyi-\$\alpha\$-entropy as a complexity constraint, which covers the special cases of (Shannon-)entropy constrained \$(\alpha = 1)\$ and memory-size constrained \$(\alpha = 0)\$ quantization. Additionally, we will derive for certain distance functions codecell convexity for quantizers with a finite codebook.
This regularity in codecell geometry has already been proved by Gy{\"o}rgy and Linder using other methods.}, subject = {Maßtheorie}, language = {en} } @phdthesis{Hoelbling2011, author = {H{\"o}lbling, G{\"u}nther}, title = {Personalized Means of Interacting with Multimedia Content}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-24210}, school = {Universit{\"a}t Passau}, year = {2011}, abstract = {Today the world of multimedia is almost completely device- and content-centered. It focuses its energy nearly exclusively on technical issues such as computing power, network specifics or content and device characteristics and capabilities. In most multimedia systems, the presentation of multimedia content and the basic controls for playback are main issues. Because of this, a very passive user experience, comparable to that of traditional TV, is most often provided. In the face of recent developments and changes in the realm of multimedia and mass media, this "traditional" focus seems outdated. The increasing use of multimedia content on mobile devices, along with the continuous growth in the amount and variety of content available, makes an urgent re-orientation of this domain necessary. In order to highlight the depth of the increasingly difficult situation faced by users of such systems, it is only logical that these individuals be brought to the center of attention. In this thesis we consider these trends and developments by applying concepts and mechanisms to multimedia systems that were first introduced in the domain of user-centrism. Central to the concept of user-centrism is that devices should provide users with an easy way to access services and applications. Thus, the current challenge is to combine mobility, additional services and easy access in a single and user-centric approach. This thesis presents a framework for introducing and supporting several of the key concepts of user-centrism in multimedia systems. Additionally, a new definition of a user-centric multimedia framework has been developed and implemented. To satisfy the user's need for mobility and flexibility, our framework makes possible seamless media and service consumption. The main aim of session mobility is to help people cope with the increasing number of different devices in use. Using a mobile agent system, multimedia sessions can be transferred between different devices in a context-sensitive way. The use of the international standard MPEG-21 guarantees extensibility and the integration of content adaptation mechanisms. Furthermore, a concept is presented that allows for individualized and personalized selection and faces the need for finding appropriate content, all of which can be done, using this approach, in an easy and intuitive way. Especially in the realm of television, the demand that such systems cater to the need of the audience is constantly growing. Our approach combines content-filtering methods, state-of-the-art classification techniques and mechanisms well known from the area of information retrieval and text mining. These are all utilized for the generation of recommendations in a promising new way. Additionally, concepts from the area of collaborative tagging systems are also used. An extensive experimental evaluation resulted in several interesting findings and proves the applicability of our approach.
In contrast to the "lean-back" experience of traditional media consumption, interactive media services offer a solution to make the active participation of the audience possible. Thus, we present a concept which enables the use of interactive media services on mobile devices in a personalized way. Finally, a use case for enriching TV with additional content and services demonstrates the feasibility of this concept.}, subject = {Empfehlungssystem}, language = {en} } @phdthesis{Johns2009, author = {Johns, Martin}, title = {Code Injection Vulnerabilities in Web Applications - Exemplified at Cross-site Scripting}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-23626}, school = {Universit{\"a}t Passau}, year = {2009}, abstract = {The majority of all security problems in today's Web applications is caused by string-based code injection, with Cross-site Scripting (XSS) being the dominant representative of this vulnerability class. This thesis discusses XSS and suggests defense mechanisms. We do so in three stages: First, we conduct a thorough analysis of JavaScript's capabilities and explain how these capabilities are utilized in XSS attacks. We subsequently design a systematic, hierarchical classification of XSS payloads. In addition, we present a comprehensive survey of publicly documented XSS payloads which is structured according to our proposed classification scheme. Secondly, we explore defensive mechanisms which dynamically prevent the execution of some payload types without eliminating the actual vulnerability. More specifically, we discuss the design and implementation of countermeasures against the XSS payloads ``Session Hijacking'', ``Cross-site Request Forgery'', and attacks that target intranet resources. We build upon this and introduce a general methodology for developing such countermeasures: We determine a necessary set of basic capabilities an adversary needs for successfully executing an attack through an analysis of the targeted payload type. The resulting countermeasure relies on revoking one of these capabilities, which in turn renders the payload infeasible. Finally, we present two language-based approaches that prevent XSS and related vulnerabilities: We identify the implicit mixing of data and code during string-based syntax assembly as the root cause of string-based code injection attacks. Consequently, we explore data/code separation in web applications. For this purpose, we propose a novel methodology for token-level data/code partitioning of a computer language's syntactical elements. This forms the basis for our two distinct techniques: For one, we present an approach to detect data/code confusion at run-time and demonstrate how this can be used for attack prevention. Furthermore, we show how vulnerabilities can be avoided through altering the underlying programming language. We introduce a dedicated datatype for syntax assembly instead of using string datatypes themselves for this purpose. We develop a formal, type-theoretical model of the proposed datatype and prove that it provides reliable separation between data and code, hence preventing code injection vulnerabilities.
We verify our approach's applicability utilizing a practical implementation for the J2EE application server.}, subject = {Computersicherheit}, language = {en} } @phdthesis{Ali2011, author = {Ali, Rashid}, title = {Weyl Gr{\"o}bner Basis Cryptosystems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-23195}, school = {Universit{\"a}t Passau}, year = {2011}, abstract = {In this thesis, we shall consider a certain class of algebraic cryptosystems called Gr{\"o}bner Basis Cryptosystems. In 1994, Koblitz introduced the Polly Cracker cryptosystem that is based on the theory of Gr{\"o}bner bases in commutative polynomial rings. The security of this cryptosystem relies on the fact that the computation of Gr{\"o}bner bases is, in general, EXPSPACE-hard. Cryptanalysis of these commutative Polly Cracker type cryptosystems is possible by using attacks that do not require the computation of Gr{\"o}bner bases for breaking the system, for example, the attacks based on linear algebra. To secure these (commutative) Gr{\"o}bner basis cryptosystems against various attacks, among others, Ackermann and Kreuzer introduced a general class of Gr{\"o}bner Basis Cryptosystems that are based on the difficulty of computing module Gr{\"o}bner bases over general non-commutative rings. The objective of this research is to describe a special class of such cryptosystems by introducing the Weyl Gr{\"o}bner Basis Cryptosystems. We divide this class of cryptosystems into two parts, namely the (left) Weyl Gr{\"o}bner Basis Cryptosystems and Two-Sided Weyl Gr{\"o}bner Basis Cryptosystems. We suggest using Gr{\"o}bner bases for left and two-sided ideals in Weyl algebras to construct specific instances of such cryptosystems. We analyse the resistance of these cryptosystems to the standard attacks and provide computational evidence that secure Weyl Gr{\"o}bner Basis Cryptosystems can be built using left (resp. two-sided) Gr{\"o}bner bases in Weyl algebras.}, subject = {Gr{\"o}bner-Basis}, language = {en} } @phdthesis{Rabl2011, author = {Rabl, Tilmann}, title = {Efficiency in Cluster Database Systems - Dynamic and Workload-Aware Scaling and Allocation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-25821}, school = {Universit{\"a}t Passau}, year = {2011}, abstract = {Database systems have been vital in all forms of data processing for a long time. In recent years, the amount of processed data has been growing dramatically, even in small projects. Nevertheless, database management systems tend to be static in terms of size and performance, which makes scaling a difficult and expensive task. Because of performance and especially cost advantages, more and more installed systems have a shared-nothing cluster architecture. Due to the massive parallelism of the hardware, programming paradigms from high performance computing are translated into data processing. Database research struggles to keep up with this trend. A key feature of traditional database systems is to provide transparent access to the stored data. This introduces data dependencies and increases system complexity and inter-process communication. Therefore, many developers are exchanging this feature for better scalability. However, explicitly managing the data distribution and data flow requires a deep understanding of the distributed system and reduces the possibilities for automatic and autonomic optimization.
In this thesis we present an approach for database system scaling and allocation that features good scalability although it keeps the data distribution transparent. The first part of this thesis analyzes the challenges and opportunities for self-scaling database management systems in cluster environments. Scalability is a major concern of Internet-based applications. Access peaks that overload the application are a financial risk. Therefore, systems are usually configured to be able to process peaks at any given moment. As a result, server systems often have a very low utilization. In distributed systems the efficiency can be increased by adapting the number of nodes to the current workload. We propose a processing model and an architecture that allows efficient self-scaling of cluster database systems. In the second part we consider different allocation approaches. To increase the efficiency we present a workload-aware, query-centric model. The approach is formalized; optimal and heuristic algorithms are presented. The algorithms optimize the data distribution for local query execution and balance the workload according to the query history. We present different query classification schemes for different forms of partitioning. The approach is evaluated for OLTP and OLAP style workloads. It is shown that variants of the approach scale well for both fields of application. The third part of the thesis considers benchmarks for large, adaptive systems. First, we present a data generator for cloud-sized applications. Due to its architecture the data generator can easily be extended and configured. A key feature is the high degree of parallelism that makes linear speedup for arbitrary numbers of nodes possible. To simulate systems with user interaction, we have analyzed a productive online e-learning management system. Based on our findings, we present a model for workload generation that considers the temporal dependency of user interaction.}, subject = {Verteiltes Datenbanksystem}, language = {en} } @unpublished{KreitmeierLinder2011a, author = {Kreitmeier, Wolfgang and Linder, Tamas}, title = {High-Resolution Scalar Quantization with R{\'e}nyi Entropy Constraint}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-23787}, year = {2011}, abstract = {We consider optimal scalar quantization with \$r\$th power distortion and constrained R\'enyi entropy of order \$\alpha\$. For sources with absolutely continuous distributions the high rate asymptotics of the quantizer distortion has long been known for \$\alpha=0\$ (fixed-rate quantization) and \$\alpha=1\$ (entropy-constrained quantization). These results have recently been extended to quantization with R\'enyi entropy constraint of order \$\alpha \ge r+1\$. Here we consider the more challenging case \$\alpha\in [-\infty,0)\cup (0,1)\$ and for a large class of absolutely continuous source distributions we determine the sharp asymptotics of the optimal quantization distortion.
The achievability proof is based on finding (asymptotically) optimal quantizers via the companding approach, and is thus constructive.}, subject = {Maßtheorie}, language = {en} } @phdthesis{Houyou2009, author = {Houyou, Amine Mohamed}, title = {Context-Aware Mobility: A Distributed Approach to Context Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-17975}, school = {Universit{\"a}t Passau}, year = {2009}, abstract = {The recent development of a plethora of new wireless technologies, such as IEEE 802.11, IEEE 802.15, IEEE 802.16, UMTS, and more recently LTE, etc., has triggered several efforts to integrate these technologies in a converged world of transparent and ubiquitous wireless connectivity. Most of these technologies have evolved around a certain use case and with some user behaviour being assumed; however, a holistic solution to adapt access to user needs in an automatic and transparent manner is still lacking. One major problem that has to be addressed first is mobility management between heterogeneous wireless networks. Current mobility management solutions mostly originate from cellular networking systems, which are operator specific, centralised, and focused on a single link technology. In order to deal with the wireless diversity of the future wireless and mobile Internet, a new approach is needed. Adaptive wireless connectivity that is tailored around the user needs and capabilities is named context-aware mobility management. Context refers to the information describing the surroundings of the user as well as his/her behaviour, and additional semantic information that could optimise the adaptation process. Context management normally entails discovering and tracking context, reasoning based on the discovered information, then adapting (or acting) upon the context-aware application or system. This context management chain is adapted throughout the thesis to the task of context-aware mobility management. The added complexity is necessary to adapt the ubiquitous access to the condition of both the user and the surrounding networks, while assuming that overlapping wireless networks could still be managed in separate management domains. Linking these management domains and aggregating this composite information in the form of a network context is one of the major contributions of this work. An overlay-based solution takes into account this scattered nature of the context management system, which is modelled as a decentralised dynamic location-based service. The proposed architecture is generalised to support ubiquitous location-based services, and a design methodology is proposed to ensure the localised impact of mobility-led context retrieval overhead.}, subject = {Netze}, language = {en} } @phdthesis{Ogris2009, author = {Ogris, Georg}, title = {Multi-modal on-body sensing of human activities}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-17930}, school = {Universit{\"a}t Passau}, year = {2009}, abstract = {Increased usage and integration of state-of-the-art information technology in our everyday work life aim at increasing working efficiency. Due to unhandy human-computer interaction methods this progress does not always result in increased efficiency, for mobile workers in particular. Activity recognition based contextual computing attempts to balance this interaction deficiency. This work investigates wearable, on-body sensing techniques regarding their applicability in the field of human activity recognition.
More precisely, we are interested in the spotting and recognition of so-called manipulative hand gestures. In particular, the thesis focuses on the question of whether the widely used motion sensing based approach can be enhanced through additional information sources. The set of gestures a person usually performs at a specific place is limited -- in the contemplated production and maintenance scenarios in particular. As a consequence this thesis investigates whether the knowledge about the user's hand location provides essential hints for the activity recognition process. In addition, manipulative hand gestures -- due to their object manipulating character -- typically start the moment the user's hand reaches a specific place, e.g. a specific part of a machine. And the gestures most likely stop the moment the hand leaves the position again. Hence this thesis investigates whether hand location can help solve the spotting problem. Moreover, as user-independence is still a major challenge in activity recognition, this thesis investigates location context as a possible key component in a user-independent recognition system. We test a Kalman filter based method to blend absolute position readings with orientation readings based on inertial measurements. A filter structure is suggested which allows up-sampling of slow absolute position readings, and thus introduces higher dynamics to the position estimations. In such a way the position measurement series is made aware of wrist motions in addition to the wrist position. We suggest location based gesture spotting and recognition approaches. Various methods to model the location classes used in the spotting and recognition stages as well as different location distance measures are suggested and evaluated. In addition a rather novel sensing approach in the field of human activity recognition is studied. This aims at compensating drawbacks of the mere motion sensing based approach. To this end we develop a wearable hardware architecture for lower arm muscular activity measurements. The sensing hardware based on force sensing resistors is designed to have a high dynamic range. In contrast to preliminary attempts, the proposed new design makes hardware calibration unnecessary. Finally we suggest a modular and multi-modal recognition system; modular with respect to sensors, algorithms, and gesture classes. This means that adding or removing a sensor modality or an additional algorithm has little impact on the rest of the recognition system. Sensors and algorithms used for spotting and recognition can be selected and fine-tuned separately for each single activity. New activities can be added without impact on the recognition rates of the other activities.}, subject = {Kontextbezogenes System}, language = {en} } @phdthesis{Brunner2010, author = {Brunner, Wolfgang}, title = {Cyclic Level Drawings of Directed Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-17962}, school = {Universit{\"a}t Passau}, year = {2010}, abstract = {The Sugiyama framework proposed in the seminal paper of 1981 is one of the most important algorithms in graph drawing and is widely used for visualizing directed graphs. In its common version, it draws graphs hierarchically and, hence, maps the topological direction to a geometric direction. However, such a hierarchical layout is not possible if the graph contains cycles, which have to be destroyed in a preceding step.
In certain application and problem settings, e.g., biosciences or periodic scheduling problems, it is important that the cyclic structure of the input graph is preserved and clearly visible in drawings. Apart from the nowadays standard horizontal algorithm, Sugiyama et al. also suggested a cyclic version they called recurrent hierarchies. However, this cyclic drawing style has not received much attention since. In this thesis we consider such cyclic drawings and investigate the Sugiyama framework for this new scenario. As our goal is to visualize cycles directly, the first phase of the Sugiyama framework, which is concerned with removing such cycles, can be neglected. The cyclic structure of the graph leads to new problems in the remaining phases, however, for which solutions are proposed in this thesis. The aim is a complete adaptation of the Sugiyama framework for cyclic drawings. To complement our adaptation of the Sugiyama framework, we also treat the problem of cyclic level planarity and present a linear time cyclic level planarity testing and embedding algorithm for strongly connected graphs.}, subject = {Graphenzeichnen}, language = {en} } @unpublished{Kreitmeier2009, author = {Kreitmeier, Wolfgang}, title = {Hausdorff measure of uniform self-similar fractals}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-17948}, year = {2009}, abstract = {Let \$d \ge 1\$ be an integer and \$E\$ a self-similar fractal set, which is the attractor of a uniform contracting iterated function system (UIFS) on \$\mathbb{R}^d\$. Denote by \$D\$ the Hausdorff dimension, by \$H^D(E)\$ the Hausdorff measure and by \$\mathrm{diam}(E)\$ the diameter of \$E\$. If the UIFS is parametrised by its contracting factor \$c\$, while the set \$\omega\$ of fixed points of the UIFS does not depend on \$c\$, we will show the existence of a positive constant depending only on \$\omega\$, such that the Hausdorff dimension is smaller than one and \$H^D(E) = \mathrm{diam}(E)^D\$ if \$c\$ is smaller than this constant. We apply our result to modified versions of various classical fractals. Moreover we present a parametrised UIFS where \$\omega\$ depends on \$c\$ and \$H^D(E) < \mathrm{diam}(E)^D\$, if \$c\$ is small enough.}, subject = {Maßtheorie}, language = {en} } @phdthesis{Weitl2007, author = {Weitl, Franz}, title = {Document Verification with Temporal Description Logics}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-12528}, school = {Universit{\"a}t Passau}, year = {2007}, abstract = {The thesis proposes a new formal framework for checking the content of web documents along individual reading paths. It is vital for the readability of web documents that their content is consistent and coherent along the possible browsing paths through the document. Manually ensuring the coherence of content along the possibly huge number of different browsing paths in a web document is time-consuming and error-prone. Existing methods for document validation and verification are not sufficiently expressive and efficient. The innovative core idea of this thesis is to combine the temporal logic CTL and description logic ALC for the representation of consistency criteria. The resulting new temporal description logic ALCCTL can - in contrast to existing specification formalisms - compactly represent coherence criteria on documents. Verification of web documents is modelled as a model checking problem of ALCCTL. The decidability and polynomial complexity of the ALCCTL model checking problem is proven and a sound, complete, and optimal model checking algorithm is presented.
Case studies on real and realistic web documents demonstrate the performance and adequacy of the proposed methods. Existing methods such as symbolic model checking or XML-based document validation are outperformed in both expressiveness and speed.}, subject = {Verifikation}, language = {en} } @phdthesis{Schwaiger2008, author = {Schwaiger, Petra}, title = {Ein Bedingungsmodell f{\"u}r Planungsprobleme in strukturierten Dom{\"a}nen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-12497}, school = {Universit{\"a}t Passau}, year = {2008}, abstract = {Computerunterst{\"u}tzte Beratungssysteme finden sowohl in der Industrie als auch im akademischen Bereich eine zunehmende Bedeutung. Die Anforderungen an solche Systeme sind hinsichtlich der abbildbaren Strukturen, der Flexibilit{\"a}t der Anfragen und der Vollst{\"a}ndigkeit und Korrektheit der Antworten hoch. Dies gilt insbesondere f{\"u}r Planungsprobleme in strukturierten Dom{\"a}nen. Derartige Probleme treten beispielsweise bei der Erstellung von Tests auf der Grundlage einer Menge von Fragen und gewissen Anforderungen an den Test, bei der Konsistenzpr{\"u}fung von Studienordnungen und bei der computerunterst{\"u}tzten Studienberatung auf. In der vorliegenden Arbeit wird ein Framework zur Behandlung eben genannter Probleme pr{\"a}sentiert. Die vorgestellte L{\"o}sung bietet durch den modellbasierten Ansatz und die entwickelte anwendungsnahe Modellierungssprache - gerade auch im Vergleich zu existierenden Ans{\"a}tzen - einen sehr hohen Grad an Abstraktion, Allgemeing{\"u}ltigkeit, Ausdrucksst{\"a}rke, Flexibilit{\"a}t und Integrierbarkeit. Im Rahmen des entwickelten Modells wird eine geeignete Verzahnung von strukturellen und constraintbasierten Aspekten erreicht. Der hierbei in Syntax und Semantik definierte Constraintbegriff kann dar{\"u}ber hinaus als Formalisierung und Verallgemeinerung von Pfadconstraints bzw. Pfadanfragen in hierarchischen Datenmodellen aufgefasst werden. F{\"u}r die interne Repr{\"a}sentation erweist sich ein logikbasierter Ansatz mit Constraints, n{\"a}mlich Answer Set Programming mit Gewichten, als eine ausgezeichnete Methode bez{\"u}glich der Ausdrucksst{\"a}rke, M{\"a}chtigkeit und Ad{\"a}quatheit. Die Praxistauglichkeit des verfolgten Ansatzes im Hinblick auf Performanz und Skalierbarkeit wird in verschiedenen realen Anwendungsf{\"a}llen demonstriert.}, subject = {Logik}, language = {de} } @phdthesis{Graf2007, author = {Graf, Simone}, title = {Kamerakalibrierung mit radialer Verzeichnung - die radiale essentielle Matrix}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-12711}, school = {Universit{\"a}t Passau}, year = {2007}, abstract = {In der Bildverarbeitung wird die beobachtende Kamera meist als Lochkamera modelliert: ein Modell, das zahlreiche theoretische Vorteile bietet. So kann etwa das Abbildungsverhalten als projektive Abbildung aufgefasst werden. In einem Stereokamerasystem dieses Modells stehen korrespondierende Punkte - das sind Bildpunkte desselben 3D-Punktes - in einem linearen Zusammenhang, der auch ohne Kenntnis der Kameraparameter aus beobachteten Korrespondenzen gesch{\"a}tzt werden kann. F{\"u}r die meisten Kameras, insbesondere f{\"u}r solche mit Weitwinkelobjektiven, ist die Modellannahme einer Lochkamera allerdings sichtbar unzureichend. Deshalb m{\"u}ssen zus{\"a}tzlich zur Lochkamera noch Verzeichnungsabbildungen ins Modell integriert werden. 
In dieser Arbeit wird gezeigt, dass bei polynomialer radialer Verzeichnung die Parameter der Projektionsabbildung die Verzeichnungsparameter bestimmen. Dieses theoretische Ergebnis fließt in Algorithmen zur Kamerakalibrierung, d.h. zur Bestimmung der Parameter eines Kameramodells, ein. Diese wurden experimentell getestet und mit bestehenden Verfahren verglichen. Weiterhin wird die radiale essentielle Matrix eingef{\"u}hrt, die die Beziehung von korrespondierenden Punkten im Stereokamerafall bei radialer Verzeichnung beschreibt. Es werden vier Algorithmen vorgestellt, die diese theoretische Beziehung verwerten. Sie geben an, wie aus korrespondierenden Punkten die radiale essentielle Matrix gesch{\"a}tzt werden kann und welche Kameraparameter daraus gewonnen werden k{\"o}nnen. Damit ist beispielsweise eine Nachkalibrierung m{\"o}glich. Auch diese Verfahren wurden implementiert und evaluiert. Umgekehrt ist bei bekannter radialer essentieller Matrix eine Einschr{\"a}nkung des Suchraums f{\"u}r korrespondierende Punkte m{\"o}glich, die f{\"u}r die Rekonstruktion ben{\"o}tigt werden.}, subject = {Optische Messtechnik}, language = {de} } @phdthesis{Oberender2009, author = {Oberender, Jens O.}, title = {Widerstandsf{\"a}hige Anonymisierungsnetze}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-16846}, school = {Universit{\"a}t Passau}, year = {2009}, abstract = {Unverkettbare Nachrichten sind ein Grundbaustein anonymer Kommunikation. Anonymisierungsnetze sch{\"u}tzen mittels Unverkettbarkeit, wer mit wem kommuniziert sowie die Identit{\"a}t der Beteiligten einer Kommunikationsbeziehung. Anonymisierungsnetze ben{\"o}tigen Kooperation, da die Anonymit{\"a}t durch Ressourcen anderer Teilnehmer gesch{\"u}tzt wird. Wenn die Kosten und der Nutzen eines Anonymisierungsnetzes transparent sind, ergeben sich Zielkonflikte zwischen rationalen Teilnehmern. Es wird daher untersucht, inwiefern daraus resultierendes egoistisches Verhalten die Widerstandsf{\"a}higkeit dieser Netze beeintr{\"a}chtigt. St{\"o}rungen werden in einem spieltheoretischen Modell untersucht, um widerstandsf{\"a}hige Konfigurationen von Anonymisierungsnetzen zu ermitteln. Eine weitere St{\"o}rquelle sind {\"U}berflutungsangriffe mittels unverkettbarer Nachrichten. Es soll sowohl die Verf{\"u}gbarkeit als auch die Anonymit{\"a}t gesch{\"u}tzt werden. Dazu wird Unverkettbarkeit f{\"u}r Nachrichten aufrecht erhalten, außer wenn die Senderate eines Nachrichtenstroms eine Richtlinie {\"u}berschreitet. Innerhalb verkettbarer Nachrichten k{\"o}nnen {\"U}berflutungsangriffe erkannt werden. Dar{\"u}ber kann die Verf{\"u}gbarkeit des Netzdienstes gesch{\"u}tzt werden.}, subject = {Anonymit{\"a}t}, language = {de} } @unpublished{Kreitmeier2009a, author = {Kreitmeier, Wolfgang}, title = {Optimal quantization for the one-dimensional uniform distribution with R{\'e}nyi-α-entropy constraints}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-16983}, year = {2009}, abstract = {We establish the optimal quantization problem for probabilities under constrained R{\'e}nyi-α-entropy of the quantizers.
We determine the optimal quantizers and the optimal quantization error of one-dimensional uniform distributions, including the known special cases α = 0 (restricted codebook size) and α = 1 (restricted Shannon entropy).}, subject = {Maßtheorie}, language = {en} } @unpublished{Kreitmeier2009b, author = {Kreitmeier, Wolfgang}, title = {Error bounds for high-resolution quantization with R{\'e}nyi-α-entropy constraints}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-16647}, year = {2009}, abstract = {We consider the problem of optimal quantization with norm exponent \$r > 0\$ for Borel probabilities on \$\mathbb{R}^d\$ under constrained R{\'e}nyi-\$\alpha\$-entropy of the quantizers. If the bound on the entropy becomes large, then sharp asymptotics for the optimal quantization error are well known in the special cases \$\alpha = 0\$ (memory-constrained quantization) and \$\alpha = 1\$ (Shannon-entropy-constrained quantization). In this paper we determine sharp asymptotics for the optimal quantization error under large entropy bound with entropy parameter \$\alpha \in [1+r/d, \infty]\$. For \$\alpha \in [0, 1+r/d)\$ we specify the asymptotical order of the optimal quantization error under large entropy bound. The optimal quantization error decays exponentially fast with the entropy bound and the exact decay rate is determined for all \$\alpha \in [0, \infty]\$.}, subject = {Maßtheorie}, language = {en} } @phdthesis{Bachl2003, author = {Bachl, Walter}, title = {Interaktives orthogonales Zeichnen von planaren Graphen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-392}, school = {Universit{\"a}t Passau}, year = {2003}, abstract = {Die Arbeit besch{\"a}ftigt sich mit dem automatischen Zeichnen von Graphen. Hier wird ein interaktiver Ansatz untersucht, bei dem der Graph mit einer Menge von Operationen Schritt f{\"u}r Schritt aufgebaut wird. Der Zielgraph und die Einf{\"u}gereihenfolge sind dabei nicht fest vorgegeben, sondern werden vom Benutzer bestimmt. In der Arbeit wird vor allem ein Szenario f{\"u}r zweifach zusammenh{\"a}ngende Graphen untersucht und ein f{\"u}r diese Zwecke passendes Zeichenmodell entwickelt. Dieser Ansatz wird dann um verschiedene Varianten erweitert. Außerdem wird gezeigt, dass das fl{\"a}chenminimale Zeichnen in dem neu entwickelten Zeichenmodell NP-vollst{\"a}ndig ist.}, subject = {Graphenzeichnen}, language = {de} } @phdthesis{Wiesner2004, author = {Wiesner, Christian}, title = {Query Evaluation Techniques for Data Integration Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-406}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {In this work we present novel query evaluation techniques for data integration systems in different environments, ranging from a central data-warehouse approach over distributed virtual market places to peer-to-peer (P2P) systems. Based on a new distributed evaluation technique, the so-called HyperQueries, we present a reference architecture for distributed virtual market places. These HyperQueries enable us to dynamically construct query evaluation plans by referencing sub-plans on the Internet. Furthermore, the process of data integration is structured. Subsequently, we investigate P2P data integration systems without central instances. We introduce so-called Super-Peers which structure a P2P network. Using this Super-Peer based network we "unroll" queries. This allows us to execute even user-defined operators near the data sources.
Finally, we propose novel, efficient join algorithms for decision support queries in central data-warehouse systems. The proposed order-preserving hashjoins and generalized hashteams are based on early sorting and early partitioning of the inputs and can speed up the query evaluation by up to orders of magnitude.}, subject = {Abfrageverarbeitung}, language = {en} } @phdthesis{Braumandl2002, author = {Braumandl, Reinhard}, title = {Quality of Service and Optimization in Data Integration Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-279}, school = {Universit{\"a}t Passau}, year = {2002}, abstract = {This work presents techniques for the construction of a global data integration system. Similar to distributed databases, this system allows declarative queries in order to express user-specific information needs. Scalability towards global data integration systems and openness were major design goals for the architecture and techniques developed in this work. It is shown how service composition, extensibility and quality of service can be supported in an open system of providers for data, functionality for query processing operations, and computing power.}, subject = {Dienstg{\"u}te}, language = {en} } @phdthesis{Bachmaier2004, author = {Bachmaier, Christian}, title = {Circle Planarity of Level Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-385}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {In this dissertation we generalise the notion of level planar graphs in two directions: track planarity and radial planarity. Our main results are linear time algorithms both for the planarity test and for the computation of an embedding, and thus a drawing. Our algorithms use and generalise PQ-trees, which are a data structure for efficient planarity tests.}, subject = {Graphentheorie}, language = {en} } @phdthesis{Stoerzer2006, author = {St{\"o}rzer, Maximilian}, title = {Impact Analysis for AspectJ - A Critical Analysis and Tool-Based Approach to AOP}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-897}, school = {Universit{\"a}t Passau}, year = {2006}, abstract = {Aspect-Oriented Programming (AOP) has been promoted as a solution for modularization problems known in the literature as the tyranny of the dominant decomposition. However, when analyzing AOP languages it can be doubted that uncontrolled AOP is indeed a silver bullet. The contributions of the work presented in this thesis are twofold. First, we critically analyze AOP language constructs and their effects on program semantics to sensitize programmers and researchers to resulting problems. We further demonstrate that AOP—as available in AspectJ and similar languages—can easily result in less understandable, less evolvable, and thus error-prone code—quite contrary to its claims. Second, we examine how tools relying on both static and dynamic program analysis can help to detect problematical usage of aspect-oriented constructs. We propose to use change impact analysis techniques to both automatically determine the impact of aspects and to deal with AOP system evolution. We further introduce an analysis technique to detect potential semantical issues related to undefined advice precedence.
The thesis concludes with an overview of available open source AspectJ systems and an assessment of aspect-oriented programming considering both fundamentals of software engineering and the contents of this thesis.}, subject = {Modularit{\"a}t}, language = {en} } @phdthesis{Kreitmeier2006, author = {Kreitmeier, Wolfgang}, title = {Optimale Quantisierung verallgemeinerter Cantor-Verteilungen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-913}, school = {Universit{\"a}t Passau}, year = {2006}, abstract = {F{\"u}r verallgemeinerte Cantor-Verteilungen, die im Eindimensionalen mittels klassischer Wischkonstruktion bzw. in h{\"o}heren Dimensionen {\"u}ber iterierte Funktionensysteme definiert werden, wird das Problem der optimalen Quantisierung unter bestimmten Voraussetzungen vollst{\"a}ndig gel{\"o}st. Es werden die optimalen Codeb{\"u}cher bestimmt und Formeln f{\"u}r den optimalen Quantisierungsfehler bewiesen. Im eindimensionalen Fall wird eine Existenzcharakterisierung der Quantisierungsdimension gegeben und unter bestimmten Voraussetzungen die Nichtexistenz des Quantisierungskoeffizienten gezeigt. Auch in h{\"o}heren Dimensionen wird f{\"u}r die betrachteten Verteilungen bewiesen, dass der Quantisierungskoeffizient, bei existenter Quantisierungsdimension, nicht existiert. Die gewonnenen Resultate werden auf die Gleichverteilungen von modifizierten klassischen fraktalen Mengen, wie das Sierpinski-Dreieck, die Cantormenge und den Cantor-Staub angewandt.}, subject = {Maßtheorie}, language = {de} } @phdthesis{Kickingereder2006, author = {Kickingereder, Reiner}, title = {Optische Vermessung partiell reflektierender Oberfl{\"a}chen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-936}, school = {Universit{\"a}t Passau}, year = {2006}, abstract = {Die vorliegende Arbeit befaßt sich mit der dreidimensionalen Rekonstruktion reflektierender Oberfl{\"a}chen. Dabei wird sowohl die Oberfl{\"a}chenform als auch die absolute Lage im Raum bestimmt. Es werden Oberfl{\"a}chen behandelt, die einerseits total reflektieren, andererseits diffuse und spiegelnde Reflexionsanteile aufweisen. Zur L{\"o}sung des Problems werden zwei Kameras und mehrere Lichtquellen verwendet. Eine genauere Analyse der Lichtreflexion an spiegelnden Objekten f{\"u}hrt zu einer Beschreibung des Problems als L{\"o}sung einer totalen Differentialgleichung. Diese L{\"o}sung wird als Startwert f{\"u}r die Rekonstruktion partiell reflektierender Oberfl{\"a}chen verwendet.}, subject = {Dreidimensionale Rekonstruktion}, language = {de} } @phdthesis{MianSyed2001, author = {Mian Syed, Alexandra}, title = {Engineering wissensbasierter Navigation und Steuerung autonom-mobiler Systeme}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-159}, school = {Universit{\"a}t Passau}, year = {2001}, abstract = {Die autonome Steuerung mobiler, technischer Systeme in nicht exakt vorherbestimmbaren Situationen erfordert Methoden der autonomen Entscheidungsfindung, um ein planvolles, zielgerichtetes Agieren und Reagieren unter Echtzeitbedingungen realisieren zu k{\"o}nnen. W{\"a}hrend mittels mathematischer Formeln Basisverhalten, wie beispielsweise in einer Geradeausbewegung, einer Drehung, bei einem Abbremsen, und in Gefahrenmomenten schnelle Reaktionen, berechnet werden, ben{\"o}tigt man auf der anderen Seite ein Regelsystem, um dar{\"u}ber hinaus "intelligentes", d.h. situationsangepaßtes Verhalten zu produzieren und gleichzeitig im Hinblick auf ein Missionsziel planvoll agieren zu k{\"o}nnen. 
Derartige Regelsysteme m{\"u}ssen sich auf einer abstrakten Ebene formulieren lassen, sollen sie vom Menschen problemlos entwickelbar, leicht modifizierbar und gut verifizierbar bleiben. Eine aufgrund ihres Konzeptes geeignete Programmierwelt ist die Logikprogrammierung. Ziel der Logikprogrammierung ist es weniger, Arbeitsabl{\"a}ufe zu beschreiben, als vielmehr Wissen in Form von Fakten zu spezifizieren und mit Hilfe von Regeln Schlußfolgerungen aus diesen Fakten ziehen zu k{\"o}nnen. Die klassische Logikprogrammierung ist jedoch aufgrund ihres Auswertungsmechanismus der SLD-Resolution (linear resolution with selected function for definite clauses) zu langsam f{\"u}r die Anwendung bei Echtzeitsystemen. Auch parallele Sprachformen, die ebenfalls mit SLD-Resolution arbeiten, erreichen beim Einsatz auf (von Neumann-) Mehrprozessorsystemen bislang nicht die notwendige Effizienz. Das Anwendungsgebiet der deduktiven Datenbanken hat im Vergleich dazu durch Bottom-Up Techniken einen anderen Auswertungsansatz mit deutlich h{\"o}herer Effizienz geliefert. Viele dort auftretende Probleme k{\"o}nnen jedoch nur durch die Integration anforderungsgetriebener Abarbeitung gel{\"o}st werden. Auf der anderen Seite stellen Datenflußrechnerarchitekturen aufgrund der automatisierten Ausbeutung feink{\"o}rniger Parallelit{\"a}t einen hervorragenden Ansatz der Parallelverarbeitung dar. Bei Datenflußrechnerarchitekturen handelt es sich um (Mehrprozessor-) Systeme, deren datengetriebener Abarbeitungsmechanismus sich grundlegend vom weit verbreiteten kontrollflußgesteuerten von Neumann-Prinzip unterscheidet. {\"U}berlegungen zur Struktur von Steuerungssystemen werden ergeben, daß es mittels Ans{\"a}tzen aus dem Gebiet der deduktiven Datenbanken m{\"o}glich ist, ein f{\"u}r diese Aufgabenstellung neuartiges, ausschließlich datengetriebenes Auswertungsmodell zu generieren. Dabei vermeidet es Probleme, die bei Bottom-Up Verfahren auftreten k{\"o}nnen, wie z.B. das Auftreten unendlicher Wertemengen und die sp{\"a}te Einschr{\"a}nkung auf relevante Werte, ohne gleichzeitig die Stratifizierung von Programmen zu gef{\"a}hrden. Ergebnis der Arbeit ist eine anwendungsbezogene, problemorientierte Entwicklungsumgebung, die einerseits die Entwicklung und Verifikation der Spezifikation mit existierenden Werkzeugen erlaubt und andererseits die effiziente, parallele Ausf{\"u}hrung auf geeigneten Rechensystemen erm{\"o}glicht. Zus{\"a}tzlich wird die Voraussetzung geschaffen, verschiedene weitere, f{\"u}r die Steuerung autonomer Systeme unverzichtbare Komponenten in das Abarbeitungsmodell zu integrieren. Simulationsergebnisse werden belegen, daß das vorgestellte Berechnungsmodell bez{\"u}glich realer Anwendungsbeispiele bereits in einer Monoprozessorversion Echtzeitbedingungen gen{\"u}gt. Damit ist die Voraussetzung f{\"u}r die Ausf{\"u}hrung zuk{\"u}nftiger, weitaus komplexerer Steuerungsprobleme, ggf. auf Mehrprozessorsystemen, in Echtzeit geschaffen.}, subject = {Autonomer Roboter}, language = {de} } @phdthesis{Schreiber2001, author = {Schreiber, Falk}, title = {Visualisierung biochemischer Reaktionsnetze}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-215}, school = {Universit{\"a}t Passau}, year = {2001}, abstract = {In dieser Arbeit werden Anforderungen an die Darstellung biochemischer Reaktionsnetze untersucht und die Netze unter dem Gesichtspunkt der Visualisierung modelliert.
Subsequently, an algorithm for drawing biochemical reaction networks is developed and analysed.}, subject = {Biochemie}, language = {de} } @phdthesis{Fischer2001, author = {Fischer, Bernd}, title = {Deduction-Based Software Component Retrieval}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-231}, school = {Universit{\"a}t Passau}, year = {2001}, abstract = {Deduction-based software component retrieval is a software reuse technique that uses formal specifications as component descriptors and as search keys; matching components are identified using an automated theorem prover. This dissertation contains a detailed theoretical investigation of the concept as well as the first substantial experimental evaluation of its technical feasibility.}, subject = {Software engineering}, language = {en} } @phdthesis{Zukowski2001, author = {Zukowski, Ulrich}, title = {Flexible Computation of the Well-Founded Semantics of Normal Logic Programs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-226}, school = {Universit{\"a}t Passau}, year = {2001}, abstract = {The well-founded semantics has been accepted as the most relevant semantics for logic-based information systems. In this dissertation a framework based on a set of program transformations is presented that generalizes all major computation approaches for the well-founded semantics using a common data structure and provides a common language to describe their evaluation strategy. This rewriting system gives the formal background to analyze and combine different evaluation strategies in a common framework, or to design new algorithms and prove the correctness of their implementations at a high level just by changing the order of program transformations.}, subject = {Logische Programmierung}, language = {en} } @phdthesis{Maydl2005, author = {Maydl, Walter}, title = {Komponentenbasierte Softwareentwicklung f{\"u}r datenflußorientierte eingebettete Systeme}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-681}, school = {Universit{\"a}t Passau}, year = {2005}, abstract = {This dissertation deals with the problems of developing efficient and reliable software for embedded systems. Embedded systems are inherently concurrent, which is one reason for their high design complexity. This concurrency results in a high degree of communication between the individual components. An important requirement for simplifying the design process is the separate modelling of communication protocols and the actual processing algorithms, which leads to higher reusability when communication structures change. The so-called dataflow languages are based on a simple language for parallel processing conceived by Gilles Kahn. In this language, a system consists of a set of sequential processes (components) that communicate via FIFO channels. A process is ready to compute when its input FIFOs are filled with appropriate data. What is transmitted are physical signals, referred to as streams. Streams are sequences of values without explicit timing information. Dataflow languages are used to develop programs for image and signal processing, typical tasks in embedded systems.
Programming is done visually by selecting icons that represent parameterizable components from a library and connecting them with edges (FIFOs). A scheduler, in general a dynamic one, supervises the execution of the completed application program. This thesis proposes a more universal model of physical signals, pursuing two goals: 1. efficient communication between the components, and 2. design-time verification of program properties using more complex component models. To increase efficiency, only relevant values within streams are transmitted. Although this increases the overhead for describing the structure of a substream, in practical applications the method presented here is more efficient. The introduction of new signal features permits a wide variety of checks that the input and output streams of a component comply with the typing rules. Instead of simple firing rules, more elaborate communication protocols are introduced for the various kinds of components. Fifomata (FIFO automata) serve as the formal basis. A dedicated model-checking procedure examines the interplay of the fifomata to determine whether a cyclic schedule exists. The existence of such a cyclic schedule rules out memory overflow and deadlocks and, moreover, guarantees that the program returns to its initial situation after a finite time. Since dataflow programs are in general Turing-equivalent, there may, however, be cyclic schedules that the procedure does not discover. The method presented and implemented here considerably reduces the development time of correct dataflow programs. The new model of physical signals also makes execution more efficient.}, subject = {Softwareentwicklung}, language = {de} } @phdthesis{Lauren2005, author = {Lauren, Verena}, title = {Semilineare Approximation in der Bildrekonstruktion}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-730}, school = {Universit{\"a}t Passau}, year = {2005}, abstract = {In image processing, segmentation algorithms in which the type of the set to be segmented can be fixed in advance, so that only its size and position need to be adjusted, play a rather minor role. The main reasons for this are complicated objective functions and the resulting long computation times, which moreover usually do not yield an optimal result. Yet set-based segmentation can be put to good use if certain boundary conditions are respected. This thesis presents a theory of general set-based segmentation and investigates under which conditions optimal segmentation results can be achieved. The subsequent applications confirm the usefulness of this theory.}, subject = {Optimale Rekonstruktion}, language = {de} } @phdthesis{Bachl2001, author = {Bachl, Sabine}, title = {Isomorphe Subgraphen und deren Anwendung beim Zeichnen von Graphen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-149}, school = {Universit{\"a}t Passau}, year = {2001}, abstract = {In this thesis, the notion of isomorphic subgraphs is defined.
Subsequently, theoretical and practical results on the recognition of isomorphic subgraphs are discussed.}, subject = {Isomorpher Teilgraph}, language = {de} } @phdthesis{Wichert2000, author = {Wichert, Carl-Alexander}, title = {ULTRA - A Logic Transaction Programming Language}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-105}, school = {Universit{\"a}t Passau}, year = {2000}, abstract = {A rule-based language for the specification of complex database updates and transactions is presented, together with a formal treatment of its syntax and declarative semantics.}, subject = {Programmierlogik}, language = {en} } @phdthesis{Dolzmann2000, author = {Dolzmann, Andreas}, title = {Algorithmic strategies for applicable real quantifier elimination}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-64}, school = {Universit{\"a}t Passau}, year = {2000}, abstract = {One of the most important algorithms for real quantifier elimination is the quantifier elimination by virtual substitution introduced by Weispfenning in 1988. In this thesis we present numerous algorithmic approaches for optimizing this quantifier elimination algorithm. Optimization goals are the actual running time of the implementation of the algorithm and the size of the output formula. Strategies for achieving these goals include the simplification of first-order formulas, the reduction of the size of the computed elimination set, and condensing, a new replacement for virtual substitution. Local quantifier elimination computes formulas that are equivalent to the input formula only near a given point. We can make use of this restriction to further optimize the quantifier elimination by virtual substitution. Finally, we discuss how to solve a large class of scheduling problems by real quantifier elimination. To optimize our algorithm for solving scheduling problems, we make use of the special form of the input formula and of additional information given by the description of the scheduling problem.}, subject = {Quantorenelimination}, language = {en} } @phdthesis{Schwarzfischer2004, author = {Schwarzfischer, Thomas}, title = {Quality and Utility - On the Use of Time-Value Functions to Integrate Quality and Timeliness Flexible Aspects in a Dynamic Real-Time Scheduling Environment}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-619}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {Scheduling methodologies for real-time applications have been of keen interest to diverse research communities for several decades. Depending on the application area, algorithms have been developed that are tailored to specific requirements with respect to both the individual components of which an application is made up and the computational platform on which it is to be executed. Many real-time scheduling algorithms base their decisions solely or partly on timing constraints expressed by deadlines which must be met even under worst-case conditions. The increasing complexity of computing hardware means that worst-case execution time analysis becomes increasingly pessimistic. Scheduling hard real-time computations according to their worst-case execution times (which is common practice) will thus result, on average, in an increasing amount of spare capacity. The main goal of flexible real-time scheduling is to exploit this otherwise wasted capacity. Flexible scheduling schemes have been proposed to increase the ability of a real-time system to adapt to changing requirements and nondeterminism in the application behaviour.
These models can be categorised as those whose source of flexibility is the quality of computations and those which are flexible regarding their timing constraints. This work describes a novel model which makes it possible to specify both flexible timing constraints and quality profiles for an application. Furthermore, it demonstrates the applicability of this specification method to real-world examples and suggests a set of feasible scheduling algorithms for the proposed problem class.}, subject = {Echtzeitsystem}, language = {en} } @phdthesis{Ramsauer2005, author = {Ramsauer, Markus}, title = {Energie- und qualit{\"a}tsbewußte Einplanung von periodischen Prozessen in eingebetteten Echtzeitsystemen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-628}, school = {Universit{\"a}t Passau}, year = {2005}, abstract = {Mobile devices are used ever more frequently to execute real-time applications; they offer ever more computing power, and they are becoming smaller and lighter. High computing power, however, requires a great deal of energy, which conflicts with the low battery capacities that result from the demand for small and light devices. In the real-time scheduling of computing processes, the energy consumption of the devices therefore gains importance alongside the timely completion of applications, because the devices should operate independently of the power grid for as long as possible. On the other hand, these devices run computationally intensive applications for which it is desirable to obtain the maximum quality achievable with the available computing power. This thesis presents a system model that unites the design-to-time approach with the dynamic power adaptation capabilities (computing power and consumed electrical power) of modern processors. The design-to-time approach enables energy savings or quality improvements through the dynamic selection of alternative implementations that fulfil the same task with different execution times and quality or energy consumption. The system model comprises, among other things, periodic processes with hard real-time constraints, data dependencies, and alternative implementations, as well as processors with discrete power levels. The processes are scheduled in two phases. In the offline phase, a flexible schedule is computed that contains, for each combination of elapsed time and set of processes still to be scheduled that is possible at run time, the process to be scheduled next, the implementation to be used, and, if applicable, the power level to be set. During the online phase, this flexible schedule is interpreted by a scheduler with negligible time and energy overhead. For the computation of the optimal flexible schedules, an optimizer was developed that generates a sequence of flexible schedules of monotonically increasing quality (low energy consumption or high quality) and thus belongs to the class of anytime algorithms. A variant of dynamic programming serves to determine globally optimal flexible schedules, which serve, for example, as a basis for benchmarks.
A variant of the optimizer based on simulated annealing makes it possible to find good flexible schedules more quickly for larger applications.}, subject = {Echtzeitsystem}, language = {de} } @phdthesis{Raitner2005, author = {Raitner, Marcus}, title = {Efficient visual navigation of hierarchically structured graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-658}, school = {Universit{\"a}t Passau}, year = {2005}, abstract = {Visual navigation of hierarchically structured graphs is a technique for interactively exploring large graphs that possess an additional hierarchical structure. This structure is expressed in the form of a recursive clustering of the nodes: in call graphs of telephone networks, for instance, the nodes are identified with phone numbers; they are clustered recursively through the implicit structure of the numbers, e. g., nodes with the same area code belong to a cluster. In order to reduce the complexity and the size of the graph, only those subgraphs that are currently needed are shown in detail, while the others are collapsed, i. e., represented by meta nodes. In such a graph view the subgraphs in the areas of interest are expanded furthest, whereas those on the periphery are abstracted. As the areas of interest change over time, clusters in a view need to be expanded or contracted. First and foremost, there is a need for an efficient data structure for this graph view maintenance problem. Depending on the admissible modifications of the graph and its hierarchical clustering, three variants have been discussed in the literature: in the static case, everything is fixed; in the dynamic graph variant, only edges of the graph can be inserted and deleted; finally, in the dynamic graph and tree variant the graph is additionally subject to node insertions and deletions and the clustering may change through splitting and merging of clusters. We introduce a new variant, dynamic leaves, which is based on the dynamic graph variant, but additionally allows insertion and deletion of graph nodes, i. e., leaves of the hierarchy. So far, efficient data structures were known only for the static and the dynamic graph variant, i. e., neither the nodes of the graph nor the clustering could be modified. As this is unsatisfactory in an interactive editor for hierarchically structured graphs, we first generalize the approach of Buchsbaum et al. (Proc. 8th ESA, vol. 1879 of LNCS, pp. 120-131, 2000), in which graph view maintenance is formulated as a special case of range searching over tree cross products, to the new dynamic leaves variant. This generalization builds on a novel technique of superimposing a search tree over an ordered list maintenance structure. With an additional factor of roughly O(log n/log log n), this is the first data structure for the problem of graph view maintenance where the node set is dynamic. Visualizing the expanding and contracting appropriately is the second challenge. We propose a local update scheme for the algorithm of Sugiyama and Misue (IEEE Trans. on Systems, Man, and Cybernetics 21 (1991) 876-892) for drawing compound digraphs. The layered drawings that it produces have many applications ranging from biochemical pathways to UML diagrams. Modifying the intermediate results of every step of the original algorithm locally, the update scheme is more efficient than re-applying the entire algorithm after expansion or contraction.
As our experimental results on randomly generated graphs show, the average time for updating the drawing is around 50 \% of the time for redrawing for dense graphs and below 20 \% for sparse graphs. Also, the performance gain is not at the expense of quality as regards the area of the drawing, which increases only insignificantly, and the number of crossings, which is reduced. At the same time, the locality of the updates preserves the user's mental map of the graph: nodes that are not affected stay on the same level in the same relative order, and expanded edges take the same course as the corresponding contracted edge; furthermore, expansion and contraction are visually inverse. Finally, our new data structure and the update scheme are combined into an interactive editor and viewer for compound (di-)graphs. A flexible and extensible software architecture is introduced that lays the ground for future research. It employs the well-known Model-View-Controller (MVC) paradigm to separate the abstract data from its presentation. As a consequence, the purely combinatorial parts, i. e., the compound (di-)graph and its views, are reusable without the editor front-end. A proof-of-concept implementation based on the proposed architecture shows its feasibility and suitability.}, subject = {Dynamische Datenstruktur}, language = {en} } @phdthesis{Seidl2006, author = {Seidl, Andreas}, title = {Cylindrical Decomposition Under Application-Oriented Paradigms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-816}, school = {Universit{\"a}t Passau}, year = {2006}, abstract = {Quantifier elimination (QE) is a powerful tool for problem solving. Once a problem is expressed as a formula, such a method converts it to a simpler, quantifier-free equivalent, thus solving the problem. A great many problems live in the domain of real numbers, which makes real QE very interesting. Among the methods implemented so far, QE by cylindrical algebraic decomposition (CAD) is the most important complete method. The aim of this thesis is to develop CAD-based algorithms which can solve more problems in practice and/or provide more interesting information as output. An algorithm that satisfies these standards would concentrate on generic cases and postpone special and degenerate ones to be treated separately or abandoned completely. It would give a solution that is locally correct for a region the user is interested in. It would give answers that can provide much valuable information, in particular for decision problems. It would combine these methods with more specialized ones for subcases that allow for it. It would exploit degrees of freedom in the algorithms by deciding to proceed in a way that promises to be efficient. Treating these challenges is the focus of this dissertation. Algorithms described here are implemented in the computer logic system REDLOG and ship with the computer algebra system REDUCE.}, subject = {Quantorenelimination}, language = {en} } @phdthesis{Ellmenreich2004, author = {Ellmenreich, Nils}, title = {PolyAPM: Comparative Parallel Programming with Abstract Parallel Machines}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-447}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {A parallelising compilation consists of many translation and optimisation stages. The programmer may steer the compiler through these stages by supplying directives with the source code or setting compiler switches.
However, for an evaluation of the effects of individual stages, their selection and their best order, this approach is not optimal. To solve this problem, we propose the following method. The compilation is cast as a sequence of program transformations. Each intermediate program runs on an Abstract Parallel Machine (APM), while the program generated by the final transformation runs on the target architecture. Our intermediate programs are all in the same language, Haskell. Thus, each program is executable and still abstract enough to be legible, which enables the evaluation of the transformation that generated it. This evaluation is supported by a cost model, which makes a performance prediction of the abstract program for a real machine. Our project, PolyAPM, provides an acyclic directed graph -- usually a tree -- of APMs whose traversal specifies different combinations and orders of transformations. From one source program, several target programs can be constructed. Their run-time characteristics can be evaluated and compared. The goal of PolyAPM is not to support the one-off construction of parallel application programs. For the method's overhead to pay off, the project aims rather at supporting the construction and comparison of many similar variations of a parallel program and a comparative evaluation of parallelisation techniques. With the automation of transformations, PolyAPM can also be used to construct semi-automatic compilation systems.}, subject = {Parallelverarbeitung}, language = {en} } @phdthesis{Streckenbach2005, author = {Streckenbach, Mirko}, title = {KABA - a system for refactoring Java programs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-638}, school = {Universit{\"a}t Passau}, year = {2005}, abstract = {Refactoring is a well known technique to enhance various aspects of an object-oriented program. It has become very popular in recent years, as it makes it possible to overcome deficits present in many programs. Doing refactoring by hand is almost impossible due to the size and complexity of modern software systems. Automated tools provide support for the application of refactorings, but do not give hints as to which refactorings to apply and why. The Snelting/Tip analysis is a program analysis that creates a refactoring proposal for a class hierarchy by analyzing how class members are used inside a program. KABA is an adaptation and extension of the Snelting/Tip analysis for Java. It has been implemented and expanded to become a semantics-preserving, interactive refactoring system. Case studies of real-world programs show the usefulness of the system and its practical value.}, subject = {Java }, language = {en} } @phdthesis{Fischer2005, author = {Fischer, Andreas}, title = {Peano-differentiable functions in O-Minimal structures}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-673}, school = {Universit{\"a}t Passau}, year = {2005}, abstract = {We discuss several aspects of Peano-differentiable functions which are definable in an o-minimal structure expanding a real closed field. After recalling some known results about o-minimal structures, we develop techniques for the intrinsic study of differentiable functions in these structures. We then study (ordinary) differentiable functions definable in an o-minimal structure and their continuity properties along curves of different differentiability classes. Then we generalise (ordinary) differentiability to Peano-differentiability.
We study differentiability of certain Peano-derivatives of definable functions and characterise the sets of non-continuity of these derivatives. Finally, we study the extendability of such functions defined on closed sets and give sufficient conditions under which they can be extended as Peano-differentiable functions.}, subject = {Semialgebraische Menge}, language = {en} } @phdthesis{Forster2004, author = {Forster, Michael}, title = {Crossings in Clustered Level Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-481}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {Clustered graphs are an enhanced graph model with a recursive clustering of the vertices according to a given nesting relation. This prime technique for expressing the coherence of certain parts of the graph is used in many applications, such as biochemical pathways and UML class diagrams. For directed clustered graphs usually level drawings are used, leading to clustered level graphs. In this thesis we analyze the interrelation of clusters and levels and their influence on edge crossings and cluster/edge crossings.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{MianSyed2003, author = {Mian Syed, Ahmed}, title = {Eine {\"o}konomische statische Analysemethode zur Berechnung von Relational Attributes mittels regul{\"a}rer Pfadbedingungen und ihre Anwendung auf Zeigeranalyse}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-333}, school = {Universit{\"a}t Passau}, year = {2003}, abstract = {The goal of this thesis is to develop a new program analysis technique for pointer analysis. It is to be exact in the sense that it computes only results that can actually occur in real program runs. Likewise, the analysis technique is to be economical, i.e. it should have to invest only the minimum computational effort required for an exact solution.}, subject = {Softwareproduktion}, language = {de} } @phdthesis{Wuechner2013, author = {W{\"u}chner, Patrick}, title = {Energy-Efficient and Timely Event Reporting Using Wireless Sensor Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27159}, school = {Universit{\"a}t Passau}, year = {2013}, abstract = {This thesis investigates the suitability of state-of-the-art protocols for large-scale and long-term environmental event monitoring using wireless sensor networks, based on the application scenario of early forest fire detection. By suitably combining energy-efficient protocol mechanisms, a novel communication protocol, referred to as the cross-layer message-merging protocol (XLMMP), is developed. Qualitative and quantitative protocol analyses are carried out to confirm that XLMMP is particularly suitable for this application area. The quantitative analysis is mainly based on finite-source retrial queues with multiple unreliable servers. While this queueing model is widely applicable in various research areas even beyond communication networks, this thesis is the first to determine the distribution of the response time in this model. The model evaluation is mainly carried out using Markovian analysis and the method of phases. The obtained quantitative results show that XLMMP is a feasible basis to design scalable wireless sensor networks that (1) may comprise hundreds of thousands of tiny sensor nodes with reduced node complexity, (2) are suitable to monitor an area of tens of square kilometers, and (3) achieve a lifetime of several years.
The deduced quantifiable relationships between key network parameters (e.g., node size, node density, size of the monitored area, desired lifetime, and the maximum end-to-end communication delay) enable application-specific optimization of the protocol.}, subject = {Drahtloses Sensorsystem}, language = {en} } @phdthesis{Limbeck2013, author = {Limbeck, Jan}, title = {Computation of Approximate Border Bases and Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27197}, school = {Universit{\"a}t Passau}, year = {2013}, abstract = {This thesis addresses some of the algorithmic and numerical challenges associated with the computation of approximate border bases, a generalisation of border bases, in the context of the oil and gas industry. The concept of approximate border bases was introduced by D. Heldt, M. Kreuzer, S. Pokutta and H. Poulisse in "Approximate computation of zero-dimensional polynomial ideals" as an effective means to derive physically relevant polynomial models from measured data. The main advantages of this approach compared to alternative techniques currently in use in the (hydrocarbon) industry are its power to derive polynomial models without additional a priori knowledge about the underlying physical system and its robustness with respect to noise in the measured input data. The so-called Approximate Vanishing Ideal (AVI) algorithm, which can be used to compute approximate border bases and which was also introduced by D. Heldt et al. in the paper mentioned above, served as a starting point for the research conducted in this thesis. A central aim of this work is to broaden the applicability of the AVI algorithm to additional areas in the oil and gas industry, like seismic imaging and the compact representation of unconventional geological structures. For this purpose several new algorithms are developed, among others the so-called Approximate Buchberger M{\"o}ller (ABM) algorithm and the Extended-ABM algorithm. The numerical aspects and the runtime of the methods are analysed in detail, based on a solid foundation of the underlying mathematical and algorithmic concepts that is also provided in this thesis. It is shown that the worst-case runtime of the ABM algorithm is cubic in the number of input points, which is a significant improvement over the biquadratic worst-case runtime of the AVI algorithm. Furthermore, we show that the ABM algorithm allows us to exercise more direct control over the essential properties of the computed approximate border basis than the AVI algorithm. The improved runtime and the additional control turn out to be the key enablers for the new industrial applications that are proposed here. As a conclusion to the work on the computation of approximate border bases, a detailed comparison between the approach in this thesis and some other state-of-the-art algorithms is given. Furthermore, this work also addresses one important shortcoming of approximate border bases, namely that central concepts from exact algebra, such as syzygies, could so far not be translated to the setting of approximate border bases. One way to mitigate this problem is to construct a "close by" exact border basis for a given approximate one. Here we present and discuss two new algorithmic approaches that allow us to compute such close by exact border bases. In the first one, we establish a link between this task, referred to as the rational recovery problem, and the problem of simultaneously quasi-diagonalising a set of complex matrices.
As simultaneous quasi-diagonalisation is not a standard topic in numerical linear algebra, there are hardly any off-the-shelf algorithms and implementations available that are both fast and numerically adequate for our purposes. To bridge this gap we introduce and study a new algorithm that is based on a variant of the classical Jacobi eigenvalue algorithm and also works for non-symmetric matrices. As a second solution to the rational recovery problem, we motivate and discuss how to compute a close by exact border basis via the minimisation of a sum-of-squares expression that is formed from the polynomials in the given approximate border basis. Finally, several applications of the newly developed algorithms are presented. These include production modelling of oil and gas fields, reconstruction of the subsurface velocities for simple subsurface geometries, the compact representation of unconventional oil and gas bodies via algebraic surfaces, and the stable numerical approximation of the roots of zero-dimensional polynomial ideals.}, subject = {Computeralgebra}, language = {en} } @phdthesis{Stegmaier2014, author = {Stegmaier, Florian}, title = {Unified Retrieval in Distributed and Heterogeneous Multimedia Information Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27317}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {Multimedia retrieval is an essential part of today's world. This is observable in industrial domains, e.g., medical imaging, as well as in the private sector, as witnessed by the activities on manifold social media platforms. This trend has led to a huge environment of multimedia information retrieval services offering multimedia resources for almost any user request. The encompassed data is in general retrievable via (proprietary) APIs and query languages, but unfortunately unified access is not available due to interoperability issues between those services. In this regard, this thesis focuses on two application scenarios: a medical retrieval system supporting a radiologist's workflow, and an interoperable image retrieval service interconnecting diverse data silos. The scientific contribution of this dissertation is split into three parts: the first part addresses the metadata interoperability issue. Here, major contributions to a community-driven international standardization have been made, leading to the specification of an API and an ontology that enable unified annotation and retrieval of media resources. The second part presents a metasearch engine especially designed for unified retrieval in distributed and heterogeneous multimedia retrieval environments. This metasearch engine can be operated in a federated as well as an autonomous manner inside the aforementioned application scenarios.
The third part ensures efficient retrieval through the integration of optimization techniques for multimedia retrieval into the overall query execution process of the metasearch engine.}, subject = {Abfrage}, language = {en} } @phdthesis{Mayer2013, author = {Mayer, Tobias Rene}, title = {Achieving Collaboration In Distributed Systems Deployed Over Selfish Peers - An Illustrative Case Study With Publish/Subscribe}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27118}, school = {Universit{\"a}t Passau}, year = {2013}, abstract = {Up to a few years ago, the typical operation of a distributed architecture was modelled as the enactment of a collaborative protocol by networked nodes. In this context, all nodes were under the system designer's control, faithfully executing the programmed behaviour. However, today's networks are often characterized by a free aggregation of nodes. This increases the possibility that a node is operated by a selfish party, which may violate the collaborative protocol in order to increase a personal benefit. If such violations differ from the system goals, they can even be considered an attack. Current fault-tolerance techniques may weaken the harmful impact to some degree, but they cannot necessarily prevent such violations. Furthermore, architectures differ in their fault-tolerance capabilities. This emphasizes the need for a systematic approach to achieve collaboration in distributed systems. In this PhD thesis we consider the problem of attaining a targeted level of collaboration in a distributed architecture deployed over rational, selfishly driven nodes, which have an interest in deviating from the communication protocol to increase a personal benefit. In order to reach this goal and to cover a broad spectrum of systems, we do not modify the architecture or communication protocol itself. Instead, we add a monitoring logic to inspect a node's behaviour in terms of its correct interaction with the system. With this approach, the system designer needs to weigh several aspects, such as the specific environmental circumstances, the inspection effort, and the nodes' individual preferences. Furthermore, he should consider the fact that each agent could be aware of the other agents' preferences and selfishness, and consequently make strategic choices. The natural framework for modelling such a complex, interdependent and possibly interactive decision landscape is Game Theory (GT). In this context, the monitoring setup proposed in this thesis corresponds to a class of GT models known as Inspection Games (IG). Such games were introduced in 1962, in their simplest formulation, by Dresher in the context of non-proliferation treaties and arms control. They model the general situation where one inspector verifies through inspections the correct behaviour of another party, called the inspectee. However, inspections are costly and the inspector's resources are limited. Hence, a complete surveillance is not possible and an inspector will try to minimize the inspections. Finally, a game strategy combination (violating/inspecting or not) that is considered optimal by the parties represents a Nash equilibrium for the game. In this thesis, the initial IG model is enriched by the possibility of false negatives, i.e. the probability that a violation is not detected during an inspection. Both the initial and the enriched model remain abstract and can thus easily find interdisciplinary application.
As a solution approach in the context of distributed systems, however, the model in this thesis captures the network participants' strategy choices. As an outcome, the IG model makes it possible to calculate system parameters that shift the Nash equilibrium to the desired target collaboration. The approach is designed as a framework. It can therefore be applied to any architecture, any selfish goal, and any reliability technique. For the sake of concreteness, we discuss the IG approach by means of the illustrative case of a Publish/Subscribe (pub/sub) architecture. In this way, messages over the communication infrastructure have a specific associated semantics. The Inspection Game approach of this thesis secures the whole collaborative protocol in order to attain a correctly working system up to a specific degree (in the sense of collaboration). This represents a completely new approach to reliability mechanisms. Hence, this thesis can be considered fundamental research. In order to enable broad application, the generality of this approach is supported by further contributions. These include the software library RCourse for practical robustness evaluations of overlay networks and a simulation environment for further research on the abstract IG model. All developments will finally be published as open source software.}, subject = {Zuverl{\"a}ssigkeit}, language = {en} } @phdthesis{Hofmeier2012, author = {Hofmeier, Andreas}, title = {Vergleichen und Aggregieren von partiellen Ordnungen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-26858}, school = {Universit{\"a}t Passau}, year = {2012}, abstract = {Comparing and aggregating information is a central area in the analysis of voting systems, in which the diverse opinions of voters about a set of candidates must be aggregated into an election result that is as fair as possible. In most political elections, each voter marks a ballot to choose a single candidate. Beyond this, however, rank aggregation problems are also studied as a variant of voting systems. In these, each voter expresses his opinion in the form of a total order over the set of candidates, whereby his often complex opinion can be represented more exactly than by the selection of a single favoured candidate. The result of a rank aggregation problem is then likewise a total order of the candidates, namely one with the smallest distance to the voters' opinions. As distance measures between two total orders, Kendall's tau distance and Spearman's footrule distance, among others, have become established. Modern applications of rank aggregation problems in machine learning, artificial intelligence, bioinformatics, and above all in various areas of the World Wide Web have brought already known, but so far little studied, aspects into the focus of research. On the one hand, the algorithmic complexity of rank aggregation problems is gaining importance. On the other hand, many of these applications involve incomplete "voter opinions" with undecided or incomparable candidates, so that total orders are no longer suitable for representing them.
This thesis takes up these two aspects and considers the algorithmic complexity of rank aggregation problems in which voter opinions are represented by weak or partial orders instead of total orders. To this end, Kendall's tau distance and Spearman's footrule distance are generalized in several natural ways. It turns out that even the distance computation between two orders then constitutes an algorithmically complex problem: the generalized versions of Kendall's tau distance and Spearman's footrule distance can still be computed efficiently for weak orders, but as soon as partial orders are considered, the problems become NP-complete, and thus presumably no longer efficiently solvable. For this case, results on the approximability and the parameterized complexity of the problems are presented. The complexity of the rank aggregation problems themselves also increases: variants that are efficiently solvable for total orders become NP-complete for weak orders, whereas variants that are NP-complete for total orders partly lie outside the complexity class NP for partial orders. The thesis concludes with an outlook on open problems.}, language = {de} } @phdthesis{ELKhoury2014, author = {EL-Khoury, Vanessa}, title = {Semantic Protection and Personalization of Video Content. PIAF: MPEG Compliant Adaptation Framework Preserving the User Perceived Quality}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27360}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {UME is the notion that a user should receive informative adapted content anytime and anywhere. Personalization of videos, which adapts their content according to user preferences, is a vital aspect of achieving the UME vision. User preferences can be translated into several types of constraints that must be considered by the adaptation process, including semantic constraints directly related to the content of the video. To deal with these semantic constraints, a fine-grained adaptation, which can go down to the level of video objects, is necessary. The overall goal of this adaptation process is to provide users with adapted content that maximizes their Quality of Experience (QoE). This QoE depends at the same time on the level of the user's satisfaction in perceiving the adapted content, the amount of knowledge assimilated by the user, and the adaptation execution time. In video adaptation frameworks, the Adaptation Decision Taking Engine (ADTE), which can be considered the "brain" of the adaptation engine, is responsible for achieving this goal. The task of the ADTE is challenging, as many adaptation operations can satisfy the same semantic constraint, thus giving rise to several feasible adaptation plans. Indeed, for each entity undergoing the adaptation process, the ADTE must decide on the adequate adaptation operator that satisfies the user's preferences while maximizing his/her quality of experience. The first challenge is to objectively measure the quality of the adapted video, taking into consideration the multiple aspects of the QoE. The second challenge is to assess this quality beforehand in order to choose the most appropriate adaptation plan among all possible plans.
The third challenge is to resolve conflicting or overlapping semantic constraints, in particular conflicts arising from constraints expressed by the owner's intellectual property rights about the modification of the content. In this thesis, we tackled the aforementioned challenges by proposing a Utility Function (UF), which integrates semantic concerns with the user's perceptual considerations. This UF models the relationships among adaptation operations, user preferences, and the quality of the video content. We integrated this UF into an ADTE. This ADTE performs a multi-level piecewise reasoning to choose the adaptation plan that maximizes the user-perceived quality. Furthermore, we included intellectual property rights in the adaptation process, thereby modelling content owner constraints. We dealt with the problem of conflicting user and owner constraints by mapping it to a known optimization problem. Moreover, we developed the SVCAT, which produces structural and high-level semantic annotations according to an original object-based video content model. We also modeled the user's preferences, proposing extensions to MPEG-7 and MPEG-21. All the developed contributions were carried out as part of a coherent framework called PIAF. PIAF is a complete, modular, MPEG-standard-compliant framework that covers the whole process of semantic video adaptation. We validated this research with qualitative and quantitative evaluations, which assess the performance and the efficiency of the proposed adaptation decision-taking engine within PIAF. The experimental results show that the proposed UF has a high correlation with subjective video quality evaluation.}, subject = {MPEG-Standard}, language = {en} } @phdthesis{MoussellySergieh2014, author = {Mousselly Sergieh, Hatem}, title = {Search-based Automatic Image Annotation Using Geotagged Community Photos}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27387}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {In the Web 2.0 era, platforms for sharing and collaboratively annotating images with keywords, called tags, have become very popular. Tags are a powerful means for organizing and retrieving photos. However, manual tagging is time-consuming. Recently, the sheer amount of user-tagged photos available on the Web has encouraged researchers to explore new techniques for automatic image annotation. The idea is to annotate an unlabeled image by propagating the labels of community photos that are visually similar to it. Most recently, an ever-increasing number of community photos is also associated with location information, i.e., geotagged. In this thesis, we aim at exploiting the location context and propose an approach for automatically annotating geotagged photos. Our objective is to address the main limitations of state-of-the-art approaches in terms of the quality of the produced tags and the speed of the complete annotation process. To achieve these goals, we first deal with the problem of collecting images with the associated metadata from online repositories. Accordingly, we introduce a strategy for data crawling that takes advantage of location information and the social relationships among the contributors of the photos. To improve the quality of the collected user tags, we present a method for resolving their ambiguity based on tag relatedness information. In this respect, we propose an approach for representing tags as probability distributions based on the algorithm of Laplacian score feature selection.
Furthermore, we propose a new metric for calculating the distance between tag probability distributions by extending the Jensen-Shannon divergence to account for statistical fluctuations. To efficiently identify the visual neighbors, the thesis introduces two extensions to the state-of-the-art image matching algorithm known as Speeded Up Robust Features (SURF). To speed up the matching, we present a solution for reducing the number of compared SURF descriptors based on classification techniques, while the accuracy of SURF is improved through an efficient method for iterative image matching. Furthermore, we propose a statistical model for ranking the mined annotations according to their relevance to the target image. This is achieved by combining multi-modal information in a statistical framework based on Bayes' rule. Finally, the effectiveness of each of the mentioned contributions, as well as the complete automatic annotation process, is evaluated experimentally.}, subject = {Social Tagging}, language = {en} } @phdthesis{Auer2014, author = {Auer, Christopher}, title = {Planar Graphs and their Duals on Cylinder Surfaces}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27430}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {In this thesis, we investigate plane drawings of undirected and directed graphs on cylinder surfaces. In the case of undirected graphs, the vertices are positioned on a line that is parallel to the cylinder's axis and the edge curves must not intersect this line. We show that a plane drawing is possible if and only if the graph is a double-ended queue (deque) graph, i. e., the vertices of the graph can be processed according to a linear order and the edges correspond to items in the deque inserted and removed at their end vertices. A surprising consequence resulting from these observations is that the deque characterizes planar graphs with a Hamiltonian path. This result extends the known characterization of planar graphs with a Hamiltonian cycle by two stacks. From these insights, we also obtain a new characterization of queue graphs and their duals. We also consider the complexity of deciding whether a graph is a deque graph and prove that it is NP-complete. By introducing a split operation, we obtain the splittable deque and show that it characterizes planarity. For the proof, we devise an algorithm that uses the splittable deque to test whether a rotation system is planar. In the case of directed graphs, we study upward plane drawings where the edge curves follow the direction of the cylinder's axis (standing upward planarity; SUP) or wind around the axis (rolling upward planarity; RUP). We characterize RUP graphs by means of their duals and show that RUP and SUP swap their roles when considering a graph and its dual. There is a physical interpretation underlying this characterization: a SUP graph is to its RUP dual graph as electric current passing through a conductor is to the magnetic field surrounding the conductor.
Whereas testing whether a graph is RUP is NP-hard in general [Bra14], for directed graphs without sources and sinks we develop a linear-time recognition algorithm that is based on our dual graph characterization of RUP graphs.}, subject = {Planarer Graph}, language = {en} } @phdthesis{Meixner2014, author = {Meixner, Britta}, title = {Annotated Interactive Non-linear Video - Software Suite, Download and Cache Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27403}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {Modern Web technology makes the dream of fully interactive and enriched video come true. Nowadays it is possible to organize videos in a non-linear way, playing in a sequence that is not known in advance. Furthermore, additional information can be added to the video, ranging from short descriptions to animated images and further videos. This calls for an authoring tool that is easy and efficient to use, capable of managing the single media objects as well as clearly arranging the links between the parts. Tools of this kind are rare and mostly do not provide the full range of needed functions. While providing an interactive experience to the viewer in the Web player, parallel plot sequences and additional information lead to an increased download volume. This may cause pauses during playback while elements that are displayed with the video have to be downloaded. A good quality of experience for these videos, with short waiting times and playback without interruptions, is desired. This work presents the SIVA Suite for creating the previously described annotated interactive non-linear videos. We propose a video model for interactivity, non-linearity, and annotations, which is implemented in an XML format, an authoring tool, and a player. Video is the main medium, whereby different scenes are linked into a scene graph. Time-controlled additional content, called annotations, such as text, images, audio files, or videos, is added to the scenes. The user is able to navigate in the scene graph by selecting a button on a button panel. Furthermore, other navigational elements like a table of contents or a keyword search are provided. Besides the SIVA Suite, this thesis presents algorithms and strategies for download and cache management to provide a good quality of experience while watching the annotated interactive non-linear videos. To this end, we implemented a standard-independent player framework. Integrated into a simulation environment, the framework allows us to evaluate algorithms and strategies for the calculation of start-up times and for the selection of elements to pre-fetch into and delete from the cache, and to analyze their interaction during the playback of non-linear video contents. The algorithms and strategies can be used to minimize interruptions in the video flow after user interactions. Our extensive evaluation showed that our techniques result in faster start-up times and fewer interruptions in the video flow than those of other players.
Knowledge of the structure of an interactive non-linear video can be used to minimize the start-up time at the beginning of a video while keeping the increase in the overall download volume small.}, subject = {Nichtlineares Ph{\"a}nomen}, language = {en} } @phdthesis{Schoenberg2013, author = {Sch{\"o}nberg, Christian}, title = {Semantic Processing of Digital Documents}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27635}, school = {Universit{\"a}t Passau}, year = {2013}, abstract = {Precise, content-rich and well-structured document models are required for applications like verifying the consistency of documents. Creating such models for common documents is currently an expensive and error-prone process. In this thesis we present a novel approach to modelling and processing digital documents that uses semantic technologies. In contrast to other modelling approaches, we model the structure of documents as indicated by the content, not as defined by technical attributes like the file format. Additionally, our meta-model can be applied to a wide range of different documents, not just to a small set of documents with a predefined set of features. The models include semantic data and content relationships, which can be further extended with domain knowledge. Our new separation of technical and semantic document models enables a standardised method for obtaining semantic models. This method is effective, suitable for live processing, and easily transferable to other document types and other domains. As it makes extensive use of background knowledge, we also present techniques for obtaining such knowledge and for representing complex forms of knowledge with multiple meta-layers. A flexible technique for obtaining relevant data from our document models completes the approach. This includes the ability to obtain various verification models, suitable for different types of consistency criteria and for different validation formalisms. We conclude this thesis with an evaluation that shows the viability and effectiveness of the proposed approach. We present runtime results, adequate for live processing, for an implementation based on RDF/OWL and the rule language JBoss Drools. We also provide and successfully apply techniques for measuring the quality of both document models and background knowledge.}, subject = {Dokument}, language = {en} } @phdthesis{Boerncke2000, author = {B{\"o}rncke, Frank}, title = {Modellierung syntaktischer Strukturen nat{\"u}rlicher Sprachen mit Graphgrammatiken}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-28}, school = {Universit{\"a}t Passau}, year = {2000}, abstract = {By formalizing a linguistic theory, this thesis opens up possibilities for the design of generic methods for natural language processing. To this end, we employ graph languages for modelling syntactic structures, so that results of linguistic research can be described and assessed in terms of graph theory. This approach is motivated by the fact that in linguistics, within the framework of syntax, every sentence of a natural language is assigned a non-sequential structure. This structure overlays the linear word sequence that we know as the sentence. We regard a set of such syntactic structures, which we can model with graphs, as a graph language. The thesis shows how such graph languages can be described by means of graph grammars.
Like all formal languages, graph grammars are distinguished by being mathematically well-defined. This is a necessary prerequisite for proving statements about a language. Of particular interest here is the study of infinite sets; the goal is then to find a finite description for them. This task is usually fulfilled by a grammar. Beyond that, one is interested in recognition algorithms for languages that solve the word problem efficiently. With respect to natural languages, both tasks are solved in this thesis by means of graph grammars.}, subject = {Graphensprache}, language = {de} } @phdthesis{Urban2004, author = {Urban, Christoph}, title = {Das Referenzmodell PECS - Agentenbasierte Modellierung menschlichen Handelns, Entscheidens und Verhaltens}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-471}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {Agent technology has developed rapidly in recent years and has meanwhile gained considerable influence on various areas of science and engineering. In particular, agent-based modelling and simulation has been recognised as an effective means for the investigation of real or hypothetical systems. The focus here is especially on systems that are influenced by human action, decision-making, and behaviour, or on humans themselves, in order to investigate their properties and capabilities further from the perspectives of different scientific disciplines. The primary goal of this thesis is to support, at the conceptual level, the design of agent-based simulation models in which human action, decision-making, and behaviour are of decisive importance. To achieve this goal, the domain- and theory-independent reference model PECS is presented, which combines structuring principles from computer science with approaches from systems theory in order to capture the interplay of diverse spheres of influence on human action within an integrative and comprehensive agent architecture. On the basis of the reference model, four characteristic case studies from psychology, social psychology, sociology, and experimental economics are developed in order to demonstrate the use of the reference model in practice.}, subject = {Agent }, language = {de} } @phdthesis{Robschink2004, author = {Robschink, Torsten}, title = {Pfadbedingungen in Abh{\"a}ngigkeitsgraphen und ihre Anwendung in der Softwaresicherheitstechnik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-469}, school = {Universit{\"a}t Passau}, year = {2004}, abstract = {This thesis presents a new method for the security analysis of software in the areas of tamper detection and the enforcement of information flows between different security levels. Program slicing and constraint solving are independent techniques that are used both for determining dependences and for computing arithmetic properties. Combining these two techniques for the first time by means of path conditions yields not only binary dependence information, as slicing does, but exact necessary conditions on the information flows between two program points. 
In addition to defining the foundations of dependence graphs and simple path conditions, new extensions for context-sensitive interprocedural path conditions are presented, and the integration of domain-specific techniques for array fields and abstract data types is demonstrated. The focus of this thesis is the realisation of path conditions for real programs in real programming languages. To this end, techniques are proposed, implemented, and empirically evaluated with respect to how path conditions scale to large programs. The techniques employed include interval analysis and binary decision diagrams, which make the generally exponential complexity of path conditions manageable. Case studies on the use of path conditions and the empirical evaluation of several interval analysis techniques show that path conditions are suitable and recommendable for practical program analysis and program understanding.}, subject = {Programmanalyse}, language = {de} } @phdthesis{Krinke2003, author = {Krinke, Jens}, title = {Advanced Slicing of Sequential and Concurrent Programs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-375}, school = {Universit{\"a}t Passau}, year = {2003}, abstract = {Program slicing is a technique to identify statements that may influence the computations in other statements. Despite almost 25 years of ongoing research, program slicing still has problems that prevent its widespread use: sometimes slices are too big to understand, and too expensive and complicated to compute for real-life programs. This thesis presents solutions to these problems: it contains various approaches that help the user to understand a slice more easily by making it more focused on the user's problem. All of these approaches have been implemented in the VALSOFT system, and thorough evaluations of the proposed algorithms are presented. The underlying data structures used for slicing are program dependence graphs. They can also be used for different purposes: a new approach to clone detection based on identifying similar subgraphs in program dependence graphs is presented; it is able to detect modified clones better than other tools. In the theoretical part, this thesis presents a high-precision approach to slicing concurrent procedural programs, even though optimal slicing is known to be undecidable. It is the first approach to slicing concurrent programs that does not rely on inlining of called procedures.}, subject = {Programmanalyse}, language = {en} } @inproceedings{OPUS4-181, title = {Proceedings of the 3rd International Workshop on Polyhedral Compilation Techniques}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-26930}, year = {2013}, abstract = {IMPACT 2013 in Berlin, Germany (in conjunction with HiPEAC 2013) is the third workshop in a series of international workshops on polyhedral compilation techniques. 
The previous workshops were held in Chamonix, France (2011), in conjunction with CGO 2011, and in Paris, France (2012), in conjunction with HiPEAC 2012.}, subject = {Optimierender Compiler}, language = {en} } @phdthesis{Matzeder2012, author = {Matzeder, Marco}, title = {Zeichnen von B{\"a}umen auf Gittern}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-26923}, school = {Universit{\"a}t Passau}, year = {2012}, abstract = {Graph drawing is concerned with the question of how the information represented by a graph can be displayed for a viewer in a clear and comprehensible way. The graph class of trees serves in particular for the representation of hierarchical structures. Besides hierarchical and radial drawing methods, trees are also drawn on the orthogonal grid, where the vertices lie on integer coordinates and the edges run along the horizontal and vertical grid lines. Good readability of the drawings and their efficient computation are desired. For the formal assessment of readability there exist aesthetic criteria defined specifically for the drawing of trees, such as a level-wise layout, order preservation, and criteria for the display of subgraph isomorphisms and symmetries. This thesis is concerned with a so far little-studied extension of the orthogonal grid to the hexagonal and octagonal grids, obtained by adding one or both diagonal grid directions, and with the problem of how trees are drawn on them. This also allows trees of higher degree to be drawn than on the orthogonal grid. The restriction remains, however, that only trees whose degree is smaller than the number of grid directions of the grid in use can be drawn. As aesthetic criteria, local uniformity, which fixes the lengths of the edges leaving a vertex, and patterns, which fix their directions, are introduced. In contrast to the known linear area consumption of straight-line drawings of complete binary trees on the orthogonal grid, non-linear lower bounds on the required area on the hexagonal and octagonal grids are shown for drawings of complete d-ary trees with d > 2. Altogether, upper and lower area bounds are presented for drawings of complete and arbitrary, ordered and unordered trees on the hexagonal and octagonal grids. It turns out that non-order-preserving drawings require more than linear but considerably less than quadratic area. In contrast, there are ordered trees whose order-preserving drawings require exponential area. Furthermore, determining the minimum drawing area for ordered d-ary trees is proven to be NP-complete, as is the drawing of unordered d-ary trees with uniform edge lengths. 
Finally, two linear-time algorithms are presented that draw ordered d-ary trees in compliance with the aforementioned aesthetic criteria.}, subject = {Baum }, language = {de} } @phdthesis{Radde2013, author = {Radde, Sven}, title = {A Layered Conversational Recommender System}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27031}, school = {Universit{\"a}t Passau}, year = {2013}, abstract = {In this thesis a new approach to building product recommender systems is introduced. By using a customer-centric dialogue, the customers' preferences are elicited. These are the basis for inferring utility estimations about the desired technical properties of the products in question. Systems built this way can both operate autonomously, e.g., in an online store, and support a salesperson directly at the point of sale. The core of the approach is formed by a layered domain description that models customer stereotypes and needs, product attributes, the products themselves, and the causal interrelations between customer and product properties. Maintenance of the domain description, i.e., keeping the model up-to-date in the face of frequent changes, is facilitated by the clear separation of concerns provided by the layered structure. In fact, the most frequently used class of updates can be handled in an entirely automated way if some constraints are satisfied. On a high level of abstraction, the system behavior is described by State Charts that are parameterized according to the domain description. Those parts of the system description where State Charts would be too imprecise are implemented by separate components realizing the required complex semantics. From the domain description, a Bayesian network is generated that forms the core of the inference engine of the recommender system. The network essentially controls the system-initiated dialogue flow and the recommendation process. Due to the characteristics of Bayesian networks, it is possible to respond to user-initiated dialogue steps in a natural way. Moreover, an explanation of the current recommendation can be generated without having to explicitly encode additional information in the modeling layer. Finally, a database structure and the SQL queries necessary to obtain recommendations can be inferred from the corresponding parts of the domain description. Instantiation of the system to a specific business domain is supported by a dedicated maintenance application that hides the complexities of the underlying algorithms. Thus, day-to-day system updates by non-technical domain experts, e.g., product managers, are facilitated. The developed concepts were implemented in cooperation with a local industry partner who intends to apply the recommender system in the field of mobile communications.}, subject = {Empfehlungssystem}, language = {en} } @unpublished{Kreitmeier2012, author = {Kreitmeier, Wolfgang}, title = {Asymptotic optimality of scalar Gersho quantizers}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27080}, year = {2012}, abstract = {In his famous paper, Gersho stressed that the codecells of optimal quantizers asymptotically make an equal contribution to the distortion of the quantizer. Motivated by this fact, we investigate quantizers in the scalar case where each codecell contributes exactly the same portion to the quantization error. We show that such quantizers of Gersho type - or Gersho quantizers, for short - exist for non-atomic scalar distributions. 
As a main result we prove that Gersho quantizers are asymptotically optimal.}, subject = {Maßtheorie}, language = {en} } @article{PetitCerqueusBoutetetal.2016, author = {Petit, Albin and Cerqueus, Thomas and Boutet, Antoine and Ben Mokhtar, Sonia and Coquil, David and Brunie, Lionel and Kosch, Harald}, title = {SimAttack: private web search under fire}, series = {Journal of Internet Services and Applications}, journal = {Journal of Internet Services and Applications}, publisher = {SpringerOpen}, issn = {1869-0238}, doi = {10.1186/s13174-016-0044-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-3574}, year = {2016}, abstract = {Web search engines have become an indispensable online service for retrieving content on the Internet. However, using search engines raises serious privacy issues, as they gather large amounts of data about individuals through their search queries. Two main techniques have been proposed for querying search engines privately. A first category of approaches, called unlinkability, aims at disassociating the query from the identity of its requester. A second category of approaches, called indistinguishability, aims at hiding a user's queries or interests by either obfuscating the user's queries or forging new fake queries. This paper presents a study of the level of protection offered by three popular solutions: Tor-based, TrackMeNot, and GooPIR. For this purpose, we present an efficient and scalable attack, SimAttack, leveraging a similarity metric to capture the distance between preliminary information about the users (i.e., their query histories) and a new query. SimAttack de-anonymizes up to 36.7 \% of queries protected by an unlinkability solution (i.e., Tor-based), and identifies up to 45.3 and 51.6 \% of queries protected by indistinguishability solutions (i.e., TrackMeNot and GooPIR, respectively). In addition, SimAttack de-anonymizes 6.7 \% more queries than state-of-the-art attacks and dramatically improves the performance of the attack on TrackMeNot by 23.6 \%, while its execution time remains faster by two orders of magnitude.}, language = {en} }
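
To make the linkage step described in the SimAttack abstract concrete, the following Python sketch attributes an anonymous query to the known user whose query history is most similar to it. This is a minimal illustration only, assuming cosine similarity over bag-of-words vectors as a stand-in for the paper's similarity metric; the function names (cosine, link_query), the sample histories, and the 0.2 threshold are hypothetical and not taken from the paper.

import math
from collections import Counter

def cosine(u, v):
    """Cosine similarity between two sparse bag-of-words vectors (Counters)."""
    shared = set(u) & set(v)
    dot = sum(u[t] * v[t] for t in shared)
    norm = (math.sqrt(sum(c * c for c in u.values()))
            * math.sqrt(sum(c * c for c in v.values())))
    return dot / norm if norm else 0.0

def link_query(query, histories, threshold=0.2):
    """Attribute an anonymous query to the most similar user profile.

    histories maps a user id to that user's past queries (the attacker's
    prior knowledge); returns None when no profile is similar enough.
    The threshold is an illustrative assumption, not the paper's value.
    """
    q_vec = Counter(query.lower().split())
    best_user, best_sim = None, threshold
    for user, past in histories.items():
        profile = Counter(" ".join(past).lower().split())
        sim = cosine(q_vec, profile)
        if sim > best_sim:
            best_user, best_sim = user, sim
    return best_user

# Example: the query arrives anonymized (e.g., via Tor), but its vocabulary
# still matches alice's known history, so the attack links it to her.
histories = {
    "alice": ["salsa dance classes", "latin dance shoes"],
    "bob": ["gpu benchmarks", "rust compiler flags"],
}
print(link_query("beginner salsa classes near me", histories))  # -> "alice"

A real attack along these lines would replace the bag-of-words profile with a richer representation of the user's interests, which is where the reported de-anonymization rates against Tor-based, TrackMeNot, and GooPIR protections come from.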