@phdthesis{Matzeder2012, author = {Matzeder, Marco}, title = {Zeichnen von B{\"a}umen auf Gittern}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-26923}, school = {Universit{\"a}t Passau}, year = {2012}, abstract = {Das Zeichnen von Graphen besch{\"a}ftigt sich mit der Frage, wie die durch einen Graphen repr{\"a}sentierten Informationen f{\"u}r einen Betrachter {\"u}bersichtlich und verst{\"a}ndlich dargestellt werden k{\"o}nnen. Die Graphklasse der B{\"a}ume dient insbesondere zur Repr{\"a}sentation von hierarchischen Strukturen. Neben den hierarchisch und radial darstellenden Verfahren werden B{\"a}ume auch auf dem orthogonalen Gitter gezeichnet, in welchem die Knoten auf ganzzahligen Koordinaten liegen und die Kanten entlang der horizontalen und vertikalen Gitterlinien verlaufen. Gew{\"u}nscht wird eine gute Lesbarkeit der Zeichnungen und deren effiziente Berechnung. F{\"u}r die formale Bewertung der Lesbarkeit existieren speziell f{\"u}r das Zeichnen von B{\"a}umen definierte {\"A}sthetikkriterien, wie eine ebenenweise Darstellung, die Ordnungserhaltung und Kriterien zur Darstellung von Subgraphisomorphien und Symmetrien. Die vorliegende Arbeit befasst sich mit einer bislang wenig studierten Erweiterung des orthogonalen Gitters auf das hexagonale und oktagonale Gitter durch das Hinzunehmen einer bzw. beider diagonaler Gitterrichtungen und der Problemstellung, wie B{\"a}ume darauf gezeichnet werden. Dadurch k{\"o}nnen auch B{\"a}ume mit einem h{\"o}heren Grad gezeichnet werden als auf dem orthogonalen Gitter. Die Einschr{\"a}nkung, dass nur B{\"a}ume gezeichnet werden k{\"o}nnen, deren Grad kleiner ist als die Anzahl der Gitterrichtungen des verwendeten Gitters, besteht jedoch weiterhin. Als {\"A}sthetikkriterien werden die lokale Uniformit{\"a}t, die die L{\"a}nge der ausgehenden Kanten eines Knotens festlegt, und Pattern, die deren Richtungen festlegen, eingef{\"u}hrt. Gegen{\"u}ber dem bekannten linearen Fl{\"a}chenverbrauch von geradlinigen Zeichnungen von vollst{\"a}ndigen Bin{\"a}rb{\"a}umen auf dem orthogonalen Gitter werden f{\"u}r Zeichnungen von vollst{\"a}ndigen d-n{\"a}ren B{\"a}umen mit d > 2 nicht-lineare untere Schranken f{\"u}r die ben{\"o}tigte Fl{\"a}che auf dem hexagonalen und dem oktagonalen Gitter gezeigt. Insgesamt werden f{\"u}r vollst{\"a}ndige und beliebige, geordnete und ungeordnete B{\"a}ume obere und untere Fl{\"a}chenschranken f{\"u}r Zeichnungen auf dem hexagonalen und oktagonalen Gitter pr{\"a}sentiert. Dabei zeigt sich, dass bei nicht-ordnungserhaltenden Zeichnungen zwar mehr als lineare, aber deutlich weniger als quadratische Fl{\"a}che ben{\"o}tigt wird. Im Gegensatz dazu gibt es geordnete B{\"a}ume, deren ordnungserhaltende Zeichnungen exponentielle Fl{\"a}che ben{\"o}tigen. Des Weiteren wird die Ermittlung der minimalen Zeichenfl{\"a}che f{\"u}r geordnete d-n{\"a}re B{\"a}ume ebenso als NP-vollst{\"a}ndig bewiesen wie das Zeichnen von ungeordneten d-n{\"a}ren B{\"a}umen mit einheitlichen Kantenl{\"a}ngen.
Schließlich werden zwei Linearzeitalgorithmen vorgestellt, die geordnete d-n{\"a}re B{\"a}ume unter Einhaltung der genannten {\"A}sthetikkriterien zeichnen.}, subject = {Baum}, language = {de} } @misc{OPUS4-1030, title = {Universit{\"a}tsbibliothek Passau: Jahresbericht 2020}, address = {Passau}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10306}, year = {2021}, abstract = {Jahresbericht der Universit{\"a}tsbibliothek Passau f{\"u}r das Jahr 2020.}, language = {de} } @article{SonarBleyerHeckmann2017, author = {Sonar, Arne and Bleyer, Bernhard and Heckmann, Dominikus}, title = {Zur Synergie von reflexiver Technikbewertung und E(L)SA-Begleitforschung. Eine Bewertungstheorie sozio-technischer Systemgef{\"u}ge im Rahmen der Digitalisierung}, series = {Bavarian Journal of Applied Science}, volume = {2017}, journal = {Bavarian Journal of Applied Science}, number = {3}, address = {Deggendorf}, doi = {10.15475/srteb.2017}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-14356}, pages = {234 -- 247}, year = {2017}, abstract = {Die zunehmende Verkn{\"u}pfung von Informations- und Kommunikationstechnologien (IKT) mit den M{\"o}glichkeiten des Internet of Things (IoT) stellt im Entwicklungsprozess von Produkten neue Anforderungen hinsichtlich ihrer Implementierung, Integration und Anwendung. Aus der Perspektive der E(L)SA-Begleitforschung (Ethical, Legal and Social Aspects) erweist sich eine fr{\"u}hzeitige, in den gesamten Hervorbringungsprozess integrierte, reflexive Begleitung als ebenso bedeutsam wie die Bewertung des Produkts in seinen Anwendungskontexten. Digitalisierte Technik soll nicht nur hinsichtlich ihrer Anwendbarkeit bewertet, sondern aktiv im Prozess der Produktherstellung mitgestaltet werden. Es gilt, ein methodisches Vorgehen zu konzipieren, welches die vorhandenen Modelle der Technikfolgenbewertung aufgreift, ihre Kompatibilit{\"a}ten pr{\"u}ft und die Synergien von reflexiver Technikbewertung und E(L)SA-Begleitforschung zur Geltung bringen kann.}, language = {de} } @phdthesis{ELKhoury2014, author = {EL-Khoury, Vanessa}, title = {Semantic Protection and Personalization of Video Content. PIAF: MPEG Compliant Adaptation Framework Preserving the User Perceived Quality}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27360}, school = {Universit{\"a}t Passau}, year = {2014}, abstract = {UME is the notion that a user should receive informative adapted content anytime and anywhere. Personalization of videos, which adapts their content according to user preferences, is a vital aspect of achieving the UME vision. User preferences can be translated into several types of constraints that must be considered by the adaptation process, including semantic constraints directly related to the content of the video. To deal with these semantic constraints, a fine-grained adaptation, which can go down to the level of video objects, is necessary. The overall goal of this adaptation process is to provide users with adapted content that maximizes their Quality of Experience (QoE). This QoE depends at the same time on the level of the user's satisfaction in perceiving the adapted content, the amount of knowledge assimilated by the user, and the adaptation execution time. In video adaptation frameworks, the Adaptation Decision Taking Engine (ADTE), which can be considered the "brain" of the adaptation engine, is responsible for achieving this goal.
The task of the ADTE is challenging, as many adaptation operations can satisfy the same semantic constraint, thus giving rise to several feasible adaptation plans. Indeed, for each entity undergoing the adaptation process, the ADTE must decide on the adequate adaptation operator that satisfies the user's preferences while maximizing his/her quality of experience. The first challenge in achieving this is to objectively measure the quality of the adapted video, taking into consideration the multiple aspects of the QoE. The second challenge is to assess this quality beforehand in order to choose the most appropriate adaptation plan among all possible plans. The third challenge is to resolve conflicting or overlapping semantic constraints, in particular conflicts arising from constraints expressed by the owner's intellectual property rights concerning the modification of the content. In this thesis, we tackled the aforementioned challenges by proposing a Utility Function (UF), which integrates semantic concerns with the user's perceptual considerations. This UF models the relationships among adaptation operations, user preferences, and the quality of the video content. We integrated this UF into an ADTE. This ADTE performs multi-level piecewise reasoning to choose the adaptation plan that maximizes the user-perceived quality. Furthermore, we included intellectual property rights in the adaptation process. Thereby, we modeled content owner constraints. We dealt with the problem of conflicting user and owner constraints by mapping it to a known optimization problem. Moreover, we developed the SVCAT, which produces structural and high-level semantic annotations according to an original object-based video content model. We also modeled the user's preferences, proposing extensions to MPEG-7 and MPEG-21. All the developed contributions were carried out as part of a coherent framework called PIAF. PIAF is a complete, modular, MPEG-standard-compliant framework that covers the whole process of semantic video adaptation. We validated this research with qualitative and quantitative evaluations, which assess the performance and the efficiency of the proposed adaptation decision-taking engine within PIAF. The experimental results show that the proposed UF has a high correlation with subjective video quality evaluation.}, subject = {MPEG-Standard}, language = {en} } @phdthesis{Poliakoff2025, author = {Poliakoff, Serge}, title = {From Faking Online Content to Orchestrating Its Creation by Public Workers: Examining Russian Disinformation Production Organisations through Curriculum Vitae Analysis (2013-2024)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-16100}, school = {Universit{\"a}t Passau}, pages = {259 Seiten}, year = {2025}, abstract = {This dissertation examines the evolution of Russian digital authoritarianism, focusing on its organisations that produce disinformation, the transition from the Internet Research Agency to the Patriot Media Group, and the emergence of ANO Dialog. It shows how the Internet Research Agency operated as a sophisticated "troll farm" during the early stages of the Russo-Ukrainian war, with activities similar to those of a PR firm. The Patriot Media Group absorbed these activities, blurring the lines between media and disinformation and prioritising metrics-driven propaganda over journalistic professionalism.
This dissertation identifies the organisation ANO Dialog as a new model, heavily integrated with state structures, combining digital surveillance, repression, and Soviet-style agitation adapted to the digital age. Using a novel methodology of career profile collection and analysis, my research illustrates the institutionalisation and regional expansion of disinformation tactics in Russian digital authoritarianism, highlighting its operational adaptability and evolving infrastructure.}, language = {en} } @phdthesis{Kufner2012, author = {Kufner, Sabrina}, title = {Diagnose und Prognose von Handlungskompetenz im Bereich adaptiven Lehrens bei Studierenden - eine Videostudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus-27134}, school = {Universit{\"a}t Passau}, year = {2012}, abstract = {Eng verkn{\"u}pft mit der Effektivit{\"a}t von Bildungsprozessen stellt die Modellierung und Messung professioneller Lehrkompetenzen ein zentrales Forschungsinteresse der empirischen Lehrerbildungs- und Unterrichtsforschung dar. Unangefochtenes kennzeichnendes Merkmal von Kompetenzen ist deren Ausrichtung auf die erfolgreiche Bew{\"a}ltigung von Anforderungen in spezifischen Situationen. Dem Begriff der Kompetenz schwingt damit die Perspektive einer realized ability mit. Aus forschungspragmatischen Gr{\"u}nden wird dieser mehr oder minder explizite Handlungsbezug jedoch oft vernachl{\"a}ssigt. Einschl{\"a}gige Studien zur Kompetenzmessung definieren Kompetenzen deshalb als kontextbezogene kognitive Leistungsdispositionen, die sich funktional auf Situationen und Anforderungen in bestimmten Dom{\"a}nen beziehen. Versteht man Kompetenz jedoch tats{\"a}chlich als Handlungskompetenz, greifen Erhebungsmethoden in Form von Selbstausk{\"u}nften wie Wissenstests, Selbsteinsch{\"a}tzungen oder Interviews zu kurz. Es fehlt die Perspektive des tats{\"a}chlichen Handlungsvollzugs. Zwar unterstellt man diesen Messergebnissen quasi Stellvertreter und Garant f{\"u}r die entsprechende Handlung zu sein, ob die getesteten Personen in der realen Situation jedoch tats{\"a}chlich so handeln und sich als kompetent erweisen, muss bei diesen Messverfahren in letzter Konsequenz ungekl{\"a}rt bleiben. An dieser Diskrepanz setzt das vorliegende Forschungsprojekt an. Ziel ist es, die methodische Vorgehensweise bei der Kompetenzmessung eng an die theoretische Fundierung von Kompetenz als Handlungskompetenz zu binden. Die methodologische Grundannahme dabei ist: Wenn sich (Lehr)Personen als kompetent erweisen, dann m{\"u}ssen sie das in der konkreten, kontextuell gebundenen Handlungssituation tun; ihre Kompetenz wird in der Performanz beobachtbar. Kernanliegen der Studie ist die Entwicklung eines Instruments, das Kompetenz stellvertretend {\"u}ber die Performanz erfassen kann. Der Einsatz von Videoanalysen dr{\"a}ngt sich damit auf. Die vorliegende Studie interessiert das kompetente Handeln von Lehrkr{\"a}ften angesichts der Heterogenit{\"a}t der Lerner. Professionellen Lehrpersonen gelingt es, auf unterschiedliche Ausgangslagen einzugehen und adaptive Lernsettings bereitzustellen. In der universit{\"a}ren Ausbildungsphase sollen dazu die ersten Teilkompetenzen erworben werden. F{\"u}r den Modellierungsprozess adaptiver Lehrkompetenz wurde das Modell der Entwicklung professioneller Handlungskompetenz von BAUMERT und KUNTER (2006) im R{\"u}ckgriff auf WEINERT (2001) herangezogen. 
Professionelles Lehrerhandeln entsteht demnach aus einem Zusammenspiel von Aspekten professioneller Wertvorstellungen und {\"U}berzeugungen, motivationaler Orientierungen, selbstregulativer F{\"a}higkeiten und Professionswissen. Diese personimmanenten Dispositionen wurden auf die Situation adaptiven unterrichtlichen Handelns von Studierenden spezifiziert. Analog dazu wurden stellvertretend f{\"u}r jede Pr{\"a}diktordimension Personmerkmale erhoben, die aus theoretischer Sicht Prognosen im Hinblick auf das beobachtbare Kompetenzlevel zulassen (z.B. Einstellungen zum Lehren und Lernen, Lehrerinteressen, Selbstwirksamkeitserwartungen, Belastungserleben, Professionswissen hinsichtlich individualisierenden Lehrens und Lernens). Ebenso wurde adaptive Lehrkompetenz auf zwei Niveaustufen zun{\"a}chst theoretisch in Form einer Minimalstufe zu Beginn der Ausbildung und einer Maximalstufe als hypothetisches Professionalit{\"a}tsmaximum modelliert. Ausgehend von den theoretischen Vorarbeiten zur Minimalstufe wurde ein niedrig inferentes Beobachtungsschema entwickelt, das die Erhebung der Performanz adaptiver Lehrkompetenz bei Studierenden erm{\"o}glicht. Insgesamt wurden 50 Unterrichtsvideos kodiert und entsprechend die adaptive Lehrkompetenz der Probanden identifiziert. Vor diesem Hintergrund wurden die einzelnen Pr{\"a}diktordimensionen hinsichtlich ihrer prognostischen Qualit{\"a}t {\"u}berpr{\"u}ft. Die Ergebnisse weisen darauf hin, dass es weniger die als relativ stabil geltenden Dispositionen sind, die ein hohes Maß an adaptiver Lehrkompetenz ausmachen, als das relativ leicht ver{\"a}nderbare Merkmal des im Studium erworbenen Wissens.}, subject = {Kompetenz}, language = {de} } @phdthesis{Gruber2025, author = {Gruber, Martin}, title = {Tackling Test Flakiness: Understanding the Problem and Providing Practical Mitigations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15549}, school = {Universit{\"a}t Passau}, pages = {127 Seiten}, year = {2025}, abstract = {"Software is eating the world". With this phrase from his 2011 Wall Street Journal interview, Marc Andreessen predicted a decade of disruptive software-based innovations affecting various industries. Today, over ten years later, many of his predictions have come true: six of the seven most valuable companies worldwide are computer technology firms, and more than half of the world's population has access to the internet and owns a smartphone, with numbers still growing rapidly. The increasing importance of software has also changed software development. To ensure product quality despite high complexity and fast product cycles, software developers started to adopt continuous integration and regression testing practices: each change to an existing system is automatically tested and reverted in case it breaks any existing functionality. As a result, large software projects are conducting millions of test executions each day. One obstacle to such extensive testing is the presence of non-deterministic tests that can pass and fail without any changes to the underlying system or the test itself. These tests are commonly referred to as flaky tests. Flaky tests break regression testing, as they cause test failures that are unrelated to the changes that are being tested. Developers are forced to investigate these intermittent failures, wasting their time and decreasing their trust in testing. This thesis presents our research that aims at understanding and mitigating test flakiness.
To comprehend the nature of flaky tests, we conducted both code-based studies on open-source projects and a developer survey. All our investigations confirmed that flakiness is a frequently occurring and severe issue. The causes of flakiness, however, depend on the domain of the project and the source of the test: while asynchronous waiting and concurrency are overall the most prevalent causes aside from test order dependencies, Python projects tend to experience more flakiness caused by networking and randomness. Flaky tests that were not written by developers but generated automatically tend to be more often caused by randomness or unspecified behavior. To avoid test flakiness in generated tests, developers can use existing flakiness suppression mechanisms of test generation frameworks, which we found to be effective. In general, however, most developers currently address the issue of test flakiness by rerunning failing tests. Nevertheless, they would like more support when dealing with test flakiness, namely better visualizations, automated detection and debugging techniques, and education on the topic. In response to this feedback, we developed and evaluated a generic flakiness prediction approach, as well as an automated flakiness debugging technique. Our flakiness prediction method is easy to use and widely applicable. In contrast to previous techniques, it avoids any form of static or dynamic analysis. Instead, it relies solely on a test's execution result history and version control information, two commonly available artifacts. Additionally, it aims to classify real-world failures as caused by either flakiness or a regression. Previous techniques mainly focused on identifying potential flaky test cases in test suites, a related but less actionable question. An evaluation on a large-scale automotive software project yielded positive results. Our approach showed a strong predictive performance (95.5\% F1-score), outperforming the previously used heuristic. We also introduced Spectrum-based Flaky Fault Localization (SFFL), an automated debugging technique that aims to pinpoint the specific lines in the source code that cause a flaky test's non-deterministic behavior. SFFL extends traditional Spectrum-based Fault Localization (SFL) by considering multiple coverage behaviors of the same test case, a highly common phenomenon among flaky tests. Our evaluation on 101 flaky Python tests showed that SFFL outperforms traditional SFL and was able to narrow down the flaky fault's location to 3.5\% of a project's code base on average.}, subject = {Softwareentwicklung}, language = {en} } @phdthesis{NorbertoSales2022, author = {Norberto Sales, Juliano Efson}, title = {An Explainable Semantic Parser for End-User Development}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-10718}, school = {Universit{\"a}t Passau}, pages = {xvi, 165 Seiten}, year = {2022}, abstract = {Programming is a key skill in a world where businesses are driven by digital transformations. Although much of the programming demand can be addressed by a simple set of instructions composing libraries and services available on the web, non-technical professionals, such as domain experts and analysts, are still unable to construct their own programs due to the intrinsic complexity of coding.
Among other types of end-user development, natural language programming has emerged to allow users to program without the formalism of traditional programming languages, where a tailored semantic parser can translate a natural language utterance into a formal command representation that can be processed by a computational machine. Currently, semantic parsers are typically built on top of a learning method that defines their behaviour based on the patterns behind a large set of training data, whose production is frequently costly and time-consuming. Our research is devoted to studying and proposing a semantic parser for natural language commands targeting a scenario with low availability of training data. Our proposed semantic parser follows a multi-component architecture, composed of a specialised shallow parser that associates natural language commands with predicate-argument structures, integrated with a distributional ranking model that matches the command to a function signature available from an API knowledge base. Systems developed with statistical learning models and complex linguistic resources, such as the proposed semantic parser, do not natively provide an easy way to relate a single feature of the input data to its impact on system behaviour. In this scenario, end-user explanations for intelligent systems have become a strong requirement to increase user confidence and system literacy. Thus, our research designed an explanation model for the proposed semantic parser that fits the heterogeneity of its multi-component architecture. The explanation model explores a hierarchical representation with an increasing degree of technical depth, providing higher-level explanations in the initial layers and gradually moving to those that demand technical knowledge, applying different explanation strategies to better express the approach behind each component. With the support of a user-centred experiment, we compared the utility of different types of explanations and the impact of background knowledge on users' preferences.}, language = {en} } @phdthesis{Sui2018, author = {Sui, Zhiyuan}, title = {Security and Privacy Schemes for Demand Response in Smart Grids}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-5809}, school = {Universit{\"a}t Passau}, pages = {xx, 153 Seiten}, year = {2018}, abstract = {Smart Grids integrate currently isolated power and communications networks, while introducing several new technologies on the hardware and software sides. One of the most important ingredients is the potential for demand-response programs, which offer the possibility of sending instructions to consumers to adapt their power consumption over a certain period of time. However, high-frequency data collection exposes consumers' usage behaviors, leading to security and privacy challenges for Smart Grids. In this thesis, three cryptographic schemes are constructed for different demand-response programs. In the mandatory incentive-based demand-response program, privacy preservation depends on the power consumption of consumers. An anonymous authentication scheme is constructed for overload auditing and privacy preservation. Consumers' identities are anonymous during normal operation. The operation center defines an acceptable consumption threshold at times of power shortage. Consumers must follow the instruction and curtail their power consumption to meet the threshold. If they do so, the consumers keep their anonymity, while disobedient consumers, whose power consumption exceeds the threshold, can be identified.
Security analysis demonstrates that the constructed anonymous authentication scheme is secure in the random oracle model. In the voluntary incentive-based demand-response program, consumers are categorized as either obedient or disobedient according to their consumption curtailment. Consumers utilize a homomorphic encryption algorithm to encrypt their usage and report the ciphertexts to the operation center periodically. At a time of grid instability, the obedient consumers reduce their consumption and prove their curtailment by using a range proof. Both the usage reports and the proofs from obedient consumers concerning their consumption are reported without leaking private information. In order to meet the real-time requirement, a security model is proposed and a batch verification algorithm is constructed, which is proved to be secure in the defined oracle model. Apart from reward and penalty detection in demand-response programs, theft detection is also an important requirement in Smart Grids. In order to achieve theft detection, this thesis employs dynamic k-times anonymous authentication and blind signatures to create an efficient theft detection mechanism in the prepaid card system, where consumers pay for their consumption in advance and obtain credentials. A consumer sends the credentials anonymously and obtains corresponding credentials during times of consumption. If a thief tries to send reused credentials to steal electricity, his anonymity will be revoked. Finally, this thesis proves that the proposed mechanism finds the real identities of power thieves without sacrificing the privacy of honest consumers under the random oracle model.}, subject = {Intelligentes Stromnetz}, language = {en} } @phdthesis{Walsh2024, author = {Walsh, Florian}, title = {Computing the Binomial Part of Polynomial Ideals}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-15096}, school = {Universit{\"a}t Passau}, pages = {vi, 131 Seiten}, year = {2024}, abstract = {Given an ideal in a polynomial ring over a field, we present a complete algorithm to compute its binomial part.}, language = {en} }