@article{JoblinApelMauerer, author = {Joblin, Mitchell and Apel, Sven and Mauerer, Wolfgang}, title = {Evolutionary trends of developer coordination: a network approach}, series = {Empirical Software Engineering}, volume = {22}, journal = {Empirical Software Engineering}, number = {4}, publisher = {Springer}, doi = {10.1007/s10664-016-9478-9}, pages = {2050 -- 2094}, abstract = {Software evolution is a fundamental process that transcends the realm of technical artifacts and permeates the entire organizational structure of a software project. By means of a longitudinal empirical study of 18 large open-source projects, we examine and discuss the evolutionary principles that govern the coordination of developers. By applying a network-analytic approach, we found that the implicit and self-organizing structure of developer coordination is ubiquitously described by non-random organizational principles that defy conventional software-engineering wisdom. In particular, we found that: (a) developers form scale-free networks, in which the majority of coordination requirements arise among an extremely small number of developers, (b) developers tend to accumulate coordination requirements with more and more developers over time, presumably limited by an upper bound, and (c) initially developers are hierarchically arranged, but over time, form a hybrid structure, in which core developers are hierarchically arranged and peripheral developers are not. Our results suggest that the organizational structure of large projects is constrained to evolve towards a state that balances the costs and benefits of developer coordination, and the mechanisms used to achieve this state depend on the project's scale.}, language = {en} } @inproceedings{RamsauerKiszkaLohmannetal., author = {Ramsauer, Ralf and Kiszka, Jan and Lohmann, Daniel and Mauerer, Wolfgang}, title = {Look Mum, no VM Exits! (Almost)}, series = {Proceedings of the 13th Annual Workshop on Operating Systems Platforms for Embedded Real-Time Applications (OSPERT '17)}, booktitle = {Proceedings of the 13th Annual Workshop on Operating Systems Platforms for Embedded Real-Time Applications (OSPERT '17)}, doi = {10.48550/arXiv.1705.06932}, abstract = {Multi-core CPUs are a standard component in many modern embedded systems. Their virtualisation extensions enable the isolation of services and are gaining popularity for implementing mixed-criticality or otherwise split systems. We present Jailhouse, a Linux-based, OS-agnostic partitioning hypervisor that uses novel architectural approaches to combine Linux, a powerful general-purpose system, with strictly isolated special-purpose components. Our design goals favour simplicity over features, establish a minimal code base, and minimise hypervisor activity. Direct assignment of hardware to guests, together with a deferred initialisation scheme, offloads any complex hardware handling and bootstrapping issues from the hypervisor to the general-purpose OS. The hypervisor establishes isolated domains that directly access physical resources without the need for emulation or paravirtualisation.
This retains, with negligible system overhead, Linux's feature-richness in uncritical parts, while frugal safety and real-time critical workloads execute in isolated, safe domains.}, language = {en} } @inproceedings{PichaBradaRamsaueretal., author = {Picha, Petr and Brada, Premek and Ramsauer, Ralf and Mauerer, Wolfgang}, title = {Towards Architect's Activity Detection through a Common Model for Project Pattern Analysis}, series = {First International Workshop on the Social and Organization Dimensions of Software, 5-7 April 2017, Gothenburg, Sweden}, booktitle = {First International Workshop on the Social and Organization Dimensions of Software, 5-7 April 2017, Gothenburg, Sweden}, publisher = {IEEE}, doi = {10.1109/ICSAW.2017.46}, pages = {175 -- 178}, abstract = {Software development projects leave a large amount of data in repositories of Application Lifecycle Management (ALM) tools. These data contain detailed histories of their respective projects, their results and decisions made along the way. Analysis of such data helps uncover various interesting facts about projects, e.g. their socio-technical structures and the actual (vs. purported) roles of team members. Based on experiences with tools supporting our research we are convinced that it is feasible to consolidate data from different ALM tools, tapping into the situation common in real-life projects. In this paper we report on our work towards a shared common data model and tool integration aimed at improved project analysis. We discuss how this can help in the identification of architects in the project organizational structures, their activity patterns and collaboration with other team roles.}, language = {en} } @inproceedings{RamsauerLohmannMauerer, author = {Ramsauer, Ralf and Lohmann, Daniel and Mauerer, Wolfgang}, title = {Observing Custom Software Modifications: A Quantitative Approach of Tracking the Evolution of Patch Stacks}, series = {Proceedings of the 12th International Symposium on Open Collaboration (OpenSym '16)}, booktitle = {Proceedings of the 12th International Symposium on Open Collaboration (OpenSym '16)}, editor = {Wassermann, Tony}, publisher = {ACM}, address = {New York}, doi = {10.1145/2957792.2957810}, pages = {1 -- 4}, abstract = {Modifications to open-source software (OSS) are often provided in the form of "patch stacks" -- sets of changes (patches) that modify a given body of source code. Maintaining patch stacks over extended periods of time is problematic when the underlying base project changes frequently. This necessitates a continuous and engineering-intensive adaptation of the stack. Nonetheless, long-term maintenance is an important problem for changes that are not integrated into projects, for instance when they are controversial or only of value to a limited group of users. We present and implement a methodology to systematically examine the temporal evolution of patch stacks, track non-functional properties like integrability and maintainability, and estimate the eventual economic and engineering effort required to successfully develop and maintain patch stacks. 
Our results provide a basis for quantitative research on patch stacks, including statistical analyses and other methods that lead to actionable advice on the construction and long-term maintenance of custom extensions to OSS.}, language = {en} } @inproceedings{JoblinMauererApeletal., author = {Joblin, Mitchell and Mauerer, Wolfgang and Apel, Sven and Siegmund, Janet and Riehle, Dirk}, title = {From Developer Networks to Verified Communities: A Fine-Grained Approach}, series = {2015 IEEE/ACM 37th IEEE International Conference on Software Engineering, 16-24 May 2015, Florence, Italy}, booktitle = {2015 IEEE/ACM 37th IEEE International Conference on Software Engineering, 16-24 May 2015, Florence, Italy}, publisher = {IEEE}, isbn = {978-1-4799-1934-5}, doi = {10.1109/icse.2015.73}, pages = {563 -- 573}, abstract = {Effective software engineering demands a coordinated effort. Unfortunately, a comprehensive view on developer coordination is rarely available to support software-engineering decisions, despite the significant implications on software quality, software architecture, and developer productivity. We present a fine-grained, verifiable, and fully automated approach to capture a view on developer coordination, based on commit information and source-code structure, mined from version-control systems. We apply methodology from network analysis and machine learning to identify developer communities automatically. Compared to previous work, our approach is fine-grained, and identifies statistically significant communities using order-statistics and a community-verification technique based on graph conductance. To demonstrate the scalability and generality of our approach, we analyze ten open-source projects with complex and active histories, written in various programming languages. By surveying 53 open-source developers from the ten projects, we validate the authenticity of inferred community structure with respect to reality. Our results indicate that developers of open-source projects form statistically significant community structures and this particular view on collaboration largely coincides with developers' perceptions of real-world collaboration.}, language = {en} } @misc{Mauerer, author = {Mauerer, Wolfgang}, title = {Open Source in Research and Reality}, series = {Open Source Summit North America and Embedded Linux Conference North America 2020}, journal = {Open Source Summit North America and Embedded Linux Conference North America 2020}, language = {en} } @misc{MauererWagner, author = {Mauerer, Wolfgang and Wagner, Daniel}, title = {Cyclic Tests Unleashed: Large-Scale RT Analysis with Jitterdebugger}, series = {Open Source Summit, North America, 17.07.2019}, journal = {Open Source Summit, North America, 17.07.2019}, language = {en} } @misc{RamsauerDudaMauereretal., author = {Ramsauer, Ralf and Duda, Sebastian and Mauerer, Wolfgang and Bulwahn, Lukas}, title = {The list is our process: An analysis of the kernel's email-based development process}, series = {Linux Plumbers Conference, Lisbon September 9, 2019}, journal = {Linux Plumbers Conference, Lisbon September 9, 2019}, language = {en} } @misc{MauererRamsauer, author = {Mauerer, Wolfgang and Ramsauer, Ralf}, title = {Torturing Git for Fun and Profit}, series = {Microsoft Developer Meetup Regensburg 02.04.2019}, journal = {Microsoft Developer Meetup Regensburg 02.04.2019}, abstract = {In diesem Talk blicken Prof. Dr. Wolfgang Mauerer und Ralf Ramsauer unter die Haube des verteilten Versionskontrollsystems Git. 
Neben einer genauen Beschreibung der Strukturen und Plumbing APIs, mit denen Git intern Commits erzeugt und verkn{\"u}pft, gehen die Vortragenden auch auf n{\"u}tzliche Features und Standards ein, welche die Kollaboration in großen Open-Source Projekten erleichtern.}, language = {en} } @inproceedings{Mauerer, author = {Mauerer, Wolfgang}, title = {Are any big brothers watching you, and if yes, what can they tell about Debian}, series = {DebConf18 Hsinchu, Taiwan}, booktitle = {DebConf18 Hsinchu, Taiwan}, abstract = {Debian, as a collection of software packages and components, is known to be one of the largest software projects in the history of mankind. Combined with a traceable history over many years, the artefacts created by Debian developers and users make it one of science's favourite targets to quantitatively or qualitatively understand how real-world software development works (or does not), how people collaborate, and many other related questions. Unfortunately, while scientists make ample use of the resources and artefacts created by FLOSS and friends, the exchange of insights and ideas does not seem to extend in both directions: Developers, users and integrators are often unaware of results obtained in science. This talk will introduce the Debian community to a selection of the most important results obtained by scientific (software engineering) research, with a special focus on large-scale socio-technical analysis of projects like Debian, and the possible implications and improvements these may bring to Debian development itself.}, language = {en} } @misc{Mauerer, author = {Mauerer, Wolfgang}, title = {Embedded Linux Quality Assurance: How to Not Lie with Statistics}, series = {Embedded Linux Conference North America ; OpenIoTSummit North America}, journal = {Embedded Linux Conference North America ; OpenIoTSummit North America}, abstract = {Embedded Linux drives an ever-increasing number of appliances in many domains and applications, some even real-time and/or safety critical. Traditional quality assurance of such systems is based on testing and formal verification, but the huge amount of code and the rapid dynamics of the Linux ecosystem, as well as fundamental limitations of formal methods, make these approaches unsatisfactory. Statistical quality assurance for reliability, error rates, maximal latencies etc. is needed. We will discuss current best practices, how to design and run automated statistical tests that capture relevant information, and how to properly evaluate the resulting data. Practical real-world examples and recipes are played through using the open source R language. Most importantly, we identify common mistakes in (over-)interpreting statistical results and predictions that may eventually harm people.}, language = {en} } @misc{RamsauerKiszkaMauerer, author = {Ramsauer, Ralf and Kiszka, Jan and Mauerer, Wolfgang}, title = {Spectre and Meltdown vs.
Real-Time: How Much do Mitigations Cost?}, series = {Embedded Linux Conference Europe 2018, October 22 - 24, 2018, Edinburgh, UK}, journal = {Embedded Linux Conference Europe 2018, October 22 - 24, 2018, Edinburgh, UK}, language = {en} } @misc{RamsauerKiszkaMauerer, author = {Ramsauer, Ralf and Kiszka, Jan and Mauerer, Wolfgang}, title = {Building Mixed Criticality Linux Systems with the Jailhouse Hypervisor}, series = {Embedded Linux Conference + OpenIoTSummit, Portland, OR, 21.-23.02.2017}, journal = {Embedded Linux Conference + OpenIoTSummit, Portland, OR, 21.-23.02.2017}, abstract = {The partitioning hypervisor Jailhouse allows us to run safety critical and uncritical applications in parallel on a single SoC. We present our experiences when porting an existing safety and real-time critical application as a Jailhouse guest. It shows a novel and promising approach for implementing mixed-criticality applications with real-time requirements while not losing the benefits of Linux. This is done by static partitioning of hardware resources; guests do not interfere. We will present a multicopter platform running the real-time critical flight stack in an isolated Jailhouse guest. This proves the practicability of Jailhouse as well as the suitability for real-time safety critical systems by porting an existing application to a Jailhouse cell. We stress its concept, point out current hardware limitations such as undesired behaviour, and present possible workarounds and solutions.}, language = {en} } @inproceedings{MurrMauerer, author = {Murr, Florian and Mauerer, Wolfgang}, title = {McFSM: Globally Taming Complex Systems}, series = {2017 IEEE/ACM 3rd International Workshop on Software Engineering for Smart Cyber-Physical Systems (SEsCPS), 21-21 May 2017, Buenos Aires, Argentina}, booktitle = {2017 IEEE/ACM 3rd International Workshop on Software Engineering for Smart Cyber-Physical Systems (SEsCPS), 21-21 May 2017, Buenos Aires, Argentina}, publisher = {IEEE}, doi = {10.1109/SEsCPS.2017.7}, pages = {26 -- 29}, abstract = {Industrial computing devices, in particular cyber-physical, real-time and safety-critical systems, focus on reacting to external events and the need to cooperate with other devices to create a functional system. They are often implemented with languages that focus on a simple, local description of how a component reacts to external input data and stimuli. Despite the trend in modern software architectures to structure systems into largely independent components, the remaining interdependencies still create rich behavioural dynamics even for small systems. Standard and industrial programming approaches do not usually model or extensively describe the global properties of an entire system. Although a large number of approaches to solve this dilemma have been suggested, it remains a hard and error-prone task to implement systems with complex interdependencies correctly. We introduce multiple coupled finite state machines (McFSMs), a novel mechanism that allows us to model and manage such interdependencies. It is based on a consistent, well-structured and simple global description. A sound theoretical foundation is provided, and associated tools allow us to generate efficient low-level code in various programming languages using model-driven techniques.
We also present a domain specific language to express McFSMs and their connections to other systems, to model their dynamic behaviour, and to investigate their efficiency and correctness at compile-time.}, language = {en} } @misc{Mauerer, author = {Mauerer, Wolfgang}, title = {Safety, Security, Quality: Artificial Intelligence versus Common Sense}, series = {Open Source Summit North America and Embedded Linux Conference North America 2020}, journal = {Open Source Summit North America and Embedded Linux Conference North America 2020}, language = {en} } @misc{MauererWagner, author = {Mauerer, Wolfgang and Wagner, Daniel}, title = {Cyclic Tests Unleashed: Large-Scale RT Analysis with Jitterdebugger}, series = {Open Source Summit Japan (Tokio) and OSSNA 17.07.2019}, journal = {Open Source Summit Japan (Tokio) and OSSNA 17.07.2019}, language = {en} } @misc{RamsauerDudaBulwahnetal., author = {Ramsauer, Ralf and Duda, Sebastian and Bulwahn, Lukas and Mauerer, Wolfgang}, title = {The list is our process! An analysis of the kernel's email-based development process}, series = {Embedded Linux Conference Europe, Lyon 28.10.2019}, journal = {Embedded Linux Conference Europe, Lyon 28.10.2019}, language = {en} } @misc{RamsauerKiszkaLohmannetal., author = {Ramsauer, Ralf and Kiszka, Jan and Lohmann, Daniel and Mauerer, Wolfgang}, title = {Implementierung gemischt kritischer Systeme durch statische Hardwarepartitionierung}, series = {Fr{\"u}hjahrstreffen der Fachgruppen Betriebssysteme, Hannover, 2. M{\"a}rz 2018}, journal = {Fr{\"u}hjahrstreffen der Fachgruppen Betriebssysteme, Hannover, 2. M{\"a}rz 2018}, language = {de} } @misc{RamsauerLohmannMauerer, author = {Ramsauer, Ralf and Lohmann, Daniel and Mauerer, Wolfgang}, title = {Quantifying Upstream Integration with PaStA}, series = {LTSI Workshop @ ELC17}, journal = {LTSI Workshop @ ELC17}, language = {en} } @misc{Mauerer, author = {Mauerer, Wolfgang}, title = {Socio-Technical Aspects of Long Term Embedded Systems Maintenance}, series = {Embedded Linux Conference, April 4-6, 2016, San Diego}, journal = {Embedded Linux Conference, April 4-6, 2016, San Diego}, language = {en} } @misc{Mauerer, author = {Mauerer, Wolfgang}, title = {Approaches to Ultra Long-Term System Maintenance}, series = {Embedded Linux Conference Europe (Berlin), 2016}, journal = {Embedded Linux Conference Europe (Berlin), 2016}, language = {en} } @inproceedings{Mauerer, author = {Mauerer, Wolfgang}, title = {A Virtual Computing Platform for the Internet of Things}, series = {Embedded Linux Conference (San Diego), 2016}, booktitle = {Embedded Linux Conference (San Diego), 2016}, language = {de} } @misc{Mauerer, author = {Mauerer, Wolfgang}, title = {OSS Community, Health and Ecosystem Research: Theory and, or Theory versus Practice?}, series = {2nd International Workshop on Software Health (SoHEAL@ICSE, Montr{\´e}al), 2019}, journal = {2nd International Workshop on Software Health (SoHEAL@ICSE, Montr{\´e}al), 2019}, language = {en} } @inproceedings{GaborZielinskiFeldetal., author = {Gabor, Thomas and Zielinski, Sebastian and Feld, Sebastian and Roch, Christoph and Seidel, Christian and Neukart, Florian and Galter, Isabella and Mauerer, Wolfgang and Linnhoff-Popien, Claudia}, title = {Assessing Solution Quality of 3SAT on a Quantum Annealing Platform}, series = {Quantum Technology and Optimization Problems : First International Workshop, QTOP 2019, Munich, Germany, March 18, 2019, Proceedings}, volume = {11413}, booktitle = {Quantum Technology and Optimization Problems : First 
International Workshop, QTOP 2019, Munich, Germany, March 18, 2019, Proceedings}, editor = {Feld, Sebastian and Linnhoff-Popien, Claudia}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-030-14081-6}, doi = {10.1007/978-3-030-14082-3_3}, pages = {23 -- 35}, language = {en} } @article{FeldRochGaboretal., author = {Feld, Sebastian and Roch, Christoph and Gabor, Thomas and Seidel, Christian and Neukart, Florian and Galter, Isabella and Mauerer, Wolfgang and Linnhoff-Popien, Claudia}, title = {A Hybrid Solution Method for the Capacitated Vehicle Routing Problem Using a Quantum Annealer}, series = {Frontiers in ICT}, volume = {6}, journal = {Frontiers in ICT}, publisher = {Frontiers}, doi = {10.3389/fict.2019.00013}, pages = {1 -- 13}, abstract = {The Capacitated Vehicle Routing Problem (CVRP) is an NP-optimization problem (NPO) that has been of great interest for decades for both science and industry. The CVRP is a variant of the vehicle routing problem characterized by capacity constrained vehicles. The aim is to plan tours for vehicles to supply a given number of customers as efficiently as possible. The problem is the combinatorial explosion of possible solutions, which increases superexponentially with the number of customers. Classical solutions provide good approximations to the globally optimal solution. D-Wave's quantum annealer is a machine designed to solve optimization problems. This machine uses quantum effects to speed up computation time compared to classical computers. The challenge in solving the CVRP on the quantum annealer lies in the particular formulation of the optimization problem. For this, it has to be mapped onto a quadratic unconstrained binary optimization (QUBO) problem. Complex optimization problems such as the CVRP can be decomposed into smaller subproblems, which enables a sequential solution of the partitioned problem. This work presents a quantum-classical hybrid solution method for the CVRP. It clarifies whether the implementation of such a method pays off in comparison to existing classical solution methods regarding computation time and solution quality. Several approaches to solving the CVRP are elaborated, the arising problems are discussed, and the results are evaluated in terms of solution quality and computation time.}, language = {en} } @article{Schoenberger, author = {Sch{\"o}nberger, Manuel}, title = {Applicability of Quantum Computing on Database Query Optimization}, series = {SIGMOD '22: proceedings of the 2022 International Conference on Management of Data : June 12-17, 2022, Philadelphia, PA, USA}, journal = {SIGMOD '22: proceedings of the 2022 International Conference on Management of Data : June 12-17, 2022, Philadelphia, PA, USA}, publisher = {ACM}, address = {New York, NY}, doi = {10.1145/3514221.3520257}, pages = {2512 -- 2514}, abstract = {We evaluate the applicability of quantum computing on two fundamental query optimization problems, join order optimization and multi query optimization (MQO). We analyze the problem dimensions that can be solved on current gate-based quantum systems and quantum annealers, the two currently commercially available architectures. First, we evaluate the use of gate-based systems on MQO, previously solved with quantum annealing. We show that, contrary to classical computing, a different architecture requires involved adaptations. We moreover propose a multi-step reformulation for join ordering problems to make them solvable on current quantum systems.
Finally, we systematically evaluate our contributions for gate-based quantum systems and quantum annealers. Doing so, we identify the scope of current limitations, as well as the future potential of quantum computing technologies for database systems.}, language = {en} } @inproceedings{RamsauerLohmannMauerer, author = {Ramsauer, Ralf and Lohmann, Daniel and Mauerer, Wolfgang}, title = {System Software for Manufacturing Systems}, series = {Proc. First European Advances in Digital Transformation Conference (2018)}, booktitle = {Proc. First European Advances in Digital Transformation Conference (2018)}, language = {en} } @phdthesis{Ramsauer, author = {Ramsauer, Ralf Stefan}, title = {OSS Architecture for Mixed-Criticality Systems}, publisher = {Leibniz Universit{\"a}t Hannover}, pages = {188}, abstract = {Computer-based automation in industrial appliances led to a growing number of logically dependent, but physically separated embedded control units per appliance. Many of those components are safety-critical systems, and require adherence to safety standards, which is inconsonant with the relentless demand for features in those appliances. Features lead to a growing number of control units per appliance, and to an increasing complexity of the overall software stack, which is unfavourable for safety certifications. Modern CPUs provide means to revise traditional separation-of-concerns design primitives: the consolidation of systems, which yields new engineering challenges that concern the entire software and system stack. Multi-core CPUs favour economic consolidation of formerly separated systems onto one efficient single hardware unit. Nonetheless, the system architecture must provide means to guarantee the freedom from interference between domains of different criticality. System consolidation demands architectural and engineering strategies to fulfil requirements (e.g., real-time or certifiability criteria) in safety-critical environments. In parallel, there is an ongoing trend to substitute ordinary proprietary base platform software components by mature OSS variants for economic and engineering reasons. There are fundamental differences in the processual properties of OSS and proprietary software development. OSS in safety-critical systems requires development process assessment techniques to build an evidence-based fundament for certification efforts that is based upon empirical software engineering methods. In this thesis, I approach the topic from both sides: the software engineering and the system engineering perspective. In the first part of this thesis, I focus on the assessment of OSS components: I develop software engineering techniques that allow us to quantify characteristics of distributed OSS development processes. I show that ex-post analyses of software development processes can be used to serve as a foundation for certification efforts, as is required for safety-critical systems. In the second part of this thesis, I present a system architecture based on OSS components that allows for consolidation of mixed-criticality systems on a single platform. To this end, I exploit virtualisation extensions of modern CPUs to strictly isolate domains of different criticality.
The proposed architecture shall eradicate any remaining hypervisor activity in order to preserve real-time capabilities of the hardware by design, while guaranteeing strict isolation across domains.}, language = {en} } @article{SaxFeldZielinskietal., author = {Sax, Irmi and Feld, Sebastian and Zielinski, Sebastian and Gabor, Thomas and Linnhoff-Popien, Claudia and Mauerer, Wolfgang}, title = {Towards Understanding Approximation Complexity on a Quantum Annealer}, series = {Digitale Welt}, volume = {4}, journal = {Digitale Welt}, number = {1}, publisher = {DIGITALE WELT Academy c/o Ludwig-Maximilians-Universit{\"a}t M{\"u}nchen}, address = {M{\"u}nchen}, doi = {10.1007/s42354-019-0244-1}, pages = {104}, abstract = {Many industrially relevant problems can be deterministically solved by computers in principle, but are intractable in practice, as the seminal P/NP dichotomy of complexity theory and Cobham's thesis testify. For the many NP-complete problems, industry needs to resort to using heuristics or approximation algorithms. For approximation algorithms, there is a more refined classification into complexity classes that goes beyond the simple P/NP dichotomy. As is well known, approximation classes form a hierarchy, that is, FPTAS \subseteq PTAS \subseteq APX \subseteq NPO. This classification gives a more realistic notion of complexity but, unless unexpected breakthroughs happen for fundamental problems like P = NP or related questions, there is no known efficient algorithm that can solve such problems exactly on a realistic computer. Therefore, new ways of computation are sought. Recently, considerable hope was placed on the possible computational powers of quantum computers and quantum annealing (QA) in particular. However, the precise benefits of such a drastic shift in hardware are still largely uncharted territory. Firstly, the exact relations between classical and quantum complexity classes pose many open questions, and secondly, technical details of formulating and implementing quantum algorithms play a crucial role in real-world applications. Guided by the hierarchy of classical optimisation complexity classes, we discuss how to map problems of each class to a quantum annealer. Those problems are the Minimum Multiprocessor Scheduling (MMS) problem, the Minimum Vertex Cover (MVC) problem and the Maximum Independent Set (MIS) problem. We experimentally investigate if and how the degree of approximability influences implementation and run-time performance. Our experiments indicate a discrepancy between classical approximation complexity and QA behaviour: Problems MIS and MVC, members of APX and PTAS, respectively, exhibit better solution quality on a QA than MMS, which is in FPTAS, even despite the use of preprocessing for the latter. This leads to the hypothesis that traditional classifications do not immediately extend to the quantum annealing domain, at least when the properties of real-world devices are taken into account. A structural reason why FPTAS problems do not show good solution quality might be the use of an inequality in their problem description. Formulating these inequalities on quantum hardware (mostly done by formulating a Quadratic Unconstrained Binary Optimisation (QUBO) problem in the form of a matrix) requires a lot of hardware space, which makes finding an optimal solution more difficult. Reducing the density of a QUBO is possible by appropriately pruning QUBO matrices.
For the problems considered in our evaluation, we find that the achievable solution quality on a real-world machine is unexpectedly robust against pruning, often up to ratios as high as 50\% or more. Since quantum annealers are probabilistic machines by design, the loss in solution quality is only of subordinate relevance, especially considering that the pruning of QUBO matrices allows for solving larger problem instances on hardware of a given capacity. We quantitatively discuss the interplay between these factors.}, language = {en} } @inproceedings{RamsauerKiszkaMauerer, author = {Ramsauer, Ralf and Kiszka, Jan and Mauerer, Wolfgang}, title = {A Novel Software Architecture for Mixed Criticality Systems}, series = {Digital Transformation in Semiconductor Manufacturing: Proceedings of the 1st and 2nd European Advances in Digital Transformation Conference, EADTC 2018, Zittau, Germany and EADTC 2019, Milan, Italy}, booktitle = {Digital Transformation in Semiconductor Manufacturing: Proceedings of the 1st and 2nd European Advances in Digital Transformation Conference, EADTC 2018, Zittau, Germany and EADTC 2019, Milan, Italy}, editor = {Keil, Sophia and Lasch, Rainer and Lindner, Fabian and Lohmer, Jacob}, publisher = {Springer International Publishing}, address = {Cham}, doi = {10.1007/978-3-030-48602-0_11}, pages = {121 -- 128}, abstract = {The advent of multi-core CPUs in nearly all embedded markets has prompted an architectural trend towards combining safety critical and uncritical software on single hardware units. We present a novel architecture for mixed criticality systems based on Linux that allows us to consolidate critical and uncritical parts onto a single hardware unit. CPU virtualisation extensions enable strict and static partitioning of hardware by direct assignment of resources, which allows us to boot additional operating systems or bare metal applications running alongside Linux. The hypervisor Jailhouse is at the core of the architecture and ensures that the resulting domains may serve workloads of different criticality and cannot interfere in an unintended way. This retains Linux's feature-richness in uncritical parts, while frugal safety and real-time critical applications execute in isolated domains. Architectural simplicity is a central aspect of our approach and a precondition for reliable implementability and successful certification. While standard virtualisation extensions provided by current hardware seem to suffice for a straightforward implementation of our approach, there are a number of further limitations that need to be worked around.
This paper discusses the arising issues, and evaluates the suitability of our approach for real-world safety and real-time critical scenarios.}, language = {en} } @inproceedings{SaxFeldZielinskietal., author = {Sax, Irmi and Feld, Sebastian and Zielinski, Sebastian and Gabor, Thomas and Linnhoff-Popien, Claudia and Mauerer, Wolfgang}, title = {Approximate approximation on a quantum annealer}, series = {Proceedings of the 17th ACM International Conference on Computing Frontiers (CF '20): Catania Sicily Italy 11.05.2020 -13.05.2020}, booktitle = {Proceedings of the 17th ACM International Conference on Computing Frontiers (CF '20): Catania Sicily Italy 11.05.2020 -13.05.2020}, editor = {Palesi, Maurizio}, publisher = {Association for Computing Machinery}, address = {New York, NY, United States}, isbn = {9781450379564}, doi = {10.1145/3387902.3392635}, pages = {108 -- 117}, abstract = {Many problems of industrial interest are NP-complete, and quickly exhaust resources of computational devices with increasing input sizes. Quantum annealers (QA) are physical devices that aim at this class of problems by exploiting quantum mechanical properties of nature. However, they compete with efficient heuristics and probabilistic or randomised algorithms on classical machines that allow for finding approximate solutions to large NP-complete problems. While first implementations of QA have become commercially available, their practical benefits are far from fully explored. To the best of our knowledge, approximation techniques have not yet received substantial attention. In this paper, we explore how approximate versions of problems of varying degree can be systematically constructed for quantum annealer programs, and how this influences result quality or the handling of larger problem instances on a given set of qubits. We illustrate various approximation techniques on both simulations and real QA hardware, using different seminal problems, and interpret the results to contribute towards a better understanding of the real-world power and limitations of current-state and future quantum computing.}, language = {en} } @inproceedings{KruegerMauerer, author = {Kr{\"u}ger, Tom and Mauerer, Wolfgang}, title = {Quantum Annealing-Based Software Components}, series = {Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops (ICSE'20): Seoul Republic of Korea 27.06.2020 - 19.07.2020}, booktitle = {Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops (ICSE'20): Seoul Republic of Korea 27.06.2020 - 19.07.2020}, publisher = {Association for Computing Machinery}, address = {New York, NY}, isbn = {9781450379632}, doi = {10.1145/3387940.3391472}, pages = {445 -- 450}, abstract = {Quantum computers have the potential to solve problems more efficiently than classical computers. While first commercial prototypes have become available, the performance of such machines in practical application is still subject to exploration. Quantum computers will not entirely replace classical machines, but serve as accelerators for specific problems. This necessitates integrating quantum computational primitives into existing applications. In this paper, we perform a case study on how to augment existing software with quantum computational primitives for the Boolean satisfiability problem (SAT) implemented using a quantum annealer (QA).
We discuss relevant quality measures for quantum components, and show that mathematically equivalent, but structurally different ways of transforming SAT to a QA can lead to substantial differences regarding these qualities. We argue that engineers need to be aware that (and which) details, although they may be less relevant in traditional software engineering, require considerable attention in quantum computing.}, language = {en} } @inproceedings{RamsauerBulwahnLohmannetal., author = {Ramsauer, Ralf and Bulwahn, Lukas and Lohmann, Daniel and Mauerer, Wolfgang}, title = {The Sound of Silence : Mining Security Vulnerabilities from Secret Integration Channels in Open-Source Projects}, series = {Proceedings of the 2020 ACM SIGSAC Conference on Cloud Computing Security Workshop: 09.11.2020, virtual event}, booktitle = {Proceedings of the 2020 ACM SIGSAC Conference on Cloud Computing Security Workshop: 09.11.2020, virtual event}, editor = {Zhang, Yinqian and Sion, Radu}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {9781450380843}, doi = {10.1145/3411495.3421360}, pages = {147 -- 157}, abstract = {Public development processes are a key characteristic of open source projects. However, fixes for vulnerabilities are usually discussed privately among a small group of trusted maintainers, and integrated without prior public involvement. This is supposed to prevent early disclosure, and cope with embargo and non-disclosure agreement (NDA) rules. While regular development activities leave publicly available traces, fixes for vulnerabilities that bypass the standard process do not. We present a data-mining-based approach to detect code fragments that arise from such infringements of the standard process. By systematically mapping public development artefacts to source code repositories, we can exclude regular process activities, and infer irregularities that stem from non-public integration channels. For the Linux kernel, the most crucial component of many systems, we apply our method to a period of seven months before the release of Linux 5.4. We find 29 commits that address 12 vulnerabilities. For these vulnerabilities, our approach provides a temporal advantage of 2 to 179 days to design exploits before public disclosure takes place, and fixes are rolled out. Established responsible disclosure approaches in open development processes are supposed to limit premature visibility of security vulnerabilities. However, our approach shows that, instead, they open additional possibilities to uncover such changes that thwart the very premise. We conclude by discussing implications and partial countermeasures.}, language = {en} } @inproceedings{WinkerGroppeUotilaetal., author = {Winker, Tobias and Groppe, Sven and Uotila, Valter Johan Edvard and Yan, Zhengtong and Lu, Jiaheng and Franz, Maja and Mauerer, Wolfgang}, title = {Quantum Machine Learning: Foundation, New Techniques, and Opportunities for Database Research}, series = {SIGMOD '23, proceedings of the 2023 International Conference on Management of Data: June 18-23, 2023, Seattle, WA, USA}, booktitle = {SIGMOD '23, proceedings of the 2023 International Conference on Management of Data: June 18-23, 2023, Seattle, WA, USA}, publisher = {ACM}, address = {New York}, doi = {10.1145/3555041.3589404}, pages = {8}, abstract = {In the last few years, the field of quantum computing has experienced remarkable progress.
Prototypes of quantum computers already exist and have been made available to users through cloud services (e.g., IBM Q experience, Google quantum AI, or Xanadu quantum cloud). While fault-tolerant and large-scale quantum computers are not available yet (and may not be for a long time, if ever), the potential of this new technology is undeniable. Quantum algorithms either have the proven ability to outperform classical approaches for several tasks, or cannot be efficiently simulated by classical means under reasonable complexity-theoretic assumptions. Even imperfect current-day technology is speculated to exhibit computational advantages over classical systems. Recent research is using quantum computers to solve machine learning tasks. Meanwhile, the database community has already successfully applied various machine learning algorithms for data management tasks, so combining the fields seems to be a promising endeavour. However, quantum machine learning is a new research field for most database researchers. In this tutorial, we provide a fundamental introduction to quantum computing and quantum machine learning and show the potential benefits and applications for database research. In addition, we demonstrate how to apply quantum machine learning to the join order optimization problem for databases.}, language = {en} } @inproceedings{WinterspergerSafiMauerer, author = {Wintersperger, Karen and Safi, Hila and Mauerer, Wolfgang}, title = {QPU-System Co-Design for Quantum HPC Accelerators?}, series = {Architecture of Computing Systems: 35th International Conference, ARCS 2022, Heilbronn, Germany, September 13-15, 2022, Proceedings}, booktitle = {Architecture of Computing Systems: 35th International Conference, ARCS 2022, Heilbronn, Germany, September 13-15, 2022, Proceedings}, publisher = {Springer}, isbn = {978-3-031-21866-8}, doi = {10.1007/978-3-031-21867-5_7}, pages = {100 -- 114}, abstract = {The use of quantum processing units (QPUs) promises speed-ups for solving computational problems, but the quantum devices currently available possess only a very limited number of qubits and suffer from considerable imperfections. One possibility to progress towards practical utility is to use a co-design approach: Problem formulation and algorithm, but also the physical QPU properties are tailored to the specific application. Since QPUs will likely be used as accelerators for classical computers, details of systemic integration into existing architectures are another lever to influence and improve the practical utility of QPUs. In this work, we investigate the influence of different parameters on the runtime of quantum programs on tailored hybrid CPU-QPU systems. We study the influence of communication times between CPU and QPU, how adapting QPU designs influences quantum and overall execution performance, and how these factors interact. Using a simple model that allows for estimating which design choices should be subjected to optimisation for a given task, we provide an intuition to the HPC community on potentials and limitations of co-design approaches.
We also discuss physical limitations for implementing the proposed changes on real quantum hardware devices.}, language = {en} } @book{Mauerer, author = {Mauerer, Wolfgang}, title = {Linux-Kernelarchitektur : Konzepte, Strukturen und Algorithmen von Kernel 2.6}, publisher = {Hanser}, address = {M{\"u}nchen}, isbn = {9783446225664}, language = {de} } @article{MauererSawallischHillieretal., author = {Mauerer, Wolfgang and Sawallisch, J. and Hillier, G. and Oberth{\"u}r, S. and H{\"o}nick, S.}, title = {Real-Time Android: Achieving determinism and ease of use}, series = {Embedded World Conference; Nuremberg, Germany, February 25, 2014 - February 27, 2014}, journal = {Embedded World Conference; Nuremberg, Germany, February 25, 2014 - February 27, 2014}, pages = {1 -- 7}, language = {en} } @article{GabrielWittmannSychetal., author = {Gabriel, Christian and Wittmann, Christoffer and Sych, Denis and Dong, Ruifang and Mauerer, Wolfgang and Andersen, Ulrik L. and Marquardt, Christoph and Leuchs, Gerd}, title = {A generator for unique quantum random numbers based on vacuum states}, series = {Nature Photonics}, volume = {4}, journal = {Nature Photonics}, number = {10}, publisher = {Springer}, organization = {Springer}, doi = {10.1038/nphoton.2010.197}, pages = {711 -- 715}, abstract = {Random numbers are a valuable component in diverse applications that range from simulations and gambling to cryptography. The quest for true randomness in these applications has engendered a large variety of different proposals for producing random numbers based on the foundational unpredictability of quantum mechanics. However, most approaches do not consider that a potential adversary could have knowledge about the generated numbers, so the numbers are not verifiably random and unique. Here we present a simple experimental setup based on homodyne measurements that uses the purity of a continuous-variable quantum vacuum state to generate unique random numbers. We use the intrinsic randomness in measuring the quadratures of a mode in the lowest energy vacuum state, which cannot be correlated to any other state. The simplicity of our source and its verifiably unique randomness are important attributes for achieving high-reliability, high-speed and low-cost quantum random number generators.}, language = {en} } @article{HelwigMauererSilberhorn, author = {Helwig, Wolfram and Mauerer, Wolfgang and Silberhorn, Christine}, title = {Multimode states in decoy-based quantum-key-distribution protocols}, series = {Physical Review A}, volume = {80}, journal = {Physical Review A}, number = {5}, publisher = {American Physical Society}, doi = {10.1103/physreva.80.052326}, abstract = {Every security analysis of quantum-key distribution (QKD) relies on a faithful modeling of the employed quantum states. Many photon sources, such as for instance a parametric down-conversion (PDC) source, require a multimode description but are usually only considered in a single-mode representation. In general, the important claim in decoy-based QKD protocols for indistinguishability between signal and decoy states does not hold for all sources. We derive bounds on the single-photon transmission probability and error rate for multimode states and apply these bounds to the output state of a PDC source. We observe two opposing effects on the secure key rate. First, the multimode structure of the state gives rise to a new attack that decreases the key rate.
Second, more contributing modes change the photon number distribution from a thermal toward a Poissonian distribution, which increases the key rate.}, language = {en} } @article{ColdenstrodtRongeLundeenPregnelletal., author = {Coldenstrodt-Ronge, Hendrik B. and Lundeen, Jeff S. and Pregnell, Kenny L. and Feito, Alvaro and Smith, Brian J. and Mauerer, Wolfgang and Silberhorn, Christine and Eisert, Jens and Plenio, Martin B. and Walmsley, Ian A.}, title = {A proposed testbed for detector tomography}, series = {Journal of Modern Optics}, volume = {56}, journal = {Journal of Modern Optics}, number = {2-3}, publisher = {Taylor \& Francis}, doi = {10.1080/09500340802304929}, pages = {432 -- 441}, abstract = {Measurement is the only part of a general quantum system that has yet to be characterised experimentally in a complete manner. Detector tomography provides a procedure for doing just this; an arbitrary measurement device can be fully characterised, and thus calibrated, in a systematic way without access to its components or its design. The result is a reconstructed POVM containing the measurement operators associated with each measurement outcome. We consider two detectors, a single-photon detector and a photon-number counter, and propose an easily realised experimental apparatus to perform detector tomography on them. We also present a method of visualising the resulting measurement operators.}, language = {en} } @article{MauererHelwigSilberhorn, author = {Mauerer, Wolfgang and Helwig, Wolfram and Silberhorn, Christine}, title = {Recent developments in quantum key distribution: Theory and practice}, series = {Annalen der Physik}, volume = {17}, journal = {Annalen der Physik}, number = {2-3}, publisher = {Wiley}, doi = {10.1002/andp.200710284}, pages = {158 -- 175}, abstract = {Quantum key distribution is among the foremost applications of quantum mechanics, both in terms of fundamental physics and as a technology on the brink of commercial deployment. Starting from principal schemes and initial proofs of unconditional security for perfect systems, much effort has gone into providing secure schemes which can cope with numerous experimental imperfections unavoidable in real-world implementations. In this paper, we provide a comparison of various schemes and protocols. We analyse their efficiency and performance when implemented with imperfect physical components. We consider how experimental faults are accounted for using effective parameters. We compare various recent protocols and provide guidelines as to which components promise the greatest advances when improved.}, language = {en} } @article{MauererAvenhausHelwigetal., author = {Mauerer, Wolfgang and Avenhaus, Malte and Helwig, Wolfram and Silberhorn, Christine}, title = {How colors influence numbers: Photon statistics of parametric down-conversion}, series = {Physical Review A (Phys. Rev. A)}, volume = {80}, journal = {Physical Review A (Phys. Rev. A)}, number = {5}, publisher = {American Physical Society}, doi = {10.1103/physreva.80.053815}, abstract = {Parametric down-conversion (PDC) is a technique of ubiquitous experimental significance in the production of nonclassical, photon-number-correlated twin beams. Standard theory of PDC as a two-mode squeezing process predicts and homodyne measurements observe a thermal photon number distribution per beam. Recent experiments have obtained conflicting distributions.
In this article, we explain the observation by an a priori theoretical model solely based on directly accessible physical quantities. We compare our predictions with experimental data and find excellent agreement.}, language = {en} } @article{Mauerer, author = {Mauerer, Wolfgang}, title = {Datenvisualisierung mit Ggplot2}, series = {Linux-Magazin}, journal = {Linux-Magazin}, number = {3}, publisher = {Linux New Media AG}, abstract = {Wer seine Daten mit ansehnlichen und informativen Graphen veranschaulichen m{\"o}chte, braucht meist viel Geduld. Die R-Erweiterung Ggplot2 bringt System in die Grafik, dr{\"u}ckt sich in knappem Quellcode aus und bl{\"a}st frischen Wind in den Alltag der Datenvisualisierung.}, language = {en} } @article{RohdeMauererSilberhorn, author = {Rohde, Peter P. and Mauerer, Wolfgang and Silberhorn, Christine}, title = {Spectral structure and decompositions of optical states, and their applications}, series = {New Journal of Physics}, volume = {9}, journal = {New Journal of Physics}, number = {4}, publisher = {IOP Publishing}, doi = {10.1088/1367-2630/9/4/091}, pages = {91}, abstract = {We discuss the spectral structure and decomposition of multi-photon states. Ordinarily 'multi-photon states' and 'Fock states' are regarded as synonymous. However, when the spectral degrees of freedom are included this is not the case, and the class of 'multi-photon' states is much broader than the class of 'Fock' states. We discuss the criteria for a state to be considered a Fock state. We then address the decomposition of general multi-photon states into bases of orthogonal eigenmodes, building on existing multi-mode theory, and introduce an occupation number representation that provides an elegant description of such states. This representation allows us to work in bases imposed by experimental constraints, simplifying calculations in many situations. Finally we apply this technique to several example situations, which are highly relevant for state of the art experiments. These include Hong-Ou-Mandel interference, spectral filtering, finite bandwidth photo-detection, homodyne detection and the conditional preparation of Schr{\"o}dinger kitten and Fock states. Our techniques allow for very simple descriptions of each of these examples.}, language = {en} } @article{MauererSilberhorn, author = {Mauerer, Wolfgang and Silberhorn, Christine}, title = {Quantum key distribution with passive decoy state selection}, series = {Physical Review A}, volume = {75}, journal = {Physical Review A}, number = {5}, publisher = {American Physical Society}, doi = {10.1103/physreva.75.050305}, abstract = {We propose a quantum key distribution scheme which closely matches the performance of a perfect single photon source. It nearly attains the physical upper bound in terms of key generation rate and maximally achievable distance. Our scheme relies on a practical setup based on a parametric downconversion source and present day, nonideal photon-number detection. Arbitrary experimental imperfections which lead to bit errors are included. We select decoy states by classical postprocessing. 
This allows one to improve the effective signal statistics and achievable distance.}, language = {en} } @book{Mauerer, author = {Mauerer, Wolfgang}, title = {Professional Linux kernel architecture}, series = {Wrox professional guides}, journal = {Wrox professional guides}, publisher = {Wiley}, address = {Indianapolis}, isbn = {9780470343432}, abstract = {Find an introduction to the architecture, concepts and algorithms of the Linux kernel in Professional Linux Kernel Architecture, a guide to the kernel sources and the large number of connections among subsystems. Find an introduction to the relevant structures and functions exported by the kernel to userland, understand the theoretical and conceptual aspects of the Linux kernel and Unix derivatives, and gain a deeper understanding of the kernel. Learn how to reduce the vast amount of information contained in the kernel sources and obtain the skills necessary to understand the kernel sources.}, language = {en} } @book{Mauerer, author = {Mauerer, Wolfgang}, title = {Textverarbeitung mit LaTeX 2e unter UNIX}, publisher = {Hanser}, address = {M{\"u}nchen}, isbn = {9783446189096}, language = {de} } @article{Mauerer, author = {Mauerer, Wolfgang}, title = {Gimp-Perl: Gimp scripting for the rest of us}, series = {Linux Gazette}, journal = {Linux Gazette}, number = {51}, issn = {1934-371X}, language = {en} } @inproceedings{MauererSilberhorn, author = {Mauerer, Wolfgang and Silberhorn, Christine}, title = {Numerical Analysis of Parametric Downconversion}, series = {AIP Conference Proceedings}, volume = {1110}, booktitle = {AIP Conference Proceedings}, number = {1}, publisher = {AIP Publishing}, doi = {10.1063/1.3131312}, abstract = {Parametric downconversion (PDC) is a popular technique to produce twin beams of photons that are entangled in multiple degrees of freedom. The generated states form the basis for numerous applications that require entanglement. An exact quantification of this resource is therefore essential, for instance for quantum cryptography that relies on a complete knowledge of the correlation contained in the state. While the determination of an entanglement monotone for the PDC process is only possible analytically in special cases, an exact calculation must usually be performed numerically. Recent work by Mikhailova et al. [2] analyses a certain class of PDC states for which the concurrence entanglement measure can be obtained by an analytical approximation. In this contribution, we analyse the validity of the approximation by comparison with exact numerical methods.}, language = {en} } @article{AvenhausColdenstrodtRongeLaihoetal., author = {Avenhaus, Malte and Coldenstrodt-Ronge, Hendrik B. and Laiho, K. and Mauerer, Wolfgang and Walmsley, I. A. and Silberhorn, Christine}, title = {Photon Number Statistics of Multimode Parametric Down-Conversion}, series = {Physical Review Letters}, volume = {101}, journal = {Physical Review Letters}, number = {5}, publisher = {American Physical Society}, doi = {10.1103/physrevlett.101.053601}, abstract = {We experimentally analyze the complete photon number statistics of parametric down-conversion and ascertain the influence of multimode effects. Our results clearly reveal a difference between the single-mode theoretical description and the measured distributions.
Further investigations assure the applicability of loss-tolerant photon number reconstruction and prove strict photon number correlation between signal and idler modes.}, language = {en} } @inproceedings{HofmannRiehleKolassaetal., author = {Hofmann, Gottfried and Riehle, Dirk and Kolassa, Carsten and Mauerer, Wolfgang}, title = {A Dual Model of Open Source License Growth}, series = {Open Source Software: Quality Verification : 9th IFIP WG 2.13 International Conference, OSS 2013, Koper-Capodistria, Slovenia, June 25-28, 2013, Proceedings}, volume = {404}, booktitle = {Open Source Software: Quality Verification : 9th IFIP WG 2.13 International Conference, OSS 2013, Koper-Capodistria, Slovenia, June 25-28, 2013, Proceedings}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-642-38928-3_18}, pages = {245 -- 256}, abstract = {Every open source project needs to decide on an open source license. This decision is of high economic relevance: Just which license is the best one to help the project grow and attract a community? The most common question is: Should the project choose a restrictive (reciprocal) license or a more permissive one? As an important step towards answering this question, this paper analyses actual license choice and correlated project growth from ten years of open source projects. It provides closed analytical models and finds that around 2001 a reversal in license choice occurred from restrictive towards permissive licenses.}, language = {en} } @unpublished{OberthuerMauerer, author = {Oberth{\"u}r, S. and Mauerer, Wolfgang}, title = {Sicherheit von Android-Systemen}, language = {de} } @inproceedings{Mauerer, author = {Mauerer, Wolfgang}, title = {Supershrink Linux - and beyond}, series = {Embedded Linux Conference Europe (Dublin), 2015}, booktitle = {Embedded Linux Conference Europe (Dublin), 2015}, language = {en} } @inproceedings{GabrielWittmannHackeretal., author = {Gabriel, Christian and Wittmann, Ch. and Hacker, B. and Mauerer, Wolfgang and Huntington, E. and Sabuncu, M. and Marquardt, Ch. and Leuchs, G.}, title = {A high-speed secure quantum random number generator based on vacuum states}, series = {IEEE/OSA Conference on Lasers and Electro-Optics (CLEO), 06-11 May 2012, San Jose CA USA}, booktitle = {IEEE/OSA Conference on Lasers and Electro-Optics (CLEO), 06-11 May 2012, San Jose CA USA}, publisher = {Optical Society of America}, abstract = {A high-speed continuous-variable quantum random bit generator with an expected effective bit generation rate of up to 10 GBit/s is presented. The obtained bit sequences are truly random and unique, i.e. 
they cannot be known by an adversary.}, language = {en} } @inproceedings{UotilaGroppeGruenwaldetal., author = {Uotila, Valter and Groppe, Sven and Gruenwald, Le and Lu, Jiaheng and Mauerer, Wolfgang}, title = {Preface QDSM}, series = {Joint Workshops at 49th International Conference on Very Large Data Bases (VLDBW'23) — International Workshop on Quantum Data Science and Management (QDSM'23), August 28 - September 1, 2023, Vancouver, Canada (CEUR Workshop Proceedings)}, booktitle = {Joint Workshops at 49th International Conference on Very Large Data Bases (VLDBW'23) — International Workshop on Quantum Data Science and Management (QDSM'23), August 28 - September 1, 2023, Vancouver, Canada (CEUR Workshop Proceedings)}, publisher = {RWTH Aachen, Sun SITE Central Europe}, address = {Aachen}, abstract = {The first international workshop on Quantum Data Science and Management (QDSM), co-located with VLDB 2023, is centered around addressing the possibilities of quantum computing for data science and data management. Quantum computing is a relatively new and emerging field that is believed to have huge computational potential in the future. In the QDSM workshop, we want to provide a venue for discussing and publishing novel results of applying quantum computing to hard data science and data management problems. These problems include join order optimization, designing efficient quantum feature maps, studying possibilities of solving linear programs with quantum algorithms, and divergent index tuning with quantum machine learning. Besides, we include a short and visionary survey on quantum computing for databases. The workshop provides a platform for active discussion on these and related topics.}, language = {en} } @article{WinterspergerDommertEhmeretal., author = {Wintersperger, Karen and Dommert, Florian and Ehmer, Thomas and Hoursanov, Andrey and Klepsch, Johannes and Mauerer, Wolfgang and Reuber, Georg and Strohm, Thomas and Yin, Ming and Luber, Sebastian}, title = {Neutral Atom Quantum Computing Hardware: Performance and End-User Perspective}, series = {EPJ Quantum Technology}, volume = {10}, journal = {EPJ Quantum Technology}, publisher = {Springer Nature}, doi = {10.1140/epjqt/s40507-023-00190-1}, pages = {27}, abstract = {We present an industrial end-user perspective on the current state of quantum computing hardware for one specific technological approach, the neutral atom platform. Our aim is to assist developers in understanding the impact of the specific properties of these devices on the effectiveness of algorithm execution. Based on discussions with different vendors and recent literature, we discuss the performance data of the neutral atom platform. Specifically, we focus on the physical qubit architecture, which affects state preparation, qubit-to-qubit connectivity, gate fidelities, native gate instruction set, and individual qubit stability. These factors determine both the quantum-part execution time and the end-to-end wall clock time relevant for end-users, but also the ability to perform fault-tolerant quantum computation in the future. We end with an overview of which applications have been shown to be well suited for the peculiar properties of neutral atom-based quantum computers.}, language = {en} } @inproceedings{TrespUdluftHeinetal., author = {Tresp, Volker and Udluft, Steffen and Hein, Daniel and Hauptmann, Werner and Leib, Martin and Mutschler, Christopher and Scherer, Daniel D.
and Mauerer, Wolfgang}, title = {Workshop Summary: Quantum Machine Learning}, series = {2023 IEEE International Conference on Quantum Computing and Engineering, Bellevue, WA, United States, September 17-22, 2023}, booktitle = {2023 IEEE International Conference on Quantum Computing and Engineering, Bellevue, WA, United States, September 17-22, 2023}, publisher = {IEEE}, doi = {10.1109/QCE57702.2023.10174}, language = {en} } @inproceedings{RamsauerLohmannMauerer, author = {Ramsauer, Ralf and Lohmann, Daniel and Mauerer, Wolfgang}, title = {The List is the Process: Reliable Pre-Integration Tracking of Commits on Mailing Lists}, series = {2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE), 25-31 May 2019, Montreal, QC, Canada}, booktitle = {2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE), 25-31 May 2019, Montreal, QC, Canada}, publisher = {IEEE}, doi = {10.1109/ICSE.2019.00088}, pages = {807 -- 818}, abstract = {A considerable corpus of research on software evolution focuses on mining changes in software repositories, but omits their pre-integration history. We present a novel method for tracking this otherwise invisible evolution of software changes on mailing lists by connecting all early revisions of changes to their final version in repositories. Since artefact modifications on mailing lists are communicated by updates to fragments (i.e., patches) only, identifying semantically similar changes is a non-trivial task that our approach solves in a language-independent way. We evaluate our method on high-profile open source software (OSS) projects like the Linux kernel, and validate its high accuracy using an elaborately created ground truth. Our approach can be used to quantify properties of OSS development processes, which is an essential requirement for using OSS in reliable or safety-critical industrial products, where certifiability and conformance to processes are crucial. The high accuracy of our technique allows, to the best of our knowledge, for the first time to quantitatively determine if an open development process effectively aligns with given formal process requirements.}, language = {en} } @inproceedings{SafiWinterspergerMauerer, author = {Safi, Hila and Wintersperger, Karen and Mauerer, Wolfgang}, title = {Influence of HW-SW-Co-Design on Quantum Computing Scalability}, series = {2023 IEEE International Conference on Quantum Software (QSW), Chicago, IL, USA, 02-08 July 2023}, booktitle = {2023 IEEE International Conference on Quantum Software (QSW), Chicago, IL, USA, 02-08 July 2023}, publisher = {IEEE}, isbn = {979-8-3503-0479-4}, doi = {10.1109/QSW59989.2023.00022}, pages = {104 -- 115}, abstract = {The use of quantum processing units (QPUs) promises speed-ups for solving computational problems. Yet, current devices are limited by the number of qubits and suffer from significant imperfections, which prevents achieving quantum advantage. To step towards practical utility, one approach is to apply hardware-software co-design methods. This can involve tailoring problem formulations and algorithms to the quantum execution environment, but also entails the possibility of adapting physical properties of the QPU to specific applications. In this work, we follow the latter path, and investigate how key figures— circuit depth and gate count—required to solve four cornerstone NP-complete problems vary with tailored hardware properties. 
Our results reveal that near-optimal performance and properties do not necessarily require optimal quantum hardware, but can be achieved with much simpler structures that can potentially be realised for many hardware approaches. Using statistical analysis techniques, we additionally identify an underlying general model that applies to all subject problems. This suggests that our results may be universally applicable to other algorithms and problem domains, and tailored QPUs can find utility outside their initially envisaged problem domains. The substantial possible improvements nonetheless highlight the importance of QPU tailoring to progress towards practical deployment and scalability of quantum software.}, language = {en} } @inproceedings{GreiweKruegerMauerer, author = {Greiwe, Felix and Kr{\"u}ger, Tom and Mauerer, Wolfgang}, title = {Effects of Imperfections on Quantum Algorithms}, series = {2023 IEEE International Conference on Quantum Software (QSW), Chicago, IL, USA, 02-08 July 2023}, booktitle = {2023 IEEE International Conference on Quantum Software (QSW), Chicago, IL, USA, 02-08 July 2023}, publisher = {IEEE}, doi = {10.1109/QSW59989.2023.00014}, pages = {31 -- 42}, abstract = {Quantum computers promise considerable speedups over classical approaches, which has raised interest from many disciplines. Since any currently available implementations suffer from noise and imperfections, achieving concrete speedups for meaningful problem sizes remains a major challenge. Yet, imperfections and noise may remain present in quantum computing for a long while. Such limitations play no role in classical software computing, and software engineers are typically not well accustomed to considering such imperfections, although they substantially influence core properties of software and systems. In this paper, we show how to model imperfections with an approach tailored to (quantum) software engineers. We intuitively illustrate, using numerical simulations, how imperfections influence core properties of quantum algorithms on NISQ systems, and show possible options for tailoring future NISQ machines to improve system performance in a co-design approach. Our results are obtained from a software framework that we provide in the form of an easy-to-use reproduction package. It does not require computer scientists to acquire deep physical knowledge on noise, yet provides tangible and intuitively accessible means of interpreting the influence of noise on common software quality and performance indicators.}, language = {en} } @incollection{YueMauererAlietal., author = {Yue, Tao and Mauerer, Wolfgang and Ali, Shaukat and Taibi, Davide}, title = {Challenges and Opportunities in Quantum Software Architecture}, series = {Software Architecture: Research Roadmaps from the Community}, booktitle = {Software Architecture: Research Roadmaps from the Community}, isbn = {978-3-031-36847-9}, doi = {10.1007/978-3-031-36847-9_1}, pages = {1 -- 23}, abstract = {Quantum computing is a relatively new paradigm that has raised considerable interest in physics and computer science in general but has so far received little attention in software engineering and architecture. Hybrid applications that consist of both quantum and classical components require the development of appropriate quantum software architectures. However, given that quantum software engineering (QSE) in general is a new research area, quantum software architecture, a sub-research area in QSE, is also understudied.
The goal of this chapter is to provide a list of research challenges and opportunities for such architectures. In addition, to make the content understandable to a broader computer science audience, we provide a brief overview of quantum computing and explain the essential technical foundations.}, language = {en} } @inproceedings{JoblinApelHunsenetal., author = {Joblin, Mitchell and Apel, Sven and Hunsen, Claus and Mauerer, Wolfgang}, title = {Classifying Developers into Core and Peripheral: An Empirical Study on Count and Network Metrics}, series = {2017 IEEE/ACM 39th International Conference on Software Engineering (ICSE), Buenos Aires, Argentina, 20-28 May 2017}, booktitle = {2017 IEEE/ACM 39th International Conference on Software Engineering (ICSE), Buenos Aires, Argentina, 20-28 May 2017}, publisher = {IEEE}, doi = {10.1109/icse.2017.23}, pages = {164 -- 174}, abstract = {Knowledge about the roles developers play in a software project is crucial to understanding the project's collaborative dynamics. In practice, developers are often classified according to the dichotomy of core and peripheral roles. Typically, count-based operationalizations, which rely on simple counts of individual developer activities (e.g., number of commits), are used for this purpose, but there is concern regarding their validity and ability to elicit meaningful insights. To shed light on this issue, we investigate whether count-based operationalizations of developer roles produce consistent results, and we validate them with respect to developers' perceptions by surveying 166 developers. Improving over the state of the art, we propose a relational perspective on developer roles, using fine-grained developer networks modeling the organizational structure, and by examining developer roles in terms of developers' positions and stability within the developer network. In a study of 10 substantial open-source projects, we found that the primary difference between the count-based and our proposed network-based core-peripheral operationalizations is that the network-based ones agree more with developer perception than count-based ones. Furthermore, we demonstrate that a relational perspective can reveal further meaningful insights, such as that core developers exhibit high positional stability, upper positions in the hierarchy, and high levels of coordination with other core developers, which confirms assumptions of previous work.}, language = {en} } @inproceedings{SchoenbergerTrummerMauerer, author = {Sch{\"o}nberger, Manuel and Trummer, Immanuel and Mauerer, Wolfgang}, title = {Quantum Optimisation of General Join Trees}, series = {Joint Workshops at 49th International Conference on Very Large Data Bases (VLDBW'23) — International Workshop on Quantum Data Science and Management (QDSM'23), August 28 - September 1, 2023, Vancouver, Canada (CEUR Workshop Proceedings)}, booktitle = {Joint Workshops at 49th International Conference on Very Large Data Bases (VLDBW'23) — International Workshop on Quantum Data Science and Management (QDSM'23), August 28 - September 1, 2023, Vancouver, Canada (CEUR Workshop Proceedings)}, publisher = {RWTH Aachen, Sun SITE Central Europe}, address = {Aachen}, pages = {1 -- 12}, abstract = {Recent advances in the manufacture of quantum computers attract much attention over a wide range of fields, as early-stage quantum processing units (QPU) have become accessible. 
While contemporary quantum machines are very limited in size and capabilities, mature QPUs are speculated to eventually excel at optimisation problems. This makes them an attractive technology for database problems, many of which are based on complex optimisation problems with large solution spaces. Yet, the use of quantum approaches on database problems remains largely unexplored. In this paper, we address the long-standing join ordering problem, one of the most extensively researched database problems. Rather than running arbitrary code, QPUs require specific mathematical problem encodings. An encoding for the join ordering problem was recently proposed, allowing first small-scale queries to be optimised on quantum hardware. However, it is based on a faithful transformation of a mixed integer linear programming (MILP) formulation for JO, and inherits all limitations of the MILP method. Most strikingly, the existing encoding only considers a solution space with left-deep join trees, which tend to yield larger costs than general, bushy join trees. We propose a novel QUBO encoding for the join ordering problem. Rather than transforming existing formulations, we construct a native encoding tailored to quantum systems, which allows us to process general bushy join trees. This makes the full potential of QPUs available for solving join order optimisation problems.}, language = {en} } @article{BayerstadlerBecquinBinderetal., author = {Bayerstadler, Andreas and Becquin, Guillaume and Binder, Julia and Botter, Thierry and Ehm, Hans and Ehmer, Thomas and Erdmann, Marvin and Gaus, Norbert and Harbach, Philipp and Hess, Maximilian and Klepsch, Johannes and Leib, Martin and Luber, Sebastian and Luckow, Andre and Mansky, Maximilian and Mauerer, Wolfgang and Neukart, Florian and Niedermeier, Christoph and Palackal, Lilly and Pfeiffer, Ruben and Polenz, Carsten and Sepulveda, Johanna and Sievers, Tammo and Standen, Brian and Streif, Michael and Strohm, Thomas and Utschig-Utschig, Clemens and Volz, Daniel and Weiss, Horst and Winter, Fabian}, title = {Industry quantum computing applications}, series = {EPJ Quantum Technology}, volume = {8}, journal = {EPJ Quantum Technology}, publisher = {Springer}, organization = {Springer}, doi = {10.1140/epjqt/s40507-021-00114-x}, pages = {1 -- 17}, abstract = {Quantum computing promises to overcome computational limitations with better and faster solutions for optimization, simulation, and machine learning problems. Europe and Germany are in the process of successfully establishing research and funding programs with the objective to advance the technology's ecosystem and industrialization, thereby ensuring digital sovereignty, security, and competitiveness. Such an ecosystem comprises hardware/software solution providers, system integrators, and users from research institutions, start-ups, and industry. The vision of the Quantum Technology and Application Consortium (QUTAC) is to establish and advance the quantum computing ecosystem, supporting the ambitious goals of the German government and various research programs. QUTAC is comprised of ten members representing different industries, in particular automotive manufacturing, chemical and pharmaceutical production, insurance, and technology. In this paper, we survey the current state of quantum computing in these sectors as well as the aerospace industry and identify the contributions of QUTAC to the ecosystem.
We propose an application-centric approach for the industrialization of the technology based on proven business impact. This paper identifies 24 different use cases. By formalizing high-value use cases into well-described reference problems and benchmarks, we will guide technological progress and eventually commercialization. Our results will be beneficial to all ecosystem participants, including suppliers, system integrators, software developers, users, policymakers, funding program managers, and investors.}, language = {en} } @unpublished{MurrMauerer, author = {Murr, Florian and Mauerer, Wolfgang}, title = {McFSM: Near Turing-Complete Finite-State Based Programming}, pages = {11}, abstract = {Finite state machines (FSMs) are an appealing mechanism for simple practical computations: They lend themselves to very efficient and deterministic implementation, are easy to understand, and allow for formally proving many properties of interest. Unfortunately, their computational power is deemed insufficient for many tasks, and their usefulness has been further hampered by the state space explosion problem and other issues when na{\"i}vely trying to scale them to sizes large enough for many real-life applications. This paper expounds on theory and implementation of multiple coupled finite state machines (McFSMs), a novel mechanism that combines benefits of FSMs with near Turing-complete, practical computing power, and that was designed from the ground up to support static analysis and reasoning. We develop an elaborate category-theoretical foundation based on non-deterministic Mealy machines, which gives a suitable algebraic description for novel ways of blending different computing models. Our experience is based on a domain specific language and an integrated development environment that can compile McFSM models to multiple target languages, applying it to use-cases based on industrial scenarios. We discuss properties and advantages of McFSMs, explain how the mechanism can interact with real-world systems and existing code without sacrificing provability, determinism or performance.
We discuss how McFSMs can be used to replace and improve on commonly employed programming patterns, and show how their efficient handling of large state spaces enables them to be used as core building blocks for distributed, safety-critical, and real-time systems of industrial complexity, which contributes to the long-desired goal of providing executable specifications.}, language = {en} } @misc{RamsauerBulwahnLohmannetal., author = {Ramsauer, Ralf and Bulwahn, Lukas and Lohmann, Daniel and Mauerer, Wolfgang}, title = {The Sound of Silence: Mining Security Vulnerabilities from Secret Integration Channels in Open-Source Projects}, series = {MiniDebConf Regensburg (MDC) 2021}, journal = {MiniDebConf Regensburg (MDC) 2021}, address = {Regensburg}, organization = {Ostbayerische Technische Hochschule Regensburg}, language = {en} } @misc{BielmeierMauerer, author = {Bielmeier, Benno and Mauerer, Wolfgang}, title = {Semi-Formal Verification of Embedded Linux Systems Using Trace-Based Models}, series = {Semi-Formal Verification of Embedded Linux Systems Using Trace-Based Models}, journal = {Semi-Formal Verification of Embedded Linux Systems Using Trace-Based Models}, language = {en} } @unpublished{RamsauerHuberSchwarzetal., author = {Ramsauer, Ralf and Huber, Stefan and Schwarz, Konrad and Kiszka, Jan and Mauerer, Wolfgang}, title = {Static Hardware Partitioning on RISC-V - Shortcomings, Limitations, and Prospects}, doi = {10.48550/arXiv.2208.02703}, abstract = {On embedded processors that are increasingly equipped with multiple CPU cores, static hardware partitioning is an established means of consolidating and isolating workloads onto single chips. This architectural pattern is suitable for mixed-criticality workloads that need to satisfy both real-time and safety requirements, given suitable hardware properties. In this work, we focus on exploiting contemporary virtualisation mechanisms to achieve freedom from interference, respectively isolation, between workloads. Possibilities to achieve temporal and spatial isolation, while maintaining real-time capabilities, include statically partitioning resources, avoiding the sharing of devices, and ascertaining zero interventions of superordinate control structures. This eliminates overhead due to hardware partitioning, but implies certain hardware capabilities that are not yet fully implemented in contemporary standard systems. To address such hardware limitations, the customisable and configurable RISC-V instruction set architecture offers the possibility of swift, unrestricted modifications. We present findings on the current RISC-V specification and its implementations that necessitate interventions of superordinate control structures. We identify numerous issues adverse to implementing our goal of achieving zero interventions, respectively zero overhead, both on the design level and especially with regard to handling interrupts.
Based on micro-benchmark measurements, we discuss the implications of our findings, and argue how they can provide a basis for future extensions and improvements of the RISC-V architecture.}, language = {en} } @unpublished{SchoenbergerTrummerMauerer, author = {Sch{\"o}nberger, Manuel and Trummer, Immanuel and Mauerer, Wolfgang}, title = {Quantum-Inspired Digital Annealing for Join Ordering}, series = {Proceedings of the VLDB Endowment}, journal = {Proceedings of the VLDB Endowment}, pages = {14}, abstract = {Finding the optimal join order (JO) is one of the most important problems in query optimisation, and has been extensively considered in research and practice. As it involves huge search spaces, approximation approaches and heuristics are commonly used, which explore a reduced solution space at the cost of solution quality. To explore even large JO search spaces, we may consider special-purpose software, such as mixed-integer linear programming (MILP) solvers, which have successfully solved JO problems. However, even mature solvers cannot overcome the limitations of conventional hardware prompted by the end of Moore's law. We consider quantum-inspired digital annealing hardware, which takes inspiration from quantum processing units (QPUs). Unlike QPUs, which likely remain limited in size and reliability in the near and mid-term future, the digital annealer (DA) can solve large instances of mathematically encoded optimisation problems today. We derive a novel, native encoding for the JO problem tailored to this class of machines that substantially improves over known MILP and quantum-based encodings, and reduces encoding size over the state-of-the-art. By augmenting the computation with a novel readout method, we derive valid join orders for each solution obtained by the (probabilistically operating) DA. Most importantly, and despite an extremely large solution space, our approach scales to practically relevant dimensions of around 50 relations and improves result quality over conventionally employed approaches, adding a novel alternative to solving the long-standing JO problem.}, language = {en} } @article{SchoenbergerScherzingerMauerer, author = {Sch{\"o}nberger, Manuel and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Ready to Leap (by Co-Design)? Join Order Optimisation on Quantum Hardware}, series = {Proceedings of the ACM on Management of Data, PACMMOD}, volume = {1}, journal = {Proceedings of the ACM on Management of Data, PACMMOD}, number = {1}, publisher = {ACM}, address = {New York, NY,}, doi = {10.1145/3588946}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-56634}, pages = {1 -- 27}, abstract = {The prospect of achieving computational speedups by exploiting quantum phenomena makes the use of quantum processing units (QPUs) attractive for many algorithmic database problems. Query optimisation, which concerns problems that typically need to explore large search spaces, seems like an ideal match for the known quantum algorithms. We present the first quantum implementation of join ordering, which is one of the most investigated and fundamental query optimisation problems, based on a reformulation to quadratic binary unconstrained optimisation problems. We empirically characterise our method on two state-of-the-art approaches (gate-based quantum computing and quantum annealing), and identify speed-ups compared to the best known classical join ordering approaches for input sizes that can be processed with current quantum annealers.
However, we also confirm that limits of early-stage technology are quickly reached. Current QPUs are classified as noisy intermediate-scale quantum computers (NISQ), and are restricted by a variety of limitations that reduce their capabilities as compared to ideal future quantum computers, which prevents us from scaling up problem dimensions and reaching practical utility. To overcome these challenges, our formulation accounts for specific QPU properties and limitations, and allows us to trade between achievable solution quality and possible problem size. In contrast to all prior work on quantum computing for query optimisation and database-related challenges, we go beyond currently available QPUs, and explicitly target the scalability limitations: Using insights gained from numerical simulations and our experimental analysis, we identify key criteria for co-designing QPUs to improve their usefulness for join ordering, and show how even relatively minor physical architectural improvements can result in substantial enhancements. Finally, we outline a path towards practical utility of custom-designed QPUs.}, language = {en} } @article{MauererJoblinTamburrietal., author = {Mauerer, Wolfgang and Joblin, Mitchell and Tamburri, Damian and Paradis, Carlos and Kazman, Rick and Apel, Sven}, title = {In Search of Socio-Technical Congruence: A Large-Scale Longitudinal Study}, series = {IEEE Transactions on Software Engineering (TSE)}, volume = {48}, journal = {IEEE Transactions on Software Engineering (TSE)}, number = {8}, publisher = {IEEE}, doi = {10.1109/TSE.2021.3082074}, pages = {3159 -- 3184}, abstract = {This paper describes a large-scale empirical study investigating the relevance of socio-technical congruence over key basic software quality metrics, namely, bugs and churn. That is, we explore whether alignment or misalignment of social communication structures and technical dependencies in large software projects influences software quality. To this end, we have defined a quantitative and operational notion of socio-technical congruence, which we call /socio-technical motif congruence/ (STMC). STMC is a measure of the degree to which developers working on the same file, or on two related files, need to communicate. As socio-technical congruence is a complex and multi-faceted phenomenon, the interpretability of the results is one of our main concerns, so we have employed a careful mixed-methods statistical analysis. In particular, we provide analyses with similar techniques as employed by seminal work in the field to ensure comparability of our results with the existing body of work. The major result of our study, based on an analysis of 25 large open-source projects, is that STMC is /not/ related to project quality measures---software bugs and churn---in any temporal scenario. That is, we find no statistical relationship between the alignment of developer tasks and developer communications on one hand, and project outcomes on the other hand. We conclude that, if congruence does matter, as the literature shows, then its measurable effect lies elsewhere.}, language = {en} } @article{FranzWolfPeriyasamyetal., author = {Franz, Maja and Wolf, Lucas and Periyasamy, Maniraman and Ufrecht, Christian and Scherer, Daniel D.
and Plinge, Axel and Mutschler, Christopher and Mauerer, Wolfgang}, title = {Uncovering Instabilities in Variational-Quantum Deep Q-Networks}, series = {Journal of the Franklin Institute}, journal = {Journal of the Franklin Institute}, edition = {In Press, Corrected Proof}, publisher = {Elsevier}, issn = {0016-0032}, doi = {10.1016/j.jfranklin.2022.08.021}, abstract = {Deep Reinforcement Learning (RL) has considerably advanced over the past decade. At the same time, state-of-the-art RL algorithms require a large computational budget in terms of training time to converge. Recent work has started to approach this problem through the lens of quantum computing, which promises theoretical speed-ups for several traditionally hard tasks. In this work, we examine a class of hybrid quantum-classical RL algorithms that we collectively refer to as variational quantum deep Q-networks (VQ-DQN). We show that VQ-DQN approaches are subject to instabilities that cause the learned policy to diverge, study the extent to which this afflicts reproducibility of established results based on classical simulation, and perform systematic experiments to identify potential explanations for the observed instabilities. Additionally, and in contrast to most existing work on quantum reinforcement learning, we execute RL algorithms on an actual quantum processing unit (an IBM Quantum Device) and investigate differences in behaviour between simulated and physical quantum systems that suffer from implementation deficiencies. Our experiments show that, contrary to opposite claims in the literature, it cannot be conclusively decided if known quantum approaches, even if simulated without physical imperfections, can provide an advantage as compared to classical approaches. Finally, we provide a robust, universal and well-tested implementation of VQ-DQN as a reproducible testbed for future experiments.}, language = {en} } @misc{MintelRamsauerLohmannetal., author = {Mintel, Mario and Ramsauer, Ralf and Lohmann, Daniel and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Fork {\`a} la carte f{\"u}r In-Memory-Datenbanken}, series = {Fr{\"u}hjahrstreffen der Fachgruppen Betriebssysteme, Hamburg, 17. M{\"a}rz 2022}, journal = {Fr{\"u}hjahrstreffen der Fachgruppen Betriebssysteme, Hamburg, 17. M{\"a}rz 2022}, language = {de} } @inproceedings{SchoenbergerFranzScherzingeretal., author = {Sch{\"o}nberger, Manuel and Franz, Maja and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Peel | Pile? Cross-Framework Portability of Quantum Software}, series = {2022 IEEE 19th International Conference on Software Architecture Companion (ICSA-C), 12-15 March 2022, Honolulu, HI, USA}, booktitle = {2022 IEEE 19th International Conference on Software Architecture Companion (ICSA-C), 12-15 March 2022, Honolulu, HI, USA}, publisher = {IEEE}, doi = {10.1109/ICSA-C54293.2022.00039}, abstract = {In recent years, various vendors have made quantum software frameworks available. Yet with vendor-specific frameworks, code portability seems at risk, especially in a field where hardware and software libraries have not yet reached a consolidated state, and even foundational aspects of the technologies are still in flux. Accordingly, the development of vendor-independent quantum programming languages and frameworks is often suggested. This follows the established architectural pattern of introducing additional levels of abstraction into software stacks, thereby piling on layers of abstraction.
Yet software architecture also provides seemingly less abstract alternatives, namely to focus on hardware-specific formulations of problems that peel off unnecessary layers. In this article, we quantitatively and experimentally explore these strategic alternatives, and compare popular quantum frameworks from the software implementation perspective. We find that for several specific, yet generalisable problems, the mathematical formulation of the problem to be solved is not just sufficiently abstract and serves as precise description, but is likewise concrete enough to allow for deriving framework-specific implementations with little effort. Additionally, we argue, based on analysing dozens of existing quantum codes, that porting between frameworks is actually low-effort, since the quantum- and framework-specific portions are very manageable in terms of size, commonly in the order of mere hundreds of lines of code. Given the current state-of-the-art in quantum programming practice, this leads us to argue in favour of peeling off unnecessary abstraction levels.}, language = {en} } @inproceedings{MauererRamsauerLucasetal., author = {Mauerer, Wolfgang and Ramsauer, Ralf and Lucas, Edson R. F. and Scherzinger, Stefanie}, title = {Silentium! Run-Analyse-Eradicate the Noise out of the DB/OS Stack}, series = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2021): 13.-17. September 2021, Dresden, Deutschland}, booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2021): 13.-17. September 2021, Dresden, Deutschland}, publisher = {Gesellschaft f{\"u}r Informatik}, doi = {10.18420/btw2021-21}, pages = {397 -- 421}, abstract = {When multiple tenants compete for resources, database performance tends to suffer. Yet there are scenarios where guaranteed sub-millisecond latencies are crucial, such as in real-time data processing, IoT devices, or when operating in safety-critical environments. In this paper, we study how to make query latencies deterministic in the face of noise (whether caused by other tenants or unrelated operating system tasks). We perform controlled experiments with an in-memory database engine in a multi-tenant setting, where we successively eradicate noisy interference from within the system software stack, to the point where the engine runs close to bare-metal on the underlying hardware. We show that we can achieve query latencies comparable to the database engine running as the sole tenant, but without noticeably impacting the workload of competing tenants. We discuss these results in the context of ongoing efforts to build custom operating systems for database workloads, and point out that for certain use cases, the margin for improvement is rather narrow. In fact, for scenarios like ours, existing operating systems might just be good enough, provided that they are expertly configured. We then critically discuss these findings in the light of a broader family of database systems (e.g., including disk-based), and how to extend the approach of this paper accordingly. 
Low-latency databases; tail latency; real-time databases; bounded-time query processing; DB-OS co-engineering}, language = {de} } @inproceedings{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Nullius in Verba: Reproducibility for Database Systems Research, Revisited}, series = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, editor = {Ailamaki, Anastasia}, publisher = {IEEE}, address = {Piscataway, NJ}, isbn = {978-1-7281-9184-3}, doi = {10.1109/ICDE51399.2021.00270}, pages = {2377 -- 2380}, abstract = {Over the last decade, reproducibility of experimental results has been a prime focus in database systems research, and many high-profile conferences award results that can be independently verified. Since database systems research involves complex software stacks that non-trivially interact with hardware, sharing experimental setups is anything but trivial: Building a working reproduction package goes far beyond providing a DOI to some repository hosting data, code, and setup instructions. This tutorial revisits reproducible engineering in the face of state-of-the-art technology, and best practices gained in other computer science research communities. In particular, in the hands-on part, we demonstrate how to package entire system software stacks for dissemination. To ascertain long-term reproducibility over decades (or ideally, forever), we discuss why relying on open source technologies massively employed in industry has essential advantages over approaches crafted specifically for research. Supplementary material shows how version control systems that allow for non-linearly rewriting recorded history can document the structured genesis behind experimental setups in a way that is substantially easier to understand, without involvement of the original authors, compared to detour-ridden, strictly historic evolution.}, language = {en} } @inproceedings{ScherzingerMauererKondylakis, author = {Scherzinger, Stefanie and Mauerer, Wolfgang and Kondylakis, Haridimos}, title = {DeBinelle: Semantic Patches for Coupled Database-Application Evolution}, series = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, editor = {Ailamaki, Anastasia}, publisher = {IEEE}, address = {Piscataway, NJ}, isbn = {978-1-7281-9184-3}, doi = {10.1109/ICDE51399.2021.00307}, pages = {2697 -- 2700}, abstract = {Databases are at the core of virtually any software product. Changes to database schemas cannot be made in isolation, as they are intricately coupled with application code. Such couplings enforce collateral evolution, which is a recognised, important research problem. In this demonstration, we show a new dimension to this problem, in software that supports alternative database backends: vendor-specific SQL dialects necessitate a simultaneous evolution of both database schema and program code, for all supported DB variants. These near-same changes impose substantial manual effort for software developers. We introduce DeBinelle, a novel framework and domain-specific language for semantic patches that abstracts DB-variant schema changes and coupled program code into a single, unified representation.
DeBinelle further offers a novel alternative to manually evolving coupled schemas and code. DeBinelle considerably extends established, seminal results in software engineering research, supporting several programming languages, and the many dialects of SQL. It effectively eliminates the need to perform vendor-specific changes, replacing them with intuitive semantic patches. Our demo of DeBinelle is based on real-world use cases from reference systems for schema evolution.}, language = {en} } @inproceedings{FruthScherzingerMauereretal., author = {Fruth, Michael and Scherzinger, Stefanie and Mauerer, Wolfgang and Ramsauer, Ralf}, title = {Tell-Tale Tail Latencies: Pitfalls and Perils in Database Benchmarking}, series = {Performance evaluation and benchmarking, 13th TPC Technology Conference (TPCTC 2021): Copenhagen, Denmark, August 20, 2021, Revised Selected Papers}, booktitle = {Performance evaluation and benchmarking, 13th TPC Technology Conference (TPCTC 2021): Copenhagen, Denmark, August 20, 2021, Revised Selected Papers}, editor = {Nambiar, Raghunath and Poess, Meikel}, publisher = {Springer}, address = {Cham, Switzerland}, isbn = {9783030944377}, doi = {10.1007/978-3-030-94437-7_8}, pages = {119 -- 134}, abstract = {The performance of database systems is usually characterised by their average-case (i.e., throughput) behaviour in standardised or de-facto standard benchmarks like TPC-X or YCSB. While tails of the latency (i.e., response time) distribution receive considerably less attention, they have been identified as a threat to the overall system performance: In large-scale systems, even a fraction of requests delayed can build up into delays perceivable by end users. To eradicate large tail latencies from database systems, the ability to faithfully record them, and likewise pinpoint them to the root causes, is imminently required. In this paper, we address the challenge of measuring tail latencies using standard benchmarks, and identify subtle perils and pitfalls. In particular, we demonstrate how Java-based benchmarking approaches can substantially distort tail latency observations, and discuss how the discovery of such problems is inhibited by the common focus on throughput performance. We make a case for purposefully re-designing database benchmarking harnesses based on these observations to arrive at faithful characterisations of database performance from multiple important angles.}, language = {en} } @inproceedings{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {1-2-3 Reproducibility for Quantum Software Experiments}, series = {2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER), Honolulu, HI, USA, 15-18 March 2022}, booktitle = {2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER), Honolulu, HI, USA, 15-18 March 2022}, publisher = {IEEE}, doi = {10.1109/SANER53432.2022.00148}, pages = {1247 -- 1248}, abstract = {Various fields of science face a reproducibility crisis. For quantum software engineering as an emerging field, it is therefore imminent to focus on proper reproducibility engineering from the start. Yet the provision of reproduction packages is almost universally lacking. Actionable advice on how to build such packages is rare, particularly unfortunate in a field with many contributions from researchers with backgrounds outside computer science. 
In this article, we argue how to rectify this deficiency by proposing a 1-2-3~approach to reproducibility engineering for quantum software experiments: Using a meta-generation mechanism, we generate DOI-safe, long-term functioning and dependency-free reproduction packages. They are designed to satisfy the requirements of professional and learned societies solely on the basis of project-specific research artefacts (source code, measurement and configuration data), and require little temporal investment by researchers. Our scheme ascertains long-term traceability even when the quantum processor itself is no longer accessible. By drastically lowering the technical bar, we foster the proliferation of reproduction packages in quantum software experiments and ease the inclusion of non-CS researchers entering the field.}, language = {en} } @inproceedings{BrainingerMauererScherzinger, author = {Braininger, Dimitri and Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Replicability and Reproducibility of a Schema Evolution Study in Embedded Databases}, series = {Advances in conceptual modeling: ER 2020 Workshops CMAI, CMLS, CMOMM4FAIR, CoMoNoS, EmpER, Vienna, Austria, November 3-6, 2020, Proceedings}, volume = {12584}, booktitle = {Advances in conceptual modeling: ER 2020 Workshops CMAI, CMLS, CMOMM4FAIR, CoMoNoS, EmpER, Vienna, Austria, November 3-6, 2020, Proceedings}, editor = {Grossmann, Georg and Ram, Sudha}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-65846-5}, doi = {10.1007/978-3-030-65847-2_19}, pages = {210 -- 219}, abstract = {Ascertaining the feasibility of independent falsification or repetition of published results is vital to the scientific process, and replication or reproduction experiments are routinely performed in many disciplines. Unfortunately, such studies are only scarcely available in database research, with few papers dedicated to re-evaluating published results. In this paper, we conduct a case study on replicating and reproducing a study on schema evolution in embedded databases. We can exactly repeat the outcome for one out of four database applications studied, and come close in two further cases. By reporting results, efforts, and obstacles encountered, we hope to increase appreciation for the substantial efforts required to ensure reproducibility. By discussing minutiae details required to ascertain reproducible work, we argue that such important, but often ignored aspects of scientific work should receive more credit in the evaluation of future research.}, language = {en} } @inproceedings{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Educating Future Software Architects in the Art and Science of Analysing Software Data.}, series = {SEUH 2020: Software Engineering im Unterricht der Hochschulen, Tagungsband des 17. Workshops "Software Engineering im Unterricht der Hochschulen", Innsbruck, {\"O}sterreich, 26. - 27.02.2020}, booktitle = {SEUH 2020: Software Engineering im Unterricht der Hochschulen, Tagungsband des 17. Workshops "Software Engineering im Unterricht der Hochschulen", Innsbruck, {\"O}sterreich, 26. - 27.02.2020}, editor = {Krusche, Stephan and Wagner, Stefan}, publisher = {RWTH Aachen}, pages = {56 -- 60}, abstract = {We report the design and teaching experience of a Master-level seminar course on quantitative and empirical software engineering. 
The course combines elements of traditional literature seminars with active learning by scientific project work, in particular quantitative mixed-method analyses of open source systems. It also provides short introductions and refreshers to data mining and statistical analysis, and discusses the nature and practice of scientific knowledge inference. Student presentations of published research, augmented by summary reports, bridge to standard seminars. We discuss our educational goals and the course structure derived from them. We review research questions addressed by students in mini research reports, and analyse them as tokens on how junior-level software engineers perceive the potential of empirical software engineering research. We assess challenges faced, and discuss possible solutions.}, language = {en} } @article{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Digitale Forschungswerkzeuge : Nachhaltigkeit f{\"u}r Software und Daten}, series = {Forschung \& Lehre (Forschung und Lehre)}, volume = {28}, journal = {Forschung \& Lehre (Forschung und Lehre)}, number = {10}, publisher = {Zentrum f{\"u}r Wissenschaftsmanagement e.V. (ZWM)
}, organization = {Deutscher Hochschullehrerverband}, pages = {816 -- 817}, abstract = {The scientific reproducibility crisis has sharpened the focus on digital research tools. Even though the additional effort required for reproducibility and accessibility is increasingly acknowledged, deficits remain in practice when it comes to making the underlying data and research tools available.}, language = {de} } @misc{SchoenbergerScherzingerMauerer, author = {Sch{\"o}nberger, Manuel and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Quantum Computing for DB - Applicability on Multi Query Optimization and Join Order Optimization}, series = {Fr{\"u}hjahrstreffen Fachgruppe Datenbanken in Potsdam, 2022}, journal = {Fr{\"u}hjahrstreffen Fachgruppe Datenbanken in Potsdam, 2022}, language = {en} } @inproceedings{MauererKlessingerScherzinger, author = {Mauerer, Wolfgang and Klessinger, Stefan and Scherzinger, Stefanie}, title = {Beyond the badge: reproducibility engineering as a lifetime skill}, series = {Proceedings 4th International Workshop on Software Engineering Education for the Next Generation SEENG 2022, 17 May 2022, Pittsburgh, PA, USA}, booktitle = {Proceedings 4th International Workshop on Software Engineering Education for the Next Generation SEENG 2022, 17 May 2022, Pittsburgh, PA, USA}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {9781450393362}, doi = {10.1145/3528231.3528359}, pages = {1 -- 4}, abstract = {Ascertaining reproducibility of scientific experiments is receiving increased attention across disciplines. We argue that the necessary skills are important beyond pure scientific utility, and that they should be taught as part of software engineering (SWE) education. They serve a dual purpose: Apart from acquiring the coveted badges assigned to reproducible research, reproducibility engineering is a lifetime skill for a professional industrial career in computer science. SWE curricula seem an ideal fit for conveying such capabilities, yet they require some extensions, especially given that even at flagship conferences like ICSE, only slightly more than one-third of the technical papers (at the 2021 edition) receive recognition for artefact reusability. Knowledge and capabilities in setting up engineering environments that allow for reproducing artefacts and results over decades (a standard requirement in many traditional engineering disciplines), writing semi-literate commit messages that document crucial steps of a decision-making process and that are tightly coupled with code, or sustainably taming dynamic, quickly changing software dependencies, to name a few: They all contribute to solving the scientific reproducibility crisis, and enable software engineers to build sustainable, long-term maintainable, software-intensive, industrial systems. We propose to teach these skills at the undergraduate level, on par with traditional SWE topics.}, language = {en} }