@misc{Mauerer,
  author = {Mauerer, Wolfgang},
  title = {Safety, Security, Quality: Artificial Intelligence versus Common Sense},
  series = {Open Source Summit North America and Embedded Linux Conference North America 2020},
  journal = {Open Source Summit North America and Embedded Linux Conference North America 2020},
  language = {en}
}

@misc{MauererOpenSource,
  author = {Mauerer, Wolfgang},
  title = {Open Source in Research and Reality},
  series = {Open Source Summit North America and Embedded Linux Conference North America 2020},
  journal = {Open Source Summit North America and Embedded Linux Conference North America 2020},
  language = {en}
}

@article{SaxFeldZielinskietal.,
  author = {Sax, Irmi and Feld, Sebastian and Zielinski, Sebastian and Gabor, Thomas and Linnhoff-Popien, Claudia and Mauerer, Wolfgang},
  title = {Towards Understanding Approximation Complexity on a Quantum Annealer},
  series = {Digitale Welt},
  volume = {4},
  journal = {Digitale Welt},
  number = {1},
  publisher = {DIGITALE WELT Academy c/o Ludwig-Maximilians-Universit{\"a}t M{\"u}nchen},
  address = {M{\"u}nchen},
  doi = {10.1007/s42354-019-0244-1},
  pages = {104},
  abstract = {Many industrially relevant problems can be deterministically solved by computers in principle, but are intractable in practice, as the seminal P/NP dichotomy of complexity theory and Cobham's thesis testify. For the many NP-complete problems, industry needs to resort to using heuristics or approximation algorithms. For approximation algorithms, there is a more refined classification into complexity classes that goes beyond the simple P/NP dichotomy. As is well known, approximation classes form a hierarchy, that is, FPTAS \subseteq PTAS \subseteq APX \subseteq NPO. This classification gives a more realistic notion of complexity, but unless unexpected breakthroughs happen for fundamental problems like P = NP or related questions, there is no known efficient algorithm that can solve such problems exactly on a realistic computer. Therefore, new ways of computation are sought. Recently, considerable hope was placed on the possible computational powers of quantum computers and quantum annealing (QA) in particular. However, the precise benefits of such a drastic shift in hardware are still uncharted territory to a good extent. Firstly, the exact relations between classical and quantum complexity classes pose many open questions, and secondly, technical details of formulating and implementing quantum algorithms play a crucial role in real-world applications. Guided by the hierarchy of classical optimisation complexity classes, we discuss how to map problems of each class to a quantum annealer. Those problems are the Minimum Multiprocessor Scheduling (MMS) problem, the Minimum Vertex Cover (MVC) problem and the Maximum Independent Set (MIS) problem. We experimentally investigate if and how the degree of approximability influences implementation and run-time performance. Our experiments indicate a discrepancy between classical approximation complexity and QA behaviour: Problems MIS and MVC, members of APX and PTAS, respectively, exhibit better solution quality on a QA than MMS, which is in FPTAS, even despite the use of preprocessing for the latter. This leads to the hypothesis that traditional classifications do not immediately extend to the quantum annealing domain, at least when the properties of real-world devices are taken into account.
A structural reason why FPTAS problems do not show good solution quality might be the use of an inequality in their problem description. Formulating such inequalities on quantum hardware (mostly done by stating a Quadratic Unconstrained Binary Optimisation (QUBO) problem in the form of a matrix) requires a lot of hardware space, which makes finding an optimal solution more difficult. Reducing the density of a QUBO is possible by appropriately pruning QUBO matrices. For the problems considered in our evaluation, we find that the achievable solution quality on a real-world machine is unexpectedly robust against pruning, often up to pruning ratios of 50\% or more. Since quantum annealers are probabilistic machines by design, the loss in solution quality is only of subordinate relevance, especially considering that the pruning of QUBO matrices allows for solving larger problem instances on hardware of a given capacity. We quantitatively discuss the interplay between these factors.},
  language = {en}
}

@inproceedings{MauererScherzinger,
  author = {Mauerer, Wolfgang and Scherzinger, Stefanie},
  title = {Educating Future Software Architects in the Art and Science of Analysing Software Data},
  series = {SEUH 2020: Software Engineering im Unterricht der Hochschulen, Tagungsband des 17. Workshops "Software Engineering im Unterricht der Hochschulen", Innsbruck, {\"O}sterreich, 26. - 27.02.2020},
  booktitle = {SEUH 2020: Software Engineering im Unterricht der Hochschulen, Tagungsband des 17. Workshops "Software Engineering im Unterricht der Hochschulen", Innsbruck, {\"O}sterreich, 26. - 27.02.2020},
  editor = {Krusche, Stephan and Wagner, Stefan},
  publisher = {RWTH Aachen},
  pages = {56 -- 60},
  abstract = {We report the design and teaching experience of a Master-level seminar course on quantitative and empirical software engineering. The course combines elements of traditional literature seminars with active learning through scientific project work, in particular quantitative mixed-method analyses of open source systems. It also provides short introductions and refreshers on data mining and statistical analysis, and discusses the nature and practice of scientific knowledge inference. Student presentations of published research, augmented by summary reports, bridge to standard seminars. We discuss our educational goals and the course structure derived from them. We review research questions addressed by students in mini research reports, and analyse them as tokens of how junior-level software engineers perceive the potential of empirical software engineering research.
We assess the challenges faced and discuss possible solutions.},
  language = {en}
}

@inproceedings{SaxFeldZielinskietal.CF20,
  author = {Sax, Irmi and Feld, Sebastian and Zielinski, Sebastian and Gabor, Thomas and Linnhoff-Popien, Claudia and Mauerer, Wolfgang},
  title = {Approximate approximation on a quantum annealer},
  series = {Proceedings of the 17th ACM International Conference on Computing Frontiers (CF '20): Catania, Sicily, Italy, 11.05.2020 - 13.05.2020},
  booktitle = {Proceedings of the 17th ACM International Conference on Computing Frontiers (CF '20): Catania, Sicily, Italy, 11.05.2020 - 13.05.2020},
  editor = {Palesi, Maurizio},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, United States},
  isbn = {9781450379564},
  doi = {10.1145/3387902.3392635},
  pages = {108 -- 117},
  abstract = {Many problems of industrial interest are NP-complete, and quickly exhaust the resources of computational devices with increasing input sizes. Quantum annealers (QA) are physical devices that aim at this class of problems by exploiting quantum mechanical properties of nature. However, they compete with efficient heuristics and probabilistic or randomised algorithms on classical machines that allow for finding approximate solutions to large NP-complete problems. While first implementations of QA have become commercially available, their practical benefits are far from fully explored. To the best of our knowledge, approximation techniques have not yet received substantial attention. In this paper, we explore how approximate versions of problems, of varying degree, can be systematically constructed for quantum annealer programs, and how this influences result quality or the handling of larger problem instances on a given set of qubits. We illustrate various approximation techniques on both simulations and real QA hardware, on different seminal problems, and interpret the results to contribute towards a better understanding of the real-world power and limitations of current-state and future quantum computing.},
  language = {en}
}

@inproceedings{BrainingerMauererScherzinger,
  author = {Braininger, Dimitri and Mauerer, Wolfgang and Scherzinger, Stefanie},
  title = {Replicability and Reproducibility of a Schema Evolution Study in Embedded Databases},
  series = {Advances in Conceptual Modeling: ER 2020 Workshops CMAI, CMLS, CMOMM4FAIR, CoMoNoS, EmpER, Vienna, Austria, November 3-6, 2020, Proceedings},
  volume = {12584},
  booktitle = {Advances in Conceptual Modeling: ER 2020 Workshops CMAI, CMLS, CMOMM4FAIR, CoMoNoS, EmpER, Vienna, Austria, November 3-6, 2020, Proceedings},
  editor = {Grossmann, Georg and Ram, Sudha},
  publisher = {Springer},
  address = {Cham},
  isbn = {978-3-030-65846-5},
  doi = {10.1007/978-3-030-65847-2_19},
  pages = {210 -- 219},
  abstract = {Ascertaining the feasibility of independent falsification or repetition of published results is vital to the scientific process, and replication or reproduction experiments are routinely performed in many disciplines. Unfortunately, such studies are only scarcely available in database research, with few papers dedicated to re-evaluating published results. In this paper, we conduct a case study on replicating and reproducing a study on schema evolution in embedded databases. We can exactly repeat the outcome for one out of four database applications studied, and come close in two further cases. By reporting results, efforts, and obstacles encountered, we hope to increase appreciation for the substantial efforts required to ensure reproducibility.
By discussing the minutiae required to ascertain reproducible work, we argue that such important, but often ignored, aspects of scientific work should receive more credit in the evaluation of future research.},
  language = {en}
}

@inproceedings{KruegerMauerer,
  author = {Kr{\"u}ger, Tom and Mauerer, Wolfgang},
  title = {Quantum Annealing-Based Software Components},
  series = {Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops (ICSE'20): Seoul, Republic of Korea, 27.06.2020 - 19.07.2020},
  booktitle = {Proceedings of the IEEE/ACM 42nd International Conference on Software Engineering Workshops (ICSE'20): Seoul, Republic of Korea, 27.06.2020 - 19.07.2020},
  publisher = {Association for Computing Machinery},
  address = {New York, NY},
  isbn = {9781450379632},
  doi = {10.1145/3387940.3391472},
  pages = {445 -- 450},
  abstract = {Quantum computers have the potential to solve problems more efficiently than classical computers. While first commercial prototypes have become available, the performance of such machines in practical applications is still subject to exploration. Quantum computers will not entirely replace classical machines, but serve as accelerators for specific problems. This necessitates integrating quantum computational primitives into existing applications. In this paper, we perform a case study on how to augment existing software with quantum computational primitives for the Boolean satisfiability problem (SAT) implemented using a quantum annealer (QA). We discuss relevant quality measures for quantum components, and show that mathematically equivalent, but structurally different ways of transforming SAT to a QA can lead to substantial differences regarding these qualities. We argue that engineers need to be aware that (and which) details, although they may be less relevant in traditional software engineering, require considerable attention in quantum computing.},
  language = {en}
}

@inproceedings{RamsauerKiszkaMauerer,
  author = {Ramsauer, Ralf and Kiszka, Jan and Mauerer, Wolfgang},
  title = {A Novel Software Architecture for Mixed Criticality Systems},
  series = {Digital Transformation in Semiconductor Manufacturing: Proceedings of the 1st and 2nd European Advances in Digital Transformation Conference, EADTC 2018, Zittau, Germany and EADTC 2019, Milan, Italy},
  booktitle = {Digital Transformation in Semiconductor Manufacturing: Proceedings of the 1st and 2nd European Advances in Digital Transformation Conference, EADTC 2018, Zittau, Germany and EADTC 2019, Milan, Italy},
  editor = {Keil, Sophia and Lasch, Rainer and Lindner, Fabian and Lohmer, Jacob},
  publisher = {Springer International Publishing},
  address = {Cham},
  doi = {10.1007/978-3-030-48602-0_11},
  pages = {121 -- 128},
  abstract = {The advent of multi-core CPUs in nearly all embedded markets has prompted an architectural trend towards combining safety-critical and uncritical software on single hardware units. We present a novel architecture for mixed criticality systems based on Linux that allows us to consolidate critical and uncritical parts onto a single hardware unit. CPU virtualisation extensions enable strict and static partitioning of hardware by direct assignment of resources, which allows us to boot additional operating systems or bare-metal applications running alongside Linux. The hypervisor Jailhouse is at the core of the architecture and ensures that the resulting domains may serve workloads of different criticality and cannot interfere in an unintended way.
This retains Linux's feature-richness in uncritical parts, while frugal safety- and real-time-critical applications execute in isolated domains. Architectural simplicity is a central aspect of our approach and a precondition for reliable implementability and successful certification. While the standard virtualisation extensions provided by current hardware seem to suffice for a straightforward implementation of our approach, there are a number of further limitations that need to be worked around. This paper discusses the arising issues, and evaluates the suitability of our approach for real-world safety- and real-time-critical scenarios.},
  language = {en}
}

@inproceedings{RamsauerBulwahnLohmannetal,
  author = {Ramsauer, Ralf and Bulwahn, Lukas and Lohmann, Daniel and Mauerer, Wolfgang},
  title = {The Sound of Silence: Mining Security Vulnerabilities from Secret Integration Channels in Open-Source Projects},
  series = {Proceedings of the 2020 ACM SIGSAC Conference on Cloud Computing Security Workshop: 09.11.2020, virtual event},
  booktitle = {Proceedings of the 2020 ACM SIGSAC Conference on Cloud Computing Security Workshop: 09.11.2020, virtual event},
  editor = {Zhang, Yinqian and Sion, Radu},
  publisher = {ACM},
  address = {New York, NY, USA},
  isbn = {9781450380843},
  doi = {10.1145/3411495.3421360},
  pages = {147 -- 157},
  abstract = {Public development processes are a key characteristic of open source projects. However, fixes for vulnerabilities are usually discussed privately among a small group of trusted maintainers, and integrated without prior public involvement. This is supposed to prevent early disclosure and to cope with embargo and non-disclosure agreement (NDA) rules. While regular development activities leave publicly available traces, fixes for vulnerabilities that bypass the standard process do not. We present a data-mining-based approach to detect code fragments that arise from such infringements of the standard process. By systematically mapping public development artefacts to source code repositories, we can exclude regular process activities, and infer irregularities that stem from non-public integration channels. For the Linux kernel, the most crucial component of many systems, we apply our method to a period of seven months before the release of Linux 5.4. We find 29 commits that address 12 vulnerabilities. For these vulnerabilities, our approach provides a temporal advantage of 2 to 179 days to design exploits before public disclosure takes place and fixes are rolled out. Established responsible disclosure approaches in open development processes are supposed to limit premature visibility of security vulnerabilities. However, our approach shows that they instead open additional possibilities to uncover such changes, thwarting the very premise. We conclude by discussing implications and partial countermeasures.},
  language = {en}
}