@article{SchoenbergerScherzingerMauerer, author = {Sch{\"o}nberger, Manuel and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Ready to Leap (by Co-Design)? Join Order Optimisation on Quantum Hardware}, series = {Proceedings of the ACM on Management of Data, PACMMOD}, volume = {1}, journal = {Proceedings of the ACM on Management of Data, PACMMOD}, number = {1}, publisher = {ACM}, address = {New York, NY}, doi = {10.1145/3588946}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-56634}, pages = {1 -- 27}, abstract = {The prospect of achieving computational speedups by exploiting quantum phenomena makes the use of quantum processing units (QPUs) attractive for many algorithmic database problems. Query optimisation, which concerns problems that typically need to explore large search spaces, seems like an ideal match for the known quantum algorithms. We present the first quantum implementation of join ordering, which is one of the most investigated and fundamental query optimisation problems, based on a reformulation to quadratic unconstrained binary optimisation problems. We empirically characterise our method on two state-of-the-art approaches (gate-based quantum computing and quantum annealing), and identify speed-ups compared to the best known classical join ordering approaches for input sizes that can be processed with current quantum annealers. However, we also confirm that limits of early-stage technology are quickly reached. Current QPUs are classified as noisy, intermediate scale quantum computers (NISQ), and are restricted by a variety of limitations that reduce their capabilities as compared to ideal future quantum computers, which prevents us from scaling up problem dimensions and reaching practical utility. To overcome these challenges, our formulation accounts for specific QPU properties and limitations, and allows us to trade between achievable solution quality and possible problem size. In contrast to all prior work on quantum computing for query optimisation and database-related challenges, we go beyond currently available QPUs, and explicitly target the scalability limitations: Using insights gained from numerical simulations and our experimental analysis, we identify key criteria for co-designing QPUs to improve their usefulness for join ordering, and show how even relatively minor physical architectural improvements can result in substantial enhancements. Finally, we outline a path towards practical utility of custom-designed QPUs.}, language = {en} } @inproceedings{StoerlMuellerTekleabetal., author = {St{\"o}rl, Uta and M{\"u}ller, Daniel and Tekleab, Alexander and Tolale, Stephane and Stenzel, Julian and Klettke, Meike and Scherzinger, Stefanie}, title = {Curating Variational Data in Application Development}, series = {2018 IEEE 34th International Conference on Data Engineering, 16-19 April 2018, Paris, France}, booktitle = {2018 IEEE 34th International Conference on Data Engineering, 16-19 April 2018, Paris, France}, publisher = {IEEE}, doi = {10.1109/ICDE.2018.00187}, pages = {1605 -- 1608}, abstract = {Building applications for processing data lakes is a software engineering challenge. We present Darwin, a middleware for applications that operate on variational data. This concerns data with heterogeneous structure, usually stored within a schema-flexible NoSQL database.
Darwin assists application developers in essential data and schema curation tasks: Upon request, Darwin extracts a schema description, discovers the history of schema versions, and proposes mappings between these versions. Users of Darwin may interactively choose which mappings are most realistic. Darwin is further capable of rewriting queries at runtime, to ensure that queries also comply with legacy data. Alternatively, Darwin can migrate legacy data to reduce the structural heterogeneity. Using Darwin, developers may thus evolve their data in sync with their code. In our hands-on demo, we curate synthetic as well as real-life datasets.}, language = {en} } @inproceedings{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Nullius in Verba: Reproducibility for Database Systems Research, Revisited}, series = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, editor = {Ailamaki, Anastasia}, publisher = {IEEE}, address = {Piscataway, NJ}, isbn = {978-1-7281-9184-3}, doi = {10.1109/ICDE51399.2021.00270}, pages = {2377 -- 2380}, abstract = {Over the last decade, reproducibility of experimental results has been a prime focus in database systems research, and many high-profile conferences award results that can be independently verified. Since database systems research involves complex software stacks that non-trivially interact with hardware, sharing experimental setups is anything but trivial: Building a working reproduction package goes far beyond providing a DOI to some repository hosting data, code, and setup instructions. This tutorial revisits reproducible engineering in the face of state-of-the-art technology, and best practices gained in other computer science research communities. In particular, in the hands-on part, we demonstrate how to package entire system software stacks for dissemination. To ascertain long-term reproducibility over decades (or ideally, forever), we discuss why relying on open source technologies massively employed in industry has essential advantages over approaches crafted specifically for research. Supplementary material shows how version control systems that allow for non-linearly rewriting recorded history can document the structured genesis behind experimental setups in a way that is substantially easier to understand, without involvement of the original authors, compared to detour-ridden, strictly historic evolution.}, language = {en} } @inproceedings{KlettkeAwolinStoerletal., author = {Klettke, Meike and Awolin, Hannes and St{\"o}rl, Uta and M{\"u}ller, Daniel and Scherzinger, Stefanie}, title = {Uncovering the evolution history of data lakes}, series = {2017 IEEE International Conference on Big Data (Big Data), 11-14 Dec. 2017, Boston, MA, USA}, booktitle = {2017 IEEE International Conference on Big Data (Big Data), 11-14 Dec. 2017, Boston, MA, USA}, publisher = {IEEE}, doi = {10.1109/BigData.2017.8258204}, pages = {2462 -- 2471}, abstract = {Data accumulating in data lakes can become inaccessible in the long run when its semantics are not available. The heterogeneity of data formats and the sheer volumes of data collections prohibit cleaning and unifying the data manually. Thus, tools for automated data lake analysis are of great interest.
In this paper, we target the particular problem of reconstructing the schema evolution history from data lakes. Knowing how the data is structured, and how this structure has evolved over time, enables programmatic access to the lake. By deriving a sequence of schema versions, rather than a single schema, we take into account structural changes over time. Moreover, we address the challenge of detecting inclusion dependencies. This is a prerequisite for mapping between succeeding schema versions, and in particular, detecting nontrivial changes such as a property having been moved or copied. We evaluate our approach for detecting inclusion dependencies using the MovieLens dataset, as well as an adaptation of a dataset containing botanical descriptions, to cover specific edge cases.}, language = {en} } @inproceedings{ScherzingerStoerlKlettke, author = {Scherzinger, Stefanie and St{\"o}rl, Uta and Klettke, Meike}, title = {A Datalog-based protocol for lazy data migration in agile NoSQL application development}, series = {Proceedings of the 15th Symposium on Database Programming Languages : SPLASH '15: Conference on Systems, Programming, Languages, and Applications: Software for Humanity, Pittsburgh PA USA, 27.10.2015 - 27.10.2015}, booktitle = {Proceedings of the 15th Symposium on Database Programming Languages : SPLASH '15: Conference on Systems, Programming, Languages, and Applications: Software for Humanity, Pittsburgh PA USA, 27.10.2015 - 27.10.2015}, editor = {Cheney, James and Neumann, Thomas}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {9781450339025}, doi = {10.1145/2815072.2815078}, pages = {41 -- 44}, abstract = {We address a practical challenge in agile web development against NoSQL data stores: Upon a new release of the web application, entities already persisted in production no longer match the application code. Rather than migrating all legacy entities eagerly (prior to the release) and at the cost of application downtime, lazy data migration is a popular alternative: When a legacy entity is loaded by the application, all pending structural changes are applied. Yet correctly migrating legacy data from several releases back, involving more than one entity at-a-time, is not trivial. In this paper, we propose a holistic model based on non-recursive Datalog with negation for reading, writing, and migrating data. In implementing our model, we may blend established Datalog evaluation algorithms, such as an incremental evaluation with certain rules evaluated bottom-up, and certain rules evaluated top-down with sideways information passing. Our systematic approach guarantees that from the viewpoint of the application, it remains transparent whether data is migrated eagerly or lazily.}, language = {en} } @inproceedings{HauboldSchildgenScherzingeretal., author = {Haubold, Florian and Schildgen, Johannes and Scherzinger, Stefanie and Deßloch, Stefan}, title = {ControVol Flex: Flexible Schema Evolution for NoSQL Application Development}, series = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) : 17. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS) : 06.-10.03.2017 in Stuttgart Deutschland}, booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) : 17. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS) : 06.-10.03.2017 in Stuttgart Deutschland}, editor = {Mitschang, Bernhard}, publisher = {Gesellschaft f{\"u}r Informatik e.V.
(GI)}, address = {Bonn}, abstract = {We demonstrate ControVol Flex, an Eclipse plugin for controlled schema evolution in Java applications backed by NoSQL document stores. The sweet spot of our tool is applications that are deployed continuously against the same production data store: Each new release may bring about schema changes that conflict with legacy data already stored in production. The type system internal to the predecessor tool ControVol is able to detect common schema conflicts, and enables developers to resolve them with the help of object-mapper annotations. Our new tool ControVol Flex lets developers choose their schema-migration strategy, whether all legacy data is to be migrated eagerly by means of NotaQL transformation scripts, or lazily, as declared by object-mapper annotations. Our tool is even capable of carrying out both strategies in combination, eagerly migrating data in the background, while lazily migrating data that is meanwhile accessed by the application. From the viewpoint of the application, it remains transparent how legacy data is migrated: Every read access yields an entity that matches the structure that the current application code expects. Our live demo shows how ControVol Flex gracefully solves a broad range of common schema-evolution tasks.}, language = {en} } @inproceedings{StoerlTekleabKlettkeetal., author = {St{\"o}rl, Uta and Tekleab, Alexander and Klettke, Meike and Scherzinger, Stefanie}, title = {In for a Surprise When Migrating NoSQL Data}, series = {2018 IEEE 34th International Conference on Data Engineering (ICDE), 16-19 April 2018, Paris, France}, booktitle = {2018 IEEE 34th International Conference on Data Engineering (ICDE), 16-19 April 2018, Paris, France}, publisher = {IEEE}, doi = {10.1109/ICDE.2018.00202}, pages = {1662}, abstract = {Schema-flexible NoSQL data stores lend themselves nicely for storing versioned data, a product of schema evolution. In this lightning talk, we apply pending schema changes to records that have been persisted several schema versions back. We present first experiments with MongoDB and Cassandra, where we explore the trade-off between applying chains of pending changes stepwise (one after the other), and as composite operations. Contrary to intuition, composite migration is not necessarily faster. The culprit is the computational overhead for deriving the compositions. However, caching composition formulae achieves a speedup: For Cassandra, we can cut the runtime by nearly 80\%. Surprisingly, the relative speedup seems to be system-dependent. Our take-away message is that in applying pending schema changes in NoSQL data stores, we need to base our design decisions on experimental evidence rather than on intuition alone.}, language = {en} } @inproceedings{KlettkeStoerlShenavaietal., author = {Klettke, Meike and St{\"o}rl, Uta and Shenavai, Manuel and Scherzinger, Stefanie}, title = {NoSQL schema evolution and big data migration at scale}, series = {2016 IEEE International Conference on Big Data (Big Data), 5-8 Dec. 2016, Washington, DC}, booktitle = {2016 IEEE International Conference on Big Data (Big Data), 5-8 Dec. 2016, Washington, DC}, publisher = {IEEE}, doi = {10.1109/BigData.2016.7840924}, pages = {2764 -- 2774}, abstract = {This paper explores scalable implementation strategies for carrying out lazy schema evolution in NoSQL data stores. For decades, schema evolution has been an evergreen in database research.
Yet new challenges arise in the context of cloud-hosted data backends: With all database reads and writes charged by the provider, migrating the entire data instance eagerly into a new schema can be prohibitively expensive. Thus, lazy migration may be more cost-efficient, as legacy entities are only migrated in case they are actually accessed by the application. Related work has shown that the overhead of migrating data lazily is affordable when a single evolutionary change is carried out, such as adding a new property. In this paper, we focus on long-term schema evolution, where chains of pending schema evolution operations may have to be applied. Chains occur when legacy entities written several application releases back are finally accessed by the application. We discuss strategies for dealing with chains of evolution operations, in particular, the composition into a single, equivalent composite migration that performs the required version jump. Our experiments with MongoDB focus on scalable implementation strategies. Our lineup further compares the number of write operations, and thus, the operational costs of different data migration strategies.}, language = {en} } @inproceedings{MoellerBertonKlettkeetal., author = {M{\"o}ller, Mark Lukas and Berton, Nicolas and Klettke, Meike and Scherzinger, Stefanie and St{\"o}rl, Uta}, title = {jHound: Large-Scale Profiling of Open JSON Data}, series = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2019), 18. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS) : 4.-8. M{\"a}rz 2019 in Rostock}, volume = {289}, booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2019), 18. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS) : 4.-8. M{\"a}rz 2019 in Rostock}, publisher = {GI - Gesellschaft f{\"u}r Informatik}, address = {Bonn}, isbn = {978-3-88579-683-1}, pages = {557 -- 560}, abstract = {We present jHound, a tool for profiling large collections of JSON data, and apply it to thousands of data sets holding open government data. jHound reports key characteristics of JSON documents, such as their nesting depth. As we show, jHound can help detect structural outliers, and most importantly, badly encoded documents: jHound can pinpoint certain cases of documents that use string-typed values where other native JSON datatypes would have been a better match. Moreover, we can detect certain cases of maladaptively structured JSON documents, which obviously do not comply with good data modeling practices. 
By interactively exploring particular example documents, we hope to inspire discussions in the community about what makes a good JSON encoding.}, language = {en} } @inproceedings{HillenbrandLevchenkoStoerletal., author = {Hillenbrand, Andrea and Levchenko, Maksym and St{\"o}rl, Uta and Scherzinger, Stefanie and Klettke, Meike}, title = {MigCast: Putting a Price Tag on Data Model Evolution in NoSQL Data Stores}, series = {Proceedings of the 2019 International Conference on Management of Data (SIGMOD/PODS '19) June 2019, Amsterdam, Netherlands}, booktitle = {Proceedings of the 2019 International Conference on Management of Data (SIGMOD/PODS '19) June 2019, Amsterdam, Netherlands}, editor = {Boncz, Peter and Manegold, Stefan and Ailamaki, Anastasia and Deshpande, Amol and Kraska, Tim}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {9781450356435}, doi = {10.1145/3299869.3320223}, pages = {1925 -- 1928}, abstract = {We demonstrate MigCast, a tool-based advisor for exploring data migration strategies in the context of developing NoSQL-backed applications. Users of MigCast can consider their options for evolving their data model along with legacy data already persisted in the cloud-hosted production database. They can explore alternative actions as the financial costs are predicted with respect to the chosen cloud provider. Thereby they are better equipped to assess potential consequences of imminent data migration decisions. To this end, MigCast maintains an internal cost model, taking into account characteristics of the data instance, expected workload, data model changes, and cloud provider pricing models. Hence, MigCast enables software project stakeholders to remain in control of the operative costs and to make informed decisions when evolving their applications.}, language = {en} } @incollection{StoerlMuellerKlettkeetal., author = {St{\"o}rl, Uta and M{\"u}ller, Daniel and Klettke, Meike and Scherzinger, Stefanie}, title = {Enabling Efficient Agile Software Development of NoSQL-backed Applications}, series = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) : 17. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS) : 06.-10.03.2017 in Stuttgart Deutschland}, booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) : 17. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS) : 06.-10.03.2017 in Stuttgart Deutschland}, editor = {Mitschang, Bernhard}, publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)}, address = {Bonn}, isbn = {978-3-88579-659-6}, abstract = {NoSQL databases are popular in agile software development, where a frequently changing database schema imposes challenges for the production database. In this demo, we present Darwin, a middleware for systematic, tool-based support specifically designed for NoSQL database systems. Darwin carries out schema evolution and data migration tasks.
To the best of our knowledge, Darwin is the first tool of its kind that supports both eager and lazy NoSQL data migration.}, language = {en} } @inproceedings{ScherzingerSombachWiechetal., author = {Scherzinger, Stefanie and Sombach, Stephanie and Wiech, Katharina and Klettke, Meike and St{\"o}rl, Uta}, title = {Datalution: a tool for continuous schema evolution in NoSQL-backed web applications}, series = {QUDOS 2016: Proceedings of the 2nd International Workshop on Quality-Aware DevOps}, booktitle = {QUDOS 2016: Proceedings of the 2nd International Workshop on Quality-Aware DevOps}, publisher = {ACM}, doi = {10.1145/2945408.2945416}, pages = {38 -- 39}, abstract = {When an incremental release of a web application is deployed, the structure of data already persisted in the production database may no longer match what the application code expects. Traditionally, eager schema migration is called for, where all legacy data is migrated in one go. With the growing popularity of schema-flexible NoSQL data stores, lazy forms of data migration have emerged: Legacy entities are migrated on-the-fly, one at-a-time, when they are loaded by the application. In this demo, we present Datalution, a tool demonstrating the merits of lazy data migration. Datalution can apply chains of pending schema changes, due to its Datalog-based internal representation. The Datalution approach thus ensures that schema evolution, as part of continuous deployment, is carried out correctly.}, language = {en} } @inproceedings{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Educating Future Software Architects in the Art and Science of Analysing Software Data}, series = {SEUH 2020: Software Engineering im Unterricht der Hochschulen, Tagungsband des 17. Workshops "Software Engineering im Unterricht der Hochschulen", Innsbruck, {\"O}sterreich, 26. - 27.02.2020}, booktitle = {SEUH 2020: Software Engineering im Unterricht der Hochschulen, Tagungsband des 17. Workshops "Software Engineering im Unterricht der Hochschulen", Innsbruck, {\"O}sterreich, 26. - 27.02.2020}, editor = {Krusche, Stephan and Wagner, Stefan}, publisher = {RWTH Aachen}, pages = {56 -- 60}, abstract = {We report the design and teaching experience of a Master-level seminar course on quantitative and empirical software engineering. The course combines elements of traditional literature seminars with active learning by scientific project work, in particular quantitative mixed-method analyses of open source systems. It also provides short introductions and refreshers to data mining and statistical analysis, and discusses the nature and practice of scientific knowledge inference. Student presentations of published research, augmented by summary reports, bridge to standard seminars. We discuss our educational goals and the course structure derived from them. We review research questions addressed by students in mini research reports, and analyse them as tokens on how junior-level software engineers perceive the potential of empirical software engineering research.
We assess challenges faced, and discuss possible solutions.}, language = {en} } @article{Scherzinger, author = {Scherzinger, Stefanie}, title = {Build your own SQL-on-Hadoop Query Engine: A Report on a Term Project in a Master-level Database Course}, series = {ACM SIGMOD Record}, volume = {48}, journal = {ACM SIGMOD Record}, number = {2}, publisher = {ACM}, doi = {10.1145/3377330.3377336}, pages = {33 -- 38}, abstract = {This is a report on a course taught at OTH Regensburg in the summer term of 2018. The students in this course built their own SQL-on-Hadoop engine as a term project in just 8 weeks. miniHive is written in Python and compiles SQL queries into MapReduce workflows. These are then executed on Hadoop. miniHive performs generic query optimizations (selection and projection pushdown, or cost-based join reordering), as well as MapReduce-specific optimizations. The course was taught in English, using a flipped classroom model. The course material was mainly compiled from third-party teaching videos. This report describes the course setup, the miniHive milestones, and gives a short review of the most successful student projects.}, language = {en} } @inproceedings{ScherzingerSeifertWiese, author = {Scherzinger, Stefanie and Seifert, Christin and Wiese, Lena}, title = {The Best of Both Worlds: Challenges in Linking Provenance and Explainability in Distributed Machine Learning}, series = {2019 IEEE 39th International Conference on Distributed Computing Systems (ICDCS), 7-10 July 2019, Dallas, TX, USA}, booktitle = {2019 IEEE 39th International Conference on Distributed Computing Systems (ICDCS), 7-10 July 2019, Dallas, TX, USA}, publisher = {IEEE}, doi = {10.1109/ICDCS.2019.00161}, pages = {1620 -- 1629}, abstract = {Machine learning experts prefer to think of their input as a single, homogeneous, and consistent data set. However, when analyzing large volumes of data, the entire data set may not be manageable on a single server, but must be stored on a distributed file system instead. Moreover, with the pressing demand to deliver explainable models, the experts may no longer focus on the machine learning algorithms in isolation, but must take into account the distributed nature of the data stored, as well as the impact of any data pre-processing steps upstream in their data analysis pipeline. In this paper, we make the point that even basic transformations during data preparation can impact the model learned, and that this is exacerbated in a distributed setting. We then sketch our vision of end-to-end explainability of the model learned, taking the pre-processing into account. In particular, we point out the potentials of linking the contributions of research on data provenance with the efforts on explainability in machine learning.
In doing so, we highlight pitfalls we may experience in a distributed system on the way to generating more holistic explanations for our machine learning models.}, language = {en} } @inproceedings{ScherzingerMauererKondylakis, author = {Scherzinger, Stefanie and Mauerer, Wolfgang and Kondylakis, Haridimos}, title = {DeBinelle: Semantic Patches for Coupled Database-Application Evolution}, series = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece}, editor = {Ailamaki, Anastasia}, publisher = {IEEE}, address = {Piscataway, NJ}, isbn = {978-1-7281-9184-3}, doi = {10.1109/ICDE51399.2021.00307}, pages = {2697 -- 2700}, abstract = {Databases are at the core of virtually any software product. Changes to database schemas cannot be made in isolation, as they are intricately coupled with application code. Such couplings enforce collateral evolution, which is a recognised, important research problem. In this demonstration, we show a new dimension to this problem, in software that supports alternative database backends: vendor-specific SQL dialects necessitate a simultaneous evolution of both, database schema and program code, for all supported DB variants. These near-same changes impose substantial manual effort for software developers. We introduce DeBinelle, a novel framework and domain-specific language for semantic patches that abstracts DB-variant schema changes and coupled program code into a single, unified representation. DeBinelle further offers a novel alternative to manually evolving coupled schemas and code. DeBinelle considerably extends established, seminal results in software engineering research, supporting several programming languages, and the many dialects of SQL. It effectively eliminates the need to perform vendor-specific changes, replacing them with intuitive semantic patches. Our demo of DeBinelle is based on real-world use cases from reference systems for schema evolution.}, language = {en} } @inproceedings{FruthScherzingerMauereretal., author = {Fruth, Michael and Scherzinger, Stefanie and Mauerer, Wolfgang and Ramsauer, Ralf}, title = {Tell-Tale Tail Latencies: Pitfalls and Perils in Database Benchmarking}, series = {Performance evaluation and benchmarking, 13th TPC Technology Conference (TPCTC 2021): Copenhagen, Denmark, August 20, 2021, Revised Selected Papers}, booktitle = {Performance evaluation and benchmarking, 13th TPC Technology Conference (TPCTC 2021): Copenhagen, Denmark, August 20, 2021, Revised Selected Papers}, editor = {Nambiar, Raghunath and Poess, Meikel}, publisher = {Springer}, address = {Cham, Switzerland}, isbn = {9783030944377}, doi = {10.1007/978-3-030-94437-7_8}, pages = {119 -- 134}, abstract = {The performance of database systems is usually characterised by their average-case (i.e., throughput) behaviour in standardised or de-facto standard benchmarks like TPC-X or YCSB. While tails of the latency (i.e., response time) distribution receive considerably less attention, they have been identified as a threat to the overall system performance: In large-scale systems, even a fraction of requests delayed can build up into delays perceivable by end users. To eradicate large tail latencies from database systems, the ability to faithfully record them, and likewise pinpoint them to the root causes, is imminently required. 
In this paper, we address the challenge of measuring tail latencies using standard benchmarks, and identify subtle perils and pitfalls. In particular, we demonstrate how Java-based benchmarking approaches can substantially distort tail latency observations, and discuss how the discovery of such problems is inhibited by the common focus on throughput performance. We make a case for purposefully re-designing database benchmarking harnesses based on these observations to arrive at faithful characterisations of database performance from multiple important angles.}, language = {en} } @inproceedings{MauererScherzinger, author = {Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {1-2-3 Reproducibility for Quantum Software Experiments}, series = {2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER), Honolulu, HI, USA, 15-18 March 2022}, booktitle = {2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER), Honolulu, HI, USA, 15-18 March 2022}, publisher = {IEEE}, doi = {10.1109/SANER53432.2022.00148}, pages = {1247 -- 1248}, abstract = {Various fields of science face a reproducibility crisis. For quantum software engineering as an emerging field, it is therefore imminent to focus on proper reproducibility engineering from the start. Yet the provision of reproduction packages is almost universally lacking. Actionable advice on how to build such packages is rare, particularly unfortunate in a field with many contributions from researchers with backgrounds outside computer science. In this article, we argue how to rectify this deficiency by proposing a 1-2-3~approach to reproducibility engineering for quantum software experiments: Using a meta-generation mechanism, we generate DOI-safe, long-term functioning and dependency-free reproduction packages. They are designed to satisfy the requirements of professional and learned societies solely on the basis of project-specific research artefacts (source code, measurement and configuration data), and require little temporal investment by researchers. Our scheme ascertains long-term traceability even when the quantum processor itself is no longer accessible. By drastically lowering the technical bar, we foster the proliferation of reproduction packages in quantum software experiments and ease the inclusion of non-CS researchers entering the field.}, language = {en} } @inproceedings{BrainingerMauererScherzinger, author = {Braininger, Dimitri and Mauerer, Wolfgang and Scherzinger, Stefanie}, title = {Replicability and Reproducibility of a Schema Evolution Study in Embedded Databases}, series = {Advances in conceptual modeling: ER 2020 Workshops CMAI, CMLS, CMOMM4FAIR, CoMoNoS, EmpER, Vienna, Austria, November 3-6, 2020, Proceedings}, volume = {12584}, booktitle = {Advances in conceptual modeling: ER 2020 Workshops CMAI, CMLS, CMOMM4FAIR, CoMoNoS, EmpER, Vienna, Austria, November 3-6, 2020, Proceedings}, editor = {Grossmann, Georg and Ram, Sudha}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-65846-5}, doi = {10.1007/978-3-030-65847-2_19}, pages = {210 -- 219}, abstract = {Ascertaining the feasibility of independent falsification or repetition of published results is vital to the scientific process, and replication or reproduction experiments are routinely performed in many disciplines. Unfortunately, such studies are only scarcely available in database research, with few papers dedicated to re-evaluating published results. 
In this paper, we conduct a case study on replicating and reproducing a study on schema evolution in embedded databases. We can exactly repeat the outcome for one out of four database applications studied, and come close in two further cases. By reporting results, efforts, and obstacles encountered, we hope to increase appreciation for the substantial efforts required to ensure reproducibility. By discussing the minutiae required to ascertain reproducible work, we argue that such important, but often ignored aspects of scientific work should receive more credit in the evaluation of future research.}, language = {en} } @inproceedings{RinglstetterScherzingerBissyande, author = {Ringlstetter, Andreas and Scherzinger, Stefanie and Bissyand{\'e}, Tegawend{\'e} F.}, title = {Data Model Evolution Using Object-NoSQL Mappers: Folklore or State-of-the-Art?}, series = {2016 IEEE/ACM 2nd International Workshop on Big Data Software Engineering (BIGDSE), 16 May 2016, Austin, TX, USA}, booktitle = {2016 IEEE/ACM 2nd International Workshop on Big Data Software Engineering (BIGDSE), 16 May 2016, Austin, TX, USA}, publisher = {ACM}, doi = {10.1145/2896825.2896827}, pages = {33 -- 36}, abstract = {In big data software engineering, the schema flexibility of NoSQL document stores is a major selling point: When the document store itself does not actively manage a schema, the data model is maintained within the application. Just like object-relational mappers for relational databases, object-NoSQL mappers are part of professional software development with NoSQL document stores. Some mappers go beyond merely loading and storing Java objects: Using dedicated evolution annotations, developers may conveniently add, remove, or rename attributes from stored objects, and also conduct more complex transformations. In this paper, we analyze the dissemination of this technology in Java open source projects. While we find evidence on GitHub that evolution annotations are indeed being used, developers do not employ them so much for evolving the data model, but to solve different tasks instead. Our observations trigger interesting questions for further research.}, language = {en} } @inproceedings{CerqueusdeAlmeidaScherzinger, author = {Cerqueus, Thomas and de Almeida, Eduardo Cunha and Scherzinger, Stefanie}, title = {ControVol: Let Yesterday's Data Catch Up with Today's Application Code}, series = {Proceedings of the 24th International Conference on World Wide Web (WWW '15) ; Florence, Italy, 18.05.2015 - 22.05.2015}, booktitle = {Proceedings of the 24th International Conference on World Wide Web (WWW '15) ; Florence, Italy, 18.05.2015 - 22.05.2015}, editor = {Gangemi, Aldo and Leonardi, Stefano and Panconesi, Alessandro}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {9781450334730}, doi = {10.1145/2740908.2742719}, pages = {15 -- 16}, abstract = {In building software-as-a-service applications, a flexible development environment is key to shipping early and often. Therefore, schema-flexible data stores are becoming more and more popular. They can store data with heterogeneous structure, allowing for new releases to be pushed frequently, without having to migrate legacy data first. However, the current application code must continue to work with any legacy data that has already been persisted in production. To let legacy data structurally "catch up" with the latest application code, developers commonly employ object mapper libraries with life-cycle annotations.
Yet when used without caution, they can cause runtime errors and even data loss. We present ControVol, an IDE plugin that detects evolutionary changes to the application code that are incompatible with legacy data. ControVol warns developers already at development time, and even suggests automatic fixes for lazily migrating legacy data when it is loaded into the application. Thus, ControVol ensures that the structure of legacy data can catch up with the structure expected by the latest software release.}, language = {en} } @inproceedings{CerqueusCunhadeAlmeidaScherzinger, author = {Cerqueus, Thomas and Cunha de Almeida, Eduardo and Scherzinger, Stefanie}, title = {Safely Managing Data Variety in Big Data Software Development}, series = {2015 IEEE/ACM 1st International Workshop on Big Data Software Engineering, 23-23 May 2015, Florence, Italy}, booktitle = {2015 IEEE/ACM 1st International Workshop on Big Data Software Engineering, 23-23 May 2015, Florence, Italy}, publisher = {IEEE}, doi = {10.1109/BIGDSE.2015.9}, pages = {4 -- 10}, abstract = {We consider the task of building Big Data software systems, offered as software-as-a-service. These applications are commonly backed by NoSQL data stores that address the proverbial Vs of Big Data processing: NoSQL data stores can handle large volumes of data and many systems do not enforce a global schema, to account for structural variety in data. Thus, software engineers can design the data model on the go, a flexibility that is particularly crucial in agile software development. However, NoSQL data stores commonly do not yet account for the veracity of changes when it comes to changes in the structure of persisted data. Yet this is an inevitable consequence of agile software development. In most NoSQL-based application stacks, schema evolution is completely handled within the application code, usually involving object mapper libraries. Yet simple code refactorings, such as renaming a class attribute at the source code level, can cause data loss or runtime errors once the application has been deployed to production. We address this pain point by contributing type checking rules that we have implemented within an IDE plugin. Our plugin ControVol statically type checks the object mapper class declarations against the code release history. ControVol is thus capable of detecting common yet risky cases of mismatched data and schema, and can even suggest automatic fixes.}, language = {en} } @inproceedings{HecknerBazoWolffetal., author = {Heckner, Markus and Bazo, Alexander and Wolff, Christian and Scherzinger, Stefanie}, title = {Karel relearns C: Teaching good software engineering practices in CS1 with Karel the Robot}, series = {2018 IEEE Global Engineering Education Conference (EDUCON), 17-20 April 2018, Santa Cruz de Tenerife, Spain}, booktitle = {2018 IEEE Global Engineering Education Conference (EDUCON), 17-20 April 2018, Santa Cruz de Tenerife, Spain}, publisher = {IEEE}, doi = {10.1109/EDUCON.2018.8363402}, pages = {1447 -- 1454}, abstract = {This paper describes our implementation, teaching philosophy, and experiences with our C-based version of the widely known Karel the Robot introductory programming micro-language. Karel enables students to programmatically solve problems, using the C language, in a graphical two-dimensional world by moving the robot around while checking and manipulating its surroundings.
We use Karel to solve the dilemma of either demanding too much or not enough from students during the first weeks of an introductory CS course, as interesting problems can be solved with limited input from lectures. Karel enables problem solving from day one of CS1, and encourages good software engineering practices such as top-down design from the beginning. We outline typical problems in the first weeks of CS1. We present a short overview of existing Karel implementations in various programming languages and our rationale for re-implementing Karel. We present our teaching philosophy and use of Karel in the classroom. We demonstrate how Karel is being used from a student perspective, along with a typical programming task. We discuss preliminary results of a survey and interviews with students from a first course in which Karel was used.}, language = {en} } @inproceedings{ScherzingerCerqueusCunhadeAlmeida, author = {Scherzinger, Stefanie and Cerqueus, Thomas and Cunha de Almeida, Eduardo}, title = {ControVol: A framework for controlled schema evolution in NoSQL application development}, series = {2015 IEEE 31st International Conference on Data Engineering, 13-17 April 2015, Seoul, Korea (South)}, booktitle = {2015 IEEE 31st International Conference on Data Engineering, 13-17 April 2015, Seoul, Korea (South)}, publisher = {IEEE}, isbn = {978-1-4799-7964-6}, doi = {10.1109/ICDE.2015.7113402}, pages = {1464 -- 1467}, abstract = {Building scalable web applications on top of NoSQL data stores is becoming common practice. Many of these data stores can easily be accessed programmatically, and do not enforce a schema. Software engineers can design the data model on the go, a flexibility that is crucial in agile software development. The typical tasks of database schema management are now handled within the application code, usually involving object mapper libraries. However, today's Integrated Development Environments (IDEs) lack the proper tool support when it comes to managing the combined evolution of the application code and of the schema. Yet simple refactorings such as renaming an attribute at the source code level can cause irretrievable data loss or runtime errors once the application is serving in production. In this demo, we present ControVol, a framework for controlled schema evolution in application development against NoSQL data stores. ControVol is integrated into the IDE and statically type checks object mapper class declarations against the schema evolution history, as recorded by the code repository. ControVol is capable of warning of common yet risky cases of mismatched data and schema. ControVol is further able to suggest quick fixes by which developers can have these issues automatically resolved.}, language = {en} } @inproceedings{SeifertScherzingerWiese, author = {Seifert, Christin and Scherzinger, Stefanie and Wiese, Lena}, title = {Towards Generating Consumer Labels for Machine Learning Models}, series = {2019 IEEE First International Conference on Cognitive Machine Intelligence (CogMI), 12-14 Dec. 2019, Dallas, TX, USA}, booktitle = {2019 IEEE First International Conference on Cognitive Machine Intelligence (CogMI), 12-14 Dec. 2019, Dallas, TX, USA}, publisher = {IEEE}, doi = {10.1109/CogMI48466.2019.00033}, pages = {173 -- 179}, abstract = {Machine learning (ML) based decision making is becoming commonplace. For persons affected by ML-based decisions, a certain level of transparency regarding the properties of the underlying ML model can be fundamental. 
In this vision paper, we propose to issue consumer labels for trained and published ML models. These labels primarily target machine learning lay persons, such as the operators of an ML system, the executors of decisions, and the decision subjects themselves. Provided that consumer labels comprehensively capture the characteristics of the trained ML model, consumers are enabled to recognize when human intelligence should supersede artificial intelligence. In the long run, we envision a service that generates these consumer labels (semi-)automatically. In this paper, we survey the requirements that an ML system should meet, and correspondingly, the properties that an ML consumer label could capture. We further discuss the feasibility of operationalizing and benchmarking these requirements in the automated generation of ML consumer labels.}, language = {en} } @inproceedings{HolubovaScherzinger, author = {Holubov{\'a}, Irena and Scherzinger, Stefanie}, title = {Unlocking the potential of NextGen multi-model databases for semantic big data projects}, series = {Proceedings of the International Workshop on Semantic Big Data (SBD '19), 05.07.2019 - 05.07.2019, Amsterdam, Netherlands}, booktitle = {Proceedings of the International Workshop on Semantic Big Data (SBD '19), 05.07.2019 - 05.07.2019, Amsterdam, Netherlands}, editor = {Groppe, Sven and Gruenwald, Le}, publisher = {ACM Press}, address = {New York}, isbn = {9781450367660}, doi = {10.1145/3323878.3325807}, pages = {1 -- 6}, abstract = {A new vision in semantic big data processing is to create enterprise data hubs, with a 360° view on all data that matters to a corporation. As we discuss in this paper, a new generation of multi-model database systems seems a promising architectural choice for building such scalable, non-native triple stores. In this paper, we first characterize this new generation of multi-model databases. Then, discussing an example scenario, we show how they allow for agile and flexible schema management, spanning a large design space for creative and incremental data modelling. We identify the challenge of generating sound triple-views from data stored in several, interlinked models, for SPARQL querying. We regard this as one of several appealing research challenges where the semantic big data and the database architecture community may join forces.}, language = {en} }