@misc{MintelRamsauerLohmannetal.,
  author = {Mintel, Mario and Ramsauer, Ralf and Lohmann, Daniel and Scherzinger, Stefanie and Mauerer, Wolfgang},
  title = {Fork {\`a} la carte f{\"u}r In-Memory-Datenbanken},
  series = {Fr{\"u}hjahrstreffen der Fachgruppen Betriebssysteme, Hamburg, 17. M{\"a}rz 2022},
  language = {de}
}

@article{SchoenbergerScherzingerMauerer,
  author = {Sch{\"o}nberger, Manuel and Scherzinger, Stefanie and Mauerer, Wolfgang},
  title = {Ready to Leap (by Co-Design)? Join Order Optimisation on Quantum Hardware},
  journal = {Proceedings of the ACM on Management of Data (PACMMOD)},
  volume = {1},
  number = {1},
  publisher = {ACM},
  address = {New York, NY},
  doi = {10.1145/3588946},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-56634},
  pages = {1 -- 27},
  abstract = {The prospect of achieving computational speedups by exploiting quantum phenomena makes the use of quantum processing units (QPUs) attractive for many algorithmic database problems. Query optimisation, which concerns problems that typically need to explore large search spaces, seems like an ideal match for the known quantum algorithms. We present the first quantum implementation of join ordering, which is one of the most investigated and fundamental query optimisation problems, based on a reformulation to quadratic unconstrained binary optimisation problems. We empirically characterise our method on two state-of-the-art approaches (gate-based quantum computing and quantum annealing), and identify speed-ups compared to the best known classical join ordering approaches for input sizes that can be processed with current quantum annealers. However, we also confirm that limits of early-stage technology are quickly reached. Current QPUs are classified as noisy intermediate-scale quantum (NISQ) computers, and are restricted by a variety of limitations that reduce their capabilities as compared to ideal future quantum computers, which prevents us from scaling up problem dimensions and reaching practical utility. To overcome these challenges, our formulation accounts for specific QPU properties and limitations, and allows us to trade between achievable solution quality and possible problem size. In contrast to all prior work on quantum computing for query optimisation and database-related challenges, we go beyond currently available QPUs, and explicitly target the scalability limitations: Using insights gained from numerical simulations and our experimental analysis, we identify key criteria for co-designing QPUs to improve their usefulness for join ordering, and show how even relatively minor physical architectural improvements can result in substantial enhancements. Finally, we outline a path towards practical utility of custom-designed QPUs.},
  language = {en}
}
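A note on the reformulation mentioned in the abstract above: a quadratic unconstrained binary optimisation (QUBO) problem asks for a binary vector x minimising x^T Q x. The following minimal Python sketch is an illustrative toy, not the paper's actual encoding: it frames a one-hot choice between three candidate join orders (with invented cost values and penalty weight) as a QUBO and solves it by exhaustive enumeration.

import itertools

# Hypothetical toy: pick exactly one of three left-deep join orders;
# the costs stand in for estimated intermediate-result sizes.
costs = [120.0, 45.0, 300.0]
penalty = 1000.0  # weight enforcing the one-hot constraint

n = len(costs)
# Build Q so that x^T Q x = sum_i costs[i]*x_i + penalty*(sum_i x_i - 1)^2
# (up to an additive constant); x_i^2 = x_i folds linear terms into the diagonal.
Q = [[0.0] * n for _ in range(n)]
for i in range(n):
    Q[i][i] = costs[i] - penalty
    for j in range(i + 1, n):
        Q[i][j] = 2.0 * penalty

def energy(x):
    return sum(Q[i][j] * x[i] * x[j] for i in range(n) for j in range(i, n))

best = min(itertools.product([0, 1], repeat=n), key=energy)
print(best)  # (0, 1, 0): the cheapest of the three candidate orders

On actual hardware, the same matrix would be handed to a quantum annealer or a variational gate-based algorithm instead of being enumerated classically.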
@article{ThorScherzingerSpecht,
  author = {Thor, Andreas and Scherzinger, Stefanie and Specht, G{\"u}nther},
  title = {Editorial},
  journal = {Datenbank-Spektrum},
  volume = {14},
  number = {2},
  publisher = {Springer},
  doi = {10.1007/s13222-014-0162-1},
  pages = {81 -- 84},
  language = {de}
}

@inproceedings{StoerlMuellerTekleabetal.,
  author = {St{\"o}rl, Uta and M{\"u}ller, Daniel and Tekleab, Alexander and Tolale, Stephane and Stenzel, Julian and Klettke, Meike and Scherzinger, Stefanie},
  title = {Curating Variational Data in Application Development},
  booktitle = {2018 IEEE 34th International Conference on Data Engineering (ICDE), 16-19 April 2018, Paris, France},
  publisher = {IEEE},
  doi = {10.1109/ICDE.2018.00187},
  pages = {1605 -- 1608},
  abstract = {Building applications for processing data lakes is a software engineering challenge. We present Darwin, a middleware for applications that operate on variational data. This concerns data with heterogeneous structure, usually stored within a schema-flexible NoSQL database. Darwin assists application developers in essential data and schema curation tasks: Upon request, Darwin extracts a schema description, discovers the history of schema versions, and proposes mappings between these versions. Users of Darwin may interactively choose which mappings are most realistic. Darwin is further capable of rewriting queries at runtime, to ensure that queries also comply with legacy data. Alternatively, Darwin can migrate legacy data to reduce the structural heterogeneity. Using Darwin, developers may thus evolve their data in sync with their code. In our hands-on demo, we curate synthetic as well as real-life datasets.},
  language = {en}
}
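As a rough, self-contained illustration of the schema-extraction task described in the Darwin entry directly above (and emphatically not Darwin's actual algorithm), the following Python sketch derives a crude schema description from heterogeneous JSON-like documents by counting their structural variants; the sample documents are invented.

from collections import Counter

docs = [
    {"title": "Persona", "year": 1966},
    {"title": "Ran", "year": 1985},
    {"title": "Solaris", "year": 1972, "director": "Tarkovsky"},
]

def extract_schema(documents):
    # Group documents by their set of (attribute name, value type) pairs;
    # each distinct set is one structural variant of the implicit schema.
    return Counter(
        tuple(sorted((k, type(v).__name__) for k, v in d.items()))
        for d in documents
    )

for variant, count in extract_schema(docs).items():
    print(count, dict(variant))
# 2 {'title': 'str', 'year': 'int'}
# 1 {'director': 'str', 'title': 'str', 'year': 'int'}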
@inproceedings{MauererScherzinger,
  author = {Mauerer, Wolfgang and Scherzinger, Stefanie},
  title = {Nullius in Verba: Reproducibility for Database Systems Research, Revisited},
  booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE 2021): 19-22 April 2021, Chania, Greece},
  editor = {Ailamaki, Anastasia},
  publisher = {IEEE},
  address = {Piscataway, NJ},
  isbn = {978-1-7281-9184-3},
  doi = {10.1109/ICDE51399.2021.00270},
  pages = {2377 -- 2380},
  abstract = {Over the last decade, reproducibility of experimental results has been a prime focus in database systems research, and many high-profile conferences award results that can be independently verified. Since database systems research involves complex software stacks that non-trivially interact with hardware, sharing experimental setups is anything but trivial: Building a working reproduction package goes far beyond providing a DOI to some repository hosting data, code, and setup instructions. This tutorial revisits reproducible engineering in the face of state-of-the-art technology, and best practices gained in other computer science research communities. In particular, in the hands-on part, we demonstrate how to package entire system software stacks for dissemination. To ascertain long-term reproducibility over decades (or ideally, forever), we discuss why relying on open source technologies massively employed in industry has essential advantages over approaches crafted specifically for research. Supplementary material shows how version control systems that allow for non-linearly rewriting recorded history can document the structured genesis behind experimental setups in a way that is substantially easier to understand, without involvement of the original authors, compared to detour-ridden, strictly historic evolution.},
  language = {en}
}

@inproceedings{KlettkeAwolinStoerletal.,
  author = {Klettke, Meike and Awolin, Hannes and St{\"o}rl, Uta and M{\"u}ller, Daniel and Scherzinger, Stefanie},
  title = {Uncovering the evolution history of data lakes},
  booktitle = {2017 IEEE International Conference on Big Data (Big Data), 11-14 Dec. 2017, Boston, MA, USA},
  publisher = {IEEE},
  doi = {10.1109/BigData.2017.8258204},
  pages = {2462 -- 2471},
  abstract = {Data accumulating in data lakes can become inaccessible in the long run when its semantics are not available. The heterogeneity of data formats and the sheer volumes of data collections prohibit cleaning and unifying the data manually. Thus, tools for automated data lake analysis are of great interest. In this paper, we target the particular problem of reconstructing the schema evolution history from data lakes. Knowing how the data is structured, and how this structure has evolved over time, enables programmatic access to the lake. By deriving a sequence of schema versions, rather than a single schema, we take into account structural changes over time. Moreover, we address the challenge of detecting inclusion dependencies. This is a prerequisite for mapping between succeeding schema versions, and in particular, detecting nontrivial changes such as a property having been moved or copied. We evaluate our approach for detecting inclusion dependencies using the MovieLens dataset, as well as an adaptation of a dataset containing botanical descriptions, to cover specific edge cases.},
  language = {en}
}
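To make the inclusion-dependency step from the abstract above concrete: a unary inclusion dependency A ⊆ B holds when every value of attribute A also appears under attribute B, hinting that a property was moved, copied, or renamed between schema versions. Here is a deliberately naive Python sketch over invented flat documents; real detection, as the paper discusses, must scale and handle edge cases.

v1 = [{"id": 1, "director": "Kubrick"}, {"id": 2, "director": "Tarr"}]
v2 = [{"id": 1, "directed_by": "Kubrick"}, {"id": 2, "directed_by": "Tarr"}]

def values(docs, attr):
    return {d[attr] for d in docs if attr in d}

def inclusion_dependencies(old, new):
    # Report (old_attr, new_attr) pairs where all old values reappear
    # under the new attribute in the succeeding schema version.
    old_attrs = {a for d in old for a in d}
    new_attrs = {a for d in new for a in d}
    return [(a, b) for a in sorted(old_attrs) for b in sorted(new_attrs)
            if a != b and values(old, a) and values(old, a) <= values(new, b)]

print(inclusion_dependencies(v1, v2))  # [('director', 'directed_by')]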
@inproceedings{MauererRamsauerLucasetal.,
  author = {Mauerer, Wolfgang and Ramsauer, Ralf and Lucas, Edson R. F. and Scherzinger, Stefanie},
  title = {Silentium! Run-Analyse-Eradicate the Noise out of the DB/OS Stack},
  booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2021): 13.-17. September 2021, Dresden, Deutschland},
  publisher = {Gesellschaft f{\"u}r Informatik},
  doi = {10.18420/btw2021-21},
  pages = {397 -- 421},
  abstract = {When multiple tenants compete for resources, database performance tends to suffer. Yet there are scenarios where guaranteed sub-millisecond latencies are crucial, such as in real-time data processing, IoT devices, or when operating in safety-critical environments. In this paper, we study how to make query latencies deterministic in the face of noise (whether caused by other tenants or unrelated operating system tasks). We perform controlled experiments with an in-memory database engine in a multi-tenant setting, where we successively eradicate noisy interference from within the system software stack, to the point where the engine runs close to bare-metal on the underlying hardware. We show that we can achieve query latencies comparable to the database engine running as the sole tenant, but without noticeably impacting the workload of competing tenants. We discuss these results in the context of ongoing efforts to build custom operating systems for database workloads, and point out that for certain use cases, the margin for improvement is rather narrow. In fact, for scenarios like ours, existing operating systems might just be good enough, provided that they are expertly configured. We then critically discuss these findings in the light of a broader family of database systems (e.g., including disk-based), and how to extend the approach of this paper accordingly.},
  keywords = {low-latency databases; tail latency; real-time databases; bounded-time query processing; DB-OS co-engineering},
  language = {en}
}

@inproceedings{ScherzingerStoerlKlettke,
  author = {Scherzinger, Stefanie and St{\"o}rl, Uta and Klettke, Meike},
  title = {A Datalog-based protocol for lazy data migration in agile NoSQL application development},
  booktitle = {Proceedings of the 15th Symposium on Database Programming Languages, SPLASH '15: Conference on Systems, Programming, Languages, and Applications: Software for Humanity, Pittsburgh, PA, USA, 27.10.2015},
  editor = {Cheney, James and Neumann, Thomas},
  publisher = {ACM},
  address = {New York, NY, USA},
  isbn = {9781450339025},
  doi = {10.1145/2815072.2815078},
  pages = {41 -- 44},
  abstract = {We address a practical challenge in agile web development against NoSQL data stores: Upon a new release of the web application, entities already persisted in production no longer match the application code. Rather than migrating all legacy entities eagerly (prior to the release) and at the cost of application downtime, lazy data migration is a popular alternative: When a legacy entity is loaded by the application, all pending structural changes are applied. Yet correctly migrating legacy data from several releases back, involving more than one entity at-a-time, is not trivial. In this paper, we propose a holistic model for reading, writing, and migrating data, based on non-recursive Datalog with negation. In implementing our model, we may blend established Datalog evaluation algorithms, such as an incremental evaluation with certain rules evaluated bottom-up, and certain rules evaluated top-down with sideways information passing. Our systematic approach guarantees that from the viewpoint of the application, it remains transparent whether data is migrated eagerly or lazily.},
  language = {en}
}
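The lazy-migration protocol in the entry above is formalised in Datalog; as a loose operational analogy only (with assumed names and a simplistic version scheme, not the paper's model), the following Python sketch applies all pending structural changes when a legacy entity is read, so the application only ever sees the latest structure.

# Pending per-version changes; keys are source schema versions.
pending = {
    1: lambda e: {**e, "rating": 0},                        # v1 -> v2: add property
    2: lambda e: {**{k: v for k, v in e.items() if k != "year"},
                  "released": e.get("year")},               # v2 -> v3: rename property
}
LATEST = 3

def read(entity):
    # Migrate a stored entity through all pending changes on access.
    entity = dict(entity)
    version = entity.pop("_version", 1)
    while version < LATEST:
        entity = pending[version](entity)
        version += 1
    return {**entity, "_version": LATEST}

legacy = {"_version": 1, "title": "Vertigo", "year": 1958}
print(read(legacy))
# {'title': 'Vertigo', 'rating': 0, 'released': 1958, '_version': 3}

Whether the migrated entity is also written back on read is exactly the kind of eager/lazy trade-off that the protocol keeps transparent to the application.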
@inproceedings{HauboldSchildgenScherzingeretal.,
  author = {Haubold, Florian and Schildgen, Johannes and Scherzinger, Stefanie and De{\ss}loch, Stefan},
  title = {ControVol Flex: Flexible Schema Evolution for NoSQL Application Development},
  booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017): 17. Fachtagung des GI-Fachbereichs "Datenbanken und Informationssysteme" (DBIS), 06.-10.03.2017 in Stuttgart, Deutschland},
  editor = {Mitschang, Bernhard},
  publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
  address = {Bonn},
  abstract = {We demonstrate ControVol Flex, an Eclipse plugin for controlled schema evolution in Java applications backed by NoSQL document stores. The sweet spot of our tool is applications that are deployed continuously against the same production data store: Each new release may bring about schema changes that conflict with legacy data already stored in production. The type system internal to the predecessor tool ControVol is able to detect common schema conflicts, and enables developers to resolve them with the help of object-mapper annotations. Our new tool ControVol Flex lets developers choose their schema-migration strategy, whether all legacy data is to be migrated eagerly by means of NotaQL transformation scripts, or lazily, as declared by object-mapper annotations. Our tool is even capable of carrying out both strategies in combination, eagerly migrating data in the background, while lazily migrating data that is meanwhile accessed by the application. From the viewpoint of the application, it remains transparent how legacy data is migrated: Every read access yields an entity that matches the structure that the current application code expects. Our live demo shows how ControVol Flex gracefully solves a broad range of common schema-evolution tasks.},
  language = {en}
}

@inproceedings{StoerlTekleabKlettkeetal.,
  author = {St{\"o}rl, Uta and Tekleab, Alexander and Klettke, Meike and Scherzinger, Stefanie},
  title = {In for a Surprise When Migrating NoSQL Data},
  booktitle = {2018 IEEE 34th International Conference on Data Engineering (ICDE), 16-19 April 2018, Paris, France},
  publisher = {IEEE},
  doi = {10.1109/ICDE.2018.00202},
  pages = {1662},
  abstract = {Schema-flexible NoSQL data stores lend themselves nicely to storing versioned data, a product of schema evolution. In this lightning talk, we apply pending schema changes to records that have been persisted several schema versions back. We present first experiments with MongoDB and Cassandra, where we explore the trade-off between applying chains of pending changes stepwise (one after the other), and as composite operations. Contrary to intuition, composite migration is not necessarily faster. The culprit is the computational overhead for deriving the compositions. However, caching composition formulae achieves a speed-up: For Cassandra, we can cut the runtime by nearly 80\%. Surprisingly, the relative speedup seems to be system-dependent. Our take-away message is that in applying pending schema changes in NoSQL data stores, we need to base our design decisions on experimental evidence rather than on intuition alone.},
  language = {en}
}
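To illustrate the stepwise-versus-composite trade-off measured in the entry above (a minimal sketch with invented migration steps, not the actual mechanics of MongoDB or Cassandra): deriving the composite of a chain of pending changes costs time of its own, but caching the composed operation lets many records reuse it.

from functools import reduce

steps = {  # per-version migration functions, purely illustrative
    1: lambda e: {**e, "genre": "unknown"},
    2: lambda e: {**e, "genre": e["genre"].upper()},
    3: lambda e: {**e, "migrated": True},
}

def stepwise(entity, frm, to):
    # Apply each pending change one after the other.
    for v in range(frm, to):
        entity = steps[v](entity)
    return entity

_composites = {}  # cache of derived composition formulae

def composed(frm, to):
    # Derive (once) a single composite function covering versions frm..to.
    if (frm, to) not in _composites:
        _composites[(frm, to)] = reduce(
            lambda f, g: (lambda e: g(f(e))),
            (steps[v] for v in range(frm, to)))
    return _composites[(frm, to)]

record = {"title": "Stalker"}
assert stepwise(dict(record), 1, 4) == composed(1, 4)(dict(record))

Whether the cached composite actually wins then becomes an empirical question, which is precisely the abstract's point.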