@inproceedings{KlettkeStoerlShenavaietal.,
  author    = {Klettke, Meike and St{\"o}rl, Uta and Shenavai, Manuel and Scherzinger, Stefanie},
  title     = {NoSQL schema evolution and big data migration at scale},
  booktitle = {2016 IEEE International Conference on Big Data (Big Data), 5-8 Dec. 2016, Washington, DC},
  publisher = {IEEE},
  doi       = {10.1109/BigData.2016.7840924},
  pages     = {2764--2774},
  abstract  = {This paper explores scalable implementation strategies for carrying out lazy schema evolution in NoSQL data stores. For decades, schema evolution has been an evergreen in database research. Yet new challenges arise in the context of cloud-hosted data backends: with all database reads and writes charged by the provider, migrating the entire data instance eagerly into a new schema can be prohibitively expensive. Thus, lazy migration may be more cost-efficient, as legacy entities are only migrated in case they are actually accessed by the application. Related work has shown that the overhead of migrating data lazily is affordable when a single evolutionary change is carried out, such as adding a new property. In this paper, we focus on long-term schema evolution, where chains of pending schema evolution operations may have to be applied. Chains occur when legacy entities written several application releases back are finally accessed by the application. We discuss strategies for dealing with chains of evolution operations, in particular, the composition into a single, equivalent composite migration that performs the required version jump. Our experiments with MongoDB focus on scalable implementation strategies. Our lineup further compares the number of write operations, and thus, the operational costs of different data migration strategies.},
  language  = {en}
}

@inproceedings{StoerlTekleabKlettkeetal.,
  author    = {St{\"o}rl, Uta and Tekleab, Alexander and Klettke, Meike and Scherzinger, Stefanie},
  title     = {In for a Surprise When Migrating NoSQL Data},
  booktitle = {2018 IEEE 34th International Conference on Data Engineering (ICDE), 16-19 April 2018, Paris, France},
  publisher = {IEEE},
  doi       = {10.1109/ICDE.2018.00202},
  pages     = {1662},
  abstract  = {Schema-flexible NoSQL data stores lend themselves nicely to storing versioned data, a product of schema evolution. In this lightning talk, we apply pending schema changes to records that have been persisted several schema versions back. We present first experiments with MongoDB and Cassandra, where we explore the trade-off between applying chains of pending changes stepwise (one after the other) and as composite operations. Contrary to intuition, composite migration is not necessarily faster. The culprit is the computational overhead for deriving the compositions. However, caching composition formulae achieves a speed-up: for Cassandra, we can cut the runtime by nearly 80\%. Surprisingly, the relative speed-up seems to be system-dependent.
Our take-away message is that when applying pending schema changes in NoSQL data stores, we need to base our design decisions on experimental evidence rather than on intuition alone.},
  language  = {en}
}

@inproceedings{StoerlMuellerTekleabetal.,
  author    = {St{\"o}rl, Uta and M{\"u}ller, Daniel and Tekleab, Alexander and Tolale, Stephane and Stenzel, Julian and Klettke, Meike and Scherzinger, Stefanie},
  title     = {Curating Variational Data in Application Development},
  booktitle = {2018 IEEE 34th International Conference on Data Engineering (ICDE), 16-19 April 2018, Paris, France},
  publisher = {IEEE},
  doi       = {10.1109/ICDE.2018.00187},
  pages     = {1605--1608},
  abstract  = {Building applications for processing data lakes is a software engineering challenge. We present Darwin, a middleware for applications that operate on variational data. This concerns data with heterogeneous structure, usually stored within a schema-flexible NoSQL database. Darwin assists application developers in essential data and schema curation tasks: upon request, Darwin extracts a schema description, discovers the history of schema versions, and proposes mappings between these versions. Users of Darwin may interactively choose which mappings are most realistic. Darwin is further capable of rewriting queries at runtime, to ensure that queries also comply with legacy data. Alternatively, Darwin can migrate legacy data to reduce the structural heterogeneity. Using Darwin, developers may thus evolve their data in sync with their code. In our hands-on demo, we curate synthetic as well as real-life datasets.},
  language  = {en}
}

@inproceedings{KlettkeAwolinStoerletal.,
  author    = {Klettke, Meike and Awolin, Hannes and St{\"o}rl, Uta and M{\"u}ller, Daniel and Scherzinger, Stefanie},
  title     = {Uncovering the evolution history of data lakes},
  booktitle = {2017 IEEE International Conference on Big Data (Big Data), 11-14 Dec. 2017, Boston, MA, USA},
  publisher = {IEEE},
  doi       = {10.1109/BigData.2017.8258204},
  pages     = {2462--2471},
  abstract  = {Data accumulating in data lakes can become inaccessible in the long run when its semantics are not available. The heterogeneity of data formats and the sheer volumes of data collections prohibit cleaning and unifying the data manually. Thus, tools for automated data lake analysis are of great interest. In this paper, we target the particular problem of reconstructing the schema evolution history from data lakes. Knowing how the data is structured, and how this structure has evolved over time, enables programmatic access to the lake. By deriving a sequence of schema versions, rather than a single schema, we take into account structural changes over time. Moreover, we address the challenge of detecting inclusion dependencies. This is a prerequisite for mapping between succeeding schema versions, and in particular, for detecting nontrivial changes such as a property having been moved or copied.
We evaluate our approach for detecting inclusion dependencies using the MovieLens dataset, as well as an adaptation of a dataset containing botanical descriptions, to cover specific edge cases.},
  language  = {en}
}

@article{StoerlKlettkeScherzinger,
  author    = {St{\"o}rl, Uta and Klettke, Meike and Scherzinger, Stefanie},
  title     = {Kurz erkl{\"a}rt: Objekt-NoSQL-Mapping},
  journal   = {Datenbank-Spektrum},
  volume    = {16},
  number    = {1},
  publisher = {Springer},
  doi       = {10.1007/s13222-016-0212-y},
  pages     = {83--87},
  language  = {de}
}

@article{KlettkeScherzingerStoerl,
  author    = {Klettke, Meike and Scherzinger, Stefanie and St{\"o}rl, Uta},
  title     = {Datenbanken ohne Schema?},
  journal   = {Datenbank-Spektrum},
  volume    = {14},
  number    = {2},
  publisher = {Springer},
  doi       = {10.1007/s13222-014-0156-z},
  pages     = {119--129},
  abstract  = {NoSQL database systems are increasingly popular in the development of interactive web applications, not least because they allow for flexible data models. This particularly supports agile project management, which is characterized by frequent releases and, accordingly, frequent changes to the data model. In this article, we give an overview of the specific challenges of agile application development against schema-less NoSQL database systems. We present schema evolution strategies used in practice, and postulate our vision of a dedicated schema management component for NoSQL database systems, designed for continuous and systematic schema evolution.},
  language  = {de}
}

@inproceedings{ScherzingerStoerlKlettke,
  author    = {Scherzinger, Stefanie and St{\"o}rl, Uta and Klettke, Meike},
  title     = {A Datalog-based protocol for lazy data migration in agile NoSQL application development},
  booktitle = {Proceedings of the 15th Symposium on Database Programming Languages (DBPL 2015), SPLASH '15, Pittsburgh, PA, USA, 27 Oct. 2015},
  editor    = {Cheney, James and Neumann, Thomas},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {9781450339025},
  doi       = {10.1145/2815072.2815078},
  pages     = {41--44},
  abstract  = {We address a practical challenge in agile web development against NoSQL data stores: upon a new release of the web application, entities already persisted in production no longer match the application code. Rather than migrating all legacy entities eagerly (prior to the release) and at the cost of application downtime, lazy data migration is a popular alternative: when a legacy entity is loaded by the application, all pending structural changes are applied. Yet correctly migrating legacy data from several releases back, involving more than one entity at a time, is not trivial. In this paper, we propose a holistic model for reading, writing, and migrating data, based on non-recursive Datalog with negation. In implementing our model, we may blend established Datalog evaluation algorithms, such as incremental evaluation, with certain rules evaluated bottom-up and certain rules evaluated top-down with sideways information passing.
Our systematic approach guarantees that, from the viewpoint of the application, it remains transparent whether data is migrated eagerly or lazily.},
  language  = {en}
}