@inproceedings{KlettkeStoerlShenavaietal.,
  author    = {Klettke, Meike and St{\"o}rl, Uta and Shenavai, Manuel and Scherzinger, Stefanie},
  title     = {NoSQL schema evolution and big data migration at scale},
  booktitle = {2016 IEEE International Conference on Big Data (Big Data), 5-8 Dec. 2016, Washington, DC},
  publisher = {IEEE},
  year      = {2016},
  doi       = {10.1109/BigData.2016.7840924},
  pages     = {2764--2774},
  abstract  = {This paper explores scalable implementation strategies for carrying out lazy schema evolution in NoSQL data stores. For decades, schema evolution has been an evergreen in database research. Yet new challenges arise in the context of cloud-hosted data backends: With all database reads and writes charged by the provider, migrating the entire data instance eagerly into a new schema can be prohibitively expensive. Thus, lazy migration may be more cost-efficient, as legacy entities are only migrated in case they are actually accessed by the application. Related work has shown that the overhead of migrating data lazily is affordable when a single evolutionary change is carried out, such as adding a new property. In this paper, we focus on long-term schema evolution, where chains of pending schema evolution operations may have to be applied. Chains occur when legacy entities written several application releases back are finally accessed by the application. We discuss strategies for dealing with chains of evolution operations, in particular, the composition into a single, equivalent composite migration that performs the required version jump. Our experiments with MongoDB focus on scalable implementation strategies. Our lineup further compares the number of write operations, and thus, the operational costs of different data migration strategies.},
  language  = {en}
}

@inproceedings{StoerlMuellerTekleabetal.,
  author    = {St{\"o}rl, Uta and M{\"u}ller, Daniel and Tekleab, Alexander and Tolale, Stephane and Stenzel, Julian and Klettke, Meike and Scherzinger, Stefanie},
  title     = {Curating Variational Data in Application Development},
  booktitle = {2018 IEEE 34th International Conference on Data Engineering, 16-19 April 2018, Paris, France},
  publisher = {IEEE},
  year      = {2018},
  doi       = {10.1109/ICDE.2018.00187},
  pages     = {1605--1608},
  abstract  = {Building applications for processing data lakes is a software engineering challenge. We present Darwin, a middleware for applications that operate on variational data. This concerns data with heterogeneous structure, usually stored within a schema-flexible NoSQL database. Darwin assists application developers in essential data and schema curation tasks: Upon request, Darwin extracts a schema description, discovers the history of schema versions, and proposes mappings between these versions. Users of Darwin may interactively choose which mappings are most realistic. Darwin is further capable of rewriting queries at runtime, to ensure that queries also comply with legacy data. Alternatively, Darwin can migrate legacy data to reduce the structural heterogeneity. Using Darwin, developers may thus evolve their data in sync with their code. In our hands-on demo, we curate synthetic as well as real-life datasets.},
  language  = {en}
}