@inproceedings{StoerlMuellerTekleabetal.,
  author    = {St{\"o}rl, Uta and M{\"u}ller, Daniel and Tekleab, Alexander and Tolale, Stephane and Stenzel, Julian and Klettke, Meike and Scherzinger, Stefanie},
  title     = {Curating Variational Data in Application Development},
  booktitle = {2018 IEEE 34th International Conference on Data Engineering, 16-19 April 2018, Paris, France},
  publisher = {IEEE},
  year      = {2018},
  doi       = {10.1109/ICDE.2018.00187},
  pages     = {1605--1608},
  abstract  = {Building applications for processing data lakes is a software engineering challenge. We present Darwin, a middleware for applications that operate on variational data. This concerns data with heterogeneous structure, usually stored within a schema-flexible NoSQL database. Darwin assists application developers in essential data and schema curation tasks: upon request, Darwin extracts a schema description, discovers the history of schema versions, and proposes mappings between these versions. Users of Darwin may interactively choose which mappings are most realistic. Darwin is further capable of rewriting queries at runtime to ensure that queries also comply with legacy data. Alternatively, Darwin can migrate legacy data to reduce the structural heterogeneity. Using Darwin, developers may thus evolve their data in sync with their code. In our hands-on demo, we curate synthetic as well as real-life datasets.},
  language  = {en}
}

@inproceedings{KlettkeAwolinStoerletal.,
  author    = {Klettke, Meike and Awolin, Hannes and St{\"o}rl, Uta and M{\"u}ller, Daniel and Scherzinger, Stefanie},
  title     = {Uncovering the evolution history of data lakes},
  booktitle = {2017 IEEE International Conference on Big Data (Big Data), 11-14 Dec. 2017, Boston, MA, USA},
  publisher = {IEEE},
  year      = {2017},
  doi       = {10.1109/BigData.2017.8258204},
  pages     = {2462--2471},
  abstract  = {Data accumulating in data lakes can become inaccessible in the long run when its semantics are not available. The heterogeneity of data formats and the sheer volumes of data collections prohibit cleaning and unifying the data manually. Thus, tools for automated data lake analysis are of great interest. In this paper, we target the particular problem of reconstructing the schema evolution history from data lakes. Knowing how the data is structured, and how this structure has evolved over time, enables programmatic access to the lake. By deriving a sequence of schema versions, rather than a single schema, we take into account structural changes over time. Moreover, we address the challenge of detecting inclusion dependencies. This is a prerequisite for mapping between succeeding schema versions and, in particular, for detecting nontrivial changes such as a property having been moved or copied. We evaluate our approach for detecting inclusion dependencies using the MovieLens dataset, as well as an adaptation of a dataset containing botanical descriptions, to cover specific edge cases.},
  language  = {en}
}