@misc{SchintkeReinefeldHaridietal.2009,
  author   = {Schintke, Florian and Reinefeld, Alexander and Haridi, Seif and Sch{\"u}tt, Thorsten},
  title    = {Enhanced Paxos Commit for Transactions on DHTs},
  issn     = {1438-0064},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11448},
  number   = {09-28},
  year     = {2009},
  abstract = {Key/value stores built on structured overlay networks often lack support for atomic transactions and strong data consistency among replicas. This is unfortunate, because consistency guarantees and transactions would allow a wide range of additional application domains to benefit from the inherent scalability and fault tolerance of DHTs. The Scalaris key/value store supports strong data consistency and atomic transactions. It uses an enhanced Paxos Commit protocol with only four communication steps rather than six. This improvement was made possible by exploiting information about the replica distribution in the DHT. Scalaris enables the implementation of more reliable and scalable infrastructure for collaborative Web services that require strong consistency and atomic changes across multiple items.},
  language = {en}
}

@article{SalemSchintkeSchuettetal.2018,
  author   = {Salem, Farouk and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title    = {Data-flow scheduling for a scalable FLESnet},
  journal  = {CBM Progress Report 2017},
  isbn     = {978-3-9815227-5-4},
  doi      = {10.15120/GSI-2018-00485},
  pages    = {130--131},
  year     = {2018},
  language = {en}
}

@inproceedings{GholamiSchintkeSchuett2018,
  author    = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten},
  title     = {Checkpoint Scheduling for Shared Usage of Burst-Buffers in Supercomputers},
  booktitle = {Proceedings of the 47th International Conference on Parallel Processing Companion; SRMPDS 2018: The 14th International Workshop on Scheduling and Resource Management for Parallel and Distributed Systems},
  doi       = {10.1145/3229710.3229755},
  pages     = {44:1--44:10},
  year      = {2018},
  abstract  = {User-defined and system-level checkpointing have contrary properties. While user-defined checkpoints are smaller and simpler to recover, system-level checkpointing has better knowledge of the global system state and of parameters such as the expected mean time to failure (MTTF) per node. Both approaches lead to non-optimal checkpoint times, intervals, sizes, or I/O bandwidth usage when concurrent checkpoints conflict and compete for bandwidth. We combine user-defined and system-level checkpointing to exploit the benefits and avoid the drawbacks of each approach. Applications frequently offer to create checkpoints; the system either accepts such an offer, according to the current status and the implied cost of recalculating from the last checkpoint, or denies it, i.e., lets the application continue immediately without creating a checkpoint. To support this approach, we develop economic models for multi-application checkpointing on shared I/O resources dedicated to checkpointing (e.g., burst-buffers) by defining an appropriate goal function and solving a global optimization problem. Using our models, the checkpoints of applications on a supercomputer are scheduled to use the available I/O bandwidth effectively and to minimize the failure overhead (checkpoint creations plus recalculations). Our simulations show an overall reduction in failure overhead across all nodes of up to 30\% for a typical supercomputer workload (HLRN). We can also derive the most cost-effective burst-buffer bandwidth for a given node MTTF and application workload.},
  language  = {en}
}

@misc{GholamiSchintkeSchuettetal.2018,
  author   = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title    = {Modeling Checkpoint Schedules for Concurrent HPC Applications},
  journal  = {CoSaS 2018 International Symposium on Computational Science at Scale},
  year     = {2018},
  language = {en}
}

@inproceedings{SchmidtkeSchintkeSchuett2018,
  author    = {Schmidtke, Robert and Schintke, Florian and Sch{\"u}tt, Thorsten},
  title     = {From Application to Disk: Tracing I/O Through the Big Data Stack},
  booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 24-28, 2018, Revised Selected Papers, Workshop on Performance and Scalability of Storage Systems (WOPSSS)},
  doi       = {10.1007/978-3-030-02465-9_6},
  pages     = {89--102},
  year      = {2018},
  abstract  = {Typical applications in data science consume, process, and produce large amounts of data, making disk I/O one of the dominating factors of their overall performance, and thus one worth optimizing. Distributed processing frameworks such as Hadoop, Flink, and Spark hide a lot of complexity from the programmer when they parallelize these applications across a compute cluster. This complicates reasoning about I/O, from the application and the framework, through the distributed file system, such as HDFS, down to the local file systems. We present SFS (Statistics File System), a modular framework to trace each I/O request issued by the application and any JVM-based big data framework involved, mapping these requests to actual disk I/O. This allows the detection of inefficient I/O patterns, both in the applications and in the underlying frameworks, and builds the basis for improving I/O scheduling in the big data software stack.},
  language  = {en}
}

@inproceedings{WeinholdLackorzynskiBierbaumetal.2019,
  author    = {Weinhold, Carsten and Lackorzynski, Adam and Bierbaum, Jan and K{\"u}ttler, Martin and Planeta, Maksym and Weisbach, Hannes and Hille, Matthias and H{\"a}rtig, Hermann and Margolin, Alexander and Sharf, Dror and Levy, Ely and Gak, Pavel and Barak, Amnon and Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander and Lieber, Matthias and Nagel, Wolfgang},
  title     = {FFMK: A Fast and Fault-Tolerant Microkernel-Based System for Exascale Computing},
  booktitle = {Software for Exascale Computing - SPPEXA 2016-2019},
  publisher = {Springer},
  doi       = {10.1007/978-3-030-47956-5_16},
  pages     = {483--516},
  year      = {2019},
  language  = {en}
}

@inproceedings{KindermannSchintkeFritzsch2012,
  author    = {Kindermann, S. and Schintke, Florian and Fritzsch, B.},
  title     = {A Collaborative Data Management Infrastructure for Climate Data Analysis},
  volume    = {14, EGU2012-10569},
  booktitle = {Geophysical Research Abstracts},
  doi       = {10013/epic.39635.d001},
  year      = {2012},
  language  = {en}
}

@incollection{EnkeFiedlerFischeretal.2013,
  author    = {Enke, Harry and Fiedler, Norman and Fischer, Thomas and Gnadt, Timo and Ketzan, Erik and Ludwig, Jens and Rathmann, Torsten and St{\"o}ckle, Gabriel and Schintke, Florian},
  title     = {Leitfaden zum Forschungsdaten-Management},
  booktitle = {Leitfaden zum Forschungsdaten-Management},
  editor    = {Enke, Harry and Ludwig, Jens},
  publisher = {Verlag Werner H{\"u}lsbusch, Gl{\"u}ckstadt},
  year      = {2013},
  language  = {de}
}

@article{EnkePartlReinefeldetal.2012,
  author    = {Enke, Harry and Partl, Adrian and Reinefeld, Alexander and Schintke, Florian},
  title     = {Handling Big Data in Astronomy and Astrophysics},
  volume    = {12},
  journal   = {Datenbank-Spektrum},
  number    = {3},
  publisher = {Springer-Verlag},
  doi       = {10.1007/s13222-012-0099-1},
  pages     = {173--181},
  year      = {2012},
  language  = {en}
}

@article{Schintke2013,
  author    = {Schintke, Florian},
  title     = {XtreemFS \& Scalaris},
  journal   = {Science \& Technology},
  number    = {6},
  publisher = {Pan European Networks},
  pages     = {54--55},
  year      = {2013},
  language  = {en}
}