@misc{SchintkeReinefeldHaridietal.2009,
  author = {Schintke, Florian and Reinefeld, Alexander and Haridi, Seif and Sch{\"u}tt, Thorsten},
  title = {Enhanced Paxos Commit for Transactions on DHTs},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11448},
  number = {09-28},
  year = {2009},
  abstract = {Key/value stores that are built on structured overlay networks often lack support for atomic transactions and strong data consistency among replicas. This is unfortunate, because consistency guarantees and transactions would allow a wide range of additional application domains to benefit from the inherent scalability and fault tolerance of DHTs. The Scalaris key/value store supports strong data consistency and atomic transactions. It uses an enhanced Paxos Commit protocol with only four communication steps rather than six. This improvement was possible by exploiting information from the replica distribution in the DHT. Scalaris enables the implementation of more reliable and scalable infrastructures for collaborative Web services that require strong consistency and atomic changes across multiple items.},
  language = {en}
}

@inproceedings{DoebbelinSchuettReinefeld2013,
  author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {Building Large Compressed PDBs for the Sliding Tile Puzzle},
  booktitle = {Workshop on Computer Games},
  year = {2013},
  language = {en}
}

@article{SalemSchintkeSchuettetal.2018,
  author = {Salem, Farouk and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {Data-flow scheduling for a scalable FLESnet},
  journal = {CBM Progress Report 2017},
  isbn = {978-3-9815227-5-4},
  doi = {10.15120/GSI-2018-00485},
  pages = {130 -- 131},
  year = {2018},
  language = {en}
}

@inproceedings{GholamiSchintkeSchuett2018,
  author = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten},
  title = {Checkpoint Scheduling for Shared Usage of Burst-Buffers in Supercomputers},
  booktitle = {Proceedings of the 47th International Conference on Parallel Processing Companion; SRMPDS 2018: The 14th International Workshop on Scheduling and Resource Management for Parallel and Distributed Systems},
  doi = {10.1145/3229710.3229755},
  pages = {44:1 -- 44:10},
  year = {2018},
  abstract = {User-defined and system-level checkpointing have contrasting properties. While user-defined checkpoints are smaller and simpler to recover, system-level checkpointing has better knowledge of the global system state and of parameters such as the expected mean time to failure (MTTF) per node. Both approaches lead to non-optimal checkpoint times, intervals, and sizes when concurrent checkpoints conflict and compete for I/O bandwidth. We combine user-defined and system-level checkpointing to exploit the benefits and avoid the drawbacks of each approach. Applications frequently offer to create checkpoints; the system either accepts such an offer, based on the current status and the implied cost of recalculating from the last checkpoint, or denies it, i.e., lets the application continue immediately without creating a checkpoint. To support this approach, we develop economic models for multi-application checkpointing on shared I/O resources dedicated to checkpointing (e.g., burst-buffers) by defining an appropriate goal function and solving a global optimization problem. Using our models, the checkpoints of applications on a supercomputer are scheduled to use the available I/O bandwidth effectively and to minimize the failure overhead (checkpoint creation plus recalculation).
Our simulations show an overall reduction in the failure overhead across all nodes of up to 30\% for a typical supercomputer workload (HLRN). We can also derive the most cost-effective burst-buffer bandwidth for a given node MTTF and application workload.},
  language = {en}
}

@misc{GholamiSchintkeSchuettetal.2018,
  author = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {Modeling Checkpoint Schedules for Concurrent HPC Applications},
  howpublished = {CoSaS 2018 International Symposium on Computational Science at Scale},
  year = {2018},
  language = {en}
}

@inproceedings{SchmidtkeSchintkeSchuett2018,
  author = {Schmidtke, Robert and Schintke, Florian and Sch{\"u}tt, Thorsten},
  title = {From Application to Disk: Tracing I/O Through the Big Data Stack},
  booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 24-28, 2018, Revised Selected Papers; Workshop on Performance and Scalability of Storage Systems (WOPSSS)},
  doi = {10.1007/978-3-030-02465-9_6},
  pages = {89 -- 102},
  year = {2018},
  abstract = {Typical applications in data science consume, process, and produce large amounts of data, making disk I/O one of the dominant factors of their overall performance and thus worth optimizing. Distributed processing frameworks, such as Hadoop, Flink, and Spark, hide a lot of complexity from the programmer when they parallelize these applications across a compute cluster. This complicates reasoning about the I/O of both the application and the framework, from the distributed file system, such as HDFS, down to the local file systems. We present SFS (Statistics File System), a modular framework that traces each I/O request issued by the application and by any JVM-based big data framework involved, mapping these requests to actual disk I/O. This allows the detection of inefficient I/O patterns, both in the applications and in the underlying frameworks, and forms the basis for improving I/O scheduling in the big data software stack.},
  language = {en}
}

@inproceedings{WeinholdLackorzynskiBierbaumetal.2019,
  author = {Weinhold, Carsten and Lackorzynski, Adam and Bierbaum, Jan and K{\"u}ttler, Martin and Planeta, Maksym and Weisbach, Hannes and Hille, Matthias and H{\"a}rtig, Hermann and Margolin, Alexander and Sharf, Dror and Levy, Ely and Gak, Pavel and Barak, Amnon and Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander and Lieber, Matthias and Nagel, Wolfgang},
  title = {FFMK: A Fast and Fault-Tolerant Microkernel-Based System for Exascale Computing},
  booktitle = {Software for Exascale Computing - SPPEXA 2016-2019},
  publisher = {Springer},
  doi = {10.1007/978-3-030-47956-5_16},
  pages = {483 -- 516},
  year = {2019},
  language = {en}
}

@misc{DoebbelinSchuettReinefeld2013a,
  author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {Building Large Compressed PDBs for the Sliding Tile Puzzle},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18095},
  year = {2013},
  abstract = {The performance of heuristic search algorithms depends crucially on the effectiveness of the heuristic. A pattern database (PDB) is a powerful heuristic in the form of a pre-computed lookup table. Larger PDBs provide better bounds and thus allow more cut-offs in the search process. Today, the largest PDB for the 24-puzzle is a 6-6-6-6 PDB with a size of 486 MB.
We created 8-8-8, 9-8-7, and 9-9-6 PDBs that are three orders of magnitude larger (up to 1.4 TB) than the 6-6-6-6 PDB. We show how to compute such large PDBs and present statistical and empirical data on their efficiency. The largest single PDB gives on average an 8-fold improvement over the 6-6-6-6 PDB; combining several large PDBs gives on average a 12-fold improvement.},
  language = {en}
}

@inproceedings{KruberHoegqvistSchuett2011,
  author = {Kruber, Nico and H{\"o}gqvist, Mikael and Sch{\"u}tt, Thorsten},
  title = {The Benefits of Estimated Global Information in DHT Load Balancing},
  booktitle = {IEEE International Symposium on Cluster Computing and the Grid (CCGrid)},
  publisher = {IEEE Computer Society},
  address = {Los Alamitos, CA, USA},
  doi = {10.1109/CCGrid.2011.11},
  pages = {382 -- 391},
  year = {2011},
  language = {en}
}

@inproceedings{DoebbelinSchuettReinefeld2012,
  author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {An Analysis of SMP Memory Allocators},
  booktitle = {Proceedings of the 41st International Conference on Parallel Processing Workshops (Fifth International Workshop on Parallel Programming Models and Systems Software for High-End Computing (P2S2))},
  publisher = {IEEE Computer Society},
  doi = {10.1109/ICPPW.2012.10},
  pages = {48 -- 54},
  year = {2012},
  language = {en}
}