@article{SeibertPetersSchintke,
  author = {Seibert, Felix and Peters, Mathias and Schintke, Florian},
  title = {Improving I/O Performance Through Colocating Interrelated Input Data and Near-Optimal Load Balancing},
  journal = {Proceedings of the IPDPSW; Fourth IEEE International Workshop on High-Performance Big Data, Deep Learning, and Cloud Computing (HPBDC)},
  volume = {2018},
  doi = {10.1109/IPDPSW.2018.00081},
  pages = {448--457},
  abstract = {Most distributed file systems assign new files to storage servers randomly. While this works well in some situations, it does not help to optimize the input performance for most MapReduce computations' data access patterns. In this work, we consider an access pattern where input files are partitioned into groups of heterogeneous size, and each group is accessed by exactly one process. We design and implement a data placement strategy that places such file groups together on the same storage server. This colocation approach is combined with near-optimal storage load balancing; to achieve it, we use a classical scheduling approximation algorithm to solve the NP-hard group assignment problem. We argue that local processing is beneficial not only because of reduced network traffic, but especially because it imposes an even resource schedule. Our experiments, based on the parallel processing of remote sensing images, reveal an enormous reduction in network traffic and up to 39\% faster input read times. Further, simulations show that our approximate assignments limit storage server imbalances to less than 5\% above the theoretical minimum, in contrast to more than 85\% with random assignment.},
  language = {en}
}

@inproceedings{SchmidtkeSchintkeSchuett,
  author = {Schmidtke, Robert and Schintke, Florian and Sch{\"u}tt, Thorsten},
  title = {From Application to Disk: Tracing I/O Through the Big Data Stack},
  booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 24--28, 2018, Revised Selected Papers, Workshop on Performance and Scalability of Storage Systems (WOPSSS)},
  doi = {10.1007/978-3-030-02465-9_6},
  pages = {89--102},
  abstract = {Typical applications in data science consume, process, and produce large amounts of data, making disk I/O one of the dominating factors of their overall performance and thus worth optimizing. Distributed processing frameworks, such as Hadoop, Flink, and Spark, hide a lot of complexity from the programmer when they parallelize these applications across a compute cluster. This complicates reasoning about the I/O of both the application and the framework, through the distributed file system, such as HDFS, down to the local file systems. We present SFS (Statistics File System), a modular framework to trace each I/O request issued by the application and any JVM-based big data framework involved, mapping these requests to actual disk I/O.
This allows detection of inefficient I/O patterns, both by the applications and by the underlying frameworks, and forms the basis for improving I/O scheduling in the big data software stack.},
  language = {en}
}

@article{SalemSchintkeSchuettetal.,
  author = {Salem, Farouk and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {Data-flow scheduling for a scalable FLESnet},
  journal = {CBM Progress Report 2017},
  isbn = {978-3-9815227-5-4},
  doi = {10.15120/GSI-2018-00485},
  pages = {130--131},
  language = {en}
}

@inproceedings{GholamiSchintkeSchuett,
  author = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten},
  title = {Checkpoint Scheduling for Shared Usage of Burst-Buffers in Supercomputers},
  booktitle = {Proceedings of the 47th International Conference on Parallel Processing Companion; SRMPDS 2018: The 14th International Workshop on Scheduling and Resource Management for Parallel and Distributed Systems},
  doi = {10.1145/3229710.3229755},
  pages = {44:1--44:10},
  abstract = {User-defined and system-level checkpointing have contrary properties. While user-defined checkpoints are smaller and simpler to recover, system-level checkpointing has better knowledge of the global system state and of parameters such as the expected mean time to failure (MTTF) per node. Both approaches lead to non-optimal checkpoint times, intervals, sizes, or I/O bandwidth usage when concurrent checkpoints conflict and compete for bandwidth. We combine user-defined and system-level checkpointing to exploit the benefits and avoid the drawbacks of both. Applications frequently offer to create checkpoints; the system accepts such offers according to the current status and the implied cost of recalculating from the last checkpoint, or denies them, i.e., immediately lets the application continue without creating a checkpoint. To support this approach, we develop economic models for multi-application checkpointing on shared I/O resources dedicated to checkpointing (e.g., burst buffers) by defining an appropriate goal function and solving a global optimization problem. Using our models, the checkpoints of applications on a supercomputer are scheduled to use the available I/O bandwidth effectively and to minimize the failure overhead (checkpoint creations plus recalculations). Our simulations show an overall reduction in failure overhead across all nodes of up to 30\% for a typical supercomputer workload (HLRN). We can also derive the most cost-effective burst-buffer bandwidth for a given node MTTF and application workload.},
  language = {en}
}

@misc{GholamiSchintkeSchuettetal.,
  author = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander},
  title = {Modeling Checkpoint Schedules for Concurrent HPC Applications},
  howpublished = {CoSaS 2018 International Symposium on Computational Science at Scale},
  language = {en}
}