@inproceedings{SchmidtkeSchintkeSchuett2018, author = {Schmidtke, Robert and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {From Application to Disk: Tracing I/O Through the Big Data Stack}, booktitle = {High Performance Computing - ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 24 - 28, 2018, Revised Selected Papers, Workshop on Performance and Scalability of Storage Systems (WOPSSS)}, doi = {10.1007/978-3-030-02465-9_6}, pages = {89 -- 102}, year = {2018}, abstract = {Typical applications in data science consume, process and produce large amounts of data, making disk I/O one of the dominant factors of their overall performance and thus a worthwhile target for optimization. Distributed processing frameworks, such as Hadoop, Flink and Spark, hide a lot of complexity from the programmer when they parallelize these applications across a compute cluster. This complicates reasoning about the I/O of both the application and the framework, through the distributed file system, such as HDFS, down to the local file systems. We present SFS (Statistics File System), a modular framework to trace each I/O request issued by the application and any JVM-based big data framework involved, mapping these requests to actual disk I/O. This allows detection of inefficient I/O patterns, both by the applications and the underlying frameworks, and builds the basis for improving I/O scheduling in the big data software stack.}, language = {en} }

@inproceedings{WeinholdLackorzynskiBierbaumetal.2019, author = {Weinhold, Carsten and Lackorzynski, Adam and Bierbaum, Jan and K{\"u}ttler, Martin and Planeta, Maksym and Weisbach, Hannes and Hille, Matthias and H{\"a}rtig, Hermann and Margolin, Alexander and Sharf, Dror and Levy, Ely and Gak, Pavel and Barak, Amnon and Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander and Lieber, Matthias and Nagel, Wolfgang}, title = {FFMK: A Fast and Fault-Tolerant Microkernel-Based System for Exascale Computing}, booktitle = {Software for Exascale Computing - SPPEXA 2016-2019}, publisher = {Springer}, doi = {10.1007/978-3-030-47956-5_16}, pages = {483 -- 516}, year = {2019}, language = {en} }

@misc{DoebbelinSchuettReinefeld2013, author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Building Large Compressed PDBs for the Sliding Tile Puzzle}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18095}, year = {2013}, abstract = {The performance of heuristic search algorithms depends crucially on the effectiveness of the heuristic. A pattern database (PDB) is a powerful heuristic in the form of a pre-computed lookup table. Larger PDBs provide better bounds and thus allow more cut-offs in the search process. Today, the largest PDB for the 24-puzzle is a 6-6-6-6 PDB with a size of 486 MB. We created 8-8-8, 9-8-7 and 9-9-6 PDBs that are three orders of magnitude larger (up to 1.4 TB) than the 6-6-6-6 PDB. We show how to compute such large PDBs and we present statistical and empirical data on their efficiency. The largest single PDB gives on average an 8-fold improvement over the 6-6-6-6 PDB.
Combining several large PDBs gives on average a 12-fold improvement.}, language = {en} }

@article{KruberHoegqvistSchuett2011, author = {Kruber, Nico and H{\"o}gqvist, Mikael and Sch{\"u}tt, Thorsten}, title = {The Benefits of Estimated Global Information in DHT Load Balancing}, journal = {IEEE International Symposium on Cluster Computing and the Grid (CCGrid)}, publisher = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, doi = {10.1109/CCGrid.2011.11}, pages = {382 -- 391}, year = {2011}, language = {en} }

@inproceedings{DoebbelinSchuettReinefeld2012, author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {An Analysis of SMP Memory Allocators}, booktitle = {Proceedings of the 41st International Conference on Parallel Processing Workshops (Fifth International Workshop on Parallel Programming Models and Systems Software for High-End Computing (P2S2))}, publisher = {IEEE Computer Society}, doi = {10.1109/ICPPW.2012.10}, pages = {48 -- 54}, year = {2012}, language = {en} }

@inproceedings{SchuettDoebbelinReinefeld2013, author = {Sch{\"u}tt, Thorsten and D{\"o}bbelin, Robert and Reinefeld, Alexander}, title = {Forward Perimeter Search with Controlled Use of Memory}, booktitle = {International Joint Conference on Artificial Intelligence, IJCAI-13, Beijing}, year = {2013}, language = {en} }

@article{SchuettReinefeldDoebbelin2011, author = {Sch{\"u}tt, Thorsten and Reinefeld, Alexander and D{\"o}bbelin, Robert}, title = {MR-search: massively parallel heuristic search}, volume = {25}, journal = {Concurrency and Computation: Practice and Experience}, number = {1}, doi = {10.1002/cpe.1833}, pages = {40 -- 54}, year = {2011}, language = {en} }

@article{SalemSchintkeSchuettetal.2019, author = {Salem, Farouk and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Improving the throughput of a scalable FLESnet using the Data-Flow Scheduler}, journal = {CBM Progress Report 2018}, isbn = {978-3-9815227-6-1}, doi = {10.15120/GSI-2019-01018}, pages = {149 -- 150}, year = {2019}, language = {en} }

@inproceedings{HartungSchintkeSchuett2019, author = {Hartung, Marc and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {Pinpoint Data Races via Testing and Classification}, booktitle = {2019 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW); 3rd International Workshop on Software Faults (IWSF 2019)}, doi = {10.1109/ISSREW.2019.00100}, pages = {386 -- 393}, year = {2019}, language = {en} }

@article{SalemSchintkeSchuettetal.2020, author = {Salem, Farouk and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Scheduling data streams for low latency and high throughput on a Cray XC40 using Libfabric}, volume = {32}, journal = {Concurrency and Computation: Practice and Experience}, number = {20}, doi = {10.1002/cpe.5563}, pages = {1 -- 14}, year = {2020}, language = {en} }

@article{SkrzypczakSchintkeSchuett2020, author = {Skrzypczak, Jan and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {RMWPaxos: Fault-Tolerant In-Place Consensus Sequences}, volume = {31}, journal = {IEEE Transactions on Parallel and Distributed Systems}, number = {10}, issn = {1045-9219}, arxiv = {http://arxiv.org/abs/2001.03362}, doi = {10.1109/TPDS.2020.2981891}, pages = {2392 -- 2405}, year = {2020}, language = {en} }

@article{SchuettSchintkeSkrzypczak2020, author = {Sch{\"u}tt, Thorsten and Schintke, Florian and Skrzypczak, Jan}, title = {Transactions on Red-black and AVL trees in NVRAM}, journal = {arXiv}, arxiv = {http://arxiv.org/abs/2006.16284}, year = {2020}, language = {en} }

@inproceedings{SkrzypczakSchintkeSchuett2019, author = {Skrzypczak, Jan and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {Linearizable State Machine Replication of State-Based CRDTs without Logs}, booktitle = {Proceedings of the 2019 ACM Symposium on Principles of Distributed Computing, PODC 2019}, doi = {10.1145/3293611.3331568}, pages = {455 -- 457}, year = {2019}, abstract = {General solutions of state machine replication have to ensure that all replicas apply the same commands in the same order, even in the presence of failures. Such strict ordering incurs high synchronization costs due to the use of distributed consensus or a leader. This paper presents a protocol for linearizable state machine replication of conflict-free replicated data types (CRDTs) that neither requires consensus nor a leader. By leveraging the properties of state-based CRDTs - in particular the monotonic growth of a join semilattice - synchronization overhead is greatly reduced. In addition, updates just need a single round trip and modify the state 'in-place' without the need for a log. Furthermore, the message size overhead for coordination consists of a single counter per message. While reads in the presence of concurrent updates are not wait-free without a coordinator, we show that more than 97\% of reads can be handled in one or two round trips under highly concurrent accesses. Our protocol achieves high throughput without auxiliary processes such as command log management or leader election. It is well suited for all practical scenarios that need linearizable access on CRDT data on a fine-granular scale.}, language = {en} }

@inproceedings{SalemSchuettSchintkeetal.2019, author = {Salem, Farouk and Sch{\"u}tt, Thorsten and Schintke, Florian and Reinefeld, Alexander}, title = {Scheduling Data Streams for Low Latency and High Throughput on a Cray XC40 Using Libfabric}, booktitle = {CUG Conference Proceedings}, year = {2019}, abstract = {Achieving efficient many-to-many communication on a given network topology is a challenging task when many data streams from different sources have to be scattered concurrently to many destinations with low variance in arrival times. In such scenarios, it is critical to saturate but not to congest the bisection bandwidth of the network topology in order to achieve a good aggregate throughput. When there are many concurrent point-to-point connections, the communication pattern needs to be dynamically scheduled in a fine-grained manner to avoid network congestion (links, switches), overload in the node's incoming links, and receive buffer overflow. Motivated by the use case of the Compressed Baryonic Matter experiment (CBM), we study the performance and variance of such communication patterns on a Cray XC40 with different routing schemes and scheduling approaches.
We present a distributed Data Flow Scheduler (DFS) that reduces the variance of arrival times from all sources by a factor of at least 30 and increases the achieved aggregate bandwidth by up to 50\%.}, language = {en} }

@article{SkrzypczakSchintkeSchuett2019a, author = {Skrzypczak, Jan and Schintke, Florian and Sch{\"u}tt, Thorsten}, title = {Linearizable State Machine Replication of State-Based CRDTs without Logs}, journal = {arXiv}, arxiv = {http://arxiv.org/abs/1905.08733v1}, year = {2019}, abstract = {General solutions of state machine replication have to ensure that all replicas apply the same commands in the same order, even in the presence of failures. Such strict ordering incurs high synchronization costs caused by distributed consensus or by the use of a leader. This paper presents a protocol for linearizable state machine replication of conflict-free replicated data types (CRDTs) that neither requires consensus nor a leader. By leveraging the properties of state-based CRDTs - in particular the monotonic growth of a join semilattice - synchronization overhead is greatly reduced. In addition, updates just need a single round trip and modify the state `in-place' without the need for a log. Furthermore, the message size overhead for coordination consists of a single counter per message. While reads in the presence of concurrent updates are not wait-free without a coordinator, we show that more than 97\% of reads can be handled in one or two round trips under highly concurrent accesses. Our protocol achieves high throughput without auxiliary processes like command log management or leader election. It is well suited for all practical scenarios that need linearizable access on CRDT data on a fine-granular scale.}, language = {en} }

@inproceedings{SchintkeReinefeldHaridietal.2010, author = {Schintke, Florian and Reinefeld, Alexander and Haridi, Seif and Sch{\"u}tt, Thorsten}, title = {Enhanced Paxos Commit for Transactions on DHTs}, booktitle = {CCGRID}, publisher = {IEEE}, doi = {10.1109/CCGRID.2010.41}, pages = {448 -- 454}, year = {2010}, language = {en} }

@inproceedings{ReinefeldSchuett2009, author = {Reinefeld, Alexander and Sch{\"u}tt, Thorsten}, title = {Out-of-Core Parallel Frontier Search with MapReduce}, volume = {5976}, booktitle = {HPCS}, publisher = {Springer}, doi = {10.1007/978-3-642-12659-8_24}, pages = {323 -- 336}, year = {2009}, language = {en} }

@inproceedings{SchuettReinefeldSchintkeetal.2009, author = {Sch{\"u}tt, Thorsten and Reinefeld, Alexander and Schintke, Florian and Hennig, C.}, title = {Self-Adaptation in Large-Scale Systems: A Study on Structured Overlays Across Multiple Datacenters}, booktitle = {Architectures and Languages for Self-Managing Distributed Systems (SelfMan@SASO)}, doi = {10.1109/SASOW.2010.65}, year = {2009}, language = {en} }

@inproceedings{SchuettMoserPlantikowetal.2008, author = {Sch{\"u}tt, Thorsten and Moser, Monika and Plantikow, Stefan and Schintke, Florian and Reinefeld, Alexander}, title = {A Transactional Scalable Distributed Data Store}, booktitle = {1st IEEE International Scalable Computing Challenge, co-located with CCGrid'08}, year = {2008}, language = {en} }

@misc{AndrzejakReinefeldSchintkeetal.2008, author = {Andrzejak, Artur and Reinefeld, Alexander and Schintke, Florian and Sch{\"u}tt, Thorsten and Mastroianni, Carlo and Fragopoulou, Paraskevi and Kondo, Derrick and Malecot, Paul and Cosmin Silaghi, Gheorghe and Moura Silva, Luis and Trunfio, Paolo and Zeinalipour-Yazti, Demetris and Zimeo, Eugenio}, title = {Grid Architectural Issues: State-of-the-art and Future Trends}, number = {WHP-0004}, publisher = {Institute on Architectural Issues: Scalability, Dependability, Adaptability, CoreGRID - Network of Excellence}, year = {2008}, language = {en} }