@incollection{WendeSteinkeKlemmetal.2014,
  author = {Wende, Florian and Steinke, Thomas and Klemm, Michael and Reinefeld, Alexander},
  title = {Concurrent Kernel Offloading},
  booktitle = {High Performance Parallelism Pearls},
  editor = {Reinders, James and Jeffers, Jim},
  publisher = {Morgan Kaufmann, Elsevier},
  isbn = {978-0128021187},
  year = {2014},
  language = {en}
}

@misc{WendeSteinkeReinefeld2015,
  author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander},
  title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53560},
  year = {2015},
  abstract = {With the growing number of hardware components and the increasing software complexity in upcoming exascale computers, system failures will become the norm rather than the exception for long-running applications. Fault tolerance can be achieved by creating checkpoints during the execution of a parallel program. Checkpoint/Restart (C/R) mechanisms allow both task migration (even in the absence of hardware faults) and restarting of tasks after hardware faults occur. Affected tasks are then migrated to other nodes, which may result in unfortunate process placement and/or oversubscription of compute resources. In this paper we analyze the impact of unfortunate process placement and oversubscription of compute resources on the performance and scalability of two typical HPC application workloads, CP2K and MOM5. Results are given for a Cray XC30/40 with Aries dragonfly topology. Our results indicate that unfortunate process placement has only a small negative impact, whereas oversubscription substantially degrades performance. The latter may be (partially) beneficial only when multiple applications with different computational characteristics are placed on the same node.},
  language = {en}
}

@misc{WendeLaubenderSteinke2014,
  author = {Wende, Florian and Laubender, Guido and Steinke, Thomas},
  title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-49990},
  year = {2014},
  abstract = {The third generation of the North German Supercomputing Alliance (HLRN) compute and storage facilities comprises a Cray XC30 architecture exclusively with Intel Ivy Bridge compute nodes. In the second phase, scheduled for November 2014, the HLRN-III configuration will undergo a substantial upgrade together with the option of integrating accelerator nodes into the system. To support the decision-making process, a four-node Intel Xeon Phi cluster is integrated into the present HLRN-III infrastructure at ZIB. This integration includes user/project management, file system access, and job management via the HLRN-III batch system. For selected workloads, in-depth analysis, migration, and optimization work on Xeon Phi is in progress. We will report our experiences and lessons learned from the Xeon Phi installation and integration process. For selected examples, initial results of the application evaluation on the Xeon Phi cluster platform will be discussed.},
  language = {en}
}
@inproceedings{DresslerSteinke2014,
  author = {Dreßler, Sebastian and Steinke, Thomas},
  title = {An Automated Approach for Estimating the Memory Footprint of Non-linear Data Objects},
  booktitle = {Euro-Par 2013: Parallel Processing Workshops},
  volume = {8374},
  doi = {10.1007/978-3-642-54420-0_25},
  pages = {249--258},
  year = {2014},
  language = {en}
}

@misc{WendeSteinkeCordes2014,
  author = {Wende, Florian and Steinke, Thomas and Cordes, Frank},
  title = {Multi-threaded Kernel Offloading to GPGPU Using Hyper-Q on Kepler Architecture},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50362},
  year = {2014},
  abstract = {Small-scale computations usually cannot fully utilize the compute capabilities of modern GPGPUs. With the Fermi GPU architecture, Nvidia introduced the concurrent kernel execution feature, which allows up to 16 GPU kernels to execute simultaneously on a shared GPU device for better utilization of the respective resources. Insufficient scheduling capabilities in this respect, however, can significantly reduce the theoretical concurrency level. With the Kepler GPU architecture, Nvidia addresses this issue by introducing the Hyper-Q feature with 32 hardware-managed work queues for concurrent kernel execution. We investigate the Hyper-Q feature within heterogeneous workloads with multiple concurrent host threads or processes, each offloading computations to the GPU. By means of a synthetic benchmark kernel and a hybrid parallel CPU-GPU real-world application, we evaluate the performance obtained with Hyper-Q on the GPU and compare it against a kernel reordering mechanism introduced by the authors for the Fermi architecture.},
  language = {en}
}

@article{KruegerGrunzkeGesingetal.2014,
  author = {Kr{\"u}ger, Jens and Grunzke, Richard and Gesing, Sandra and Breuers, Sebastian and Brinkmann, Andr{\'e} and de la Garza, Luis and Kohlbacher, Oliver and Kruse, Martin and Nagel, Wolfgang and Packschies, Lars and M{\"u}ller-Pfefferkorn, Ralph and Sch{\"a}fer, Patrick and Sch{\"a}rfe, Charlotta and Steinke, Thomas and Schlemmer, Tobias and Warzecha, Klaus Dieter and Zink, Andreas and Herres-Pawlis, Sonja},
  title = {The MoSGrid Science Gateway - A Complete Solution for Molecular Simulations},
  journal = {Journal of Chemical Theory and Computation},
  volume = {10},
  number = {6},
  doi = {10.1021/ct500159h},
  pages = {2232--2245},
  year = {2014},
  language = {en}
}

@inproceedings{WendeSteinkeReinefeld2015a,
  author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander},
  title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing},
  booktitle = {Proceedings of the 3rd International Conference on Exascale Applications and Software, EASC 2015},
  editor = {Gray, A. and Smith, L. and Weiland, M.},
  publisher = {The University of Edinburgh},
  isbn = {978-0-9926615-1-9},
  pages = {13--18},
  year = {2015},
  language = {en}
}
@article{GrunzkeBreuersGesingetal.2013,
  author = {Grunzke, Richard and Breuers, Sebastian and Gesing, Sandra and Herres-Pawlis, Sonja and Kruse, Martin and Blunk, Dirk and de la Garza, Luis and Packschies, Lars and Sch{\"a}fer, Patrick and Sch{\"a}rfe, Charlotta and Schlemmer, Tobias and Steinke, Thomas and Schuller, Bernd and M{\"u}ller-Pfefferkorn, Ralph and J{\"a}kel, Ren{\'e} and Nagel, Wolfgang and Atkinson, Malcolm and Kr{\"u}ger, Jens},
  title = {Standards-based metadata management for molecular simulations},
  journal = {Concurrency and Computation: Practice and Experience},
  doi = {10.1002/cpe.3116},
  year = {2013},
  language = {en}
}

@inproceedings{WeinholdLackorzynskiBierbaumetal.2016,
  author = {Weinhold, Carsten and Lackorzynski, Adam and Bierbaum, Jan and K{\"u}ttler, Martin and Planeta, Maksym and H{\"a}rtig, Hermann and Shiloh, Amnon and Levy, Ely and Ben-Nun, Tal and Barak, Amnon and Steinke, Thomas and Sch{\"u}tt, Thorsten and Fajerski, Jan and Reinefeld, Alexander and Lieber, Matthias and Nagel, Wolfgang},
  title = {FFMK: A Fast and Fault-tolerant Microkernel-based System for Exascale Computing},
  booktitle = {SPPEXA Symposium 2016},
  doi = {10.1007/978-3-319-40528-5_18},
  year = {2016},
  language = {en}
}

@inproceedings{FajerskiNoackReinefeldetal.2016,
  author = {Fajerski, J. and Noack, Matthias and Reinefeld, Alexander and Schintke, Florian and Sch{\"u}tt, Thorsten and Steinke, Thomas},
  title = {Fast In-Memory Checkpointing with POSIX API for Legacy Exascale-Applications},
  booktitle = {SPPEXA Symposium 2016},
  doi = {10.1007/978-3-319-40528-5_19},
  year = {2016},
  language = {en}
}