@inproceedings{WendeSteinke2013, author = {Wende, Florian and Steinke, Thomas}, title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU}, booktitle = {SC '13: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis}, publisher = {ACM}, address = {New York, NY, USA}, note = {Article No. 83}, doi = {10.1145/2503210.2503254}, year = {2013}, language = {en} }
@article{BenkridElArabyHuangetal.2012, author = {Benkrid, Khaled and El-Araby, Esam and Huang, Miaoqing and Sano, Kentaro and Steinke, Thomas}, title = {High-Performance Reconfigurable Computing - Editorial for the Special Issue of the International Journal of Reconfigurable Computing}, volume = {2012}, journal = {International Journal of Reconfigurable Computing}, doi = {10.1155/2012/104963}, pages = {1 -- 2}, year = {2012}, language = {en} }
@inproceedings{NoackWendeSteinkeetal.2014, author = {Noack, Matthias and Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {A Unified Programming Model for Intra- and Inter-Node Offloading on Xeon Phi Clusters}, booktitle = {SC '14: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis. SC14, November 16-21, 2014, New Orleans, Louisiana, USA}, doi = {10.1109/SC.2014.22}, year = {2014}, abstract = {Standard offload programming models for the Xeon Phi, e.g. Intel LEO and OpenMP 4.0, are restricted to a single compute node and hence a limited number of coprocessors. Scaling applications across a Xeon Phi cluster/supercomputer thus requires hybrid programming approaches, usually MPI+X. In this work, we present a framework based on heterogeneous active messages (HAM-Offload) that provides the means to offload work to local and remote (co)processors using a unified offload API. Since HAM-Offload provides similar primitives as current local offload frameworks, existing applications can be easily ported to overcome the single-node limitation while keeping the convenient offload programming model. We demonstrate the effectiveness of the framework by using it to enable a real-world application from the field of molecular dynamics to use multiple local and remote Xeon Phis. The evaluation shows good scaling behavior. Compared with LEO, performance is equal for large offloads and significantly better for small offloads.}, language = {en} }
@incollection{WendeSteinkeKlemmetal.2014, author = {Wende, Florian and Steinke, Thomas and Klemm, Michael and Reinefeld, Alexander}, title = {Concurrent Kernel Offloading}, booktitle = {High Performance Parallelism Pearls}, editor = {Reinders, James and Jeffers, Jim}, publisher = {Morgan Kaufmann / Elsevier}, isbn = {978-0128021187}, year = {2014}, language = {en} }
@misc{WendeSteinkeReinefeld2015, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53560}, year = {2015}, abstract = {With the growing number of hardware components and the increasing software complexity in the upcoming exascale computers, system failures will become the norm rather than an exception for long-running applications. Fault-tolerance can be achieved by the creation of checkpoints during the execution of a parallel program.
Checkpoint/Restart (C/R) mechanisms allow for both task migration (even if there were no hardware faults) and restarting of tasks after the occurrence of hardware faults. Affected tasks are then migrated to other nodes which may result in unfortunate process placement and/or oversubscription of compute resources. In this paper we analyze the impact of unfortunate process placement and oversubscription of compute resources on the performance and scalability of two typical HPC application workloads, CP2K and MOM5. Results are given for a Cray XC30/40 with Aries dragonfly topology. Our results indicate that unfortunate process placement has only little negative impact while oversubscription substantially degrades the performance. The latter might be only (partially) beneficial when placing multiple applications with different computational characteristics on the same node.}, language = {en} } @article{HeinzeDipankarHenkenetal.2017, author = {Heinze, Rieke and Dipankar, Anurag and Henken, Cintia Carbajal and Moseley, Christopher and Sourdeval, Odran and Tr{\"o}mel, Silke and Xie, Xinxin and Adamidis, Panos and Ament, Felix and Baars, Holger and Barthlott, Christian and Behrendt, Andreas and Blahak, Ulrich and Bley, Sebastian and Brdar, Slavko and Brueck, Matthias and Crewell, Susanne and Deneke, Hartwig and Di Girolamo, Paolo and Evaristo, Raquel and Fischer, J{\"u}rgen and Frank, Christopher and Friederichs, Petra and G{\"o}cke, Tobias and Gorges, Ksenia and Hande, Luke and Hanke, Moritz and Hansen, Akio and Hege, Hans-Christian and Hose, Corinna and Jahns, Thomas and Kalthoff, Norbert and Klocke, Daniel and Kneifel, Stefan and Knippertz, Peter and Kuhn, Alexander and van Laar, Thriza and Macke, Andreas and Maurer, Vera and Mayer, Bernhard and Meyer, Catrin I. and Muppa, Shravan K. and Neggers, Roeland A. J. and Orlandi, Emiliano and Pantillon, Florian and Pospichal, Bernhard and R{\"o}ber, Niklas and Scheck, Leonhard and Seifert, Axel and Seifert, Patric and Senf, Fabian and Siligam, Pavan and Simmer, Clemens and Steinke, Sandra and Stevens, Bjorn and Wapler, Kathrin and Weniger, Michael and Wulfmeyer, Volker and Z{\"a}ngl, G{\"u}nther and Zhang, Dan and Quaas, Johannes}, title = {Large-eddy simulations over Germany using ICON: a comprehensive evaluation}, volume = {143}, journal = {Quarterly Journal of the Royal Meteorological Society}, number = {702}, doi = {10.1002/qj.2947}, pages = {69 -- 100}, year = {2017}, abstract = {Large-eddy simulations (LES) with the new ICOsahedral Non-hydrostatic atmosphere model (ICON) covering Germany are evaluated for four days in spring 2013 using observational data from various sources. Reference simulations with the established Consortium for Small-scale Modelling (COSMO) numerical weather prediction model and further standard LES codes are performed and used as a reference. This comprehensive evaluation approach covers multiple parameters and scales, focusing on boundary-layer variables, clouds and precipitation. The evaluation points to the need to work on parametrizations influencing the surface energy balance, and possibly on ice cloud microphysics. The central purpose for the development and application of ICON in the LES configuration is the use of simulation results to improve the understanding of moist processes, as well as their parametrization in climate models. The evaluation thus aims at building confidence in the model's ability to simulate small- to mesoscale variability in turbulence, clouds and precipitation. 
The results are encouraging: the high-resolution model matches the observed variability much better at small- to mesoscales than the coarser resolved reference model. In its highest grid resolution, the simulated turbulence profiles are realistic and column water vapour matches the observed temporal variability at short time-scales. Despite being somewhat too large and too frequent, small cumulus clouds are well represented in comparison with satellite data, as is the shape of the cloud size spectrum. Variability of cloud water matches the satellite observations much better in ICON than in the reference model. In this sense, it is concluded that the model is fit for the purpose of using its output for parametrization development, despite the potential to improve further some important aspects of processes that are also parametrized in the high-resolution model.}, language = {en} } @misc{WendeLaubenderSteinke2014, author = {Wende, Florian and Laubender, Guido and Steinke, Thomas}, title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-49990}, year = {2014}, abstract = {The third generation of the North German Supercomputing Alliance (HLRN) compute and storage facilities comprises a Cray XC30 architecture with exclusively Intel Ivy Bridge compute nodes. In the second phase, scheduled for November 2014, the HLRN-III configuration will undergo a substantial upgrade together with the option of integrating accelerator nodes into the system. To support the decision-making process, a four-node Intel Xeon Phi cluster is integrated into the present HLRN-III infrastructure at ZIB. This integration includes user/project management, file system access and job management via the HLRN-III batch system. For selected workloads, in-depth analysis, migration and optimization work on Xeon Phi is in progress. We will report our experiences and lessons learned within the Xeon Phi installation and integration process. For selected examples, initial results of the application evaluation on the Xeon Phi cluster platform will be discussed.}, language = {en} } @inproceedings{DresslerSteinke2014, author = {Dreßler, Sebastian and Steinke, Thomas}, title = {An Automated Approach for Estimating the Memory Footprint of Non-linear Data Objects}, volume = {8374}, booktitle = {Euro-Par 2013: Parallel Processing Workshops}, doi = {10.1007/978-3-642-54420-0_25}, pages = {249 -- 258}, year = {2014}, language = {en} } @misc{WendeSteinkeCordes2014, author = {Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {Multi-threaded Kernel Offloading to GPGPU Using Hyper-Q on Kepler Architecture}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50362}, year = {2014}, abstract = {Small-scale computations usually cannot fully utilize the compute capabilities of modern GPGPUs. With the Fermi GPU architecture Nvidia introduced the concurrent kernel execution feature allowing up to 16 GPU kernels to execute simultaneously on a shared GPU device for a better utilization of the respective resources. Insufficient scheduling capabilities in this respect, however, can significantly reduce the theoretical concurrency level. With the Kepler GPU architecture Nvidia addresses this issue by introducing the Hyper-Q feature with 32 hardware managed work queues for concurrent kernel execution. 
We investigate the Hyper-Q feature within heterogeneous workloads with multiple concurrent host threads or processes, each offloading computations to the GPU. By means of a synthetic benchmark kernel and a hybrid parallel CPU-GPU real-world application, we evaluate the performance obtained with Hyper-Q on the GPU and compare it against a kernel reordering mechanism introduced by the authors for the Fermi architecture.}, language = {en} }
@article{KruegerGrunzkeGesingetal.2014, author = {Kr{\"u}ger, Jens and Grunzke, Richard and Gesing, Sandra and Breuers, Sebastian and Brinkmann, Andr{\'e} and de la Garza, Luis and Kohlbacher, Oliver and Kruse, Martin and Nagel, Wolfgang and Packschies, Lars and M{\"u}ller-Pfefferkorn, Ralph and Sch{\"a}fer, Patrick and Sch{\"a}rfe, Charlotta and Steinke, Thomas and Schlemmer, Tobias and Warzecha, Klaus Dieter and Zink, Andreas and Herres-Pawlis, Sonja}, title = {The MoSGrid Science Gateway - A Complete Solution for Molecular Simulations}, volume = {10}, journal = {Journal of Chemical Theory and Computation}, number = {6}, doi = {10.1021/ct500159h}, pages = {2232 -- 2245}, year = {2014}, language = {en} }