@inproceedings{NoackWendeSteinkeetal.2014, author = {Noack, Matthias and Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {A Unified Programming Model for Intra- and Inter-Node Offloading on Xeon Phi Clusters}, booktitle = {SC '14: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis. SC14, November 16-21, 2014, New Orleans, Louisiana, USA}, doi = {10.1109/SC.2014.22}, year = {2014}, abstract = {Standard offload programming models for the Xeon Phi, e.g. Intel LEO and OpenMP 4.0, are restricted to a single compute node and hence a limited number of coprocessors. Scaling applications across a Xeon Phi cluster/supercomputer thus requires hybrid programming approaches, usually MPI+X. In this work, we present a framework based on heterogeneous active messages (HAM-Offload) that provides the means to offload work to local and remote (co)processors using a unified offload API. Since HAM-Offload provides similar primitives as current local offload frameworks, existing applications can be easily ported to overcome the single-node limitation while keeping the convenient offload programming model. We demonstrate the effectiveness of the framework by using it to enable a real-world application from the field of molecular dynamics to use multiple local and remote Xeon Phis. The evaluation shows good scaling behavior. Compared with LEO, performance is equal for large offloads and significantly better for small offloads.}, language = {en} } @misc{WendeSteinkeKlemmetal.2014, author = {Wende, Florian and Steinke, Thomas and Klemm, Michael and Reinefeld, Alexander}, title = {Concurrent Kernel Offloading}, journal = {High Performance Parallelism Pearls}, editor = {Reinders, James and Jeffers, Jim}, publisher = {Morgan Kaufmann, Elsevier}, isbn = {978-0128021187}, year = {2014}, language = {en} } @misc{WendeSteinkeReinefeld2015, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53560}, year = {2015}, abstract = {With the growing number of hardware components and the increasing software complexity in the upcoming exascale computers, system failures will become the norm rather than an exception for long-running applications. Fault-tolerance can be achieved by the creation of checkpoints during the execution of a parallel program. Checkpoint/Restart (C/R) mechanisms allow for both task migration (even if there were no hardware faults) and restarting of tasks after the occurrence of hardware faults. Affected tasks are then migrated to other nodes which may result in unfortunate process placement and/or oversubscription of compute resources. In this paper we analyze the impact of unfortunate process placement and oversubscription of compute resources on the performance and scalability of two typical HPC application workloads, CP2K and MOM5. Results are given for a Cray XC30/40 with Aries dragonfly topology. Our results indicate that unfortunate process placement has only little negative impact while oversubscription substantially degrades the performance.
The latter might be only (partially) beneficial when placing multiple applications with different computational characteristics on the same node.}, language = {en} } @article{HeinzeDipankarHenkenetal.2017, author = {Heinze, Rieke and Dipankar, Anurag and Henken, Cintia Carbajal and Moseley, Christopher and Sourdeval, Odran and Tr{\"o}mel, Silke and Xie, Xinxin and Adamidis, Panos and Ament, Felix and Baars, Holger and Barthlott, Christian and Behrendt, Andreas and Blahak, Ulrich and Bley, Sebastian and Brdar, Slavko and Brueck, Matthias and Crewell, Susanne and Deneke, Hartwig and Di Girolamo, Paolo and Evaristo, Raquel and Fischer, J{\"u}rgen and Frank, Christopher and Friederichs, Petra and G{\"o}cke, Tobias and Gorges, Ksenia and Hande, Luke and Hanke, Moritz and Hansen, Akio and Hege, Hans-Christian and Hose, Corinna and Jahns, Thomas and Kalthoff, Norbert and Klocke, Daniel and Kneifel, Stefan and Knippertz, Peter and Kuhn, Alexander and van Laar, Thriza and Macke, Andreas and Maurer, Vera and Mayer, Bernhard and Meyer, Catrin I. and Muppa, Shravan K. and Neggers, Roeland A. J. and Orlandi, Emiliano and Pantillon, Florian and Pospichal, Bernhard and R{\"o}ber, Niklas and Scheck, Leonhard and Seifert, Axel and Seifert, Patric and Senf, Fabian and Siligam, Pavan and Simmer, Clemens and Steinke, Sandra and Stevens, Bjorn and Wapler, Kathrin and Weniger, Michael and Wulfmeyer, Volker and Z{\"a}ngl, G{\"u}nther and Zhang, Dan and Quaas, Johannes}, title = {Large-eddy simulations over Germany using ICON: a comprehensive evaluation}, volume = {143}, journal = {Quarterly Journal of the Royal Meteorological Society}, number = {702}, doi = {10.1002/qj.2947}, pages = {69 -- 100}, year = {2017}, abstract = {Large-eddy simulations (LES) with the new ICOsahedral Non-hydrostatic atmosphere model (ICON) covering Germany are evaluated for four days in spring 2013 using observational data from various sources. Reference simulations with the established Consortium for Small-scale Modelling (COSMO) numerical weather prediction model and further standard LES codes are performed and used as a reference. This comprehensive evaluation approach covers multiple parameters and scales, focusing on boundary-layer variables, clouds and precipitation. The evaluation points to the need to work on parametrizations influencing the surface energy balance, and possibly on ice cloud microphysics. The central purpose for the development and application of ICON in the LES configuration is the use of simulation results to improve the understanding of moist processes, as well as their parametrization in climate models. The evaluation thus aims at building confidence in the model's ability to simulate small- to mesoscale variability in turbulence, clouds and precipitation. The results are encouraging: the high-resolution model matches the observed variability much better at small- to mesoscales than the coarser resolved reference model. In its highest grid resolution, the simulated turbulence profiles are realistic and column water vapour matches the observed temporal variability at short time-scales. Despite being somewhat too large and too frequent, small cumulus clouds are well represented in comparison with satellite data, as is the shape of the cloud size spectrum. Variability of cloud water matches the satellite observations much better in ICON than in the reference model. 
In this sense, it is concluded that the model is fit for the purpose of using its output for parametrization development, despite the potential to improve further some important aspects of processes that are also parametrized in the high-resolution model.}, language = {en} } @misc{WendeLaubenderSteinke2014, author = {Wende, Florian and Laubender, Guido and Steinke, Thomas}, title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-49990}, year = {2014}, abstract = {The third generation of the North German Supercomputing Alliance (HLRN) compute and storage facilities comprises a Cray XC30 architecture with exclusively Intel Ivy Bridge compute nodes. In the second phase, scheduled for November 2014, the HLRN-III configuration will undergo a substantial upgrade together with the option of integrating accelerator nodes into the system. To support the decision-making process, a four-node Intel Xeon Phi cluster is integrated into the present HLRN-III infrastructure at ZIB. This integration includes user/project management, file system access and job management via the HLRN-III batch system. For selected workloads, in-depth analysis, migration and optimization work on Xeon Phi is in progress. We will report our experiences and lessons learned within the Xeon Phi installation and integration process. For selected examples, initial results of the application evaluation on the Xeon Phi cluster platform will be discussed.}, language = {en} } @inproceedings{DresslerSteinke2014, author = {Dreßler, Sebastian and Steinke, Thomas}, title = {An Automated Approach for Estimating the Memory Footprint of Non-linear Data Objects}, volume = {8374}, booktitle = {Euro-Par 2013: Parallel Processing Workshops}, doi = {10.1007/978-3-642-54420-0_25}, pages = {249 -- 258}, year = {2014}, language = {en} } @misc{WendeSteinkeCordes2014, author = {Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {Multi-threaded Kernel Offloading to GPGPU Using Hyper-Q on Kepler Architecture}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50362}, year = {2014}, abstract = {Small-scale computations usually cannot fully utilize the compute capabilities of modern GPGPUs. With the Fermi GPU architecture Nvidia introduced the concurrent kernel execution feature allowing up to 16 GPU kernels to execute simultaneously on a shared GPU device for a better utilization of the respective resources. Insufficient scheduling capabilities in this respect, however, can significantly reduce the theoretical concurrency level. With the Kepler GPU architecture Nvidia addresses this issue by introducing the Hyper-Q feature with 32 hardware managed work queues for concurrent kernel execution. We investigate the Hyper-Q feature within heterogeneous workloads with multiple concurrent host threads or processes offloading computations to the GPU each. 
By means of a synthetic benchmark kernel and a hybrid parallel CPU-GPU real-world application, we evaluate the performance obtained with Hyper-Q on GPU and compare it against a kernel reordering mechanism introduced by the authors for the Fermi architecture.}, language = {en} } @article{KruegerGrunzkeGesingetal.2014, author = {Kr{\"u}ger, Jens and Grunzke, Richard and Gesing, Sandra and Breuers, Sebastian and Brinkmann, Andr{\'e} and de la Garza, Luis and Kohlbacher, Oliver and Kruse, Martin and Nagel, Wolfgang and Packschies, Lars and M{\"u}ller-Pfefferkorn, Ralph and Sch{\"a}fer, Patrick and Sch{\"a}rfe, Charlotta and Steinke, Thomas and Schlemmer, Tobias and Warzecha, Klaus Dieter and Zink, Andreas and Herres-Pawlis, Sonja}, title = {The MoSGrid Science Gateway - A Complete Solution for Molecular Simulations}, volume = {10}, journal = {Journal of Chemical Theory and Computation}, number = {6}, doi = {10.1021/ct500159h}, pages = {2232 -- 2245}, year = {2014}, language = {en} } @inproceedings{WendeSteinkeReinefeld2015, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, booktitle = {Proceedings of the 3rd International Conference on Exascale Applications and Software, EASC 2015}, editor = {Gray, A. and Smith, L. and Weiland, M.}, publisher = {The University of Edinburgh}, isbn = {978-0-9926615-1-9}, pages = {13 -- 18}, year = {2015}, language = {en} } @article{GrunzkeBreuersGesingetal.2013, author = {Grunzke, Richard and Breuers, Sebastian and Gesing, Sandra and Herres-Pawlis, Sonja and Kruse, Martin and Blunk, Dirk and de la Garza, Luis and Packschies, Lars and Sch{\"a}fer, Patrick and Sch{\"a}rfe, Charlotta and Schlemmer, Tobias and Steinke, Thomas and Schuller, Bernd and M{\"u}ller-Pfefferkorn, Ralph and J{\"a}kel, Ren{\'e} and Nagel, Wolfgang and Atkinson, Malcolm and Kr{\"u}ger, Jens}, title = {Standards-based metadata management for molecular simulations}, journal = {Concurrency and Computation: Practice and Experience}, doi = {10.1002/cpe.3116}, year = {2013}, language = {en} } @inproceedings{WeinholdLackorzynskiBierbaumetal.2016, author = {Weinhold, Carsten and Lackorzynski, Adam and Bierbaum, Jan and K{\"u}ttler, Martin and Planeta, Maksym and H{\"a}rtig, Hermann and Shiloh, Amnon and Levy, Ely and Ben-Nun, Tal and Barak, Amnon and Steinke, Thomas and Sch{\"u}tt, Thorsten and Fajerski, Jan and Reinefeld, Alexander and Lieber, Matthias and Nagel, Wolfgang}, title = {FFMK: A Fast and Fault-tolerant Microkernel-based System for Exascale Computing}, booktitle = {SPPEXA Symposium 2016}, doi = {10.1007/978-3-319-40528-5_18}, year = {2016}, language = {en} } @inproceedings{FajerskiNoackReinefeldetal.2016, author = {Fajerski, J.
and Noack, Matthias and Reinefeld, Alexander and Schintke, Florian and Sch{\"u}tt, Thorsten and Steinke, Thomas}, title = {Fast In-Memory Checkpointing with POSIX API for Legacy Exascale-Applications}, booktitle = {SPPEXA Symposium 2016}, doi = {10.1007/978-3-319-40528-5_19}, year = {2016}, language = {en} } @misc{MeyerSteinkeBrandletal.2000, author = {Meyer, Michael and Steinke, Thomas and Brandl, Maria and S{\"u}hnel, J{\"u}rgen}, title = {Density functional study of guanine and uracil quartets and of guanine quartet metal/ion complexes}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5842}, number = {00-16}, year = {2000}, abstract = {The structures and interaction energies of guanine and uracil quartets have been determined by B3LYP hybrid density functional calculations. The total interaction energy $\Delta E^{T}$ of the $C_{4h}$-symmetric guanine quartet consisting of Hoogsteen type base pairs with two hydrogen bonds between two neighbour bases is -66.07 kcal/mol at the highest level. The uracil quartet with C6-H6...O4 interactions between the individual bases has only a small interaction energy of -20.92 kcal/mol and the interaction energy of -24.63 kcal/mol for the alternative structure with N3-H3...O4 hydrogen bonds is only slightly more negative. Cooperative effects contribute between 10 and 25\% to all interaction energies. Complexes of metal ions with G-quartets can be classified into different structure types. The one with Ca$^{2+}$ in the central cavity adopts a $C_{4h}$-symmetric structure with coplanar bases, whereas the energies of the planar and non-planar Na$^{+}$ complexes are almost identical. The small ions Li$^{+}$, Be$^{2+}$, Cu$^{+}$ and Zn$^{2+}$ prefer a non-planar $S_{4}$-symmetric structure. The lack of co-planarity probably prevents stacking of these base quartets. The central cavity is too small for K$^{+}$ ions, and therefore this ion, in contrast to all other investigated ions, favours a $C_{4}$-symmetric complex, which is 4.73 kcal/mol more stable than the $C_{4h}$-symmetric one. The distance of 1.665 {\AA} between K$^{+}$ and the root-mean-square plane of the guanine bases is approximately half of the distance between two stacked G-quartets. The total interaction energy of the alkaline earth ion complexes exceeds that of the alkali ion complexes. Within both groups of ions the interaction energy decreases with an increasing row position in the periodic table. The B3LYP and BLYP methods lead to similar structures and energies. Both methods are suitable for hydrogen-bonded biological systems. Compared with the aforementioned methods, the HCTH functional leads to longer hydrogen bonds and different relative energies for two U-quartets. Finally, we also calculated structures and relative energies with the MMFF94 force field. Contrary to all DFT methods, MMFF94 predicts bifurcated C-H...O contacts in the uracil quartet. In the G-quartet the MMFF94 hydrogen bond distances N2-H22...N7 are shorter than the DFT distances, whereas the N1-H1...O6 distances are longer.}, language = {en} } @article{AlhaddadFoerstnerGrothetal.2022, author = {Alhaddad, Samer and F{\"o}rstner, Jens and Groth, Stefan and Gr{\"u}newald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, F.J. and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, J.
and Weiser, Martin and Wende, Florian}, title = {The HighPerMeshes Framework for Numerical Algorithms on Unstructured Grids}, volume = {34}, journal = {Concurrency and Computation: Practice and Experience}, number = {14}, doi = {10.1002/cpe.6616}, year = {2022}, abstract = {Solving PDEs on unstructured grids is a cornerstone of engineering and scientific computing. Heterogeneous parallel platforms, including CPUs, GPUs, and FPGAs, enable energy-efficient and computationally demanding simulations. In this article, we introduce the HPM C++-embedded DSL that bridges the abstraction gap between the mathematical formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different programming models on the other hand. Thus, the HPM DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HPM DSL, and demonstrate its usage with three examples. The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented. A code generator and a matching back end allow the acceleration of HPM code with GPUs. Finally, the achievable performance and scalability are demonstrated for different example problems.}, language = {en} } @inproceedings{CheginiSteinkeWeiser2022, author = {Chegini, Fatemeh and Steinke, Thomas and Weiser, Martin}, title = {Efficient adaptivity for simulating cardiac electrophysiology with spectral deferred correction methods}, arxiv = {http://arxiv.org/abs/2311.07206}, year = {2022}, abstract = {The locality of solution features in cardiac electrophysiology simulations calls for adaptive methods. Due to the overhead incurred by established mesh refinement and coarsening, however, such approaches failed to accelerate the computations. Here we investigate a different route to spatial adaptivity that is based on nested subset selection for algebraic degrees of freedom in spectral deferred correction methods. This combination of algebraic adaptivity and iterative solvers for higher order collocation time stepping realizes a multirate integration with minimal overhead. This leads to moderate but significant speedups in both monodomain and cell-by-cell models of cardiac excitation, as demonstrated by four numerical examples.}, language = {en} } @inproceedings{ChristgauKnaustSteinke2022, author = {Christgau, Steffen and Knaust, Marius and Steinke, Thomas}, title = {A First Step towards Support for MPI Partitioned Communication on SYCL-programmed FPGAs}, booktitle = {IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing, H2RC@SC 2022, Dallas, TX, USA, November 13-18, 2022}, publisher = {IEEE}, doi = {10.1109/H2RC56700.2022.00007}, pages = {9 -- 17}, year = {2022}, abstract = {Version 4.0 of the Message Passing Interface standard introduced the concept of Partitioned Communication which adds support for multiple contributions to a communication buffer. Although initially targeted at multithreaded MPI applications, Partitioned Communication currently receives attention in the context of accelerators, especially GPUs. In this publication it is demonstrated that this communication concept can also be implemented for SYCL-programmed FPGAs. This includes a discussion of the design space and the presentation of a prototypical implementation.
Experimental results show that a lightweight implementation on top of an existing MPI library is possible. In addition, the presented approach also reveals issues in both the SYCL and the MPI standard which need to be addressed for improved support of the intended communication style.}, language = {en} } @inproceedings{BrookFullerSwinburneetal.2022, author = {Brook, Glenn and Fuller, Douglas and Swinburne, John and Christgau, Steffen and L{\"a}uter, Matthias and Rodrigues Pel{\'a}, Ronaldo and Stein, Lewin and Tuma, Christian and Steinke, Thomas}, title = {An Early Scalability Study of Omni-Path Express}, address = {Hamburg}, organization = {ISC 2022 IXPUG}, doi = {10.13140/RG.2.2.21353.57442}, pages = {8}, year = {2022}, abstract = {This work provides a brief description of Omni-Path Express and the current status of its development, stability, and performance. Basic benchmarks that highlight the gains of OPX over PSM2 are provided, and the results of an initial performance and scalability study of several applications are presented.}, language = {en} } @inproceedings{KnaustSeilerReinertetal.2022, author = {Knaust, Marius and Seiler, Enrico and Reinert, Knut and Steinke, Thomas}, title = {Co-Design for Energy Efficient and Fast Genomic Search: Interleaved Bloom Filter on FPGA}, booktitle = {FPGA '22: Proceedings of the 2022 ACM/SIGDA International Symposium on Field-Programmable Gate Arrays}, doi = {10.1145/3490422.3502366}, pages = {180 -- 189}, year = {2022}, abstract = {Next-Generation Sequencing technologies generate a vast and exponentially increasing amount of sequence data. The Interleaved Bloom Filter (IBF) is a novel indexing data structure which is state-of-the-art for distributing approximate queries with an in-memory data structure. With it, a main task of sequence analysis pipelines, (approximately) searching large reference data sets for sequencing reads or short sequence patterns like genes, can be significantly accelerated. To meet performance and energy-efficiency requirements, we chose a co-design approach of the IBF data structure on the FPGA platform. Further, our OpenCL-based implementation allows a seamless integration into the widely used SeqAn C++ library for biological sequence analysis. Our algorithmic design and optimization strategy takes advantage of FPGA-specific features like shift registers and the parallelization potential of many bitwise operations. We designed a well-chosen schema to partition data across the different memory domains on the FPGA platform using the Shared Virtual Memory concept. We can demonstrate significant improvements in energy efficiency of up to 19x and in performance of up to 5.6x, respectively, compared to a well-tuned, multithreaded CPU reference.}, language = {en} } @inproceedings{BaumannNoackSteinke2021, author = {Baumann, Tobias and Noack, Matthias and Steinke, Thomas}, title = {Performance Evaluation and Improvements of the PoCL Open-Source OpenCL Implementation on Intel CPUs}, booktitle = {IWOCL'21: International Workshop on OpenCL}, doi = {10.1145/3456669.3456698}, year = {2021}, abstract = {The Portable Computing Language (PoCL) is a vendor independent open-source OpenCL implementation that aims to support a variety of compute devices in a single platform. Evaluating PoCL versus the Intel OpenCL implementation reveals significant performance drawbacks of PoCL on Intel CPUs - which run 92 \% of the TOP500 list.
Using a selection of benchmarks, we identify and analyse performance issues in PoCL with a focus on scheduling and vectorisation. We propose a new CPU device-driver based on Intel Threading Building Blocks (TBB), and evaluate LLVM with respect to automatic compiler vectorisation across work-items in PoCL. Using the TBB driver, it is possible to narrow the gap to Intel OpenCL and even outperform it by a factor of up to 1.3× in our proxy application benchmark with a manual vectorisation strategy.}, language = {en} }