@article{MeyerSteinkeSuehnel2007, author = {Meyer, Michael and Steinke, Thomas and S{\"u}hnel, J{\"u}rgen}, title = {Density functional study of isoguanine tetrad and pentad sandwich complexes with alkali metal ions}, series = {Journal of Molecular Modeling}, volume = {13}, journal = {Journal of Molecular Modeling}, number = {2}, publisher = {Springer Berlin / Heidelberg}, pages = {335 -- 345}, year = {2007}, language = {en} } @inproceedings{SchmidtkeLaubenderSteinke, author = {Schmidtke, Robert and Laubender, Guido and Steinke, Thomas}, title = {Big Data Analytics on Cray XC Series DataWarp using Hadoop, Spark and Flink}, series = {CUG Proceedings}, booktitle = {CUG Proceedings}, language = {en} } @article{CordesPreissnerSteinke2003, author = {Cordes, Frank and Preissner, Robert and Steinke, Thomas}, title = {How Does a Protein Work?}, series = {BioTOPics J. of Biotechnology Berlin-Brandenburg}, volume = {18}, journal = {BioTOPics J. of Biotechnology Berlin-Brandenburg}, number = {2}, pages = {4 -- 5}, year = {2003}, language = {en} } @article{ZieglerOgurreckSteinkeetal.2010, author = {Ziegler, Alexander and Ogurreck, Malte and Steinke, Thomas and Beckmann, Felix and Prohaska, Steffen and Ziegler, Andreas}, title = {Opportunities and challenges for digital morphology}, series = {Biology Direct}, volume = {5}, journal = {Biology Direct}, number = {1}, doi = {10.1186/1745-6150-5-45}, pages = {45}, year = {2010}, language = {en} } @article{GrunzkeBreuersGesingetal.2013, author = {Grunzke, Richard and Breuers, Sebastian and Gesing, Sandra and Herres-Pawlis, Sonja and Kruse, Martin and Blunk, Dirk and de la Garza, Luis and Packschies, Lars and Sch{\"a}fer, Patrick and Sch{\"a}rfe, Charlotta and Schlemmer, Tobias and Steinke, Thomas and Schuller, Bernd and M{\"u}ller-Pfefferkorn, Ralph and J{\"a}kel, Ren{\'e} and Nagel, Wolfgang and Atkinson, Malcolm and Kr{\"u}ger, Jens}, title = {Standards-based metadata management for molecular simulations}, series = {Concurrency and Computation: Practice and Experience}, journal = {Concurrency and Computation: Practice and Experience}, doi = {10.1002/cpe.3116}, year = {2013}, language = {en} } @incollection{BaumannLaubenderLaeuteretal., author = {Baumann, Wolfgang and Laubender, Guido and L{\"a}uter, Matthias and Reinefeld, Alexander and Schimmel, Christian and Steinke, Thomas and Tuma, Christian and Wollny, Stefan}, title = {HLRN-III at Zuse Institute Berlin}, series = {Contemporary High Performance Computing: From Petascale toward Exascale, Volume Two}, booktitle = {Contemporary High Performance Computing: From Petascale toward Exascale, Volume Two}, editor = {Vetter, Jeffrey S.}, publisher = {Chapman and Hall/CRC}, isbn = {9781498700627}, pages = {81 -- 114}, language = {en} } @inproceedings{NoackFochtSteinke, author = {Noack, Matthias and Focht, Erich and Steinke, Thomas}, title = {Heterogeneous Active Messages for Offloading on the NEC SX-Aurora TSUBASA}, series = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), Heterogeneity in Computing Workshop (HCW 2019)}, booktitle = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), Heterogeneity in Computing Workshop (HCW 2019)}, abstract = {The NEC SX-Aurora TSUBASA is a new generation of vector processing architectures that combines a standard Intel Xeon host with the newly developed NEC Vector Engine co-processor cards.
One way to use these co-processors is offloading suitable parts of the program from the host to the Vector Engines. Currently, the only vendor-provided offloading solutions are the low-level Vector Engine Offloading (VEO) library, and a built-in reverse-offloading mechanism named VHcall. In this work, we extend the portable Heterogeneous Active Messages (HAM) based HAM-Offload framework with support for the NEC SX-Aurora TSUBASA. To this end, we design, implement, and evaluate two messaging protocols aimed at minimising offloading cost. This sheds some light on how to achieve fast communication between the host CPU and the Vector Engines of the NEC SX-Aurora TSUBASA. Compared with VEO, the DMA-based protocol reduces offloading overhead by a factor of 13×. The resulting framework enables users to write portable offload applications with low overhead that require neither a language extension like OpenMP nor a special language like OpenCL. Existing HAM-Offload applications are now ready to run on the NEC SX-Aurora TSUBASA.}, language = {en} } @inproceedings{ChristgauSteinke2020a, author = {Christgau, Steffen and Steinke, Thomas}, title = {Porting a Legacy CUDA Stencil Code to oneAPI}, series = {2020 IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020, New Orleans, LA, USA, May 18-22, 2020}, booktitle = {2020 IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020, New Orleans, LA, USA, May 18-22, 2020}, publisher = {IEEE}, address = {New Orleans}, isbn = {978-1-7281-7445-7}, doi = {10.1109/IPDPSW50202.2020.00070}, pages = {359 -- 367}, year = {2020}, abstract = {Recently, Intel released the oneAPI programming environment. With Data Parallel C++ (DPC++), oneAPI enables codes to target multiple hardware architectures like multi-core CPUs, GPUs, and even FPGAs or other hardware using a single source. For legacy codes that were written for Nvidia GPUs, a compatibility tool is provided which facilitates the transition to the SYCL-based DPC++ programming language. This paper presents early experiences with using both the compatibility tool and oneAPI, as well as the employed extension to the SYCL programming standard, for the tsunami simulation code easyWave. A performance study compares the original code running on Xeon processors using OpenMP as well as CUDA with the performance of the DPC++ counterpart on multicore CPUs as well as integrated GPUs.}, language = {en} } @inproceedings{ChristgauSteinke2020b, author = {Christgau, Steffen and Steinke, Thomas}, title = {Leveraging a Heterogeneous Memory System for a Legacy Fortran Code: The Interplay of Storage Class Memory, DRAM and OS}, series = {2020 IEEE/ACM Workshop on Memory Centric High Performance Computing (MCHPC)}, booktitle = {2020 IEEE/ACM Workshop on Memory Centric High Performance Computing (MCHPC)}, publisher = {IEEE}, isbn = {978-0-7381-1067-7}, doi = {10.1109/MCHPC51950.2020.00008}, pages = {17 -- 24}, year = {2020}, abstract = {Large capacity Storage Class Memory (SCM) opens new possibilities for workloads requiring a large memory footprint. We examine optimization strategies for a legacy Fortran application on systems with a heterogeneous memory configuration comprising SCM and DRAM. We present a performance study for the multigrid solver component of the large-eddy simulation framework PALM for different memory configurations with large capacity SCM.
An important optimization approach is the explicit assignment of storage locations depending on the data access characteristics to take advantage of the heterogeneous memory configuration. We demonstrate that explicit control over memory locations provides better performance than transparent hardware settings. As the page management by the OS appears to be a critical performance factor on such systems, we also study the impact of different huge page settings.}, language = {en} } @inproceedings{NoackReinefeldKrameretal., author = {Noack, Matthias and Reinefeld, Alexander and Kramer, Tobias and Steinke, Thomas}, title = {DM-HEOM: A Portable and Scalable Solver-Framework for the Hierarchical Equations of Motion}, series = {2018 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 19th IEEE Int. Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2018)}, booktitle = {2018 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 19th IEEE Int. Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2018)}, isbn = {978-1-5386-5555-9}, doi = {10.1109/IPDPSW.2018.00149}, pages = {947 -- 956}, abstract = {Computing the Hierarchical Equations of Motion (HEOM) is by itself a challenging problem, and so is writing portable production code that runs efficiently on a variety of architectures while scaling from PCs to supercomputers. We combined both challenges to push the boundaries of simulating quantum systems, and to evaluate and improve methodologies for scientific software engineering. Our contributions are threefold: We present the first distributed memory implementation of the HEOM method (DM-HEOM), we describe an interdisciplinary development workflow, and we provide guidelines and experiences for designing distributed, performance-portable HPC applications with MPI-3, OpenCL and other state-of-the-art programming models. We evaluate the resulting code on multi- and many-core CPUs as well as GPUs, and demonstrate scalability on a Cray XC40 supercomputer for the PS I molecular light harvesting complex.}, language = {en} } @inproceedings{BaumannNoackSteinke, author = {Baumann, Tobias and Noack, Matthias and Steinke, Thomas}, title = {Performance Evaluation and Improvements of the PoCL Open-Source OpenCL Implementation on Intel CPUs}, series = {IWOCL'21: International Workshop on OpenCL}, booktitle = {IWOCL'21: International Workshop on OpenCL}, doi = {10.1145/3456669.3456698}, abstract = {The Portable Computing Language (PoCL) is a vendor-independent open-source OpenCL implementation that aims to support a variety of compute devices in a single platform. Evaluating PoCL versus the Intel OpenCL implementation reveals significant performance drawbacks of PoCL on Intel CPUs, which power 92 \% of the systems on the TOP500 list. Using a selection of benchmarks, we identify and analyse performance issues in PoCL with a focus on scheduling and vectorisation. We propose a new CPU device driver based on Intel Threading Building Blocks (TBB), and evaluate LLVM with respect to automatic compiler vectorisation across work-items in PoCL.
Using the TBB driver, it is possible to narrow the gap to Intel OpenCL and even outperform it by a factor of up to 1.3× in our proxy application benchmark with a manual vectorisation strategy.}, language = {en} } @article{AlhaddadFoerstnerGrothetal., author = {Alhaddad, Samer and F{\"o}rstner, Jens and Groth, Stefan and Gr{\"u}newald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, F.J. and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, J. and Weiser, Martin and Wende, Florian}, title = {The HighPerMeshes Framework for Numerical Algorithms on Unstructured Grids}, series = {Concurrency and Computation: Practice and Experience}, volume = {34}, journal = {Concurrency and Computation: Practice and Experience}, number = {14}, doi = {10.1002/cpe.6616}, abstract = {Solving PDEs on unstructured grids is a cornerstone of engineering and scientific computing. Heterogeneous parallel platforms, including CPUs, GPUs, and FPGAs, enable energy-efficient execution of computationally demanding simulations. In this article, we introduce the HPM C++-embedded DSL that bridges the abstraction gap between the mathematical formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different programming models on the other hand. Thus, the HPM DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HPM DSL, and demonstrate its usage with three examples. The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented. A code generator and a matching back end allow the acceleration of HPM code with GPUs. Finally, the achievable performance and scalability are demonstrated for different example problems.}, language = {en} } @misc{NoackWendeZitzlsbergeretal., author = {Noack, Matthias and Wende, Florian and Zitzlsberger, Georg and Klemm, Michael and Steinke, Thomas}, title = {KART - A Runtime Compilation Library for Improving HPC Application Performance}, issn = {1438-0064}, doi = {10.1007/978-3-319-67630-2_29}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60730}, abstract = {The effectiveness of ahead-of-time compiler optimization heavily depends on the amount of available information at compile time. Input-specific information that is only available at runtime cannot be used, although it often determines loop counts, branching predicates and paths, as well as memory-access patterns. It can also be crucial for generating efficient SIMD-vectorized code. This is especially relevant for the many-core architectures paving the way to exascale computing, which are more sensitive to code optimization. We explore the design space for using input-specific information at compile time and present KART, a C++ library solution that allows developers to compile, link, and execute code (e.g., C, C++, Fortran) at application runtime. Besides mere runtime compilation of performance-critical code, KART can be used to instantiate the same code multiple times using different inputs, compilers, and options. Other techniques like auto-tuning and code generation can be integrated into a KART-enabled application instead of being scripted around it.
We evaluate runtimes and compilation costs for different synthetic kernels, and show the effectiveness for two real-world applications, HEOM and a WSM6 proxy.}, language = {en} } @inproceedings{NoackWendeSteinkeetal., author = {Noack, Matthias and Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {A Unified Programming Model for Intra- and Inter-Node Offloading on Xeon Phi Clusters}, series = {SC '14: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis. SC14, November 16-21, 2014, New Orleans, Louisiana, USA}, booktitle = {SC '14: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis. SC14, November 16-21, 2014, New Orleans, Louisiana, USA}, doi = {10.1109/SC.2014.22}, abstract = {Standard offload programming models for the Xeon Phi, e.g. Intel LEO and OpenMP 4.0, are restricted to a single compute node and hence a limited number of coprocessors. Scaling applications across a Xeon Phi cluster/supercomputer thus requires hybrid programming approaches, usually MPI+X. In this work, we present a framework based on heterogeneous active messages (HAM-Offload) that provides the means to offload work to local and remote (co)processors using a unified offload API. Since HAM-Offload provides primitives similar to those of current local offload frameworks, existing applications can be easily ported to overcome the single-node limitation while keeping the convenient offload programming model. We demonstrate the effectiveness of the framework by using it to enable a real-world application from the field of molecular dynamics to use multiple local and remote Xeon Phis. The evaluation shows good scaling behavior. Compared with LEO, performance is equal for large offloads and significantly better for small offloads.}, language = {en} } @inproceedings{WendeSteinkeReinefeld, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, series = {Proceedings of the 3rd International Conference on Exascale Applications and Software, EASC 2015}, booktitle = {Proceedings of the 3rd International Conference on Exascale Applications and Software, EASC 2015}, editor = {Gray, A. and Smith, L. and Weiland, M.}, publisher = {The University of Edinburgh}, isbn = {978-0-9926615-1-9}, pages = {13 -- 18}, language = {en} } @inproceedings{WendeCordesSteinke, author = {Wende, Florian and Cordes, Frank and Steinke, Thomas}, title = {Concurrent Kernel Execution on Xeon Phi within Parallel Heterogeneous Workloads}, series = {Euro-Par 2014: Parallel Processing. 20th International Conference, Porto, Portugal, August 25-29, 2014, Proceedings}, volume = {8632}, booktitle = {Euro-Par 2014: Parallel Processing. 20th International Conference, Porto, Portugal, August 25-29, 2014, Proceedings}, doi = {10.1007/978-3-319-09873-9_66}, pages = {788 -- 799}, language = {en} } @inproceedings{WendeSteinke, author = {Wende, Florian and Steinke, Thomas}, title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU}, series = {SC '13: Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis}, booktitle = {SC '13: Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis}, publisher = {ACM}, address = {New York, NY, USA}, articleno = {83}, doi = {10.1145/2503210.2503254}, year = {2013}, language = {en} } @inproceedings{WendeLaubenderSteinke, author = {Wende, Florian and Laubender, Guido and Steinke, Thomas}, title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned}, series = {CUG2014 Proceedings}, booktitle = {CUG2014 Proceedings}, language = {en} } @misc{WendeSteinkeKlemmetal., author = {Wende, Florian and Steinke, Thomas and Klemm, Michael and Reinefeld, Alexander}, title = {Concurrent Kernel Offloading}, series = {High Performance Parallelism Pearls}, journal = {High Performance Parallelism Pearls}, editor = {Reinders, James and Jeffers, Jim}, publisher = {Morgan Kaufmann / Elsevier}, isbn = {978-0128021187}, language = {en} } @inproceedings{WendeCordesSteinke2012, author = {Wende, Florian and Cordes, Frank and Steinke, Thomas}, title = {On Improving the Performance of Multi-threaded CUDA Applications with Concurrent Kernel Execution by Kernel Reordering}, series = {Application Accelerators in High Performance Computing (SAAHPC), 2012 Symposium on}, booktitle = {Application Accelerators in High Performance Computing (SAAHPC), 2012 Symposium on}, doi = {10.1109/SAAHPC.2012.12}, pages = {74 -- 83}, year = {2012}, language = {en} }