@inproceedings{WendeMarsmanSteinke2016,
  author = {Wende, Florian and Marsman, Martijn and Steinke, Thomas},
  title = {On Enhancing 3D-FFT Performance in VASP},
  booktitle = {CUG Proceedings},
  year = {2016},
  language = {en}
}

@inproceedings{NoackReinefeldKrameretal.2018,
  author = {Noack, Matthias and Reinefeld, Alexander and Kramer, Tobias and Steinke, Thomas},
  title = {DM-HEOM: A Portable and Scalable Solver-Framework for the Hierarchical Equations of Motion},
  booktitle = {2018 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 19th IEEE Int. Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2018)},
  isbn = {978-1-5386-5555-9},
  doi = {10.1109/IPDPSW.2018.00149},
  pages = {947 -- 956},
  year = {2018},
  abstract = {Computing the Hierarchical Equations of Motion (HEOM) is by itself a challenging problem, and so is writing portable production code that runs efficiently on a variety of architectures while scaling from PCs to supercomputers. We combined both challenges to push the boundaries of simulating quantum systems, and to evaluate and improve methodologies for scientific software engineering. Our contributions are threefold: We present the first distributed memory implementation of the HEOM method (DM-HEOM), we describe an interdisciplinary development workflow, and we provide guidelines and experiences for designing distributed, performance-portable HPC applications with MPI-3, OpenCL and other state-of-the-art programming models. We evaluated the resulting code on multi- and many-core CPUs as well as GPUs, and demonstrate scalability on a Cray XC40 supercomputer for the PS I molecular light harvesting complex.},
  language = {en}
}

@inproceedings{KnaustMayerSteinke2019,
  author = {Knaust, Marius and Mayer, Florian and Steinke, Thomas},
  title = {OpenMP to FPGA Offloading Prototype Using OpenCL SDK},
  booktitle = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)},
  doi = {10.1109/IPDPSW.2019.00072},
  pages = {387 -- 390},
  year = {2019},
  abstract = {Field-programmable gate arrays (FPGAs) are of great interest for future high-performance computing and data analytics systems, since they are capable of efficient, highly parallel data processing. Even though high-level synthesis has become more popular in recent years, the effort of porting existing scientific software onto FPGAs is still considerable. We propose to use OpenMP target offloading as a solution, which we implement in a first prototype, making use of the preexisting OpenCL SDK of the FPGA vendor. Early results demonstrate the feasibility of this approach and also reveal that further optimizations will be necessary so that code can be written in an FPGA-agnostic way.},
  language = {en}
}

@inproceedings{ChristgauSteinke2020,
  author = {Christgau, Steffen and Steinke, Thomas},
  title = {Porting a Legacy CUDA Stencil Code to oneAPI},
  booktitle = {2020 IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020, New Orleans, LA, USA, May 18-22, 2020},
  publisher = {IEEE},
  address = {New Orleans},
  isbn = {978-1-7281-7445-7},
  doi = {10.1109/IPDPSW50202.2020.00070},
  pages = {359 -- 367},
  year = {2020},
  abstract = {Recently, Intel released the oneAPI programming environment. With Data Parallel C++ (DPC++), oneAPI enables codes to target multiple hardware architectures like multi-core CPUs, GPUs, and even FPGAs or other hardware using a single source.
For legacy codes that were written for Nvidia GPUs, a compatibility tool is provided which facilitates the transition to the SYCL-based DPC++ programming language. This paper presents early experiences in using both the compatibility tool and oneAPI, as well as the employed extension to the SYCL programming standard, for the tsunami simulation code easyWave. A performance study compares the original code running on Xeon processors using OpenMP as well as CUDA with the performance of the DPC++ counterpart on multi-core CPUs and integrated GPUs.},
  language = {en}
}

@article{AlhaddadFoerstnerGrothetal.2021,
  author = {Alhaddad, Samer and F{\"o}rstner, Jens and Groth, Stefan and Gr{\"u}newald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, Franz-Josef and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, J{\"u}rgen and Weiser, Martin and Wende, Florian},
  title = {HighPerMeshes - A Domain-Specific Language for Numerical Algorithms on Unstructured Grids},
  journal = {Euro-Par 2020: Parallel Processing Workshops},
  publisher = {Springer},
  doi = {10.1007/978-3-030-71593-9_15},
  pages = {185 -- 196},
  year = {2021},
  abstract = {Solving partial differential equations on unstructured grids is a cornerstone of engineering and scientific computing. Nowadays, heterogeneous parallel platforms with CPUs, GPUs, and FPGAs enable energy-efficient and computationally demanding simulations. We developed the HighPerMeshes C++-embedded Domain-Specific Language (DSL) for bridging the abstraction gap between the mathematical and algorithmic formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different parallel programming and runtime models on the other hand. Thus, the HighPerMeshes DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HighPerMeshes DSL, and demonstrate its usage with three examples: a Poisson and a monodomain problem, respectively, solved by the continuous finite element method, and the discontinuous Galerkin method for Maxwell's equation. The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented. Finally, the achievable performance and scalability are demonstrated for a typical example problem on a multi-core CPU cluster.},
  language = {en}
}

@inproceedings{ChristgauSteinke2020a,
  author = {Christgau, Steffen and Steinke, Thomas},
  title = {Leveraging a Heterogeneous Memory System for a Legacy Fortran Code: The Interplay of Storage Class Memory, DRAM and OS},
  booktitle = {2020 IEEE/ACM Workshop on Memory Centric High Performance Computing (MCHPC)},
  publisher = {IEEE},
  isbn = {978-0-7381-1067-7},
  doi = {10.1109/MCHPC51950.2020.00008},
  pages = {17 -- 24},
  year = {2020},
  abstract = {Large-capacity Storage Class Memory (SCM) opens new possibilities for workloads requiring a large memory footprint. We examine optimization strategies for a legacy Fortran application on systems with a heterogeneous memory configuration comprising SCM and DRAM. We present a performance study for the multigrid solver component of the large-eddy simulation framework PALM for different memory configurations with large-capacity SCM. An important optimization approach is the explicit assignment of storage locations depending on the data access characteristics to take advantage of the heterogeneous memory configuration.
We are able to demonstrate that explicit control over memory locations provides better performance than transparent hardware settings. Since the page management by the OS turns out to be a critical performance factor on such systems, we also study the impact of different huge page settings.},
  language = {en}
}

@article{KnoopGronemeierSuehringetal.2017,
  author = {Knoop, Helge and Gronemeier, Tobias and S{\"u}hring, Matthias and Steinbach, Peter and Noack, Matthias and Wende, Florian and Steinke, Thomas and Knigge, Christoph and Raasch, Siegfried and Ketelsen, Klaus},
  title = {Porting the MPI-parallelized LES model PALM to multi-GPU systems and many integrated core processors: an experience report},
  journal = {International Journal of Computational Science and Engineering},
  edition = {Special Issue on: Novel Strategies for Programming Accelerators},
  publisher = {Inderscience},
  year = {2017},
  abstract = {The computational power and availability of graphics processing units (GPUs), such as the Nvidia Tesla, and Many Integrated Core (MIC) processors, such as the Intel Xeon Phi, on high performance computing (HPC) systems are rapidly evolving. However, HPC applications need to be ported to take advantage of such hardware. This paper is a report on our experience of porting the MPI+OpenMP parallelised large-eddy simulation model (PALM) to multi-GPU as well as to MIC processor environments using the directive-based high-level programming paradigms OpenACC and OpenMP, respectively. PALM is a Fortran-based computational fluid dynamics software package, used for the simulation of atmospheric and oceanic boundary layers to answer questions linked to fundamental atmospheric turbulence research, urban modelling, aircraft safety and cloud physics. Development of PALM started in 1997; the project currently comprises 140 kLOC and is used on HPC farms of up to 43,200 cores. The main challenges we faced during the porting process are the size and complexity of the PALM code base, its inconsistent modularisation and the complete lack of a unit-test suite. We report the methods used to identify performance issues as well as our experiences with state-of-the-art profiling tools. Moreover, we outline the required porting steps in order to properly execute our code on GPUs and MIC processors, describe the problems and bottlenecks that we encountered during the porting process, and present separate performance tests for both architectures. These performance tests, however, do not provide any benchmark information that compares the performance of the ported code between the two architectures.},
  language = {en}
}

@incollection{NoackWendeZitzlsbergeretal.2017,
  author = {Noack, Matthias and Wende, Florian and Zitzlsberger, Georg and Klemm, Michael and Steinke, Thomas},
  title = {KART - A Runtime Compilation Library for Improving HPC Application Performance},
  volume = {10524},
  booktitle = {High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS, Frankfurt, Germany, June 18-22, 2017, Revised Selected Papers},
  publisher = {Springer International Publishing},
  doi = {10.1007/978-3-319-67630-2_29},
  pages = {389 -- 403},
  year = {2017},
  abstract = {The effectiveness of ahead-of-time compiler optimization heavily depends on the amount of available information at compile time.
Input-specific information that is only available at runtime cannot be used, although it often determines loop counts, branching predicates and paths, as well as memory-access patterns. It can also be crucial for generating efficient SIMD-vectorized code. This is especially relevant for the many-core architectures paving the way to exascale computing, which are more sensitive to code optimization. We explore the design space for using input-specific information at compile time and present KART, a C++ library solution that allows developers to compile, link, and execute code (e.g., C, C++, Fortran) at application runtime. Besides mere runtime compilation of performance-critical code, KART can be used to instantiate the same code multiple times using different inputs, compilers, and options. Other techniques like auto-tuning and code generation can be integrated into a KART-enabled application instead of being scripted around it. We evaluate runtimes and compilation costs for different synthetic kernels, and show the effectiveness for two real-world applications, HEOM and a WSM6 proxy.},
  language = {en}
}

@inproceedings{NoackFochtSteinke2019,
  author = {Noack, Matthias and Focht, Erich and Steinke, Thomas},
  title = {Heterogeneous Active Messages for Offloading on the NEC SX-Aurora TSUBASA},
  booktitle = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), Heterogeneity in Computing Workshop (HCW 2019)},
  year = {2019},
  abstract = {The NEC SX-Aurora TSUBASA is a new generation of vector processing architectures that combines a standard Intel Xeon host with the newly developed NEC Vector Engine co-processor cards. One way to use these co-processors is offloading suitable parts of the program from the host to the Vector Engines. Currently, the only vendor-provided offloading solutions are the low-level Vector Engine Offloading (VEO) library and a built-in reverse-offloading mechanism named VHcall. In this work, we extend the portable Heterogeneous Active Messages (HAM) based HAM-Offload framework with support for the NEC SX-Aurora TSUBASA. To this end, we design, implement, and evaluate two messaging protocols aimed at minimising offloading cost. This sheds some light on how to achieve fast communication between the host CPU and the Vector Engines of the NEC SX-Aurora TSUBASA. Compared with VEO, the DMA-based protocol reduces the offloading overhead by a factor of 13. The resulting framework enables users to write portable offload applications with low overhead that require neither a language extension like OpenMP nor a special language like OpenCL. Existing HAM-Offload applications are now ready to run on the NEC SX-Aurora TSUBASA.},
  language = {en}
}

@misc{StallingSteinke1996,
  author = {Stalling, Detlev and Steinke, Thomas},
  title = {Visualization of Vector Fields in Quantum Chemistry},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-2124},
  number = {SC-96-01},
  year = {1996},
  abstract = {Many interesting phenomena in molecular systems, like interactions between macro-molecules, protein-substrate docking, or channeling processes in membranes, are governed to a high degree by classical Coulomb or van-der-Waals forces. The visualization of these force fields is important for verifying numerical simulations. Moreover, by inspecting the forces visually we can gain deeper insight into the molecular processes. Up to now, the visualization of vector fields has been quite unusual in computational chemistry.
In fact, many commercial software packages do not support this topic at all. The reason is not that vector fields are considered unimportant, but mainly the lack of adequate visualization methods. In this paper we survey a number of methods for vector field visualization, ranging from well-known concepts like arrow or streamline plots to more advanced techniques like line integral convolution, and show how these can be applied to computational chemistry. A combination of the most meaningful methods in an interactive 3D visualization environment can provide a powerful toolbox for analysing simulations in molecular dynamics.},
  language = {en}
}

@misc{BaumannBuschGottschewskietal.1997,
  author = {Baumann, Wolfgang and Busch, Hubert and Gottschewski, J{\"u}rgen and Steinke, Thomas and St{\"u}ben, Hinnerk},
  title = {Berliner Landesh{\"o}chstleistungsrechner Parallelrechner Cray T3D/T3E: Betrieb, Nutzung, Projekte, Perspektiven f{\"u}r den Ausbau},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5504},
  number = {TR-97-08},
  year = {1997},
  abstract = {For almost three years, the Konrad-Zuse-Zentrum f{\"u}r Informationstechnik Berlin (ZIB) has operated parallel computers of the highest performance class in regular data-center production. As early as May 1995, ZIB reported on its experiences with what was then the most powerful parallel computer in Germany. ZIB's overall concept continues to regard a high-performance computer as an indispensable component of High Performance Scientific Computing (HPSC) at ZIB. This report describes the current configuration, operational experiences, and system usage, as well as typical compute performance achieved by individual application programs. The report concludes with descriptions of the research areas and groups using the machine, and of the requirements for the system's expansion that derive from their work.},
  language = {de}
}

@inproceedings{SteinkePeterBorchert2010,
  author = {Steinke, Thomas and Peter, Kathrin and Borchert, Sebastian},
  title = {Efficiency Considerations of Cauchy Reed-Solomon Implementations on Accelerator and Multi-Core Platforms},
  booktitle = {Symposium on Application Accelerators in High Performance Computing (SAAHPC)},
  address = {Knoxville, USA},
  year = {2010},
  language = {en}
}

@misc{BirkenheuerBlunkBreuersetal.2010,
  author = {Birkenheuer, Georg and Blunk, Dirk and Breuers, Sebastian and Brinkmann, Andr{\'e} and d. Santos Vieira, I. and Fels, Gregor and Gesing, Sandra and Grunzke, Richard and Herres-Pawlis, Sonja and Kohlbacher, Oliver and Kruber, Nico and Kr{\"u}ger, Jens and Lang, U. and Packschies, Lars and M{\"u}ller-Pfefferkorn, Ralph and Sch{\"a}fer, Patrick and Schmalz, Hans-G{\"u}nther and Steinke, Thomas and Warzecha, Klaus Dieter and Wewior, Martin},
  title = {MoSGrid - A Molecular Simulation Grid as a new tool in Computational Chemistry, Biology and Material Science},
  journal = {6. German Conference on Chemoinformatics},
  year = {2010},
  language = {en}
}

@inproceedings{KruegerBirkenheuerBlunketal.2010,
  author = {Kr{\"u}ger, Jens and Birkenheuer, Georg and Blunk, Dirk and Breuers, Sebastian and Brinkmann, Andr{\'e} and Fels, Gregor and Gesing, Sandra and Grunzke, Richard and Herres-Pawlis, Sonja and Kohlbacher, Oliver and Kruber, Nico and Lang, U.
and Packschies, Lars and M{\"u}ller-Pfefferkorn, Ralph and Sch{\"a}fer, Patrick and Schmalz, Hans-G{\"u}nther and Steinke, Thomas and Warzecha, Klaus Dieter and Wewior, Martin},
  title = {Molecular Simulation Grid},
  booktitle = {6. German Conference on Chemoinformatics},
  publisher = {Gesellschaft Deutscher Chemiker (GDCh)},
  year = {2010},
  language = {en}
}

@article{MayKreuchwigSteinkeetal.2010,
  author = {May, Patrick and Kreuchwig, A. and Steinke, Thomas and Koch, Ina},
  title = {PTGL: a database for secondary structure-based protein topologies},
  volume = {38},
  journal = {Nucleic Acids Res.},
  pages = {D326 -- 330},
  year = {2010},
  language = {en}
}

@inproceedings{DickmannKasparLoehnhardtetal.2009,
  author = {Dickmann, Frank and Kaspar, Mathias and L{\"o}hnhardt, Benjamin and Kepper, Nick and Viezens, Fred and Hertel, Frank and Lesnussa, Michael and Mohammed, Yassene and Thiel, Andreas and Steinke, Thomas and Bernarding, Johannes and Krefting, Dagmar and Knoch, Tobias and Sax, Ulrich},
  title = {Visualization in Health Grid Environments: A Novel Service and Business Approach},
  volume = {5745},
  booktitle = {GECON},
  publisher = {Springer},
  doi = {10.1007/978-3-642-03864-8_12},
  pages = {150 -- 159},
  year = {2009},
  language = {en}
}

@article{BauerRotherMooretal.2009,
  author = {Bauer, Raphael and Rother, Kristian and Moor, Peter and Reinert, Knut and Steinke, Thomas and Bujnicki, Janusz and Preissner, Robert},
  title = {Fast Structural Alignment of Biomolecules Using a Hash Table, N-Grams and String Descriptors},
  volume = {2},
  journal = {Algorithms},
  number = {2},
  doi = {10.3390/a2020692},
  pages = {692 -- 709},
  year = {2009},
  language = {en}
}

@inproceedings{MayKlauBaueretal.2007,
  author = {May, Patrick and Klau, Gunnar and Bauer, Markus and Steinke, Thomas},
  title = {Accelerated microRNA-Precursor Detection Using the Smith-Waterman Algorithm on FPGAs},
  volume = {4360},
  booktitle = {GCCB},
  editor = {Dubitzky, W.},
  publisher = {Springer},
  pages = {19 -- 32},
  year = {2007},
  language = {en}
}

@article{MeyerSteinkeSuehnel2007,
  author = {Meyer, Michael and Steinke, Thomas and S{\"u}hnel, J{\"u}rgen},
  title = {Density functional study of isoguanine tetrad and pentad sandwich complexes with alkali metal ions},
  volume = {13},
  journal = {Journal of Molecular Modeling},
  number = {2},
  publisher = {Springer Berlin / Heidelberg},
  pages = {335 -- 345},
  year = {2007},
  language = {en}
}

@inproceedings{KotthaPeterSteinkeetal.2007,
  author = {Kottha, Samatha and Peter, Kathrin and Steinke, Thomas and Bart, Julian and Falkner, J{\"u}rgen and Weisbecker, Anette and Viezens, Fred and Mohammed, Yassene and Sax, Ulrich and Hoheisel, Andreas and Ernst, Thilo and Sommerfeld, Dietmar and Krefting, Dagmar and Vossberg, Michael},
  title = {Medical Image Processing in MediGRID},
  booktitle = {German E-Science Conference},
  publisher = {Max Planck Digital Library},
  address = {Baden-Baden},
  year = {2007},
  language = {en}
}