@inproceedings{ChristgauSteinke,
  author = {Christgau, Steffen and Steinke, Thomas},
  title = {Porting a Legacy CUDA Stencil Code to oneAPI},
  booktitle = {2020 IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2020, New Orleans, LA, USA, May 18-22, 2020},
  publisher = {IEEE},
  address = {New Orleans},
  isbn = {978-1-7281-7445-7},
  doi = {10.1109/IPDPSW50202.2020.00070},
  pages = {359--367},
  abstract = {Recently, Intel released the oneAPI programming environment. With Data Parallel C++ (DPC++), oneAPI enables codes to target multiple hardware architectures like multi-core CPUs, GPUs, and even FPGAs or other hardware using a single source. For legacy codes that were written for Nvidia GPUs, a compatibility tool is provided which facilitates the transition to the SYCL-based DPC++ programming language. This paper presents early experiences with both the compatibility tool and oneAPI, as well as the employed extension to the SYCL programming standard, for the tsunami simulation code easyWave. A performance study compares the original code running on Xeon processors using OpenMP as well as CUDA with the performance of the DPC++ counterpart on multi-core CPUs and integrated GPUs.},
  language = {en}
}

@article{ChristgauSchnor,
  author = {Christgau, Steffen and Schnor, Bettina},
  title = {Comparing MPI Passive Target Synchronization on a Non-Cache-Coherent Shared-Memory Processor},
  journal = {Mitteilungen - Gesellschaft f{\"u}r Informatik e. V., Parallel-Algorithmen und Rechnerstrukturen, ISSN 0177-0454, 28. PARS-Workshop},
  number = {35},
  pages = {121--132},
  language = {en}
}

@inproceedings{ChristgauSteinkeMCHPC,
  author = {Christgau, Steffen and Steinke, Thomas},
  title = {Leveraging a Heterogeneous Memory System for a Legacy Fortran Code: The Interplay of Storage Class Memory, DRAM and OS},
  booktitle = {2020 IEEE/ACM Workshop on Memory Centric High Performance Computing (MCHPC)},
  publisher = {IEEE},
  isbn = {978-0-7381-1067-7},
  doi = {10.1109/MCHPC51950.2020.00008},
  pages = {17--24},
  abstract = {Large-capacity Storage Class Memory (SCM) opens new possibilities for workloads requiring a large memory footprint. We examine optimization strategies for a legacy Fortran application on systems with a heterogeneous memory configuration comprising SCM and DRAM. We present a performance study for the multigrid solver component of the large-eddy simulation framework PALM for different memory configurations with large-capacity SCM. An important optimization approach is the explicit assignment of storage locations depending on the data access characteristic to take advantage of the heterogeneous memory configuration. We are able to demonstrate that explicit control over memory locations provides better performance compared to transparent hardware settings.
As the page management by the OS appears as a critical performance factor on the aforementioned systems, we study the impact of different huge page settings.},
  language = {en}
}

@misc{ChristgauSchnorReport,
  author = {Christgau, Steffen and Schnor, Bettina},
  title = {MPI Passive Target Synchronization on a Non-Cache-Coherent Shared-Memory Processor},
  issn = {1438-0064},
  doi = {10.12752/7477},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74774},
  abstract = {MPI passive target synchronization offers exclusive and shared locks. These are the building blocks for the implementation of applications with Readers \& Writers semantics, like for example distributed hash tables. This paper discusses the implementation of MPI passive target synchronization on a non-cache-coherent multicore, the Intel Single-Chip Cloud Computer. The considered algorithms differ in their communication style (message-based versus shared memory), their data structures (centralized versus distributed) and their semantics (with/without Writer preference). It is shown that shared-memory approaches scale very well and deliver good performance, even in the absence of cache coherence.},
  language = {en}
}

@inproceedings{BrookFullerSwinburneetal.,
  author = {Brook, Glenn and Fuller, Douglas and Swinburne, John and Christgau, Steffen and L{\"a}uter, Matthias and Rodrigues Pel{\'a}, Ronaldo and Stein, Lewin and Tuma, Christian and Steinke, Thomas},
  title = {An Early Scalability Study of Omni-Path Express},
  address = {Hamburg},
  organization = {ISC 2022 IXPUG},
  doi = {10.13140/RG.2.2.21353.57442},
  pages = {8},
  abstract = {This work provides a brief description of Omni-Path Express and the current status of its development, stability, and performance. Basic benchmarks that highlight the gains of OPX over PSM2 are provided, and the results of an initial performance and scalability study of several applications are presented.},
  language = {en}
}

@inproceedings{ChristgauKnaustSteinke,
  author = {Christgau, Steffen and Knaust, Marius and Steinke, Thomas},
  title = {A First Step towards Support for MPI Partitioned Communication on SYCL-programmed FPGAs},
  booktitle = {IEEE/ACM International Workshop on Heterogeneous High-performance Reconfigurable Computing, H2RC@SC 2022, Dallas, TX, USA, November 13-18, 2022},
  publisher = {IEEE},
  doi = {10.1109/H2RC56700.2022.00007},
  pages = {9--17},
  abstract = {Version 4.0 of the Message Passing Interface standard introduced the concept of Partitioned Communication, which adds support for multiple contributions to a communication buffer. Although initially targeted at multithreaded MPI applications, Partitioned Communication currently receives attention in the context of accelerators, especially GPUs. In this publication it is demonstrated that this communication concept can also be implemented for SYCL-programmed FPGAs. This includes a discussion of the design space and the presentation of a prototypical implementation. Experimental results show that a lightweight implementation on top of an existing MPI library is possible.
In addition, the presented approach also reveals issues in both the SYCL and the MPI standard which need to be addressed for improved support of the intended communication style.},
  language = {en}
}

@inproceedings{ChristgauEveringhamMikolajczaketal.,
  author = {Christgau, Steffen and Everingham, Dylan and Mikolajczak, Florian and Schelten, Niklas and Schnor, Bettina and Schroetter, Max and Stabernack, Benno and Steinert, Fritjof},
  title = {Enabling Communication with FPGA-based Network-attached Accelerators for HPC Workloads},
  booktitle = {Proceedings of the SC'23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, SC-W 2023, Denver, CO, USA, November 12-17, 2023},
  publisher = {ACM},
  doi = {10.1145/3624062.3624540},
  pages = {530--538},
  abstract = {The use of stand-alone, network-coupled Field Programmable Gate Array (FPGA) accelerators is intended to significantly increase the energy efficiency of HPC applications and thus also of HPC data centers. A loose coupling between the nodes of the HPC data center and the FPGAs is established through the high-speed network of the data center. This allows greater flexibility in combining different nodes and accelerators. Both the resulting energy savings and the increased flexibility through the network connection enable the economical use of FPGAs. This work presents a communication stack to integrate the so-called Network-attached Accelerator (NAA) into the HPC data center. A low-level Remote Direct Memory Access (RDMA) Application Programming Interface (API) and a high-level Remote Procedure Call (RPC) API are designed on top of the RDMA over Converged Ethernet v2 (RoCEv2) communication stack. The experimental results over 100 Gbps RoCEv2 show that our design and implementation deliver performance close to the theoretical maximum.},
  language = {en}
}

@inproceedings{SkoblinHoeflingChristgau,
  author = {Skoblin, Viktor and H{\"o}fling, Felix and Christgau, Steffen},
  title = {Gaining Cross-Platform Parallelism for HAL's Molecular Dynamics Package using SYCL},
  booktitle = {29. PARS-Workshop 2023},
  volume = {36},
  issn = {0177-0454},
  abstract = {Molecular dynamics simulations are one of the methods in scientific computing that benefit from GPU acceleration. For those devices, SYCL is a promising API for writing portable codes. In this paper, we present the case study of HAL's MD package, which has been successfully migrated from CUDA to SYCL. We describe the different strategies that we followed in the process of porting the code. Following these strategies, we achieved code portability across major GPU vendors. Depending on the actual kernels, both significant performance improvements and regressions are observed. As a side effect of the migration process, we obtained impressive speedups also for execution on CPUs.},
  language = {en}
}