@misc{ChristgauSchnor2019,
  author   = {Christgau, Steffen and Schnor, Bettina},
  title    = {MPI Passive Target Synchronization on a Non-Cache-Coherent Shared-Memory Processor},
  issn     = {1438-0064},
  doi      = {10.12752/7477},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74774},
  year     = {2019},
  abstract = {MPI passive target synchronization offers exclusive and shared locks. These are the building blocks for the implementation of applications with Readers \& Writers semantics, for example distributed hash tables. This paper discusses the implementation of MPI passive target synchronization on a non-cache-coherent multicore, the Intel Single-Chip Cloud Computer. The considered algorithms differ in their communication style (message-based versus shared memory), their data structures (centralized versus distributed) and their semantics (with/without Writer preference). It is shown that shared-memory approaches scale very well and deliver good performance, even in the absence of cache coherence.},
  language = {en}
}

@article{ChristgauSchnor2020,
  author   = {Christgau, Steffen and Schnor, Bettina},
  title    = {Comparing MPI Passive Target Synchronization on a Non-Cache-Coherent Shared-Memory Processor},
  journal  = {Mitteilungen - Gesellschaft f{\"u}r Informatik e. V., Parallel-Algorithmen und Rechnerstrukturen, ISSN 0177-0454, 28. PARS-Workshop},
  number   = {35},
  pages    = {121--132},
  year     = {2020},
  language = {en}
}

@inproceedings{ChristgauEveringhamMikolajczaketal.2023,
  author    = {Christgau, Steffen and Everingham, Dylan and Mikolajczak, Florian and Schelten, Niklas and Schnor, Bettina and Schroetter, Max and Stabernack, Benno and Steinert, Fritjof},
  title     = {Enabling Communication with FPGA-based Network-attached Accelerators for HPC Workloads},
  booktitle = {Proceedings of the SC'23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, SC-W 2023, Denver, CO, USA, November 12-17, 2023},
  publisher = {ACM},
  doi       = {10.1145/3624062.3624540},
  pages     = {530--538},
  year      = {2023},
  abstract  = {The use of stand-alone, network-coupled Field Programmable Gate Array (FPGA) accelerators is intended to significantly increase the energy efficiency of HPC applications and thus also of HPC data centers. A loose coupling between the nodes of the HPC data center and the FPGAs is established through the high-speed network of the data center. This allows greater flexibility in combining different nodes and accelerators. Both the resulting energy savings and the increased flexibility through the network connection enable the economical use of FPGAs. This work presents a communication stack to integrate the so-called Network-attached Accelerator (NAA) into the HPC data center. A low-level Remote Direct Memory Access (RDMA) Application Programming Interface (API) and a high-level Remote Procedure Call (RPC) API are designed on top of the RDMA over Converged Ethernet v2 (RoCEv2) communication stack.
The experimental results over 100 Gbps RoCEv2 show that our design and implementation deliver performance close to the theoretical maximum.},
  language  = {en}
}

@inproceedings{ChristgauDylanLuebkeetal.2025,
  author    = {Christgau, Steffen and Everingham, Dylan and L{\"u}bke, Max and De Lucia, Marco and Puhan, Danny and Schelten, Niklas and Schnor, Bettina and Signer, Hannes and Spazier, Johannes and Stabernack, Benno and Steinert, Fritjof},
  title     = {On the Usability and Energy Efficiency of High-Level Synthesis for FPGA-based Network-Attached Accelerators},
  booktitle = {2025 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)},
  doi       = {10.1109/IPDPSW66978.2025.00139},
  pages     = {886--895},
  year      = {2025},
  abstract  = {Heterogeneity in high performance computing systems is one of the most promising approaches towards more energy-efficient computing on the one hand and satisfying the rising demand for global computation capacity on the other hand. Besides well-known key components like CPUs and GPGPUs, domain-specific accelerators like TPUs and FPGAs are known for their energy efficiency. This is especially true for highly specialized use cases. Network-attached accelerators promise more scalability and flexibility for FPGA usage in HPC environments. Easy and efficient programming of those accelerators is, however, still an open issue. Based on a framework for such accelerators which enables decoupling of FPGAs from their host system, we present a workflow using High-Level Synthesis (HLS) to offload application kernels to them. We evaluate this approach against a conventional Hardware Description Language (HDL) based workflow. In addition, we introduce the energy measurement tool EMA and assess the energy efficiency of both the HLS and the HDL designs.},
  language  = {en}
}