@article{AlhaddadFoerstnerGrothetal., author = {Alhaddad, Samer and F{\"o}rstner, Jens and Groth, Stefan and Gr{\"u}newald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, Franz-Josef and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, J{\"u}rgen and Weiser, Martin and Wende, Florian}, title = {The HighPerMeshes Framework for Numerical Algorithms on Unstructured Grids}, series = {Concurrency and Computation: Practice and Experience}, volume = {34}, journal = {Concurrency and Computation: Practice and Experience}, number = {14}, doi = {10.1002/cpe.6616}, abstract = {Solving PDEs on unstructured grids is a cornerstone of engineering and scientific computing. Heterogeneous parallel platforms, including CPUs, GPUs, and FPGAs, enable energy-efficient and computationally demanding simulations. In this article, we introduce the HPM C++-embedded DSL that bridges the abstraction gap between the mathematical formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different programming models on the other hand. Thus, the HPM DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HPM DSL, and demonstrate its usage with three examples. The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented. A code generator and a matching back end allow the acceleration of HPM code with GPUs. Finally, the achievable performance and scalability are demonstrated for different example problems.}, language = {en} } @misc{NoackWendeZitzlsbergeretal., author = {Noack, Matthias and Wende, Florian and Zitzlsberger, Georg and Klemm, Michael and Steinke, Thomas}, title = {KART - A Runtime Compilation Library for Improving HPC Application Performance}, issn = {1438-0064}, doi = {10.1007/978-3-319-67630-2_29}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60730}, abstract = {The effectiveness of ahead-of-time compiler optimization heavily depends on the amount of available information at compile time. Input-specific information that is only available at runtime cannot be used, although it often determines loop counts, branching predicates and paths, as well as memory-access patterns. It can also be crucial for generating efficient SIMD-vectorized code. This is especially relevant for the many-core architectures paving the way to exascale computing, which are more sensitive to code optimization. We explore the design space for using input-specific information at compile time and present KART, a C++ library solution that allows developers to compile, link, and execute code (e.g., C, C++, Fortran) at application runtime. Besides mere runtime compilation of performance-critical code, KART can be used to instantiate the same code multiple times using different inputs, compilers, and options. Other techniques like auto-tuning and code generation can be integrated into a KART-enabled application instead of being scripted around it.
We evaluate runtimes and compilation costs for different synthetic kernels, and show the effectiveness for two real-world applications, HEOM and a WSM6 proxy.}, language = {en} } @inproceedings{NoackWendeSteinkeetal., author = {Noack, Matthias and Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {A Unified Programming Model for Intra- and Inter-Node Offloading on Xeon Phi Clusters}, series = {SC '14: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis. SC14, November 16-21, 2014, New Orleans, Louisiana, USA}, booktitle = {SC '14: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis. SC14, November 16-21, 2014, New Orleans, Louisiana, USA}, doi = {10.1109/SC.2014.22}, abstract = {Standard offload programming models for the Xeon Phi, e.g. Intel LEO and OpenMP 4.0, are restricted to a single compute node and hence a limited number of coprocessors. Scaling applications across a Xeon Phi cluster/supercomputer thus requires hybrid programming approaches, usually MPI+X. In this work, we present a framework based on heterogeneous active messages (HAM-Offload) that provides the means to offload work to local and remote (co)processors using a unified offload API. Since HAM-Offload provides similar primitives as current local offload frameworks, existing applications can be easily ported to overcome the single-node limitation while keeping the convenient offload programming model. We demonstrate the effectiveness of the framework by using it to enable a real-world application from the field of molecular dynamics to use multiple local and remote Xeon Phis. The evaluation shows good scaling behavior. Compared with LEO, performance is equal for large offloads and significantly better for small offloads.}, language = {en} } @inproceedings{ZhaoMarsmanWendeetal., author = {Zhao, Zhengji and Marsman, Martijn and Wende, Florian and Kim, Jeongnim}, title = {Performance of Hybrid MPI/OpenMP VASP on Cray XC40 Based on Intel Knights Landing Many Integrated Core Architecture}, abstract = {With the recent installation of Cori, a Cray XC40 system with Intel Xeon Phi Knights Landing (KNL) many integrated core (MIC) architecture, NERSC is transitioning from the multi-core to the more energy-efficient many-core era. The developers of VASP, a widely used materials science code, have adopted MPI/OpenMP parallelism to better exploit the increased on-node parallelism, wider vector units, and the high bandwidth on-package memory (MCDRAM) of KNL. To achieve optimal performance, KNL specifics relevant for the build, boot and run time setup must be explored. In this paper, we present the performance analysis of representative VASP workloads on Cori, focusing on the effects of the compilers, libraries, and boot/run time options such as the NUMA/MCDRAM modes, Hyper-Threading, huge pages, core specialization, and thread scaling. 
The paper is intended to serve as a KNL performance guide for VASP users, but it will also benefit other KNL users.}, language = {en} } @inproceedings{WendeMarsmanZhaoetal., author = {Wende, Florian and Marsman, Martijn and Zhao, Zhengji and Kim, Jeongnim}, title = {Porting VASP from MPI to MPI+OpenMP [SIMD]}, series = {Scaling OpenMP for Exascale Performance and Portability - 13th International Workshop on OpenMP, IWOMP 2017, Stony Brook, NY, USA, September 20-22, 2017}, volume = {10468}, booktitle = {Scaling OpenMP for Exascale Performance and Portability - 13th International Workshop on OpenMP, IWOMP 2017, Stony Brook, NY, USA, September 20-22, 2017}, doi = {10.1007/978-3-319-65578-9_8}, pages = {107 -- 122}, abstract = {We describe for the VASP application (a widely used electronic structure code written in FORTRAN) the transition from an MPI-only to a hybrid code base leveraging the three relevant levels of parallelism to be addressed when optimizing for an effective execution on modern computer platforms: multiprocessing, multithreading and SIMD vectorization. To achieve code portability, we draw on MPI parallelization together with OpenMP threading and SIMD constructs. Combining the latter can be challenging in complex code bases. Optimization targets are combining multithreading and vectorization in different calling contexts as well as whole function vectorization. In addition to outlining design decisions made throughout the code transformation process, we will demonstrate the effectiveness of the code adaptations using different compilers (GNU, Intel) and target platforms (CPU, Intel Xeon Phi (KNL)).}, language = {en} } @inproceedings{WendeSteinkeReinefeld, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, series = {Proceedings of the 3rd International Conference on Exascale Applications and Software, EASC 2015}, booktitle = {Proceedings of the 3rd International Conference on Exascale Applications and Software, EASC 2015}, editor = {Gray, A. and Smith, L. and Weiland, M.}, publisher = {The University of Edinburgh}, isbn = {978-0-9926615-1-9}, pages = {13 -- 18}, language = {en} } @inproceedings{WendeCordesSteinke, author = {Wende, Florian and Cordes, Frank and Steinke, Thomas}, title = {Concurrent Kernel Execution on Xeon Phi within Parallel Heterogeneous Workloads}, series = {Euro-Par 2014: Parallel Processing. 20th International Conference, Porto, Portugal, August 25-29, 2014, Proceedings}, volume = {8632}, booktitle = {Euro-Par 2014: Parallel Processing. 20th International Conference, Porto, Portugal, August 25-29, 2014, Proceedings}, doi = {10.1007/978-3-319-09873-9_66}, pages = {788 -- 799}, language = {en} } @inproceedings{WendeSteinke, author = {Wende, Florian and Steinke, Thomas}, title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU}, series = {SC '13: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis, Article No. 83, ACM, New York, NY, USA, 2013}, booktitle = {SC '13: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis, Article No. 83, ACM, New York, NY, USA, 2013}, doi = {10.1145/2503210.2503254}, language = {en} } @inproceedings{WendeLaubenderSteinke, author = {Wende, Florian and Laubender, Guido and Steinke, Thomas}, title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned}, series = {CUG2014 Proceedings}, booktitle = {CUG2014 Proceedings}, language = {en} } @misc{WendeSteinkeKlemmetal., author = {Wende, Florian and Steinke, Thomas and Klemm, Michael and Reinefeld, Alexander}, title = {Concurrent Kernel Offloading}, series = {High Performance Parallelism Pearls}, journal = {High Performance Parallelism Pearls}, editor = {Reinders, James and Jeffers, Jim}, publisher = {Morgan Kaufmann, Elsevier}, isbn = {978-0128021187}, language = {en} } @inproceedings{WendeCordesSteinke2012, author = {Wende, Florian and Cordes, Frank and Steinke, Thomas}, title = {On Improving the Performance of Multi-threaded CUDA Applications with Concurrent Kernel Execution by Kernel Reordering}, series = {Application Accelerators in High Performance Computing (SAAHPC), 2012 Symposium on}, booktitle = {Application Accelerators in High Performance Computing (SAAHPC), 2012 Symposium on}, doi = {10.1109/SAAHPC.2012.12}, pages = {74 -- 83}, year = {2012}, language = {en} } @misc{SchneckWeiserWende, author = {Schneck, Jakob and Weiser, Martin and Wende, Florian}, title = {Impact of mixed precision and storage layout on additive Schwarz smoothers}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-71305}, abstract = {The growing discrepancy between CPU computing power and memory bandwidth drives more and more numerical algorithms into a bandwidth-bound regime. One example is the overlapping Schwarz smoother, a highly effective building block for iterative multigrid solution of elliptic equations with higher order finite elements. Two options of reducing the required memory bandwidth are sparsity exploiting storage layouts and representing matrix entries with reduced precision in floating point or fixed point format. We investigate the impact of several options on storage demand and contraction rate, both analytically in the context of subspace correction methods and numerically at an example of solid mechanics. Both perspectives agree on the favourite scheme: fixed point representation of Cholesky factors in nested dissection storage.}, language = {en} } @incollection{NoackWendeZitzlsbergeretal., author = {Noack, Matthias and Wende, Florian and Zitzlsberger, Georg and Klemm, Michael and Steinke, Thomas}, title = {KART - A Runtime Compilation Library for Improving HPC Application Performance}, series = {High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS, Frankfurt, Germany, June 18-22, 2017, Revised Selected Papers}, volume = {10524}, booktitle = {High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS, Frankfurt, Germany, June 18-22, 2017, Revised Selected Papers}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-319-67630-2_29}, pages = {389 -- 403}, abstract = {The effectiveness of ahead-of-time compiler optimization heavily depends on the amount of available information at compile time.
Input-specific information that is only available at runtime cannot be used, although it often determines loop counts, branching predicates and paths, as well as memory-access patterns. It can also be crucial for generating efficient SIMD-vectorized code. This is especially relevant for the many-core architectures paving the way to exascale computing, which are more sensitive to code optimization. We explore the design space for using input-specific information at compile time and present KART, a C++ library solution that allows developers to compile, link, and execute code (e.g., C, C++, Fortran) at application runtime. Besides mere runtime compilation of performance-critical code, KART can be used to instantiate the same code multiple times using different inputs, compilers, and options. Other techniques like auto-tuning and code generation can be integrated into a KART-enabled application instead of being scripted around it. We evaluate runtimes and compilation costs for different synthetic kernels, and show the effectiveness for two real-world applications, HEOM and a WSM6 proxy.}, language = {en} } @article{AlhaddadFoerstnerGrothetal., author = {Alhaddad, Samer and F{\"o}rstner, Jens and Groth, Stefan and Gr{\"u}newald, Daniel and Grynko, Yevgen and Hannig, Frank and Kenter, Tobias and Pfreundt, Franz-Josef and Plessl, Christian and Schotte, Merlind and Steinke, Thomas and Teich, J{\"u}rgen and Weiser, Martin and Wende, Florian}, title = {HighPerMeshes - A Domain-Specific Language for Numerical Algorithms on Unstructured Grids}, series = {Euro-Par 2020: Parallel Processing Workshops.}, journal = {Euro-Par 2020: Parallel Processing Workshops.}, publisher = {Springer}, doi = {10.1007/978-3-030-71593-9_15}, pages = {185 -- 196}, abstract = {Solving partial differential equations on unstructured grids is a cornerstone of engineering and scientific computing. Nowadays, heterogeneous parallel platforms with CPUs, GPUs, and FPGAs enable energy-efficient and computationally demanding simulations. We developed the HighPerMeshes C++-embedded Domain-Specific Language (DSL) for bridging the abstraction gap between the mathematical and algorithmic formulation of mesh-based algorithms for PDE problems on the one hand and an increasing number of heterogeneous platforms with their different parallel programming and runtime models on the other hand. Thus, the HighPerMeshes DSL aims at higher productivity in the code development process for multiple target platforms. We introduce the concepts as well as the basic structure of the HighPerMeshes DSL, and demonstrate its usage with three examples, a Poisson and monodomain problem, respectively, solved by the continuous finite element method, and the discontinuous Galerkin method for Maxwell's equations. The mapping of the abstract algorithmic description onto parallel hardware, including distributed memory compute clusters, is presented.
Finally, the achievable performance and scalability are demonstrated for a typical example problem on a multi-core CPU cluster.}, language = {en} } @inproceedings{WendeNoackSteinkeetal., author = {Wende, Florian and Noack, Matthias and Steinke, Thomas and Klemm, Michael and Zitzlsberger, Georg and Newburn, Chris J.}, title = {Portable SIMD Performance with OpenMP* 4.x Compiler Directives}, booktitle = {Euro-Par 2016: Parallel Processing: 22nd International Conference on Parallel and Distributed Computing}, editor = {Dutot, Pierre-Francois and Trystram, Denis}, publisher = {Springer International Publishing}, isbn = {978-3-319-43659-3}, doi = {10.1007/978-3-319-43659-3_20}, abstract = {Effective vectorization is becoming increasingly important for high performance and energy efficiency on processors with wide SIMD units. Compilers often require programmers to identify opportunities for vectorization, using directives to disprove data dependences. The OpenMP 4.x SIMD directives strive to provide portability. We investigate the ability of current compilers (GNU, Clang, and Intel) to generate SIMD code for microbenchmarks that cover common patterns in scientific codes and for two kernels from the VASP and the MOM5/ERGOM application. We explore coding strategies for improving SIMD performance across different compilers and platforms (Intel® Xeon® processor and Intel® Xeon Phi™ (co)processor). We compare OpenMP* 4.x SIMD vectorization with and without vector data types against SIMD intrinsics and C++ SIMD types. Our experiments show that in many cases portable performance can be achieved. All microbenchmarks are available as open source as a reference for programmers and compiler experts to enhance SIMD code generation.}, language = {en} } @inproceedings{KrzikallaWendeHoehnerbach, author = {Krzikalla, Olaf and Wende, Florian and H{\"o}hnerbach, Markus}, title = {Dynamic SIMD Vector Lane Scheduling}, series = {High Performance Computing, ISC High Performance 2016 International Workshops, ExaComm, E-MuCoCoS, HPC-IODC, IXPUG, IWOPH, P^3MA, VHPC, WOPSSS}, volume = {9945}, booktitle = {High Performance Computing, ISC High Performance 2016 International Workshops, ExaComm, E-MuCoCoS, HPC-IODC, IXPUG, IWOPH, P^3MA, VHPC, WOPSSS}, doi = {10.1007/978-3-319-46079-6_25}, pages = {354 -- 365}, abstract = {A classical technique to vectorize code that contains control flow is a control-flow to data-flow conversion. In that approach statements are augmented with masks that denote whether a given vector lane participates in the statement's execution or idles. If the scheduling of work to vector lanes is performed statically, then some of the vector lanes will run idle in case of control flow divergences or varying work intensities across the loop iterations. With an increasing number of vector lanes, the likelihood of divergences or heavily unbalanced work assignments increases and static scheduling leads to a poor resource utilization. In this paper, we investigate different approaches to dynamic SIMD vector lane scheduling using the Mandelbrot set algorithm as a test case. To overcome the limitations of static scheduling, idle vector lanes are assigned work items dynamically, thereby minimizing per-lane idle cycles. Our evaluation on the Knights Corner and Knights Landing platforms shows that our approaches can lead to considerable performance gains over a static work assignment.
By using the AVX-512 vector compress and expand instructions, we are able to further improve the scheduling.}, language = {en} } @article{WendeMarsmanKimetal., author = {Wende, Florian and Marsman, Martijn and Kim, Jeongnim and Vasilev, Fedor and Zhao, Zhengji and Steinke, Thomas}, title = {OpenMP in VASP: Threading and SIMD}, series = {International Journal of Quantum Chemistry}, journal = {International Journal of Quantum Chemistry}, number = {Emerging Architectures in Computational Chemistry}, publisher = {Wiley Online Library}, doi = {10.1002/qua.25851}, pages = {e25851}, abstract = {The Vienna Ab initio Simulation Package (VASP) is a widely used electronic structure code that originally exploits process-level parallelism through the Message Passing Interface (MPI) for work distribution within and across nodes. Architectural changes of modern parallel processors urge programmers to address thread- and data-level parallelism as well to benefit most from the available compute resources within a node. We describe for VASP an approach to MPI + OpenMP parallelization that includes data-level parallelism through OpenMP SIMD constructs together with a generic high-level vector coding scheme. We can demonstrate an improved scalability of VASP and more than 20\% gain over the MPI-only version, as well as a 2x increased performance of collective operations using the multiple-endpoint MPI feature. The high-level vector coding scheme applied to VASP's general gradient approximation routine gives up to 9x performance gain on AVX512 platforms with the Intel compiler.}, language = {en} } @inproceedings{Wende, author = {Wende, Florian}, title = {C++ Data Layout Abstractions through Proxy Types}, series = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 14th International Workshop on Automatic Performance Tunings (iWAPT)}, booktitle = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 14th International Workshop on Automatic Performance Tunings (iWAPT)}, doi = {10.1109/IPDPSW.2019.00126}, pages = {758 -- 767}, abstract = {Programs that process linearly indexed fields with structured element types in a data-parallel way usually suffer from the fact that compilers fail to generate efficient code if the selected data layout appears inappropriate for the chosen target architecture. If their internal heuristics cannot prove a performance gain from a data-parallel execution, compilers may fall back to scalar code generation. Data access through proxy types together with a customized container is one means to assist the compiler in generating efficient machine code in these cases without changing the user code.
We present an automated proxy-type generator (using Clang's LibTooling) and a configurable C++ container that supports different data layouts in a transparent way.}, language = {en} } @inproceedings{WendeNoackSchuettetal., author = {Wende, Florian and Noack, Matthias and Sch{\"u}tt, Thorsten and Sachs, Stephen and Steinke, Thomas}, title = {Application Performance on a Cray XC30 Evaluation System with Xeon Phi Coprocessors at HLRN-III}, series = {Cray User Group}, booktitle = {Cray User Group}, language = {en} } @incollection{NoackWendeOertel, author = {Noack, Matthias and Wende, Florian and Oertel, Klaus-Dieter}, title = {OpenCL: There and Back Again}, series = {High Performance Parallelism Pearls}, volume = {2}, booktitle = {High Performance Parallelism Pearls}, editor = {Reinders, James and Jeffers, Jim}, publisher = {Morgan Kaufmann, Elsevier}, isbn = {978-0-12-803819-2}, pages = {355 -- 378}, language = {en} } @inproceedings{WendeMarsmanSteinke, author = {Wende, Florian and Marsman, Martijn and Steinke, Thomas}, title = {On Enhancing 3D-FFT Performance in VASP}, series = {CUG Proceedings}, booktitle = {CUG Proceedings}, language = {en} } @masterthesis{Wende, type = {Bachelor Thesis}, author = {Wende, Florian}, title = {Dynamic Load Balancing on Massively Parallel Computer Architectures}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42166}, school = {Zuse Institute Berlin (ZIB)}, pages = {100}, abstract = {This thesis reports on using dynamic load balancing methods on massively parallel computers in the context of multithreaded computations. In particular we investigate the applicability of a randomized work stealing algorithm to ray tracing and breadth-first search as representatives of real-world applications with dynamic work creation. For our considerations we made use of current massively parallel hardware accelerators: Nvidia Tesla M2090 and Intel Xeon Phi. For both platforms we demonstrate the suitability of the work stealing scheme for the said real-world applications. Also the necessity of dynamic load balancing for irregular computations on such hardware is illustrated.}, language = {en} } @misc{WendeSteinke, author = {Wende, Florian and Steinke, Thomas}, title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU}, issn = {1438-0064}, doi = {10.1145/2503210.2503254}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42187}, abstract = {Simulations of the critical Ising model by means of local update algorithms suffer from critical slowing down. One way to partially compensate for the influence of this phenomenon on the runtime of simulations is using increasingly faster and parallel computer hardware. Another approach is using algorithms that do not suffer from critical slowing down, such as cluster algorithms. This paper reports on the Swendsen-Wang multi-cluster algorithm on Intel Xeon Phi coprocessor 5110P, Nvidia Tesla M2090 GPU, and x86 multi-core CPU. We present shared memory versions of the said algorithm for the simulation of the two- and three-dimensional Ising model. We use a combination of local cluster search and global label reduction by means of atomic hardware primitives. Further, we describe an MPI version of the algorithm on Xeon Phi and CPU, respectively.
Significant performance improvements over known implementations of the Swendsen-Wang algorithm are demonstrated.}, language = {en} } @misc{WendeLaubenderSteinke, author = {Wende, Florian and Laubender, Guido and Steinke, Thomas}, title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-49990}, abstract = {The third generation of the North German Supercomputing Alliance (HLRN) compute and storage facilities comprises a Cray XC30 architecture with exclusively Intel Ivy Bridge compute nodes. In the second phase, scheduled for November 2014, the HLRN-III configuration will undergo a substantial upgrade together with the option of integrating accelerator nodes into the system. To support the decision-making process, a four-node Intel Xeon Phi cluster is integrated into the present HLRN-III infrastructure at ZIB. This integration includes user/project management, file system access and job management via the HLRN-III batch system. For selected workloads, in-depth analysis, migration and optimization work on Xeon Phi is in progress. We will report our experiences and lessons learned within the Xeon Phi installation and integration process. For selected examples, initial results of the application evaluation on the Xeon Phi cluster platform will be discussed.}, language = {en} } @misc{WendeSteinkeReinefeld, author = {Wende, Florian and Steinke, Thomas and Reinefeld, Alexander}, title = {The Impact of Process Placement and Oversubscription on Application Performance: A Case Study for Exascale Computing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53560}, abstract = {With the growing number of hardware components and the increasing software complexity in the upcoming exascale computers, system failures will become the norm rather than an exception for long-running applications. Fault-tolerance can be achieved by the creation of checkpoints during the execution of a parallel program. Checkpoint/Restart (C/R) mechanisms allow for both task migration (even if there were no hardware faults) and restarting of tasks after the occurrence of hardware faults. Affected tasks are then migrated to other nodes, which may result in unfortunate process placement and/or oversubscription of compute resources. In this paper we analyze the impact of unfortunate process placement and oversubscription of compute resources on the performance and scalability of two typical HPC application workloads, CP2K and MOM5. Results are given for a Cray XC30/40 with Aries dragonfly topology. Our results indicate that unfortunate process placement has only little negative impact, while oversubscription substantially degrades the performance. The latter might be only (partially) beneficial when placing multiple applications with different computational characteristics on the same node.}, language = {en} } @misc{WendeSteinkeCordes, author = {Wende, Florian and Steinke, Thomas and Cordes, Frank}, title = {Multi-threaded Kernel Offloading to GPGPU Using Hyper-Q on Kepler Architecture}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50362}, abstract = {Small-scale computations usually cannot fully utilize the compute capabilities of modern GPGPUs.
With the Fermi GPU architecture Nvidia introduced the concurrent kernel execution feature allowing up to 16 GPU kernels to execute simultaneously on a shared GPU device for a better utilization of the respective resources. Insufficient scheduling capabilities in this respect, however, can significantly reduce the theoretical concurrency level. With the Kepler GPU architecture Nvidia addresses this issue by introducing the Hyper-Q feature with 32 hardware managed work queues for concurrent kernel execution. We investigate the Hyper-Q feature within heterogeneous workloads with multiple concurrent host threads or processes, each offloading computations to the GPU. By means of a synthetic benchmark kernel and a hybrid parallel CPU-GPU real-world application, we evaluate the performance obtained with Hyper-Q on GPU and compare it against a kernel reordering mechanism introduced by the authors for the Fermi architecture.}, language = {en} } @misc{Wende, author = {Wende, Florian}, title = {SIMD Enabled Functions on Intel Xeon CPU and Intel Xeon Phi Coprocessor}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-54163}, abstract = {To achieve high floating point compute performance, modern processors draw on short vector SIMD units, as found e.g. in Intel CPUs (SSE, AVX1, AVX2 as well as AVX-512 on the roadmap) and the Intel Xeon Phi coprocessor, to operate an increasingly larger number of operands simultaneously. Making use of SIMD vector operations therefore is essential to get close to the processor's floating point peak performance. Two approaches are typically used by programmers to utilize the vector units: compiler driven vectorization via directives and code annotations, and manual vectorization by means of SIMD intrinsic operations or assembly. In this paper, we investigate the capabilities of the current Intel compiler (version 15 and later) to generate vector code for non-trivial coding patterns within loops. Besides the more or less uniform data-parallel standard loops or loop nests, which are typical candidates for SIMDfication, the occurrence of e.g. (conditional) function calls including branching, and early returns from functions may pose difficulties regarding the effective use of vector operations. Recent improvements of the compiler's capabilities involve the generation of SIMD-enabled functions. We will study the effectiveness of the vector code generated by the compiler by comparing it against hand-coded intrinsics versions of different kinds of functions that are invoked within innermost loops.}, language = {en} } @masterthesis{Wende, type = {Master Thesis}, author = {Wende, Florian}, title = {Simulation of Spin Models on Nvidia Graphics Cards using CUDA}, school = {Humboldt-Universit{\"a}t zu Berlin}, pages = {149}, abstract = {This thesis reports on simulating spin models on Nvidia graphics cards using the CUDA programming model; a particular approach for making GPGPU (General Purpose Computation on Graphics Processing Units) available for a wide range of software developers not necessarily acquainted with (massively) parallel programming.
By comparing program execution times for simulations of the Ising model and the Ising spin glass by means of the Metropolis algorithm on Nvidia Tesla C1060 graphics cards and an Intel Core i7-920 quad-core x86 CPU (we used OpenMP to make our simulations run on all 4 execution units of the CPU), we noticed that the Tesla C1060 performed about a factor of 5-10 faster than the Core i7-920, depending on the particular model and the accuracy of the calculations (32-bit or 64-bit). We also investigated the reliability of GPGPU computations, especially with respect to the occurrence of soft-errors as suggested in [23]. We noticed faulty program outputs during long-time simulations of the Ising model on ``large'' lattices. We were able to link these problems to overheating of the corresponding graphics cards. Doing Monte Carlo simulations on parallel computer architectures, as was the case in this thesis, suggests generating random numbers in a parallel manner as well. We present implementations of the random number generators Ranlux and Mersenne Twister. In addition, we give an alternative and very efficient approach for producing parallel random numbers on Nvidia graphics cards. We successfully tested all random number generators used in this thesis for their quality by comparing Monte Carlo estimates against exact calculations.}, language = {en} } @article{SchneckWeiserWende, author = {Schneck, Jakob and Weiser, Martin and Wende, Florian}, title = {Impact of mixed precision and storage layout on additive Schwarz smoothers}, series = {Numerical Linear Algebra with Applications}, volume = {28}, journal = {Numerical Linear Algebra with Applications}, number = {4}, doi = {10.1002/nla.2366}, abstract = {The growing discrepancy between CPU computing power and memory bandwidth drives more and more numerical algorithms into a bandwidth-bound regime. One example is the overlapping Schwarz smoother, a highly effective building block for iterative multigrid solution of elliptic equations with higher order finite elements. Two options of reducing the required memory bandwidth are sparsity exploiting storage layouts and representing matrix entries with reduced precision in floating point or fixed point format. We investigate the impact of several options on storage demand and contraction rate, both analytically in the context of subspace correction methods and numerically at an example of solid mechanics. Both perspectives agree on the favourite scheme: fixed point representation of Cholesky factors in nested dissection storage.}, language = {en} } @article{KnoopGronemeierSuehringetal., author = {Knoop, Helge and Gronemeier, Tobias and S{\"u}hring, Matthias and Steinbach, Peter and Noack, Matthias and Wende, Florian and Steinke, Thomas and Knigge, Christoph and Raasch, Siegfried and Ketelsen, Klaus}, title = {Porting the MPI-parallelized LES model PALM to multi-GPU systems and many integrated core processors: an experience report}, series = {International Journal of Computational Science and Engineering. Special Issue on: Novel Strategies for Programming Accelerators}, journal = {International Journal of Computational Science and Engineering. Special Issue on: Novel Strategies for Programming Accelerators}, edition = {Special Issue on: Novel Strategies for Programming Accelerators}, publisher = {Inderscience}, abstract = {The computational power and availability of graphics processing units (GPUs), such as the Nvidia Tesla, and Many Integrated Core (MIC) processors, such as the Intel Xeon Phi, on high performance computing (HPC) systems are rapidly evolving. However, HPC applications need to be ported to take advantage of such hardware. This paper is a report on our experience of porting the MPI+OpenMP parallelised large-eddy simulation model (PALM) to multi-GPU as well as to MIC processor environments using the directive-based high-level programming paradigms OpenACC and OpenMP, respectively. PALM is a Fortran-based computational fluid dynamics software package, used for the simulation of atmospheric and oceanic boundary layers to answer questions linked to fundamental atmospheric turbulence research, urban modelling, aircraft safety and cloud physics. Development of PALM started in 1997; the project currently entails 140 kLOC and is used on HPC farms of up to 43,200 cores. The main challenges we faced during the porting process are the size and complexity of the PALM code base, its inconsistent modularisation and the complete lack of a unit-test suite. We report the methods used to identify performance issues as well as our experiences with state-of-the-art profiling tools. Moreover, we outline the required porting steps in order to properly execute our code on GPUs and MIC processors, describe the problems and bottlenecks that we encountered during the porting process, and present separate performance tests for both architectures. These performance tests, however, do not provide any benchmark information that compares the performance of the ported code between the two architectures.}, language = {en} }