@mastersthesis{WendeBachelorThesis,
  type = {Bachelor Thesis},
  author = {Wende, Florian},
  title = {Dynamic Load Balancing on Massively Parallel Computer Architectures},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42166},
  school = {Zuse Institute Berlin (ZIB)},
  pages = {100},
  abstract = {This thesis reports on the use of dynamic load balancing methods on massively parallel computers in the context of multithreaded computations. In particular, we investigate the applicability of a randomized work stealing algorithm to ray tracing and breadth-first search as representatives of real-world applications with dynamic work creation. For our considerations we made use of current massively parallel hardware accelerators: the Nvidia Tesla M2090 and the Intel Xeon Phi. For both platforms we demonstrate the suitability of the work stealing scheme for these real-world applications, and we illustrate the necessity of dynamic load balancing for irregular computations on such hardware.},
  language = {en}
}

@misc{WendeSteinke,
  author = {Wende, Florian and Steinke, Thomas},
  title = {Swendsen-Wang Multi-Cluster Algorithm for the 2D/3D Ising Model on Xeon Phi and GPU},
  issn = {1438-0064},
  doi = {10.1145/2503210.2503254},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42187},
  abstract = {Simulations of the critical Ising model by means of local update algorithms suffer from critical slowing down. One way to partially compensate for the influence of this phenomenon on the runtime of simulations is to use increasingly faster and parallel computer hardware. Another approach is to use algorithms that do not suffer from critical slowing down, such as cluster algorithms. This paper reports on the Swendsen-Wang multi-cluster algorithm on the Intel Xeon Phi coprocessor 5110P, the Nvidia Tesla M2090 GPU, and an x86 multi-core CPU. We present shared memory versions of this algorithm for the simulation of the two- and three-dimensional Ising model. We use a combination of local cluster search and global label reduction by means of atomic hardware primitives. Further, we describe an MPI version of the algorithm on Xeon Phi and CPU. Significant performance improvements over known implementations of the Swendsen-Wang algorithm are demonstrated.},
  language = {en}
}

@misc{WendeLaubenderSteinke,
  author = {Wende, Florian and Laubender, Guido and Steinke, Thomas},
  title = {Integration of Intel Xeon Phi Servers into the HLRN-III Complex: Experiences, Performance and Lessons Learned},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-49990},
  abstract = {The third generation of the North German Supercomputing Alliance (HLRN) compute and storage facilities comprises a Cray XC30 architecture with exclusively Intel Ivy Bridge compute nodes. In the second phase, scheduled for November 2014, the HLRN-III configuration will undergo a substantial upgrade together with the option of integrating accelerator nodes into the system. To support the decision-making process, a four-node Intel Xeon Phi cluster is integrated into the present HLRN-III infrastructure at ZIB. This integration includes user/project management, file system access and job management via the HLRN-III batch system. For selected workloads, in-depth analysis, migration and optimization work on Xeon Phi is in progress. We will report our experiences and lessons learned from the Xeon Phi installation and integration process.
For selected examples, initial results of the application evaluation on the Xeon Phi cluster platform will be discussed.},
  language = {en}
}

@misc{WendeSteinkeCordes,
  author = {Wende, Florian and Steinke, Thomas and Cordes, Frank},
  title = {Multi-threaded Kernel Offloading to GPGPU Using Hyper-Q on Kepler Architecture},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50362},
  abstract = {Small-scale computations usually cannot fully utilize the compute capabilities of modern GPGPUs. With the Fermi GPU architecture, Nvidia introduced the concurrent kernel execution feature, allowing up to 16 GPU kernels to execute simultaneously on a shared GPU device for a better utilization of the respective resources. Insufficient scheduling capabilities in this respect, however, can significantly reduce the theoretical concurrency level. With the Kepler GPU architecture, Nvidia addresses this issue by introducing the Hyper-Q feature with 32 hardware-managed work queues for concurrent kernel execution. We investigate the Hyper-Q feature within heterogeneous workloads with multiple concurrent host threads or processes, each offloading computations to the GPU. By means of a synthetic benchmark kernel and a hybrid parallel CPU-GPU real-world application, we evaluate the performance obtained with Hyper-Q on the GPU and compare it against a kernel reordering mechanism introduced by the authors for the Fermi architecture.},
  language = {en}
}

@misc{WendeSIMD,
  author = {Wende, Florian},
  title = {SIMD Enabled Functions on Intel Xeon CPU and Intel Xeon Phi Coprocessor},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-54163},
  abstract = {To achieve high floating point compute performance, modern processors draw on short-vector SIMD units, as found e.g. in Intel CPUs (SSE, AVX1, AVX2, as well as AVX-512 on the roadmap) and the Intel Xeon Phi coprocessor, to operate on an increasingly larger number of operands simultaneously. Making use of SIMD vector operations is therefore essential to get close to the processor's floating point peak performance. Two approaches are typically used by programmers to utilize the vector units: compiler-driven vectorization via directives and code annotations, and manual vectorization by means of SIMD intrinsic operations or assembly. In this paper, we investigate the capabilities of the current Intel compiler (version 15 and later) to generate vector code for non-trivial coding patterns within loops. Besides the more or less uniform data-parallel standard loops or loop nests, which are typical candidates for SIMDfication, the occurrence of, e.g., (conditional) function calls including branching, and early returns from functions may pose difficulties regarding the effective use of vector operations. Recent improvements of the compiler's capabilities involve the generation of SIMD-enabled functions.
We will study the effectiveness of the vector code generated by the compiler by comparing it against hand-coded intrinsics versions of different kinds of functions that are invoked within innermost loops.},
  language = {en}
}

@mastersthesis{WendeMasterThesis,
  type = {Master Thesis},
  author = {Wende, Florian},
  title = {Simulation of Spin Models on Nvidia Graphics Cards using CUDA},
  school = {Humboldt-Universit{\"a}t zu Berlin},
  pages = {149},
  abstract = {This thesis reports on simulating spin models on Nvidia graphics cards using the CUDA programming model, a particular approach to making GPGPU (General Purpose Computation on Graphics Processing Units) available to a wide range of software developers not necessarily acquainted with (massively) parallel programming. By comparing program execution times for simulations of the Ising model and the Ising spin glass by means of the Metropolis algorithm on Nvidia Tesla C1060 graphics cards and an Intel Core i7-920 quad-core x86 CPU (we used OpenMP to make our simulations run on all 4 execution units of the CPU), we noticed that the Tesla C1060 performed about a factor of 5-10 faster than the Core i7-920, depending on the particular model and the accuracy of the calculations (32-bit or 64-bit). We also investigated the reliability of GPGPU computations, especially with respect to the occurrence of soft errors as suggested in [23]. We noticed faulty program outputs during long-time simulations of the Ising model on ``large'' lattices, and we were able to link these problems to overheating of the corresponding graphics cards. Doing Monte Carlo simulations on parallel computer architectures, as was the case in this thesis, suggests generating random numbers in a parallel manner as well. We present implementations of the random number generators Ranlux and Mersenne Twister. In addition, we give an alternative and very efficient approach for producing parallel random numbers on Nvidia graphics cards. We successfully tested all random number generators used in this thesis for their quality by comparing Monte Carlo estimates against exact calculations.},
  language = {en}
}