@misc{MoserHaridiShafaatetal.2009, author = {Moser, Monika and Haridi, Seif and Shafaat, Tallat and Sch{\"u}tt, Thorsten and H{\"o}gqvist, Mikael and Reinefeld, Alexander}, title = {Transactional DHT Algorithms}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11532}, number = {09-34}, year = {2009}, abstract = {We present a framework for transactional data access on data stored in a DHT. It allows one to atomically read and write items and to run distributed transactions consisting of a sequence of read and write operations on the items. Items are symmetrically replicated in order to achieve durability of data stored in the SON. To provide availability of items despite the unavailability of some replicas, operations on items are quorum-based. They make progress as long as a majority of replicas can be accessed. Our framework processes transactions optimistically with an atomic commit protocol that is based on Paxos atomic commit. We present algorithms for the whole framework in an event-based notation. Additionally, we discuss the problem of lookup inconsistencies and its implications for the one-copy serializability property of the transaction processing in our framework.}, language = {en} } @misc{SchintkeReinefeldHaridietal.2009, author = {Schintke, Florian and Reinefeld, Alexander and Haridi, Seif and Sch{\"u}tt, Thorsten}, title = {Enhanced Paxos Commit for Transactions on DHTs}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11448}, number = {09-28}, year = {2009}, abstract = {Key/value stores built on structured overlay networks often lack support for atomic transactions and strong data consistency among replicas. This is unfortunate, because consistency guarantees and transactions would allow a wide range of additional application domains to benefit from the inherent scalability and fault-tolerance of DHTs. The Scalaris key/value store supports strong data consistency and atomic transactions. It uses an enhanced Paxos Commit protocol with only four communication steps rather than six. This improvement was possible by exploiting information from the replica distribution in the DHT.
Scalaris enables the implementation of more reliable and scalable infrastructure for collaborative Web services that require strong consistency and atomic changes across multiple items.}, language = {en} } @inproceedings{DoebbelinSchuettReinefeld2013, author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Building Large Compressed PDBs for the Sliding Tile Puzzle}, booktitle = {Workshop on Computer Games}, year = {2013}, language = {en} } @article{SalemSchintkeSchuettetal.2018, author = {Salem, Farouk and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Data-flow scheduling for a scalable FLESnet}, journal = {CBM Progress Report 2017}, isbn = {978-3-9815227-5-4}, doi = {10.15120/GSI-2018-00485}, pages = {130 -- 131}, year = {2018}, language = {en} } @misc{GholamiSchintkeSchuettetal.2018, author = {Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Modeling Checkpoint Schedules for Concurrent HPC Applications}, journal = {CoSaS 2018 International Symposium on Computational Science at Scale}, year = {2018}, language = {en} } @inproceedings{WeinholdLackorzynskiBierbaumetal.2019, author = {Weinhold, Carsten and Lackorzynski, Adam and Bierbaum, Jan and K{\"u}ttler, Martin and Planeta, Maksym and Weisbach, Hannes and Hille, Matthias and H{\"a}rtig, Hermann and Margolin, Alexander and Sharf, Dror and Levy, Ely and Gak, Pavel and Barak, Amnon and Gholami, Masoud and Schintke, Florian and Sch{\"u}tt, Thorsten and Reinefeld, Alexander and Lieber, Matthias and Nagel, Wolfgang}, title = {FFMK: A Fast and Fault-Tolerant Microkernel-Based System for Exascale Computing}, booktitle = {Software for Exascale Computing - SPPEXA 2016-2019}, publisher = {Springer}, doi = {10.1007/978-3-030-47956-5_16}, pages = {483 -- 516}, year = {2019}, language = {en} } @misc{DoebbelinSchuettReinefeld2013a, author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {Building Large Compressed PDBs for the Sliding Tile Puzzle}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18095}, year = {2013}, abstract = {The performance of heuristic search algorithms depends crucially on the effectiveness of the heuristic. A pattern database (PDB) is a powerful heuristic in the form of a pre-computed lookup table. Larger PDBs provide better bounds and thus allow more cut-offs in the search process. Today, the largest PDB for the 24-puzzle is a 6-6-6-6 PDB with a size of 486 MB. We created 8-8-8, 9-8-7 and 9-9-6 PDBs that are three orders of magnitude larger (up to 1.4 TB) than the 6-6-6-6 PDB. We show how to compute such large PDBs and we present statistical and empirical data on their efficiency. The largest single PDB gives on average an 8-fold improvement over the 6-6-6-6 PDB.
Combining several large PDBs gives on average a 12-fold improvement.}, language = {en} } @article{EnkeSteinmetzAdorfetal.2011, author = {Enke, Harry and Steinmetz, Matthias and Adorf, Hans-Martin and Beck-Ratzka, Alexander and Breitling, Frank and Br{\"u}semeister, Thomas and Carlson, Arthur and Ensslin, Torsten and H{\"o}gqvist, Mikael and Nickelt, Iliya and Radke, Thomas and Reinefeld, Alexander and Reiser, Angelika and Scholl, Tobias and Spurzem, Rainer and Steinacker, J{\"u}rgen and Voges, Wolfgang and Wambsganß, Joachim and White, Steve}, title = {AstroGrid-D: Grid technology for astronomical science}, volume = {16}, journal = {New Astronomy}, number = {2}, doi = {10.1016/j.newast.2010.07.005}, pages = {79 -- 93}, year = {2011}, language = {en} } @incollection{FlikReinefeld2012, author = {Flik, T. and Reinefeld, Alexander}, title = {Rechnerorganisation}, booktitle = {H{\"u}tte - Das Ingenieurwissen}, editor = {Czichos, Horst and Hennecke, Manfred}, edition = {34}, publisher = {Springer-Verlag Berlin}, pages = {J6 -- J124}, year = {2012}, language = {en} } @inproceedings{PeterReinefeld2012, author = {Peter, Kathrin and Reinefeld, Alexander}, title = {Consistency and fault tolerance for erasure-coded distributed storage systems}, booktitle = {Proceedings of the fifth international workshop on Data-Intensive Distributed Computing}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/2286996.2287002}, pages = {23 -- 32}, year = {2012}, language = {en} } @article{EnkePartlReinefeldetal.2012, author = {Enke, Harry and Partl, Adrian and Reinefeld, Alexander and Schintke, Florian}, title = {Handling Big Data in Astronomy and Astrophysics}, volume = {12}, journal = {Datenbank-Spektrum}, number = {3}, publisher = {Springer-Verlag}, doi = {10.1007/s13222-012-0099-1}, pages = {173 -- 181}, year = {2012}, language = {en} } @inproceedings{DoebbelinSchuettReinefeld2012, author = {D{\"o}bbelin, Robert and Sch{\"u}tt, Thorsten and Reinefeld, Alexander}, title = {An Analysis of SMP Memory Allocators}, booktitle = {Proceedings of the 41st International Conference on Parallel Processing Workshops (Fifth International Workshop on Parallel Programming Models and Systems Software for High-End Computing (P2S2))}, publisher = {IEEE Computer Society}, doi = {10.1109/ICPPW.2012.10}, pages = {48 -- 54}, year = {2012}, language = {en} } @inproceedings{SchuettDoebbelinReinefeld2013, author = {Sch{\"u}tt, Thorsten and D{\"o}bbelin, Robert and Reinefeld, Alexander}, title = {Forward Perimeter Search with Controlled Use of Memory}, booktitle = {International Joint Conference on Artificial Intelligence, IJCAI-13, Beijing}, year = {2013}, language = {en} } @article{SchuettReinefeldDoebbelin2011, author = {Sch{\"u}tt, Thorsten and Reinefeld, Alexander and D{\"o}bbelin, Robert}, title = {MR-search: massively parallel heuristic search}, volume = {25}, journal = {Concurrency and Computation: Practice and Experience}, number = {1}, doi = {10.1002/cpe.1833}, pages = {40 -- 54}, year = {2011}, language = {en} } @incollection{StenderBerlinReinefeld2013, author = {Stender, Jan and Berlin, Michael and Reinefeld, Alexander}, title = {XtreemFS - a File System for the Cloud}, booktitle = {Data Intensive Storage Services for Cloud Environments}, editor = {Kyriazis, D. and Voulodimos, A. and Gogouvitis, S.
and Varvarigou, Theodora A.}, publisher = {IGI Global}, doi = {10.4018/978-1-4666-3934-8}, year = {2013}, language = {en} } @misc{SchefflerSipsBehlingetal.2016, author = {Scheffler, Daniel and Sips, Mike and Behling, Robert and Dransch, Doris and Eggert, Daniel and Fajerski, Jan and Freytag, Johann-Christoph and Griffiths, Patrick and Hollstein, Andr{\´e} and Hostert, Patrick and K{\"o}thur, Patrick and Peters, Mathias and Pflugmacher, Dirk and Rabe, Andreas and Reinefeld, Alexander and Schintke, Florian and Segel, Karl}, title = {GeoMultiSens - Scalable Multisensoral Analysis of Satellite Remote Sensing Data}, journal = {ESA Living Planet Symposium, EO Open Science Posters}, year = {2016}, language = {en} } @inproceedings{NoackReinefeldKrameretal.2018, author = {Noack, Matthias and Reinefeld, Alexander and Kramer, Tobias and Steinke, Thomas}, title = {DM-HEOM: A Portable and Scalable Solver-Framework for the Hierarchical Equations of Motion}, booktitle = {2018 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 19th IEEE Int. Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2018)}, isbn = {978-1-5386-5555-9}, doi = {10.1109/IPDPSW.2018.00149}, pages = {947 -- 956}, year = {2018}, abstract = {Computing the Hierarchical Equations of Motion (HEOM) is by itself a challenging problem, and so is writing portable production code that runs efficiently on a variety of architectures while scaling from PCs to supercomputers. We combined both challenges to push the boundaries of simulating quantum systems, and to evaluate and improve methodologies for scientific software engineering. Our contributions are threefold: We present the first distributed memory implementation of the HEOM method (DM-HEOM), we describe an interdisciplinary development workflow, and we provide guidelines and experiences for designing distributed, performance-portable HPC applications with MPI-3, OpenCL and other state-of-the-art programming models. 
We evaluate the resulting code on multi- and many-core CPUs as well as GPUs, and demonstrate scalability on a Cray XC40 supercomputer for the PS I molecular light harvesting complex.}, language = {en} } @inproceedings{SchefflerSipsBehlingetal.2016a, author = {Scheffler, Daniel and Sips, Mike and Behling, Robert and Dransch, Doris and Eggert, Daniel and Fajerski, Jan and Freytag, Johann-Christoph and Griffiths, Patrick and Hollstein, Andr{\´e} and Hostert, Patrick and K{\"o}thur, Patrick and Peters, Mathias and Pflugmacher, Dirk and Rabe, Andreas and Reinefeld, Alexander and Schintke, Florian and Segel, Karl}, title = {Geomultisens - a common automatic processing and analysis system for multi-sensor satellite data}, booktitle = {Advancing Horizons for Land Cover Services Entering the Big Data Era, Second joint Workshop of the EARSeL Special Interest Group on Land Use \& Land Cover and the NASA LCLUC Program}, pages = {18 -- 19}, year = {2016}, language = {en} } @article{KramerNoackReinefeldetal.2018, author = {Kramer, Tobias and Noack, Matthias and Reinefeld, Alexander and Rodr{\´i}guez, Mirta and Zelinskyi, Yaroslav}, title = {Efficient calculation of open quantum system dynamics and time-resolved spectroscopy with Distributed Memory HEOM (DM-HEOM)}, volume = {39}, journal = {Journal of Computational Chemistry}, number = {22}, publisher = {Wiley Periodicals, Inc.}, arxiv = {http://arxiv.org/abs/arXiv:1803.03498}, doi = {10.1002/jcc.25354}, pages = {1779 -- 1794}, year = {2018}, abstract = {Time- and frequency-resolved optical signals provide insights into the properties of light harvesting molecular complexes, including excitation energies, dipole strengths and orientations, as well as into the exciton energy flow through the complex. The hierarchical equations of motion (HEOM) provide a unifying theory, which allows one to study the combined effects of system-environment dissipation and non-Markovian memory without making restrictive assumptions about weak or strong couplings or separability of vibrational and electronic degrees of freedom. With increasing system size, the exact solution of the open quantum system dynamics requires memory and compute resources beyond a single compute node. To overcome this barrier, we developed a scalable variant of HEOM. Our distributed memory HEOM, DM-HEOM, is a universal tool for open quantum system dynamics. It is used to accurately compute all experimentally accessible time- and frequency-resolved processes in light harvesting molecular complexes with arbitrary system-environment couplings for a wide range of temperatures and complex sizes.}, language = {en} } @article{KramerNoackReimersetal.2018, author = {Kramer, Tobias and Noack, Matthias and Reimers, Jeffrey R. and Reinefeld, Alexander and Rodr{\´i}guez, Mirta and Yin, Shiwei}, title = {Energy flow in the Photosystem I supercomplex: comparison of approximative theories with DM-HEOM}, volume = {515}, journal = {Chemical Physics}, publisher = {Elsevier B.V.}, arxiv = {http://arxiv.org/abs/arXiv:1805.10484}, doi = {10.1016/j.chemphys.2018.05.028}, pages = {262 -- 271}, year = {2018}, abstract = {We analyze the exciton dynamics in Photosystem I from Thermosynechococcus elongatus using the distributed memory implementation of the hierarchical equations of motion (DM-HEOM) for the 96 Chlorophylls in the monomeric unit. The exciton-system parameters are taken from a first-principles calculation.
A comparison of the exact results with F{\"o}rster rates and Markovian approximations allows one to validate the exciton transfer times within the complex and to identify deviations from approximative theories. We show the optical absorption, linear, and circular dichroism spectra obtained with DM-HEOM and compare them to experimental results.}, language = {en} }