@misc{AmbellanHanikvonTycowicz,
  author = {Ambellan, Felix and Hanik, Martin and von Tycowicz, Christoph},
  title = {Morphomatics: Geometric morphometrics in non-Euclidean shape spaces},
  doi = {10.12752/8544},
  abstract = {Morphomatics is an open-source Python library for (statistical) shape analysis developed within the geometric data analysis and processing research group at Zuse Institute Berlin. It contains prototype implementations of intrinsic manifold-based methods that are highly consistent and avoid the influence of unwanted effects such as bias due to arbitrary choices of coordinates.},
  language = {en}
}

@misc{FujiiItoKimetal.,
  author = {Fujii, Koichi and Ito, Naoki and Kim, Sunyoung and Kojima, Masakazu and Shinano, Yuji and Toh, Kim-Chuan},
  title = {Solving Challenging Large Scale QAPs},
  issn = {1438-0064},
  doi = {10.12752/8130},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-81303},
  abstract = {We report our progress on the project for solving larger-scale quadratic assignment problems (QAPs). Our main approach to solving large-scale NP-hard combinatorial optimization problems such as QAPs is a parallel branch-and-bound method efficiently implemented on a powerful computer system using the Ubiquity Generator (UG) framework, which can utilize more than 100,000 cores. Lower bounding procedures incorporated in the branch-and-bound method play a crucial role in solving the problems. For a strong lower bounding procedure, we employ the Lagrangian doubly nonnegative (DNN) relaxation and the Newton-bracketing method developed by the authors' group. In this report, we describe some basic tools used in the project, including the lower bounding procedure and branching rules, and present some preliminary numerical results. Our next target is QAPs with dimension at least 50, as we have succeeded in solving tai30a and sko42 from QAPLIB for the first time.},
  language = {en}
}

@misc{Shinano,
  author = {Shinano, Yuji},
  title = {UG - Ubiquity Generator Framework v1.0.0beta},
  doi = {10.12752/8521},
  abstract = {UG is a generic framework for parallelizing branch-and-bound based solvers (e.g., MIP, MINLP, ExactIP) in a distributed or shared memory computing environment. It exploits the powerful performance of state-of-the-art "base solvers" such as SCIP and CPLEX without the need for base-solver parallelization. The UG framework, ParaSCIP (ug[SCIP,MPI]), and FiberSCIP (ug[SCIP,Pthreads]) are available as a beta version. v1.0.0: new documentation and CMake build, generalization of the UG framework, implementation of self-split ramp-up for FiberSCIP and ParaSCIP, and better memory and time limit handling.},
  language = {en}
}

@misc{Krause,
  type = {Master Thesis},
  author = {Krause, Jan},
  title = {Investigation of Options to Handle 3D MRI Data via Convolutional Neural Networks: Application in Knee Osteoarthritis Classification},
  pages = {127},
  language = {en}
}

@misc{Shestakov,
  type = {Master Thesis},
  author = {Shestakov, Alexey},
  title = {A Deep Learning Method for Automated Detection of Meniscal Tears in Meniscal Sub-Regions in 3D MRI Data},
  pages = {96},
  abstract = {This work presents a fully automated pipeline centered around a deep neural network, together with a method to train that network efficiently, which enables accurate detection of lesions in meniscal anatomical sub-regions. The network architecture is based on a transformer encoder/decoder. It is trained on DESS and tuned on IW TSE 3D MRI scans sourced from the Osteoarthritis Initiative.
Furthermore, it is trained in a multi-label, multi-task fashion using an auxiliary detection head. The former enables implicit localisation of meniscal defects which, to the best of my knowledge, has not yet been reported elsewhere. The latter enables efficient learning on the entire 3D MRI volume. Thus, the proposed method does not require any expert knowledge at inference time. Aggregated inference results from two datasets yielded overall AUC-ROC values of 0.90, 0.91, and 0.93 for meniscal lesion detection anywhere in the knee, in medial menisci, and in lateral menisci, respectively. These results compare very well with related work, even though only a fraction of the data was utilized. Clinical applicability and benefit are yet to be determined.},
  language = {en}
}

@misc{RiberaBorrellQuerRichteretal.,
  author = {Ribera Borrell, Enric and Quer, Jannes and Richter, Lorenz and Sch{\"u}tte, Christof},
  title = {Improving control based importance sampling strategies for metastable diffusions via adapted metadynamics},
  issn = {1438-0064},
  abstract = {Sampling rare events in metastable dynamical systems is often a computationally expensive task, and one needs to resort to enhanced sampling methods such as importance sampling. The problem of finding optimal importance sampling controls can be formulated as a stochastic optimization problem, but this brings additional numerical challenges, and the convergence of the corresponding algorithms may itself suffer from metastability. In this article, we address this issue by combining systematic control approaches with the heuristic adaptive metadynamics method. Crucially, we approximate the importance sampling control by a neural network, which in principle makes the algorithm feasible for high-dimensional applications. We demonstrate numerically on relevant metastable problems that our algorithm is more effective than previous attempts and that only the combination of the two approaches leads to satisfactory convergence, and therefore to efficient sampling, in certain metastable settings.},
  language = {en}
}

@misc{TateiwaShinanoYasudaetal.,
  author = {Tateiwa, Nariaki and Shinano, Yuji and Yasuda, Masaya and Kaji, Shizuo and Yamamura, Keiichiro and Fujisawa, Katsuki},
  title = {Massively parallel sharing lattice basis reduction},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85209},
  abstract = {For cryptanalysis of lattice-based schemes, the performance evaluation of lattice basis reduction using high-performance computers is becoming increasingly important for determining security levels. We propose a distributed and asynchronous parallel reduction algorithm based on randomization and DeepBKZ, an improved variant of the block Korkine-Zolotarev (BKZ) reduction algorithm. Randomized copies of a lattice basis are distributed to up to 103,680 cores and independently reduced in parallel, while some basis vectors are shared asynchronously among all processes via MPI. There is a trade-off between randomization and information sharing: if a substantial amount of information is shared, all processes will work on the same problem, diminishing the benefit of parallelization. To monitor this balance between randomness and sharing, we propose a metric to quantify the variety of lattice bases. We empirically determine an optimal sharing parameter for high-dimensional lattices.
Through our experiments, we demonstrate the efficacy of the proposed parallel algorithm and its implementation with respect to both performance and scalability.},
  language = {en}
}

@misc{TateiwaShinanoYamamuraetal.,
  author = {Tateiwa, Nariaki and Shinano, Yuji and Yamamura, Keiichiro and Yoshida, Akihiro and Kaji, Shizuo and Yasuda, Masaya and Fujisawa, Katsuki},
  title = {CMAP-LAP: Configurable Massively Parallel Solver for Lattice Problems},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-82802},
  abstract = {Lattice problems are a class of optimization problems that are notably hard: no classical or quantum algorithms are known to solve them efficiently. Their hardness has made lattices a major cryptographic primitive for post-quantum cryptography. Several different approaches have been used for lattice problems, each with a different computational profile; some suffer from super-exponential time, while others require exponential space. This motivated us to develop a novel lattice problem solver, CMAP-LAP, based on the clever coordination of different algorithms that run massively in parallel. With our flexible framework, heterogeneous modules run asynchronously in parallel on a large-scale distributed system while exchanging information, which drastically boosts the overall performance. We also implement full checkpoint-and-restart functionality, which is vital for high-dimensional lattice problems. Through numerical experiments with up to 103,680 cores, we evaluated the performance and stability of our system and demonstrated its high capability for future massive-scale experiments.},
  language = {en}
}