@misc{KaehlerSimonHege, author = {K{\"a}hler, Ralf and Simon, Mark and Hege, Hans-Christian}, title = {Fast Volume Rendering of Sparse High-Resolution Datasets Using Adaptive Mesh Refinement Hierarchies}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-6516}, number = {01-25}, abstract = {In this paper we present an algorithm that accelerates 3D texture-based volume rendering of large and sparse data sets. A hierarchical data structure (known as AMR tree) consisting of nested uniform grids is employed in order to efficiently encode regions of interest. The hierarchies resulting from this kind of space partitioning yield a good balance between the amount of volume to render and the number of texture bricks -- a prerequisite for fast rendering. Comparing our approach to an octree-based algorithm, we show that our algorithm increases rendering performance significantly for sparse data. A further advantage is that less parameter tuning is necessary.}, language = {en} } @misc{KaehlerHege, author = {K{\"a}hler, Ralf and Hege, Hans-Christian}, title = {Interactive Volume Rendering of Adaptive Mesh Refinement Data}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-6561}, number = {01-30}, abstract = {Many phenomena in nature and engineering happen simultaneously on rather diverse spatial and temporal scales, i.e.\ exhibit a multi-scale character. Therefore various hierarchical data structures and numerical schemes have been devised to represent such phenomena quantitatively. A special numerical multilevel technique, associated with a particular hierarchical data structure, is the so-called Adaptive Mesh Refinement (AMR). This scheme achieves locally very high spatial and temporal resolutions. Due to its popularity, many scientists are in need of interactive visualization tools for AMR data. In this article we present a 3D texture-based volume rendering algorithm for AMR data that directly utilizes the hierarchical structure. Thereby interactive rendering is achieved even for large data sets. In particular the problems of interpolation artifacts, opacity corrections, and texture memory limitations are addressed. The algorithm's value in practice is demonstrated with simulation and image data.}, language = {en} } @misc{DeuflhardHegeSeebass, author = {Deuflhard, Peter and Hege, Hans-Christian and Seebass, Martin}, title = {Progress Towards a Combined MRI/Hyperthermia System}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5755}, number = {00-07}, abstract = {Regional hyperthermia, a clinical cancer therapy, is the main topic of the Sonderforschungsbereich ``Hyperthermia: Scientific Methods and Clinical Applications'' at Berlin. In recent years, technological improvements towards a better concentration of heat to the desired target region have been achieved. These include a rather sophisticated integrated software environment for therapy planning and a new hyperthermia applicator. As a next step, a detailed closed-loop monitoring of the actual treatment is to be developed. For this purpose the hyperthermia applicator is combined with an MRI system, which will allow checking the positioning of the patients and measuring individual blood perfusion as well as the 3D temperature distribution. The measurements will then be employed for an on-line control of the whole treatment. 
In this intended setting, new fast feedback control algorithms will come into play.}, language = {en} } @misc{HegePolthier, author = {Hege, Hans-Christian and Polthier, Konrad}, title = {Visualization and Mathematics '97}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5480}, number = {TR-97-06}, abstract = {This report contains paper abstracts of the workshop "Visualization and Mathematics" held in Berlin-Dahlem in September 1997. The meeting serves as a forum for an international community of researchers and practitioners on the application of visualization techniques in mathematics and on mathematical concepts in visualization. It is the second symposium in a series of workshops bringing together mathematicians and experts from scientific visualization. The themes of the workshop include: \begin{itemize} \item applications in differential geometry and partial differential equations \item algorithmic aspects of adaptive and hierarchical techniques in space and time \item time control of animated objects and corresponding algorithms \item algorithmic representation of objects for display, storage and exchange \item new visualization techniques for mathematical structures \item integration of visualization with symbolic and numerical computation. \end{itemize}}, language = {en} } @misc{HegeMerzkyZachow, author = {Hege, Hans-Christian and Merzky, Andre and Zachow, Stefan}, title = {Distributed Visualization with OpenGL Vizserver: Practical Experiences}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5992}, number = {00-31}, abstract = {The increasing demand for distributed solutions in computing technology does not stop when it comes to visualization techniques. However, the capabilities of today's applications to perform remote rendering are limited by historical design legacies. In particular, the popular X11 protocol, which has proven to be extremely flexible and useful for remote 2D graphics applications, breaks down for the case of remote 3D rendering. In this white paper, we give a short overview of generic remote rendering technologies available today, and compare their performance to the recently released Vizserver by SGI: a network extension to the SGI OpenGL rendering engines.}, language = {en} } @misc{SchmidtEhrenbergHege, author = {Schmidt-Ehrenberg, Johannes and Hege, Hans-Christian}, title = {Visual Analysis of Molecular Conformations by Means of a Dynamic Density Mixture Model}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-8361}, number = {05-02}, abstract = {We propose an approach for transforming the sampling of a molecular conformation distribution into an analytical model based on Hidden Markov Models. The model describes the sampled shape density as a mixture of multivariate unimodal densities. Thus, it delivers an interpretation of the sampled density as a set of typical shapes that appear with different probabilities and are characterized by their geometry, their variability and transition probabilities between the shapes. The resulting model is used to identify atom groups of constant shape that are connected by metastable torsion angles. Based on this description an alignment for the original sampling is computed. As it takes into account the different shapes contained in the sampled set, this alignment allows computing reasonable average shapes and meaningful shape density plots. 
Furthermore, it enables us to visualize typical conformations.}, language = {en} } @misc{RosanwoPetzProhaskaetal., author = {Rosanwo, Olufemi and Petz, Christoph and Prohaska, Steffen and Hotz, Ingrid and Hege, Hans-Christian}, title = {Dual Streamline Seeding - Method and Implementation}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11032}, number = {08-49}, abstract = {This work introduces a novel streamline seeding technique based on dual streamlines that are orthogonal to the vector field, instead of tangential. The greedy algorithm presented here produces a net of orthogonal streamlines that is iteratively refined, resulting in good domain coverage and a high degree of continuity and uniformity. The algorithm is easy to implement and efficient, and it naturally extends to curved surfaces.}, language = {en} } @misc{BestHege, author = {Best, Christoph and Hege, Hans-Christian}, title = {Visualizing conformations in molecular dynamics}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-3859}, number = {SC-98-42}, abstract = {The Monte Carlo simulation of the dynamics of complex molecules produces trajectories with a large number of different configurations to sample configuration space. It is expected that these configurations can be classified into a small number of conformations representing essential changes in the shape of the molecule. We present a method to visualize these conformations by point sets in the plane based on a geometrical distance measure between individual configurations. It turns out that different conformations appear as well-separated point sets. The method is further improved by performing a cluster analysis of the data set. The point-cluster representation is used to control a three-dimensional molecule viewer application to show individual configurations and conformational changes. The extraction of essential coordinates and the visualization of molecular shape are discussed.}, language = {en} } @misc{Hege, author = {Hege, Hans-Christian}, title = {Datenabh{\"a}ngigkeitsanalyse und Programmtransformationen auf CRAY-Rechnern mit dem Fortran-Pr{\"a}prozessor fpp}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4685}, number = {TR-90-04}, abstract = {The FORTRAN preprocessor fpp in the newly introduced Autotasking System of CRAY Research allows automatic vectorization and parallelization on the basis of a data dependence analysis. An introduction to data dependence analysis is given, showing how data dependence graphs unveil opportunities for program transformations like vectorization and concurrentization. The report contains a complete description of the preprocessor's functionality, its options and directives for increasing the effectiveness of the dependence analyzer and steering the code transformations. Finally, some advice is given for the practical use of fpp on CRAY computers.}, language = {de} } @misc{HegeStueben, author = {Hege, Hans-Christian and St{\"u}ben, Hinnerk}, title = {Vectorization and Parallelization of Irregular Problems via Graph Coloring.}, doi = {10.1145/109025.109042}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-443}, number = {SC-90-16}, abstract = {Efficient implementations of irregular problems on vector and parallel architectures are generally hard to realize. An important class of problems is Gauß-Seidel iteration schemes applied to irregular data sets. The unstructured data dependences arising there prevent restructuring compilers from generating efficient code for vector or parallel machines. 
It is shown how to structure the data dependences by decomposing the underlying data set using graph coloring techniques and by specifying a particular execution order already at the algorithm level. Methods to handle the irregularities originating from different types of tasks are proposed. An application is given and some open issues and future developments are discussed.}, language = {en} } @misc{BaumannHegeSchwarzetal., author = {Baumann, Wolfgang and Hege, Hans-Christian and Schwarz, Uwe and St{\"u}ben, Hinnerk}, title = {Netzverteilte Visualisierung und Simulation}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5379}, number = {TR-96-08}, abstract = {This report describes the results of an application project that was carried out at ZIB in parallel with the construction of the Berlin high-speed data network (Berlin Regional Testbed). General tools and application-specific working environments for network-distributed visualization and simulation are presented. The general tools support the following tasks: coupling of simulations on (high-performance) computers to local graphics workstations, object-oriented and distributed visualization, remote video recording, image data compression, and digital film editing. The specific working environments were developed for tasks from the fields of numerical mathematics, astrophysics, structural research, chemistry, polymer physics, and fluid dynamics.}, language = {de} } @misc{DeuflhardHegeSedlmayred, author = {Deuflhard, Peter and Hege, Hans-Christian and Sedlmayr(ed.), E.}, title = {Scientific Computing in der Theoretischen Physik. Collected Abstracts.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5068}, number = {TR-94-01}, abstract = {The workshop ``Scientific Computing in der Theoretischen Physik'', organized by the DMV special interest group Scientific Computing in cooperation with the GAMM committee of the same name, took place from March 16-18, 1994 at the Department of Mathematics and Computer Science of the Freie Universit{\"a}t Berlin. The aim of the workshop was to intensify the contacts between experts in the fields of {\em Computational Physics} and {\em Scientific Computing}. The main topics of the workshop were the numerical simulation of transport models in astrophysics and semiconductor physics, multilevel methods for partial differential equations, as well as Monte Carlo methods and molecular dynamics techniques for problems in statistical physics and quantum field theory. The workshop met with an extremely positive response and brought together more than 70 participants from the various subfields of theoretical physics and about 50 participants from the field of numerical mathematics.}, language = {en} } @misc{HegeHoellererStalling, author = {Hege, Hans-Christian and H{\"o}llerer, Tobias and Stalling, Detlev}, title = {Volume Rendering - Mathematical Models and Algorithmic Aspects.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4994}, number = {TR-93-07}, abstract = {In this paper various algorithms for rendering gaseous phenomena are reviewed. In computer graphics such algorithms are used to model natural scenes containing clouds, fog, flames and so on. On the other hand it has become an important technique in scientific visualization to display three dimensional scalar datasets as cloudy objects. Our emphasis is on this latter subject of so-called {\em direct volume rendering}. All algorithms will be discussed within the framework of linear transport theory. 
The equation of transfer is derived. This equation is suitable for describing the radiation field in a participating medium where absorption, emission, and scattering of light can occur. Almost all volume rendering algorithms can be shown to solve special cases of the equation of transfer. Related problems like the mapping from data values to model parameters or possible parallelization strategies will be discussed as well.}, language = {en} } @misc{HegePolthier, author = {Hege, Hans-Christian and Polthier, Konrad}, title = {Visualization and Mathematics. International Workshop}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5179}, number = {TR-95-02}, abstract = {International Workshop with support of the Deutsche Forschungsgemeinschaft (DFG) and Max-Planck-Gesellschaft (MPG) Berlin (Dahlem), Germany, May 30 - June 2, 1995}, language = {en} } @misc{HegeSeebassStallingetal., author = {Hege, Hans-Christian and Seebass, Martin and Stalling, Detlev and Z{\"o}ckler, Malte}, title = {A Generalized Marching Cubes Algorithm Based on Non-Binary Classifications}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-2741}, number = {SC-97-05}, abstract = {We present a new technique for generating surface meshes from a uniform set of discrete samples. Our method extends the well-known marching cubes algorithm used for computing polygonal isosurfaces. While in marching cubes each vertex of a cubic grid cell is binary-classified as lying above or below an isosurface, in our approach an arbitrary number of vertex classes can be specified. Consequently the resulting surfaces consist of patches separating volumes of two different classes each. Similar to the marching cubes algorithm, all grid cells are traversed and classified according to the number of different vertex classes involved and their arrangement. The solution for each configuration is computed based on a model that assigns probabilities to the vertices and interpolates them. We introduce an automatic method to find a triangulation which approximates the boundary surfaces - implicitly given by our model - in a topologically correct way. Look-up tables guarantee a high performance of the algorithm. In medical applications our method can be used to extract surfaces from a 3D segmentation of tomographic images into multiple tissue types. The resulting surfaces are well suited for subsequent volumetric mesh generation, which is needed for simulation as well as visualization tasks. The proposed algorithm provides a robust and unique solution, avoiding ambiguities occurring in other methods. The method is of great significance in modeling and animation too, where it can be used for polygonalization of non-manifold implicit surfaces.}, language = {en} } @misc{StallingHege, author = {Stalling, Detlev and Hege, Hans-Christian}, title = {Fast and Resolution Independent Line Integral Convolution}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-1653}, number = {SC-94-37}, abstract = {Line Integral Convolution (LIC) is a powerful technique for generating striking images and animations from vector data. Introduced in 1993, the method has rapidly found many application areas, ranging from computer arts to scientific visualization. Based upon locally filtering an input texture along a curved stream line segment in a vector field, it is able to depict directional information at high spatial resolutions. 
We present a new method for computing LIC images, which minimizes the total number of stream lines to be computed and thereby reduces computational costs by an order of magnitude compared to the original algorithm. Our method utilizes fast, error-controlled numerical integrators. Decoupling the characteristic lengths of vector field grid, input texture and output image, it allows filtered images to be computed at arbitrary resolution. This feature is of great significance in computer animation as well as in scientific visualization, where it can be used to explore vector data by smoothly enlarging structural details. We also present methods for improved texture animation, employing constant filter kernels only. To obtain an optimal motion effect, spatial decay of correlation between intensities of distant pixels in the output image has to be controlled. This is achieved by blending different phase-shifted box filter animations and by adaptively rescaling the contrast of the output frames.}, language = {en} } @misc{GrammelHegeWunderling, author = {Grammel, Martin and Hege, Hans-Christian and Wunderling, Roland}, title = {On the Impact of Communication Latencies on Distributed Sparse LU Factorization.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-1245}, number = {SC-93-28}, abstract = {Sparse LU factorization offers some potential for parallelism, but at a level of very fine granularity. However, most current distributed memory MIMD architectures have too high communication latencies for exploiting all parallelism available. To cope with this, latencies must be avoided by coarsening the granularity and by message fusion. However, both techniques limit the concurrency, thereby reducing the scalability. In this paper, an implementation of a parallel LU decomposition algorithm for linear programming bases is presented for distributed memory parallel computers with noticeable communication latencies. Several design decisions due to latencies, including data distribution and load balancing techniques, are discussed. An approximate performance model is set up for the algorithm, which allows quantifying the impact of latencies on its performance. Finally, experimental results for an Intel iPSC/860 parallel computer are reported and discussed.}, language = {en} } @misc{BeckDeuflhardHegeetal., author = {Beck, Rudolf and Deuflhard, Peter and Hege, Hans-Christian and Seebass, Martin and Stalling, Detlev}, title = {Numerical Algorithms and Visualization in Medical Treatment Planning}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-2643}, number = {SC-96-54}, abstract = {After a short summary on therapy planning and the underlying technologies we discuss quantitative medicine by giving a short overview of medical image data, summarizing some applications of computer-based treatment planning, and outlining requirements on medical planning systems. Then we continue with a description of our medical planning system {\sf HyperPlan}. 
It supports typical working steps in therapy planning, like data acquisition, segmentation, grid generation, numerical simulation and optimization, accompanying these with powerful visualization and interaction techniques.}, language = {en} } @misc{StallingZoecklerHege, author = {Stalling, Detlev and Z{\"o}ckler, Malte and Hege, Hans-Christian}, title = {Fast Display of Illuminated Field Lines}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-2686}, number = {SC-96-58}, abstract = {A new technique for interactive vector field visualization using large numbers of properly illuminated field lines is presented. Taking into account ambient, diffuse, and specular reflection terms as well as transparency and depth cueing, we employ a realistic shading model which significantly increases quality and realism of the resulting images. While many graphics workstations offer hardware support for illuminating surface primitives, usually no means for an accurate shading of line primitives are provided. However, we show that proper illumination of lines can be implemented by exploiting the texture mapping capabilities of modern graphics hardware. In this way high rendering performance with interactive frame rates can be achieved. We apply the technique to render large numbers of integral curves of a vector field. The impression of the resulting images can be further improved by a number of visual enhancements, like transparency and depth-cueing. We also describe methods for controlling the distribution of field lines in space. These methods enable us to use illuminated field lines for interactive exploration of vector fields.}, language = {en} } @misc{BattkeStallingHege, author = {Battke, Henrik and Stalling, Detlev and Hege, Hans-Christian}, title = {Fast Line Integral Convolution for Arbitrary Surfaces in 3D}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-2690}, number = {SC-96-59}, abstract = {We describe an extension of the line integral convolution method (LIC) for imaging of vector fields on arbitrary surfaces in 3D space. Previous approaches were limited to curvilinear surfaces, i.e.~surfaces which can be parametrized globally using 2D-coordinates. By contrast, our method also handles the case of general, possibly multiply connected surfaces. The method works by tessellating a given surface with triangles. For each triangle local Euclidean coordinates are defined and a local LIC texture is computed. No scaling or distortion is involved when mapping the texture onto the surface. The characteristic length of the texture remains constant. In order to exploit the texture hardware of modern graphics computers we have developed a tiling strategy for arranging a large number of triangular texture pieces within a single rectangular texture image. In this way texture memory is utilized optimally and even large textured surfaces can be explored interactively.}, language = {en} } @misc{DeuflhardSeebassStallingetal., author = {Deuflhard, Peter and Seebass, Martin and Stalling, Detlev and Beck, Rudolf and Hege, Hans-Christian}, title = {Hyperthermia Treatment Planning in Clinical Cancer Therapy: Modelling, Simulation and Visualization}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-2958}, number = {SC-97-26}, abstract = {\noindent The speaker and his co-workers in Scientific Computing and Visualization have established a close cooperation with medical doctors at the Rudolf--Virchow--Klinikum of the Humboldt University in Berlin on the topic of regional hyperthermia. 
In order to permit patient--specific treatment planning, a special software system ({\sf\small HyperPlan}) has been developed. \noindent A mathematical model of the clinical system ({\it radio frequency applicator with 8 antennas, water bolus, individual patient body}) involves Maxwell's equations in inhomogeneous media and a so--called bio--heat transfer PDE describing the temperature distribution in the human body. The electromagnetic field and the thermal phenomena need to be computed at a speed suitable for the clinical environment. An individual geometric patient model is generated as a quite complicated tetrahedral ``coarse'' grid (several thousands of nodes). Both Maxwell's equations and the bio--heat transfer equation are solved on that 3D--grid by means of {\em adaptive} multilevel finite element methods, which automatically refine the grid where necessary in view of the required accuracy. Finally, optimal antenna parameters for the applicator are determined. \noindent All steps of the planning process are supported by powerful visualization methods. Medical images, contours, grids, simulated electromagnetic fields and temperature distributions can be displayed in combination. A number of new algorithms and techniques had to be developed and implemented. Special emphasis has been put on advanced 3D interaction methods and user interface issues.}, language = {en} } @misc{HegeStalling, author = {Hege, Hans-Christian and Stalling, Detlev}, title = {Fast LIC with Higher Order Filter Kernels}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-3439}, number = {SC-97-74}, abstract = {Line integral convolution (LIC) has become a well-known and popular method for visualizing vector fields. The method works by convolving a random input texture along the integral curves of the vector field. In order to accelerate image synthesis significantly, an efficient algorithm has been proposed that utilizes pixel coherence in field line direction. This algorithm, called ``fast LIC'', was originally restricted to simple box-type filter kernels. Here we describe a generalization of fast LIC for piecewise polynomial filter kernels. Expanding the filter kernels in terms of truncated power functions allows us to exploit a certain convolution theorem. The convolution integral is expressed as a linear combination of repeated integrals (or repeated sums in the discrete case). Compared to the original algorithm the additional expense for using higher order filter kernels, e.g.\ of B-spline type, is very low. Such filter kernels produce smoother, less noisy results than a box filter. This is evident from visual investigation, as well as from analysis of pixel correlations. Thus, our method represents a useful extension of the fast LIC algorithm for the creation of high-quality LIC images.}, language = {en} } @misc{ZachowZilskeHege, author = {Zachow, Stefan and Zilske, Michael and Hege, Hans-Christian}, title = {3D reconstruction of individual anatomy from medical image data: Segmentation and geometry processing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10440}, number = {07-41}, abstract = {For medical diagnosis, visualization, and model-based therapy planning three-dimensional geometric reconstructions of individual anatomical structures are often indispensable. Computer-assisted, model-based planning procedures typically cover specific modifications of "virtual anatomy" as well as numeric simulations of associated phenomena, such as 
mechanical loads, fluid dynamics, or diffusion processes, in order to evaluate a potential therapeutic outcome. Since internal anatomical structures cannot be measured optically or mechanically in vivo, three-dimensional reconstruction of tomographic image data remains the method of choice. In this work the process chain of individual anatomy reconstruction is described which consists of segmentation of medical image data, geometrical reconstruction of all relevant tissue interfaces, up to the generation of geometric approximations (boundary surfaces and volumetric meshes) of three-dimensional anatomy being suited for finite element analysis. All results presented herein are generated with amira ® - a highly interactive software system for 3D data analysis, visualization and geometry reconstruction.}, language = {en} } @misc{LindowBaumHege, author = {Lindow, Norbert and Baum, Daniel and Hege, Hans-Christian}, title = {Ligand Excluded Surface: A New Type of Molecular Surface}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-51194}, abstract = {The most popular molecular surface in molecular visualization is the solvent excluded surface (SES). It provides information about the accessibility of a biomolecule for a solvent molecule that is geometrically approximated by a sphere. During a period of almost four decades, the SES has served for many purposes - including visualization, analysis of molecular interactions and the study of cavities in molecular structures. However, if one is interested in the surface that is accessible to a molecule whose shape differs significantly from a sphere, a different concept is necessary. To address this problem, we generalize the definition of the SES by replacing the probe sphere with the full geometry of the ligand defined by the arrangement of its van der Waals spheres. We call the new surface ligand excluded surface (LES) and present an efficient, grid-based algorithm for its computation. Furthermore, we show that this algorithm can also be used to compute molecular cavities that could host the ligand molecule. We provide a detailed description of its implementation on CPU and GPU. Furthermore, we present a performance and convergence analysis and compare the LES for several molecules, using as ligands either water or small organic molecules.}, language = {en} } @misc{KozlikovaKroneFalketal., author = {Kozlikova, Barbora and Krone, Michael and Falk, Martin and Lindow, Norbert and Baaden, Marc and Baum, Daniel and Viola, Ivan and Parulek, Julius and Hege, Hans-Christian}, title = {Visualization of Biomolecular Structures: State of the Art}, issn = {1438-0064}, doi = {10.2312/eurovisstar.20151112}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-57217}, abstract = {Structural properties of molecules are of primary concern in many fields. This report provides a comprehensive overview on techniques that have been developed in the fields of molecular graphics and visualization with a focus on applications in structural biology. The field heavily relies on computerized geometric and visual representations of three-dimensional, complex, large, and time-varying molecular structures. The report presents a taxonomy that demonstrates which areas of molecular visualization have already been extensively investigated and where the field is currently heading. 
It discusses visualizations for molecular structures and strategies for efficient display regarding image quality and frame rate, covers different aspects of level of detail, and reviews visualizations illustrating the dynamic aspects of molecular simulation data. The survey concludes with an outlook on promising and important research topics to foster further success in the development of tools that help to reveal molecular secrets.}, language = {en} } @misc{KroneKozlikovaLindowetal., author = {Krone, Michael and Kozlikova, Barbora and Lindow, Norbert and Baaden, Marc and Baum, Daniel and Parulek, Julius and Hege, Hans-Christian and Viola, Ivan}, title = {Visual Analysis of Biomolecular Cavities: State of the Art}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60193}, abstract = {In this report we review and structure the branch of molecular visualization that is concerned with the visual analysis of cavities in macromolecular protein structures. First the necessary background, the domain terminology, and the goals of analytical reasoning are introduced. Based on a comprehensive collection of relevant research works, we present a novel classification for cavity detection approaches and structure them into four distinct classes: grid-based, Voronoi-based, surface-based, and probe-based methods. The subclasses are then formed by their combinations. We match these approaches with corresponding visualization technologies, starting with direct 3D visualization, followed by non-spatial visualization techniques that, for example, abstract the interactions between structures into a relational graph, straighten the cavity of interest to see its profile in one view, or aggregate the time sequence into a single contour plot. We also discuss the current state of methods for the visual analysis of cavities in dynamic data such as molecular dynamics simulations. Finally, we give an overview of the most common tools that are actively developed and used in structural biology and biochemistry research. Our report is concluded by an outlook on future challenges in the field.}, language = {en} } @misc{HombergBaumWiebeletal., author = {Homberg, Ulrike and Baum, Daniel and Wiebel, Alexander and Prohaska, Steffen and Hege, Hans-Christian}, title = {Definition, Extraction, and Validation of Pore Structures in Porous Materials}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-42510}, abstract = {An intuitive and sparse representation of the void space of porous materials supports the efficient analysis and visualization of interesting qualitative and quantitative parameters of such materials. We introduce definitions of the elements of this void space, here called pore space, based on its distance function, and present methods to extract these elements using the extremal structures of the distance function. The presented methods are implemented by an image processing pipeline that determines pore centers, pore paths and pore constrictions. These pore space elements build a graph that represents the topology of the pore space in a compact way. The representations we derive from μCT image data of realistic soil specimens enable the computation of many statistical parameters and, thus, provide a basis for further visual analysis and application-specific developments. We introduced parts of our pipeline in previous work. 
In this chapter, we present additional details and compare our results with the analytic computation of the pore space elements for a sphere packing in order to show the correctness of our graph computation.}, language = {en} } @misc{KramerNoackBaumetal.2017, author = {Kramer, Tobias and Noack, Matthias and Baum, Daniel and Hege, Hans-Christian and Heller, Eric J.}, title = {Dust and gas emission from cometary nuclei: the case of comet 67P/Churyumov-Gerasimenko}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66338}, year = {2017}, abstract = {With decreasing solar distance, comets display an increased emission of gas and dust particles, leading to the formation of the coma and tail. Spacecraft missions provide insight into the temporal and spatial variations of the dust and gas sources located on the cometary nucleus. For the case of comet 67P/Churyumov-Gerasimenko (67P/C-G), the long-term observations from the Rosetta mission point to a homogeneous dust emission across the entire illuminated surface. Despite the homogeneous initial distribution, a collimation in jet-like structures becomes visible. We propose that this observation is linked directly to the complex shape of the nucleus and projects concave topographical features into the dust coma. To test this hypothesis, we put forward a gas-dust description of 67P/C-G, where gravitational and gas forces are accurately determined from the surface mesh and the rotation of the nucleus is fully incorporated. The emerging jet-like structures persist for a wide range of gas-dust interactions and show a dust-velocity-dependent bending.}, language = {en} } @misc{SagnolHegeWeiser, author = {Sagnol, Guillaume and Hege, Hans-Christian and Weiser, Martin}, title = {Using sparse kernels to design computer experiments with tunable precision}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59605}, abstract = {Statistical methods to design computer experiments usually rely on a Gaussian process (GP) surrogate model, and typically aim at selecting design points (combinations of algorithmic and model parameters) that minimize the average prediction variance, or maximize the prediction accuracy for the hyperparameters of the GP surrogate. In many applications, experiments have a tunable precision, in the sense that one software parameter controls the tradeoff between accuracy and computing time (e.g., mesh size in FEM simulations or number of Monte-Carlo samples). We formulate the problem of allocating a budget of computing time over a finite set of candidate points for the goals mentioned above. This is a continuous optimization problem, which is moreover convex whenever the tradeoff function accuracy vs. computing time is concave. On the other hand, using non-concave weight functions can help to identify sparse designs. In addition, using sparse kernel approximations drastically reduces the cost per iteration of the multiplicative weights updates that can be used to solve this problem.}, language = {en} } @misc{KastenReininghausHotzetal., author = {Kasten, Jens and Reininghaus, Jan and Hotz, Ingrid and Hege, Hans-Christian and Noack, Bernd and Daviller, Guillaume and Morzyński, Marek}, title = {Acceleration feature points of unsteady shear flows}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-58397}, abstract = {A framework is proposed for extracting features in 2D transient flows, based on the acceleration field to ensure Galilean invariance. 
The minima of the acceleration magnitude, i.e. a superset of the acceleration zeros, are extracted and discriminated into vortices and saddle points --- based on the spectral properties of the velocity Jacobian. The extraction of topological features is performed with purely combinatorial algorithms from discrete computational topology. The feature points are prioritized with persistence, as a physically meaningful importance measure. These features are tracked in time with a robust tracking algorithm. Thus a space-time hierarchy of the minima is built and vortex merging events are detected. The acceleration feature extraction strategy is applied to three two-dimensional shear flows: (1) an incompressible periodic cylinder wake, (2) an incompressible planar mixing layer and (3) a weakly compressible planar jet. The vortex-like acceleration feature points are shown to be well aligned with acceleration zeros, maxima of the vorticity magnitude, minima of the pressure field and minima of λ2.}, language = {en} } @misc{NavaYazdaniHegevonTycowicz, author = {Nava-Yazdani, Esfandiar and Hege, Hans-Christian and von Tycowicz, Christoph}, title = {A Geodesic Mixed Effects Model in Kendall's Shape Space}, series = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, journal = {Proc. 7th MICCAI workshop on Mathematical Foundations of Computational Anatomy (MFCA)}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74621}, abstract = {In many applications, geodesic hierarchical models are adequate for the study of temporal observations. We apply such a model, derived for manifold-valued data, to Kendall's shape space. In particular, instead of the Sasaki metric, we adapt a functional-based metric, which increases the computational efficiency and does not require the implementation of the curvature tensor. We propose the corresponding variational time discretization of geodesics and apply the approach to the estimation of group trends and statistical testing of 3D shapes derived from an open access longitudinal imaging study on osteoarthritis.}, language = {en} } @misc{SakuraiHegeKuhnetal., author = {Sakurai, Daisuke and Hege, Hans-Christian and Kuhn, Alexander and Rust, Henning and Kern, Bastian and Breitkopf, Tom-Lukas}, title = {An Application-Oriented Framework for Feature Tracking in Atmospheric Sciences}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72617}, abstract = {In atmospheric sciences, sizes of data sets grow continuously due to increasing resolutions. A central task is the comparison of spatiotemporal fields, to assess different simulations and to compare simulations with observations. A significant information reduction is possible by focusing on geometric-topological features of the fields or on derived meteorological objects. Due to the huge size of the data sets, spatial features have to be extracted in time slices and traced over time. Fields with a chaotic component, i.e. without 1:1 spatiotemporal correspondences, can be compared by looking at statistics of feature properties. Feature extraction, however, requires a clear mathematical definition of the features - which many meteorological objects still lack. Traditionally, object extractions are often heuristic, defined only by implemented algorithms, and thus are not comparable. This work surveys our framework designed for efficient development of feature tracking methods and for testing new feature definitions. 
The framework supports well-established visualization practices and is being used by atmospheric researchers to diagnose and compare data.}, language = {en} } @misc{LindowBaumHege, author = {Lindow, Norbert and Baum, Daniel and Hege, Hans-Christian}, title = {Atomic Accessibility Radii for Molecular Dynamics Analysis}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-68468}, abstract = {In molecular structure analysis and visualization, the molecule's atoms are often modeled as hard spheres parametrized by their positions and radii. While the atom positions result from experiments or molecular simulations, the radii values are typically taken from the literature. Most often, van der Waals (vdW) radii are used, for which diverse values exist. As a consequence, different visualization and analysis tools use different atomic radii, and the analyses are less objective than often believed. Furthermore, for the geometric accessibility analysis of molecular structures, vdW radii are not well suited. The reason is that during the molecular dynamics simulation, depending on the force field and the kinetic energy in the system, non-bonded atoms can come so close to each other that their vdW spheres intersect. In this paper, we introduce a new kind of atomic radius, called `atomic accessibility radius', which better characterizes the accessibility of an atom in a given molecular trajectory. The new radii reflect the movement possibilities of atoms in the simulated physical system. They are computed by solving a linear program that maximizes the radii of the atoms under the constraint that non-bonded spheres do not intersect in the considered molecular trajectory. Using this data-driven approach, the actual accessibility of atoms can be visualized more precisely.}, language = {en} } @misc{LindowBaumLeborgneetal., author = {Lindow, Norbert and Baum, Daniel and Leborgne, Morgan and Hege, Hans-Christian}, title = {Interactive Visualization of RNA and DNA Structures}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69704}, abstract = {The analysis and visualization of nucleic acids (RNA and DNA) play an increasingly important role due to the growing number of known 3-dimensional structures of such molecules. The great complexity of these structures, in particular those of RNA, demands interactive visualization to get deeper insights into the relationship between the 2D secondary structure motifs and their 3D tertiary structures. Over the last decades, a lot of research in molecular visualization has focused on the visual exploration of protein structures while nucleic acids have only been marginally addressed. In contrast to proteins, which are composed of amino acids, the building blocks of nucleic acids are nucleotides. They form structuring patterns that differ from those of proteins and, hence, also require different visualization and exploration techniques. In order to support interactive exploration of nucleic acids, the computation of secondary structure motifs as well as their visualization in 2D and 3D must be fast. Therefore, in this paper, we focus on the performance of both the computation and visualization of nucleic acid structure. For the first time, we present a ray casting-based visualization of RNA and DNA secondary and tertiary structures, which enables real-time visualization of even large molecular dynamics trajectories. 
Furthermore, we provide a detailed description of all important aspects of visualizing nucleic acid secondary and tertiary structures. With this, we close an important gap in molecular visualization.}, language = {en} } @misc{AgudoJacomeHegePaetschetal., author = {Agudo J{\'a}come, Leonardo and Hege, Hans-Christian and Paetsch, Olaf and P{\"o}thkow, Kai}, title = {Three-Dimensional Reconstruction and Quantification of Dislocation Substructures from Transmission Electron Microscopy Stereo-Pairs}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-70339}, abstract = {Many material properties are strongly influenced by dislocations, the carriers of plastic deformation. It is therefore paramount to have appropriate tools to quantify dislocation substructures with regard to their features, e.g., dislocation density, Burgers vectors or line direction. While the transmission electron microscope (TEM) has been the most widely used instrument for investigating dislocations, it is usually limited to the two-dimensional (2D) observation of three-dimensional (3D) structures. We reconstruct, visualize and quantify 3D dislocation substructure models from only two TEM images (stereo-pairs) and assess the results. The reconstruction is based on the manual interactive tracing of filiform objects on both images of the stereo-pair. The reconstruction and quantification method is demonstrated on dark field (DF) scanning (S)TEM micrographs of dislocation substructures imaged under diffraction contrast conditions. For this purpose, thick regions (> 300 nm) of TEM foils are analyzed, which are extracted from a Ni-base superalloy single crystal after high temperature creep deformation. It is shown how the method allows 3D quantification from stereo-pairs in a wide range of tilt conditions, achieving line length and orientation uncertainties of 3 \% and 7°, respectively. Parameters that affect the quality of such reconstructions are discussed.}, language = {en} } @misc{EhlkeRammLameckeretal., author = {Ehlke, Moritz and Ramm, Heiko and Lamecker, Hans and Hege, Hans-Christian and Zachow, Stefan}, title = {Fast Generation of Virtual X-ray Images from Deformable Tetrahedral Meshes}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41896}, abstract = {We propose a novel GPU-based approach to render virtual X-ray projections of deformable tetrahedral meshes. These meshes represent the shape and the internal density distribution of a particular anatomical structure and are derived from statistical shape and intensity models (SSIMs). We apply our method to improve the geometric reconstruction of 3D anatomy (e.g.\ pelvic bone) from 2D X-ray images. For that purpose, shape and density of a tetrahedral mesh are varied and virtual X-ray projections are generated within an optimization process until the similarity between the computed virtual X-ray and the respective anatomy depicted in a given clinical X-ray is maximized. The OpenGL implementation presented in this work deforms and projects tetrahedral meshes of high resolution (200,000+ tetrahedra) at interactive rates. It generates virtual X-rays that accurately depict the density distribution of an anatomy of interest. Compared to existing methods that accumulate X-ray attenuation in deformable meshes, our novel approach significantly boosts the deformation/projection performance. 
The proposed projection algorithm scales better with respect to mesh resolution and complexity of the density distribution, and the combined deformation and projection on the GPU scales better with respect to the number of deformation parameters. The gain in performance allows for a larger number of cycles in the optimization process. Consequently, it reduces the risk of being stuck in a local optimum. We believe that our approach can contribute to orthopedic surgery, where 3D anatomy information needs to be extracted from 2D X-rays to support surgeons in better planning joint replacements.}, language = {en} } @misc{NavaYazdaniHegevonTycowiczetal., author = {Nava-Yazdani, Esfandiar and Hege, Hans-Christian and von Tycowicz, Christoph and Sullivan, T. J.}, title = {A Shape Trajectories Approach to Longitudinal Statistical Analysis}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69759}, abstract = {For Kendall's shape space, we analytically determine Jacobi fields and parallel transport, and compute geodesic regression. Using the derived expressions, we can fully leverage the geometry via Riemannian optimization and reduce the computational expense by several orders of magnitude. The methodology is demonstrated by performing a longitudinal statistical analysis of epidemiological shape data. As an application example, we have chosen 3D shapes of knee bones, reconstructed from image data of the Osteoarthritis Initiative. Comparing subject groups with incident and developing osteoarthritis versus normal controls, we find clear differences in the temporal development of femur shapes. This paves the way for early prediction of incident knee osteoarthritis, using geometry data only.}, language = {en} } @misc{GuentherKuhnHegeetal., author = {G{\"u}nther, Tobias and Kuhn, Alexander and Hege, Hans-Christian and Theisel, Holger}, title = {MCFTLE: Monte Carlo Rendering of Finite-Time Lyapunov Exponent Fields}, issn = {1438-0064}, doi = {10.1111/cgf.12914}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59054}, abstract = {Traditionally, Lagrangian fields such as finite-time Lyapunov exponents (FTLE) are precomputed on a discrete grid and are ray cast afterwards. This, however, introduces both grid discretization errors and sampling errors during ray marching. In this work, we apply a progressive, view-dependent Monte Carlo-based approach for the visualization of such Lagrangian fields in time-dependent flows. Our approach avoids grid discretization and ray marching errors completely, is consistent, and has a low memory consumption. The system provides noisy previews that converge over time to an accurate high-quality visualization. Compared to traditional approaches, the proposed system avoids explicitly predefined fieldline seeding structures, and uses a Monte Carlo sampling strategy named Woodcock tracking to distribute samples along the view ray. An acceleration of this sampling strategy requires local upper bounds for the FTLE values, which we progressively acquire during the rendering. Our approach is tailored for high-quality visualizations of complex FTLE fields and is guaranteed to faithfully represent detailed ridge surface structures as indicators for Lagrangian coherent structures (LCS). 
We demonstrate the effectiveness of our approach by using a set of analytic test cases and real-world numerical simulations.}, language = {en} } @misc{KuhnEngelkeFlatkenetal., author = {Kuhn, Alexander and Engelke, Wito and Flatken, Markus and Hege, Hans-Christian and Hotz, Ingrid}, title = {Topology-based Analysis for Multimodal Atmospheric Data of Volcano Eruptions}, issn = {1438-0064}, doi = {10.1007/978-3-319-44684-4_2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-57043}, abstract = {Many scientific applications deal with data from a multitude of different sources, e.g., measurements, imaging and simulations. Each source provides an additional perspective on the phenomenon of interest, but also comes with specific limitations, e.g. regarding accuracy, spatial and temporal availability. Effectively combining and analyzing such multimodal and partially incomplete data of limited accuracy in an integrated way is challenging. In this work, we outline an approach for an integrated analysis and visualization of the atmospheric impact of volcano eruptions. The data sets comprise observation and imaging data from satellites as well as results from numerical particle simulations. To analyze the clouds from the volcano eruption in the spatiotemporal domain we apply topological methods. Extremal structures reveal structures in the data that support clustering and comparison. We further discuss the robustness of those methods with respect to different properties of the data and different parameter setups. Finally we outline open challenges for the effective integrated visualization using topological methods.}, language = {en} } @misc{WiebelVosHege, author = {Wiebel, Alexander and Vos, Frans M. and Hege, Hans-Christian}, title = {Perception-Oriented Picking of Structures in Direct Volumetric Renderings}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14343}, number = {11-45}, abstract = {Radiologists from all application areas are trained to read slice-based visualizations of 3D medical image data. Despite the numerous examples of sophisticated three-dimensional renderings, especially all variants of direct volume rendering, such methods are often considered not very useful by radiologists who prefer slice-based visualization. Just recently there have been attempts to bridge this gap between 2D and 3D renderings. These attempts include specialized techniques for volume picking that result in repositioning slices. In this paper, we present a new volume picking technique that, in contrast to previous work, does not require pre-segmented data or metadata. The positions picked by our method are solely based on the data itself, the transfer function and, most importantly, on the way the volumetric rendering is perceived by viewers. To demonstrate the usefulness of the proposed method we apply it for automatically repositioning slices in an abdominal MRI scan, a data set from a flow simulation and a number of other volumetric scalar fields. 
Furthermore, we discuss how the method can be implemented in combination with various volumetric rendering techniques.}, language = {en} } @misc{BaumLindowHegeetal., author = {Baum, Daniel and Lindow, Norbert and Hege, Hans-Christian and Lepper, Verena and Siopi, Tzulia and Kutz, Frank and Mahlow, Kristin and Mahnke, Heinz-Eberhard}, title = {Revealing hidden text in rolled and folded papyri}, issn = {1438-0064}, doi = {10.1007/s00339-017-0808-6}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61826}, abstract = {Ancient Egyptian papyri are often folded, rolled up or kept as small packages, sometimes even sealed. Physically unrolling or unfolding these packages might severely damage them. We demonstrate a way to get access to the hidden script without physical unfolding by employing computed tomography and mathematical algorithms for virtual unrolling and unfolding. Our algorithmic approaches are combined with manual interaction. This provides the necessary flexibility to enable the unfolding of even complicated and partly damaged papyrus packages. In addition, it allows us to cope with challenges posed by the structure of ancient papyrus, which is rather irregular, compared to other writing substrates like metallic foils or parchment. Unfolding of packages is done in two stages. In the first stage, we virtually invert the physical folding process step by step until the partially unfolded package is topologically equivalent to a scroll or a papyrus sheet folded only along one fold line. To minimize distortions at this stage, we apply the method of moving least squares. In the second stage, the papyrus is simply flattened, which requires the definition of a medial surface. We have applied our software framework to several papyri. In this work, we present the results of applying our approaches to mockup papyri that were either rolled or folded along perpendicular fold lines. In the case of the folded papyrus, our approach represents the first attempt to address the unfolding of such complicated folds.}, language = {en} } @misc{PapazovHege, author = {Papazov, Chavdar and Hege, Hans-Christian}, title = {Blue-noise Optimized Point Sets Based on Procrustes Analysis}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65356}, abstract = {In this paper, we propose a new method for optimizing the blue noise characteristics of point sets. It is based on Procrustes analysis, a technique for adjusting shapes to each other by applying optimal elements of an appropriate transformation group. We adapt this technique to the problem at hand and introduce a very simple, efficient and provably convergent point set optimizer.}, language = {en} } @misc{KlindtBaumProhaskaetal., author = {Klindt, Marco and Baum, Daniel and Prohaska, Steffen and Hege, Hans-Christian}, title = {iCon.text - a customizable iPad app for kiosk applications in museum exhibitions}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-17731}, abstract = {We present iCon.text, a kiosk platform for the iPad centered around artefacts, whose content and layout can be tailored without programming skills for specific museum exhibitions. The central metaphor to access information is a virtual postcard with one front and a customizable number of back sides that provide details about exhibits to museum visitors in textual and image form. Back sides can link to other cards. Access to these postcards is possible through one or more navigation views that can be reached from a navigation bar. 
The entry point to the application is designed as a multitouch interactive pile of cards, giving visitors of any age a playful and easy approach to the presentation and interaction metaphor. To directly access a certain postcard, a mosaic view can be utilized to provide an overview of all available exhibits. A category view groups postcards into themes. Locating artefacts on a zoomable map or exhibition floor plan allows for conveying information about the spatial relationships between different objects and their locations. Furthermore, contexts can be illustrated with a two-stage view comprising an overview and corresponding detail views to provide further insights into the spatial, temporal, and thematic contexts of artefacts. The application scaffolding allows the design of bilingual presentations to support exhibitions with an international audience. The logo of the presenting institution or exhibition can be incorporated to display the kiosk's corporate design branding and to access an imprint or further information. Usage is logged to files, providing a basis for extracting usage statistics. The details about the exhibits are presented as images and as such impose no limit on the design choices made by the content provider or exhibition designer. The application (enhanced with a panoramic view) has been integrated successfully into a large special exhibition about the ancient city of Pergamon 2011/2012 at the Pergamon Museum Berlin within the interdisciplinary project "Berlin Sculpture Network".}, language = {en} } @misc{EggerDercksenUdvaryetal., author = {Egger, Robert and Dercksen, Vincent J. and Udvary, Daniel and Hege, Hans-Christian and Oberlaender, Marcel}, title = {Generation of dense statistical connectomes from sparse morphological data}, issn = {1438-0064}, doi = {10.3389/fnana.2014.00129}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53075}, abstract = {Sensory-evoked signal flow, at cellular and network levels, is primarily determined by the synaptic wiring of the underlying neuronal circuitry. Measurements of synaptic innervation, connection probabilities and sub-cellular organization of synaptic inputs are thus among the most active fields of research in contemporary neuroscience. Methods to measure these quantities range from electrophysiological recordings, via reconstructions of dendrite-axon overlap at light-microscopic levels, to dense circuit reconstructions of small volumes at electron-microscopic resolution. However, quantitative and complete measurements at subcellular resolution and mesoscopic scales to obtain all local and long-range synaptic in/outputs for any neuron within an entire brain region are beyond present methodological limits. Here, we present a novel concept, implemented within an interactive software environment called NeuroNet, which allows (i) integration of sparsely sampled (sub)cellular morphological data into an accurate anatomical reference frame of the brain region(s) of interest, (ii) up-scaling to generate an average dense model of the neuronal circuitry within the respective brain region(s) and (iii) statistical measurements of synaptic innervation between all neurons within the model. We demonstrate our approach by generating a dense average model of the entire rat vibrissal cortex, providing the required anatomical data, and illustrate how to measure synaptic innervation statistically.
Comparing our results with data from paired recordings in vitro and in vivo, as well as with reconstructions of synaptic contact sites at light- and electron-microscopic levels, we find that our in silico measurements are in line with previous results.}, language = {en} } @misc{DercksenHegeOberlaender2013, author = {Dercksen, Vincent J. and Hege, Hans-Christian and Oberlaender, Marcel}, title = {The Filament Editor: An Interactive Software Environment for Visualization, Proof-Editing and Analysis of 3D Neuron Morphology}, issn = {1438-0064}, doi = {10.1007/s12021-013-9213-2}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-43157}, year = {2013}, abstract = {Neuroanatomical analysis, such as classification of cell types, depends on reliable reconstruction of large numbers of complete 3D dendrite and axon morphologies. At present, the majority of neuron reconstructions are obtained from preparations in a single tissue slice in vitro, thus suffering from cut-off dendrites and, more dramatically, cut-off axons. In general, axons can innervate volumes of several cubic millimeters and may reach path lengths of tens of centimeters. Thus, their complete reconstruction requires in vivo labeling, histological sectioning and imaging of large fields of view. Unfortunately, anisotropic background conditions across such large tissue volumes, as well as faintly labeled thin neurites, result in incomplete or erroneous automated tracings and even lead experts to make annotation errors during manual reconstructions. Consequently, tracing reliability remains the major bottleneck for reconstructing complete 3D neuron morphologies. Here, we present a novel set of tools, integrated into a software environment named 'Filament Editor', for creating reliable neuron tracings from sparsely labeled in vivo datasets. The Filament Editor allows for simultaneous visualization of complex neuronal tracings and image data in a 3D viewer, proof-editing of neuronal tracings, alignment and interconnection across sections, and morphometric analysis in relation to 3D anatomical reference structures. We illustrate the functionality of the Filament Editor using the example of in vivo labeled axons and demonstrate that for the exemplary dataset the final tracing results after proof-editing are independent of the expertise of the human operator.}, language = {en} } @misc{StoppelHegeWiebel, author = {Stoppel, Sergej and Hege, Hans-Christian and Wiebel, Alexander}, title = {Visibility-Driven Depth Determination of Surface Patches in Direct Volume Rendering}, issn = {1438-0064}, doi = {10.2312/eurovisshort.20141164}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50266}, abstract = {This paper presents an algorithm called surfseek for selecting surfaces on the most visible features in direct volume rendering (DVR). The algorithm is based on a previously published technique (WYSIWYP) for picking 3D locations in DVR. The new algorithm projects a surface patch, consisting of multiple rays, onto the DVR image. For each ray the algorithm uses WYSIWYP or a variant of it to find the candidates for the most visible locations along the ray. Using these candidates the algorithm constructs a graph and computes a minimum cut on this graph. The minimum cut represents a highly visible but relatively smooth surface. In the last step, the selected surface is displayed.
We provide examples of the results on a real-world dataset as well as on artificially generated datasets.}, language = {en} } @misc{WiebelPreisVosetal., author = {Wiebel, Alexander and Preis, Philipp and Vos, Frans M. and Hege, Hans-Christian}, title = {Computation and Application of 3D Strokes on Visible Structures in Direct Volume Rendering}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-18070}, abstract = {In this paper we describe VisiTrace, a novel technique to draw 3D lines in 3D volume rendered images. It allows the user to draw strokes in the 2D space of the screen to produce 3D lines that run on top of or in the center of structures actually visible in the volume rendering. It can handle structures that only briefly occlude the structure visible at the starting point of the stroke and is able to ignore such occluders. For this purpose, a shortest-path algorithm that finds the optimal curve in a specially designed graph data structure is employed. We demonstrate the usefulness of the technique by applying it to MRI data from medicine and engineering, and show how the method can be used to mark or analyze structures in the example data sets, and to automatically obtain good views toward the selected structures.}, language = {en} } @misc{StallingZoecklerSanderetal., author = {Stalling, Detlev and Z{\"o}ckler, Malte and Sander, Oliver and Hege, Hans-Christian}, title = {Weighted Labels for 3D Image Segmentation}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-3828}, number = {SC-98-39}, abstract = {Segmentation tools in medical imaging are either based on editing geometric curves or on the assignment of region labels to image voxels. While the first approach is well suited to describe smooth contours at subvoxel accuracy, the second approach is conceptually simpler and guarantees a unique classification of image areas. However, contours extracted from labeled images typically exhibit strong staircase artifacts and are not well suited to represent smooth tissue boundaries. In this paper we describe how this drawback can be circumvented by supplementing region labels with additional weights. We integrated our approach into an interactive segmentation system providing a well-defined set of manual and semi-automatic editing tools. All tools update both region labels and the corresponding weights simultaneously, thus allowing one to define segmentation results at high resolution. We applied our techniques to generate 3D polygonal models of anatomical structures.}, language = {en} } @misc{MahnkeArltBaumetal., author = {Mahnke, Heinz-Eberhard and Arlt, Tobias and Baum, Daniel and Hege, Hans-Christian and Herter, Felix and Lindow, Norbert and Manke, Ingo and Siopi, Tzulia and Menei, Eve and Etienne, Marc and Lepper, Verena}, title = {Virtual unfolding of folded papyri}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-74338}, abstract = {The historical importance of ancient manuscripts is unique since they provide information about the heritage of ancient cultures. Often texts are hidden in rolled or folded documents. Due to recent improvements in sensitivity and resolution, spectacular disclosures of texts hidden in rolled documents have become possible by means of X-ray tomography. However, revealing text on folded manuscripts is even more challenging. Manual unfolding is often too risky in view of the fragile condition of fragments, as it can lead to the total loss of the document.
X-ray tomography allows for virtual unfolding and enables non-destructive access to hidden texts. We have recently demonstrated the procedure and tested unfolding algorithms on a mockup sample. Here, we present results on unfolding ancient papyrus packages from the papyrus collection of the Mus{\´e}e du Louvre, among them objects folded along approximately orthogonal folding lines. In one of the packages, the first identification of a word was achieved, the Coptic word for "Lord".}, language = {en} } @misc{ZoecklerStallingHege, author = {Z{\"o}ckler, Malte and Stalling, Detlev and Hege, Hans-Christian}, title = {Fast and Intuitive Generation of Geometric Shape Transitions}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4219}, number = {SC-99-33}, abstract = {We describe a novel method for continuously transforming two triangulated models of arbitrary topology into each other. Equal global topology for both objects is assumed; extensions for genus changes during metamorphosis are provided. The proposed method addresses the major challenge in 3D metamorphosis, namely specifying the morphing process intuitively, with minimal user interaction and sufficient detail. Corresponding regions and point features are interactively identified. These regions are parametrized automatically and consistently, providing a basis for smooth interpolation. Utilizing suitable 3D interaction techniques, simple and intuitive control over the whole morphing process is offered.}, language = {en} } @misc{BengerHegeMerzkyetal., author = {Benger, Werner and Hege, Hans-Christian and Merzky, Andre and Radke, Thomas and Seidel, Edward}, title = {Efficient Distributed File I/O for Visualization in Grid Environments}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4326}, number = {SC-99-43}, abstract = {Large-scale simulations running in metacomputing environments face the problem of efficient file I/O. For efficiency, it is desirable to write data locally, distributed across the computing environment, and then to minimize data transfer, i.e.\ reduce remote file access. Both aspects require I/O approaches which differ from existing paradigms. For the data output of distributed simulations, one wants to use fast local parallel I/O for all participating nodes, producing a single distributed logical file, while keeping changes to the simulation code as small as possible. For reading the data file, as in postprocessing and file-based visualization, one wants to have efficient partial access to remote and distributed files, using a global naming scheme and efficient data caching, and again keeping the changes to the postprocessing code small. However, all available software solutions require the entire data to be staged locally (involving possible data recombination and conversion), or suffer from the performance problems of remote or distributed file systems. In this paper we show how to interface the HDF5 I/O library via its flexible Virtual File Driver layer to the Globus Data Grid.
We show that combining these two toolkits in a suitable way provides us with a new I/O framework, which allows efficient, secure, distributed and parallel file I/O in a metacomputing environment.}, language = {en} } @misc{BengerHegeHeusler, author = {Benger, Werner and Hege, Hans-Christian and Heusler, Stefan}, title = {Visions of Numerical Relativity 1999}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4429}, number = {SC-99-53}, abstract = {We present visualizations of recent supercomputer simulations from numerical relativity, exploiting the progress in visualization techniques and numerical methods also from an artistic point of view. The sequences have been compiled into a video tape, showing colliding black holes, orbiting and merging neutron stars as well as collapsing gravitational waves. In this paper we give some background information and provide a glance at the presented sequences.}, language = {en} } @misc{SchmidtEhrenbergHege, author = {Schmidt-Ehrenberg, Johannes and Hege, Hans-Christian}, title = {Visualizing Quantum Mechanical Phenomena}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4287}, number = {SC-99-39}, abstract = {In this paper we discuss several ways to visualize stationary and non-stationary quantum mechanical systems. We demonstrate an approach for the quantitative interpretation of probability density isovalues which yields a reasonable correlation between isosurfaces for different timesteps. As an intuitive quantity for visualizing the momentum of a quantum system, we propose the probability flow density, which can be treated by vector field visualization techniques. Finally, we discuss the visualization of non-stationary systems by a sequence of single-timestep images.}, language = {en} } @misc{HegeHutanuKaehleretal., author = {Hege, Hans-Christian and Hutanu, Andrei and K{\"a}hler, Ralf and Merzky, Andr{\´e} and Radke, Thomas and Seidel, Edward and Ullmer, Brygg}, title = {Progressive Retrieval and Hierarchical Visualization of Large Remote Data}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-7623}, number = {03-40}, abstract = {\noindent The size of data sets produced on remote supercomputer facilities frequently exceeds the processing capabilities of local visualization workstations. This phenomenon increasingly limits scientists when analyzing results of large-scale scientific simulations. The problem becomes even more prominent in scientific collaborations spanning large virtual organizations that work on common, shared sets of data distributed in Grid environments. In the visualization community, this problem is addressed by distributing the visualization pipeline. In particular, early stages of the pipeline are executed on resources closer to the initial (remote) locations of the data sets. \noindent This paper presents an efficient technique for placing the first two stages of the visualization pipeline (data access and data filtering) onto remote resources. This is realized by exploiting the ``extended retrieve'' feature of GridFTP for flexible, high-performance access to very large HDF5 files. We reduce the number of network transactions for filtering operations by utilizing a server-side data processing plugin, and hence reduce latency overhead compared to GridFTP partial file access.
The paper further describes the application of hierarchical rendering techniques to remote uniform data sets, which make use of the remote data filtering stage.}, language = {en} } @misc{KaehlerHege, author = {K{\"a}hler, Ralf and Hege, Hans-Christian}, title = {Visualization of Time-Dependent Adaptive Mesh Refinement Data}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-7384}, number = {03-16}, abstract = {Analysis of phenomena that simultaneously occur on quite different spatial and temporal scales requires adaptive, hierarchical schemes to reduce computational and storage demands. For data represented as grid functions, the key ingredients are adaptive, hierarchical, time-dependent grids that resolve spatio-temporal details without too much redundancy. Here, so-called AMR grids are gaining increasing popularity. For visualization and feature identification/tracking, the underlying continuous function has to be faithfully reconstructed by spatial and temporal interpolation. Well-designed interpolation methods yield better results and help to reduce the amount of data to be stored. We address the problem of temporal interpolation of AMR grid data, e.g.\ for the creation of smooth animations or for feature tracking. Intermediate grid hierarchies are generated by merging the cells on all refinement levels that are present in the key frames considered. Utilizing a clustering algorithm, a structure of nested grids is induced on the resulting collection of cells. The grid functions are mapped to the intermediate hierarchy, thus allowing application of appropriate interpolation techniques.}, language = {en} } @misc{BengerHege, author = {Benger, Werner and Hege, Hans-Christian}, title = {Tensor Splats}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-7393}, number = {03-17}, abstract = {An improved general-purpose technique for the visualization of symmetric positive definite tensor fields of rank two is described. It is based on a splatting technique built from tiny transparent glyph primitives that are capable of incorporating the full directional information content of a tensor. The result is an information-rich image that allows one to read off the preferred directions in a tensor field at each point of a three-dimensional volume or two-dimensional surface. It is useful for analyzing slices or volumes of a three-dimensional tensor field and can be overlaid with standard volume rendering or color mapping. The application of the rendering technique is demonstrated on general relativistic data and the diffusion tensor field of a human brain.}, language = {en} } @misc{ZoecklerReinBrandtetal., author = {Z{\"o}ckler, Malte and Rein, Karlheinz and Brandt, Robert and Stalling, Detlev and Hege, Hans-Christian}, title = {Creating Virtual Insect Brains with Amira}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-6589}, number = {01-32}, abstract = {By combining techniques of preparation, histology, confocal microscopy, data visualization and data processing, we have created and recently published a standard brain model for Drosophila and honey bee brains. This report describes the algorithms and implementation of the corresponding software modules.
At the same time it serves as a user's guide for scientists who want to reproduce the results for different species or mutants.}, language = {en} } @misc{StallingSeebassZoeckleretal., author = {Stalling, Detlev and Seebass, Martin and Z{\"o}ckler, Malte and Hege, Hans-Christian}, title = {Hyperthermia Treatment Planning with HyperPlan - User's Manual}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5957}, number = {00-27}, abstract = {HyperPlan is a software system for performing 3D simulations and treatment planning in regional hyperthermia. It allows the user to understand the complex effects of electromagnetic wave propagation and heat transport inside a patient's body. Optimized power amplitudes and phase settings can be calculated for the BSD radiowave applicators Sigma 60 and Sigma 2000 (eye applicator). HyperPlan is built on top of the modular, object-oriented visualization system Amira. This system already contains powerful algorithms for image processing, geometric modelling and 3D graphics display. HyperPlan provides a number of hyperthermia-specific modules, allowing the user to create 3D tetrahedral patient models suitable for treatment planning. In addition, all numerical simulation modules required for hyperthermia simulation are part of HyperPlan. This guide provides a step-by-step introduction to hyperthermia planning using HyperPlan. It also describes the usage of the underlying visualization system Amira.}, language = {en} } @misc{DeuflhardHege, author = {Deuflhard, Peter and Hege, Hans-Christian}, title = {Die Vision einer individuellen quantitativen Medizin}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-8805}, number = {05-47}, abstract = {The authors write this paper from the restricted perspective of mathematics and information technology. In order to be able to discuss the specific contribution of these disciplines at all, we feel compelled to sketch a framework that we foresee for the year 2020 -- judged by probability and from our narrower professional point of view. We ask the medical experts in advance for their forbearance should we move about their territory in an all too amateurish manner. Perhaps, however, our restricted point of view will also bring to light aspects that would otherwise remain unconsidered -- at least that is what we hope.}, language = {de} } @misc{HoerthBaumKnoeteletal., author = {Hoerth, Rebecca M. and Baum, Daniel and Kn{\"o}tel, David and Prohaska, Steffen and Willie, Bettina M. and Duda, Georg and Hege, Hans-Christian and Fratzl, Peter and Wagermaier, Wolfgang}, title = {Registering 2D and 3D Imaging Data of Bone during Healing}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53426}, abstract = {Purpose/Aims of the Study: Bone's hierarchical structure can be visualized using a variety of methods. Many techniques, such as light and electron microscopy, generate two-dimensional (2D) images, while micro computed tomography (μCT) allows a direct representation of the three-dimensional (3D) structure. In addition, different methods provide complementary structural information, such as the arrangement of organic or inorganic compounds. The overall aim of the present study is to answer bone research questions by linking information from different 2D and 3D imaging techniques. A great challenge in combining different methods arises from the fact that they usually reflect different characteristics of the real structure.
Materials and Methods: We investigated bone during healing by means of μCT and several 2D methods. Backscattered electron images were used to qualitatively evaluate the tissue's calcium content and served as a position map for other experimental data. Nanoindentation and X-ray scattering experiments were performed to visualize mechanical and structural properties. Results: We present an approach for the registration of 2D data in a 3D μCT reference frame, where scanning electron microscopy serves as a methodological link. Backscattered electron images are perfectly suited for registration into μCT reference frames, since both show structures based on the same physical principles. We introduce specific registration tools that have been developed to perform the registration process in a semi-automatic way. Conclusions: By applying this routine, we were able to precisely locate structural information (e.g. mineral particle properties) in the 3D bone volume. In bone healing studies, this will help to better understand basic formation, remodeling and mineralization processes.}, language = {en} } @misc{VohraHarthIsoeetal., author = {Vohra, Sumit Kumar and Harth, Philipp and Isoe, Yasuko and Bahl, Armin and Fotowat, Haleh and Engert, Florian and Hege, Hans-Christian and Baum, Daniel}, title = {A Visual Interface for Exploring Hypotheses about Neural Circuits}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89932}, abstract = {One of the fundamental problems in neurobiological research is to understand how neural circuits generate behaviors in response to sensory stimuli. Elucidating such neural circuits requires anatomical and functional information about the neurons that are active during the processing of the sensory information and the generation of the respective response, as well as an identification of the connections between these neurons. With modern imaging techniques, both morphological properties of individual neurons and functional information related to sensory processing, information integration and behavior can be obtained. Given the resulting information, neurobiologists are faced with the task of identifying the anatomical structures, down to individual neurons, that are linked to the studied behavior and the processing of the respective sensory stimuli. Here, we present a novel interactive tool that assists neurobiologists in the aforementioned task by allowing them to extract hypothetical neural circuits constrained by anatomical and functional data. Our approach is based on two types of structural data: brain regions that are anatomically or functionally defined, and morphologies of individual neurons. Both types of structural data are interlinked and augmented with additional information. The presented tool allows the expert user to identify neurons using Boolean queries. The interactive formulation of these queries is supported by linked views, using, among other things, two novel 2D abstractions of neural circuits. The approach was validated in two case studies investigating the neural basis of vision-based behavioral responses in zebrafish larvae.
Beyond this particular application, we believe that the presented tool will be of general interest for exploring hypotheses about neural circuits in other species, genera and taxa.}, language = {en} } @misc{NavaYazdaniHegevonTycowicz, author = {Nava-Yazdani, Esfandiar and Hege, Hans-Christian and von Tycowicz, Christoph}, title = {A Hierarchical Geodesic Model for Longitudinal Analysis on Manifolds}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85187}, abstract = {In many applications, geodesic hierarchical models are adequate for the study of temporal observations. We apply such a model, derived for manifold-valued data, to Kendall's shape space. In particular, instead of the Sasaki metric, we adapt a functional-based metric, which increases the computational efficiency and does not require the implementation of the curvature tensor. We propose the corresponding variational time discretization of geodesics and employ the approach for the longitudinal analysis of 2D rat skull shapes as well as 3D shapes derived from an imaging study on osteoarthritis. Specifically, we perform hypothesis tests and estimate mean trends.}, language = {en} }
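As an aside to the last entry: its abstract mentions a variational time discretization of geodesics. The short Python sketch below illustrates only the general idea behind such a discretization, on the unit sphere as a stand-in manifold rather than Kendall's shape space; it is not the authors' method, and all function names, the manifold choice and the optimizer settings are assumptions made for illustration. A discrete geodesic between two fixed endpoints is obtained by minimizing the sum of squared geodesic distances between consecutive points of the path.

# Illustrative sketch only: variational time discretization of a geodesic
# on the unit sphere S^2 (a stand-in manifold; names/parameters are assumptions).
import numpy as np
from scipy.optimize import minimize


def sphere_dist(x, y):
    """Geodesic (great-circle) distance between two unit vectors."""
    return np.arccos(np.clip(np.dot(x, y), -1.0, 1.0))


def path_energy(flat_interior, p0, pK):
    """Discrete path energy: sum of squared geodesic distances between
    consecutive points; interior points are projected onto the sphere."""
    interior = flat_interior.reshape(-1, 3)
    interior = interior / np.linalg.norm(interior, axis=1, keepdims=True)
    pts = [p0, *interior, pK]
    return sum(sphere_dist(a, b) ** 2 for a, b in zip(pts[:-1], pts[1:]))


def discrete_geodesic(p0, pK, k=8):
    """Minimize the path energy over the k-1 interior points
    (variational time discretization) and return the discrete geodesic."""
    s = np.linspace(0.0, 1.0, k + 1)[1:-1, None]
    init = (1.0 - s) * p0 + s * pK                     # chordal initial guess
    init /= np.linalg.norm(init, axis=1, keepdims=True)
    res = minimize(path_energy, init.ravel(), args=(p0, pK))
    sol = res.x.reshape(-1, 3)
    sol /= np.linalg.norm(sol, axis=1, keepdims=True)
    return np.vstack([p0, sol, pK])


if __name__ == "__main__":
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([0.0, 0.0, 1.0])
    path = discrete_geodesic(a, b)
    print(np.round(path, 3))  # points spread along the great circle from a to b

In the hierarchical setting addressed by the paper, such discrete geodesics would only be one building block for subject-wise trends; the sketch covers the single-geodesic case under the stated assumptions.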