@article{TomholtBaumWoodetal., author = {Tomholt, Lara and Baum, Daniel and Wood, Robert J. and Weaver, James C.}, title = {High-throughput segmentation, data visualization, and analysis of sea star skeletal networks}, series = {Journal of Structural Biology}, volume = {215}, journal = {Journal of Structural Biology}, number = {2}, doi = {10.1016/j.jsb.2023.107955}, pages = {107955}, abstract = {The remarkably complex skeletal systems of the sea stars (Echinodermata, Asteroidea), consisting of hundreds to thousands of individual elements (ossicles), have intrigued investigators for more than 150 years. While the general features and structural diversity of isolated asteroid ossicles have been well documented in the literature, the task of mapping the spatial organization of these constituent skeletal elements in a whole-animal context represents an incredibly laborious process, and as such, has remained largely unexplored. To address this unmet need, particularly in the context of understanding structure-function relationships in these complex skeletal systems, we present an integrated approach that combines micro-computed tomography, semi-automated ossicle segmentation, data visualization tools, and the production of additively manufactured tangible models to reveal biologically relevant structural data that can be rapidly analyzed in an intuitive manner. In the present study, we demonstrate this high-throughput workflow by segmenting and analyzing entire skeletal systems of the giant knobby star, Pisaster giganteus, at four different stages of growth. The in-depth analysis, presented herein, provides a fundamental understanding of the three-dimensional skeletal architecture of the sea star body wall, the process of skeletal maturation during growth, and the relationship between skeletal organization and morphological characteristics of individual ossicles. The widespread implementation of this approach for investigating other species, subspecies, and growth series has the potential to fundamentally improve our understanding of asteroid skeletal architecture and biodiversity in relation to mobility, feeding habits, and environmental specialization in this fascinating group of echinoderms.}, language = {en} } @article{SterzikLichtenbergKroneetal., author = {Sterzik, Anna and Lichtenberg, Nils and Krone, Michael and Baum, Daniel and Cunningham, Douglas W. and Lawonn, Kai}, title = {Enhancing molecular visualization: Perceptual evaluation of line variables with application to uncertainty visualization}, series = {Computers \& Graphics}, volume = {114}, journal = {Computers \& Graphics}, doi = {10.1016/j.cag.2023.06.006}, pages = {401 -- 413}, abstract = {Data are often subject to some degree of uncertainty, whether aleatory or epistemic. This applies both to experimental data acquired with sensors and to simulation data. Displaying these data and their uncertainty faithfully is crucial for gaining knowledge. Specifically, the effective communication of the uncertainty can influence the interpretation of the data and the user's trust in the visualization. However, uncertainty-aware visualization has received little attention in molecular visualization. When using the established molecular representations, the physicochemical attributes of the molecular data usually already occupy the common visual channels like shape, size, and color. Consequently, to encode uncertainty information, we need to open up another channel by using feature lines. 
Even though various line variables have been proposed for uncertainty visualizations, they have so far been primarily used for two-dimensional data and there has been little perceptual evaluation. Thus, we conducted two perceptual studies to determine the suitability of the line variables blur, dashing, grayscale, sketchiness, and width for distinguishing several values in molecular visualizations. While our work was motivated by uncertainty visualization, our techniques and study results also apply to other types of scalar data.}, language = {en} } @article{ZemannLeSherlocketal., author = {Zemann, Berit and Le, Mai-Lee Van and Sherlock, Rob E. and Baum, Daniel and Katija, Kakani and Stach, Thomas}, title = {Evolutionary traces of miniaturization in a giant - Comparative anatomy of brain and brain nerves in Bathochordaeus stygius (Tunicata, Appendicularia)}, series = {Journal of Morphology}, volume = {284}, journal = {Journal of Morphology}, number = {7}, doi = {10.1002/jmor.21598}, language = {en} } @article{KlenertLepperBaum, author = {Klenert, Nicolas and Lepper, Verena and Baum, Daniel}, title = {A Local Iterative Approach for the Extraction of 2D Manifolds from Strongly Curved and Folded Thin-Layer Structures}, series = {IEEE Transactions on Visualization and Computer Graphics}, journal = {IEEE Transactions on Visualization and Computer Graphics}, doi = {10.1109/TVCG.2023.3327403}, abstract = {Ridge surfaces represent important features for the analysis of 3-dimensional (3D) datasets in diverse applications and are often derived from varying underlying data including flow fields, geological fault data, and point data, but they can also be present in the original scalar images acquired using a plethora of imaging techniques. Our work is motivated by the analysis of image data acquired using micro-computed tomography (μCT) of ancient, rolled and folded thin-layer structures such as papyrus, parchment, and paper as well as silver and lead sheets. From these documents we know that they are 2-dimensional (2D) in nature. Hence, we are particularly interested in reconstructing 2D manifolds that approximate the document's structure. The image data from which we want to reconstruct the 2D manifolds are often very noisy and represent folded, densely-layered structures with many artifacts, such as ruptures or layer splitting and merging. Previous ridge-surface extraction methods fail to extract the desired 2D manifold for such challenging data. We have therefore developed a novel method to extract 2D manifolds. The proposed method uses a local fast marching scheme in combination with a separation of the region covered by fast marching into two sub-regions. The 2D manifold of interest is then extracted as the surface separating the two sub-regions. The local scheme can be applied for both automatic propagation and interactive analysis. We demonstrate the applicability and robustness of our method on both artificial and real-world data, including folded silver and papyrus sheets.}, language = {en} } @article{FogalliPeresLineBaum, author = {Fogalli, Giovani Bressan and Peres Line, S{\'e}rgio Roberto and Baum, Daniel}, title = {Segmentation of tooth enamel microstructure images using classical image processing and U-Net approaches}, series = {Frontiers in Imaging}, volume = {2}, journal = {Frontiers in Imaging}, doi = {10.3389/fimag.2023.1215764}, abstract = {Tooth enamel is the hardest tissue in the human organism, formed by prism layers in regularly alternating directions. 
Under side illumination, these prisms form the Hunter-Schreger Bands (HSB) pattern, which is composed of light and dark stripes resembling fingerprints. We have shown in previous works that the HSB pattern is highly variable, seems to be unique for each tooth, and can be used as a biometric method for human identification. Since this pattern cannot be acquired with sensors, the HSB region in the digital photograph must be identified and correctly segmented from the rest of the tooth and background. Although these areas can be manually removed, this process is not reliable, as the excluded areas can vary according to the individual's subjective impression. Therefore, the aim of this work was to develop an algorithm that automatically selects the region of interest (ROI), thus making the entire biometric process straightforward. We used two different approaches: a classical image processing method which we called anisotropy-based segmentation (ABS) and a machine learning method known as U-Net, a fully convolutional neural network. Both approaches were applied to a set of extracted tooth images. U-Net with some post-processing outperformed ABS in the segmentation task, with an Intersection over Union (IoU) of 0.837 against 0.766. Even with a small dataset, U-Net proved to be a potential candidate for fully automated in-mouth application. However, the ABS technique has several parameters which allow a more flexible segmentation with interactive adjustments specific to image properties.}, language = {en} } @article{ToulkeridouGutierrezBaumetal., author = {Toulkeridou, Evropi and Gutierrez, Carlos Enrique and Baum, Daniel and Doya, Kenji and Economo, Evan P.}, title = {Automated segmentation of insect anatomy from micro-CT images using deep learning}, series = {Natural Sciences}, volume = {3}, journal = {Natural Sciences}, number = {4}, doi = {10.1002/ntls.20230010}, abstract = {Three-dimensional (3D) imaging, such as micro-computed tomography (micro-CT), is increasingly being used by organismal biologists for precise and comprehensive anatomical characterization. However, the segmentation of anatomical structures remains a bottleneck in research, often requiring tedious manual work. Here, we propose a pipeline for the fully-automated segmentation of anatomical structures in micro-CT images utilizing state-of-the-art deep learning methods, selecting the ant brain as a test case. We implemented the U-Net architecture for 2D image segmentation for our convolutional neural network (CNN), combined with pixel-island detection. For training and validation of the network, we assembled a dataset of semi-manually segmented brain images of 76 ant species. The trained network predicted the brain area in ant images fast and accurately; its performance tested on validation sets showed good agreement between the prediction and the target, scoring 80\% Intersection over Union (IoU) and 90\% Dice Coefficient (F1) accuracy. While manual segmentation usually takes many hours for each brain, the trained network takes only a few minutes. Furthermore, our network is generalizable for segmenting the whole neural system in full-body scans, and works in tests on distantly related and morphologically divergent insects (e.g., fruit flies). The latter suggests that methods like the one presented here generally apply across diverse taxa. 
Our method makes the construction of segmented maps and the morphological quantification of different species more efficient and scalable to large datasets, a step toward a big data approach to organismal anatomy.}, language = {en} } @article{LongrenEigenShubitidzeetal., author = {Longren, Luke L. and Eigen, Lennart and Shubitidze, Ani and Lieschnegg, Oliver and Baum, Daniel and Nyakatura, John A. and Hildebrandt, Thomas and Brecht, Michael}, title = {Dense Reconstruction of Elephant Trunk Musculature}, series = {Current Biology}, volume = {33}, journal = {Current Biology}, doi = {10.1016/j.cub.2023.09.007}, pages = {1 -- 8}, abstract = {The elephant trunk operates as a muscular hydrostat and is actuated by the most complex musculature known in animals. Because the number of trunk muscles is unclear, we performed dense reconstructions of trunk muscle fascicles, the elementary muscle units, from microCT scans of an Asian baby elephant trunk. Muscle architecture changes markedly across the trunk. Trunk tip and finger consist of about 8,000 extraordinarily filigree fascicles. The dexterous finger consists exclusively of microscopic radial fascicles, pointing to a role of muscle miniaturization in elephant dexterity. Radial fascicles also predominate (at 82\% volume) in the remainder of the trunk tip, and we wonder if radial muscle fascicles are of particular significance for fine motor control of the dexterous trunk tip. By volume, trunk-shaft muscles comprise one-third numerous, small radial muscle fascicles, two-thirds large longitudinal fascicles of three subtypes (dorsal longitudinals, ventral outer obliques, and ventral inner obliques), and a small fraction of transversal fascicles. Shaft musculature is laterally, but not radially, symmetric. A predominance of dorsal over ventral radial muscles and of ventral over dorsal longitudinal muscles may result in a larger ability of the shaft to extend dorsally than ventrally and to bend inward rather than outward. There are around 90,000 trunk muscle fascicles. While primate hand control is based on fine control of contraction by the convergence of many motor neurons on a small set of relatively large muscles, the evolution of elephant grasping has led to thousands of microscopic fascicles, which probably outnumber facial motor neurons.}, language = {en} } @article{KiewiszBaumMuellerReichertetal., author = {Kiewisz, Robert and Baum, Daniel and M{\"u}ller-Reichert, Thomas and Fabig, Gunar}, title = {Serial-section electron tomography and quantitative analysis of the microtubule organization in 3D-reconstructed mitotic spindles}, series = {Bio-protocol}, volume = {13}, journal = {Bio-protocol}, number = {20}, doi = {10.21769/BioProtoc.4849}, language = {en} } @article{VohraHarthIsoeetal., author = {Vohra, Sumit Kumar and Harth, Philipp and Isoe, Yasuko and Bahl, Armin and Fotowat, Haleh and Engert, Florian and Hege, Hans-Christian and Baum, Daniel}, title = {A Visual Interface for Exploring Hypotheses about Neural Circuits}, series = {IEEE Transactions on Visualization and Computer Graphics}, journal = {IEEE Transactions on Visualization and Computer Graphics}, doi = {10.1109/TVCG.2023.3243668}, abstract = {One of the fundamental problems in neurobiological research is to understand how neural circuits generate behaviors in response to sensory stimuli. 
Elucidating such neural circuits requires anatomical and functional information about the neurons that are active during the processing of the sensory information and generation of the respective response, as well as an identification of the connections between these neurons. With modern imaging techniques, both morphological properties of individual neurons as well as functional information related to sensory processing, information integration and behavior can be obtained. Given the resulting information, neurobiologists are faced with the task of identifying the anatomical structures down to individual neurons that are linked to the studied behavior and the processing of the respective sensory stimuli. Here, we present a novel interactive tool that assists neurobiologists in the aforementioned task by allowing them to extract hypothetical neural circuits constrained by anatomical and functional data. Our approach is based on two types of structural data: brain regions that are anatomically or functionally defined, and morphologies of individual neurons. Both types of structural data are interlinked and augmented with additional information. The presented tool allows the expert user to identify neurons using Boolean queries. The interactive formulation of these queries is supported by linked views, using, among other things, two novel 2D abstractions of neural circuits. The approach was validated in two case studies investigating the neural basis of vision-based behavioral responses in zebrafish larvae. Despite this particular application, we believe that the presented tool will be of general interest for exploring hypotheses about neural circuits in other species, genera and taxa.}, language = {en} } @misc{VohraHarthIsoeetal., author = {Vohra, Sumit Kumar and Harth, Philipp and Isoe, Yasuko and Bahl, Armin and Fotowat, Haleh and Engert, Florian and Hege, Hans-Christian and Baum, Daniel}, title = {A Visual Interface for Exploring Hypotheses about Neural Circuits}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-89932}, abstract = {One of the fundamental problems in neurobiological research is to understand how neural circuits generate behaviors in response to sensory stimuli. Elucidating such neural circuits requires anatomical and functional information about the neurons that are active during the processing of the sensory information and generation of the respective response, as well as an identification of the connections between these neurons. With modern imaging techniques, both morphological properties of individual neurons as well as functional information related to sensory processing, information integration and behavior can be obtained. Given the resulting information, neurobiologists are faced with the task of identifying the anatomical structures down to individual neurons that are linked to the studied behavior and the processing of the respective sensory stimuli. Here, we present a novel interactive tool that assists neurobiologists in the aforementioned task by allowing them to extract hypothetical neural circuits constrained by anatomical and functional data. Our approach is based on two types of structural data: brain regions that are anatomically or functionally defined, and morphologies of individual neurons. Both types of structural data are interlinked and augmented with additional information. The presented tool allows the expert user to identify neurons using Boolean queries. 
The interactive formulation of these queries is supported by linked views, using, among other things, two novel 2D abstractions of neural circuits. The approach was validated in two case studies investigating the neural basis of vision-based behavioral responses in zebrafish larvae. Despite this particular application, we believe that the presented tool will be of general interest for exploring hypotheses about neural circuits in other species, genera and taxa.}, language = {en} }