@inproceedings{GreweZachow2016, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Fully Automated and Highly Accurate Dense Correspondence for Facial Surfaces}, volume = {9914}, booktitle = {Computer Vision - ECCV 2016 Workshops}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-319-48881-3_38}, pages = {552 -- 568}, year = {2016}, abstract = {We present a novel framework for fully automated and highly accurate determination of facial landmarks and dense correspondence, e.g. a topologically identical mesh of arbitrary resolution, across the entire surface of 3D face models. For robustness and reliability of the proposed approach, we combine 2D landmark detectors and 3D statistical shape priors with a variational matching method. Instead of matching faces in the spatial domain only, we employ image registration to align the 2D parametrization of the facial surface to a planar template we call the Unified Facial Parameter Domain (ufpd). This allows us to simultaneously match salient photometric and geometric facial features using robust image similarity measures while reasonably constraining geometric distortion in regions with less significant features. We demonstrate the accuracy of the dense correspondence established by our framework on the BU3DFE database with 2500 facial surfaces and show that our framework outperforms current state-of-the-art methods with respect to the fully automated location of facial landmarks.}, language = {en} }

@incollection{GreweSchreiber2016, author = {Grewe, Carl Martin and Schreiber, Lisa}, title = {Digitale Bildarchive. Archivierung und Codierung der Gef{\"u}hle}, booktitle = {+ultra gestaltung schafft wissen}, publisher = {Seemann Henschel}, isbn = {978-3-86502-378-0}, pages = {285 -- 290}, year = {2016}, language = {de} }

@inproceedings{GreweleRouxPilzetal.2018, author = {Grewe, Carl Martin and Le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, booktitle = {IEEE International Conference on Automatic Face and Gesture Recognition}, doi = {10.1109/FG.2018.00049}, pages = {286 -- 293}, year = {2018}, language = {en} }

@article{WeissGreweOlderbaketal.2020, author = {Weiss, Selina and Grewe, Carl Martin and Olderbak, Sally and Goecke, Benjamin and Kaltwasser, Laura and Hildebrandt, Andrea}, title = {Symmetric or not? A holistic approach to the measurement of fluctuating asymmetry from facial photographs}, volume = {166}, journal = {Personality and Individual Differences}, doi = {10.1016/j.paid.2020.110137}, pages = {1 -- 12}, year = {2020}, language = {en} }

@article{WeissGreweOlderbaketal.2020a, author = {Weiss, Selina and Grewe, Carl Martin and Olderbak, Sally and Goecke, Benjamin and Kaltwasser, Laura and Hildebrandt, Andrea}, title = {Symmetric or not? A holistic approach to the measurement of fluctuating asymmetry from facial photographs}, journal = {PsyArXiv}, doi = {10.31234/osf.io/s534t}, year = {2020}, language = {en} }

@misc{GreweSchreiber2017, author = {Grewe, Carl Martin and Schreiber, Lisa}, title = {Digital Image Archive. The Archiving and Coding of Emotions}, journal = {+ultra. Knowledge \& Gestaltung}, editor = {Doll, Nikola and Bredekamp, Horst and Sch{\"a}ffner, Wolfgang}, publisher = {Seemann Henschel}, pages = {281 -- 286}, year = {2017}, language = {en} }

@misc{GreweZachow2017, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Face to Face-Interface}, journal = {+ultra. Knowledge \& Gestaltung}, editor = {Doll, Nikola and Bredekamp, Horst and Sch{\"a}ffner, Wolfgang}, publisher = {Seemann Henschel}, pages = {320 -- 321}, year = {2017}, language = {en} }
@misc{GreweLeRouxPilzetal.2018, author = {Grewe, Carl Martin and Le Roux, Gabriel and Pilz, Sven-Kristofer and Zachow, Stefan}, title = {Spotting the Details: The Various Facets of Facial Expressions}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-67696}, year = {2018}, abstract = {3D Morphable Models (MM) are a popular tool for analysis and synthesis of facial expressions. They represent plausible variations in facial shape and appearance within a low-dimensional parameter space. Fitted to a face scan, the model's parameters compactly encode its expression patterns. This expression code can be used, for instance, as a feature in automatic facial expression recognition. For accurate classification, an MM that can adequately represent the various characteristic facets and variants of each expression is necessary. Currently available MMs are limited in the diversity of expression patterns. We present a novel high-quality Facial Expression Morphable Model built from a large-scale face database as a tool for expression analysis and synthesis. Establishment of accurate dense correspondence, down to the finest skin features, enables a detailed statistical analysis of facial expressions. Various characteristic shape patterns are identified for each expression. The results of our analysis give rise to a new facial expression code. We demonstrate the advantages of such a code for the automatic recognition of expressions and compare the accuracy of our classifier to the state of the art.}, language = {en} }

@misc{GreweLameckerZachow2013, author = {Grewe, Carl Martin and Lamecker, Hans and Zachow, Stefan}, title = {Landmark-based Statistical Shape Analysis}, journal = {Auxology - Studying Human Growth and Development}, editor = {Hermanussen, Michael}, publisher = {Schweizerbart Verlag, Stuttgart}, pages = {199 -- 201}, year = {2013}, language = {en} }

@misc{GreweLameckerZachow2011, author = {Grewe, Carl Martin and Lamecker, Hans and Zachow, Stefan}, title = {Digital morphometry: The Potential of Statistical Shape Models}, journal = {Anthropologischer Anzeiger. Journal of Biological and Clinical Anthropology}, pages = {506 -- 506}, year = {2011}, language = {en} }
@misc{WilsonBuecherGreweetal.2015, author = {Wilson, David and B{\"u}cher, Pia and Grewe, Carl Martin and Anglin, Carolyn and Zachow, Stefan and Dunbar, Michael}, title = {Validation of Three Dimensional Models of the Distal Femur Created from Surgical Navigation Point Cloud Data}, journal = {15th Annual Meeting of the International Society for Computer Assisted Orthopaedic Surgery (CAOS)}, year = {2015}, language = {en} }

@misc{WilsonBuecherGreweetal.2015a, author = {Wilson, David and B{\"u}cher, Pia and Grewe, Carl Martin and Mocanu, Valentin and Anglin, Carolyn and Zachow, Stefan and Dunbar, Michael}, title = {Validation of Three Dimensional Models of the Distal Femur Created from Surgical Navigation Data}, journal = {Orthopaedic Research Society Annual Meeting}, address = {Las Vegas, Nevada}, year = {2015}, language = {en} }

@article{WilsonAnglinAmbellanetal.2017, author = {Wilson, David and Anglin, Carolyn and Ambellan, Felix and Grewe, Carl Martin and Tack, Alexander and Lamecker, Hans and Dunbar, Michael and Zachow, Stefan}, title = {Validation of three-dimensional models of the distal femur created from surgical navigation point cloud data for intraoperative and postoperative analysis of total knee arthroplasty}, volume = {12}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {12}, publisher = {Springer}, doi = {10.1007/s11548-017-1630-5}, pages = {2097 -- 2105}, year = {2017}, abstract = {Purpose: Despite the success of total knee arthroplasty, there continues to be a significant proportion of patients who are dissatisfied. One explanation may be a shape mismatch between pre- and post-operative distal femurs. The purpose of this study was to investigate a method to match a statistical shape model (SSM) to intra-operatively acquired point cloud data from a surgical navigation system, and to validate it against the pre-operative magnetic resonance imaging (MRI) data from the same patients. Methods: A total of 10 patients who underwent navigated total knee arthroplasty also had an MRI scan less than 2 months pre-operatively. The standard surgical protocol was followed, which included partial digitization of the distal femur. Two different methods were employed to fit the SSM to the digitized point cloud data, based on (1) Iterative Closest Point (ICP) and (2) Gaussian Mixture Models (GMM). The available MRI data were manually segmented, and the reconstructed three-dimensional surfaces were used as ground truth against which the statistical shape model fit was compared. Results: For both approaches, the difference between the statistical shape model-generated femur and the surface generated from MRI segmentation averaged less than 1.7 mm, with maximum errors occurring in less clinically important areas. Conclusion: The results demonstrated good correspondence with the distal femoral morphology even in cases of sparse data sets. Application of this technique will allow for measurement of mismatch between pre- and post-operative femurs retrospectively on any case performed using the surgical navigation system, and could be integrated into the surgical navigation unit to provide real-time feedback.}, language = {en} }
@article{GreweSchreiberZachow2015, author = {Grewe, Carl Martin and Schreiber, Lisa and Zachow, Stefan}, title = {Fast and Accurate Digital Morphometry of Facial Expressions}, volume = {31}, journal = {Facial Plastic Surgery}, number = {05}, publisher = {Thieme Medical Publishers}, address = {New York}, doi = {10.1055/s-0035-1564720}, pages = {431 -- 438}, year = {2015}, language = {en} }

@misc{Grewe2015, author = {Grewe, Carl Martin}, title = {3D Digital Morphology of Human Faces}, journal = {Anthropologie der Wahrnehmung. Marsilius-Sommerakademie Heidelberg, 14.-20. September}, year = {2015}, language = {en} }

@misc{GreweZachow2021, author = {Grewe, C. Martin and Zachow, Stefan}, title = {Release of the FexMM for the Open Virtual Mirror Framework}, doi = {10.12752/8532}, year = {2021}, abstract = {THIS MODEL IS FOR NON-COMMERCIAL RESEARCH PURPOSES. ONLY MEMBERS OF UNIVERSITIES OR NON-COMMERCIAL RESEARCH INSTITUTES ARE ELIGIBLE TO APPLY. 1. Download, fill, and sign the form available from: https://media.githubusercontent.com/media/mgrewe/ovmf/main/data/fexmm_license_agreement.pdf 2. Send the signed form to: fexmm@zib.de NOTE: Use an official email address of your institution for the request.}, language = {en} }

@article{GreweLiuHildebrandtetal.2022, author = {Grewe, Carl Martin and Liu, Tuo and Hildebrandt, Andrea and Zachow, Stefan}, title = {The Open Virtual Mirror Framework for Enfacement Illusions - Enhancing the Sense of Agency With Avatars That Imitate Facial Expressions}, journal = {Behavior Research Methods}, publisher = {Springer}, doi = {10.3758/s13428-021-01761-9}, year = {2022}, language = {en} }

@phdthesis{Grewe2023, author = {Grewe, Martin}, title = {An Extended 3D Morphable Face Model with Applications in Experimental Psychology}, doi = {10.14279/depositonce-18589}, school = {Technische Universit{\"a}t Berlin}, pages = {187}, year = {2023}, abstract = {Our faces and facial expressions are an important means of communication and social interaction. One goal of the behavioral sciences is to better understand how the features of the faces that we look at influence our behavior. These include static features like facial proportions or the shape and color of certain parts of a face, which primarily constitute facial identity, as well as dynamic movements resulting from the activation of the facial musculature. Experimental psychology provides an empirical approach to this endeavor. In experiments, participants are typically exposed to images or videos of realistic faces with specifically controlled features. By analyzing the reactions to such stimuli, conclusions can be drawn about the influence of facial features on the participants' behavior. Psychologists today mostly generate face stimuli with the help of digital tools. Image editing with Photoshop is highly flexible, but also time-consuming and subjective. Using tools like Psychomorph or Fantamorph is easier and more objective, but does not allow specific control over facial features. In contrast, stimulus generation with 3D Morphable Face Models (3DMMs) offers a better balance between objectivity, ease of use, and flexibility. 3DMMs are statistical models derived from 3D scans of real people's faces and facial expressions.
After these training scans have been brought into correspondence, methods like principal component analysis (PCA) can be used to determine the major modes of variation of facial shape and texture in the data. Such modes typically vary the overall facial proportions, expressions, or skin color. They can be individually controlled and flexibly combined to generate new faces and facial expressions. The plausibility of the generated faces can be ensured by having the mode combinations follow the multivariate distribution of the training data. 3DMMs have been mostly used by psychologists for the generation of stimulus images of faces with neutral expression. Static and dynamic stimuli of facial expressions are also of great interest, but generation with 3DMMs is less common. A problem is that the majority of current 3DMMs can only generate facial movements according to the six prototypic expressions of anger, disgust, fear, happiness, sadness, and surprise. More diverse or subtle expressions are often impossible. Among other reasons, this is due to the difficulty in establishing accurate correspondence in the training data. Further, the modes of most 3DMMs were created by means of PCA. These modes often lack interpretability, fail to generate facial details, and rarely provide psychologists a specific control over identity or expression features. Some 3DMMs also generate subtle artifacts that might lead to undesired effects during face perception. They are also less realistic than faces which were designed by artistic experts for recent computer games and animated movies. Last but not least, current 3DMMs have probably not yet been used for interactive experiments in virtual reality (VR) for technical reasons. Although they provide many advantages also beyond the generation of static or dynamic stimuli, the limitations of current 3DMMs have so far prevented a widespread usage in experimental psychology. The goal of this dissertation is to foster the creation and usage of 3DMMs in this context. To this end, we make three major contributions. First, we describe a matching method that establishes correspondence for 3D face scans with a very high accuracy. Unlike the most commonly used methods, it transforms the facial features into a 2D intermediate representation so that they can be aligned to a reference using image registration. We perform experiments with a large database of 3D scans of faces and facial expressions showing that our method outperforms previous approaches. Second, the 3D scans which were previously brought into correspondence are used for the creation of a 3DMM whose resolution is an order of magnitude higher than that of most existing models. We learn a variety of meaningful modes that, e.g., vary features only in specific regions of the face, or that are related to demographic factors such as ethnicity and age. Further, modes of local facial movements are established that can be flexibly combined into a large variety of expressions. We evaluate the quality of the newly created 3DMM in two experiments. Our results show its advantages over previous models, especially the higher degree of realism of dynamic stimuli of facial expressions which were created with our model. Third, we demonstrate that 3DMMs can not only be used for the generation of stimuli. We develop two experimental methods that are readily applicable in experimental psychology. Initially, we create 3D avatar faces with our 3DMM that are readily applicable in VR. 
They are used in a new open source framework for virtual mirror experiments on self-face perception. A study is conducted that demonstrates the advantages of the framework over previous methods. Furthermore, our 3DMM is used to create a method for improved control of facial asymmetry in existing stimulus photographs. We show that the method accounts for different dimensions of facial asymmetry and is less sensitive to extrinsic factors such as head posture than previous approaches. The different methods are evaluated in a study investigating the influence of facial asymmetry on ratings of attractiveness, femininity, and masculinity. The results indicate the benefits and validity of our method.}, language = {en} }

@article{GreweLiuKahletal.2021, author = {Grewe, Carl Martin and Liu, Tuo and Kahl, Christoph and Hildebrandt, Andrea and Zachow, Stefan}, title = {Statistical Learning of Facial Expressions Improves Realism of Animated Avatar Faces}, volume = {2}, journal = {Frontiers in Virtual Reality}, publisher = {Frontiers}, doi = {10.3389/frvir.2021.619811}, pages = {1 -- 13}, year = {2021}, language = {en} }