@phdthesis{CastilloAlejandre2019,
  author   = {Castillo Alejandre, Susana},
  title    = {Digital humanity: the temporal and semantic structure of dynamic conversational facial expressions},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-51562},
  school   = {BTU Cottbus - Senftenberg},
  year     = {2019},
  abstract = {This thesis focuses on establishing a novel, theoretically founded and empirically derived methodological pipeline to provide Embodied Conversational Agents (ECAs) with natural facial expressions and desired personality traits. After giving an overview of the content of this thesis, we dedicate its second part to deriving the Semantic Space (SSp) for facial expressions, finding that the same space is used for expression words, expression videos, and motion-capture-based point-cloud animations. The process involved the creation of a new facial expression database using Motion Capture (MoCap) technology. Our technique can be used to empirically map specific motion trajectories (including their frequency decomposition) onto specific perceptual attributes, allowing the targeted creation of novel animations with the desired perceptual traits, as exemplified in the third part of this thesis. Before addressing our final conclusions, and on the grounds that the systematic differences between individuals performing the same facial expressions are related to their personality, we devote the fourth part of this thesis to studying the mapping between personality and expressive facial motions.},
  subject  = {Facial animation; Cognitive-based behavioural modelling; Motion capture; Personality; Feature extraction; Facial expressions; Computer animation},
  language = {en}
}

@phdthesis{Legde2021,
  author   = {Legde, Katharina},
  title    = {Projecting motion capture: designing and implementing a modular and flexible facial animation pipeline to evaluate different perceptual effects},
  doi      = {10.26127/BTUOpen-5604},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-56043},
  school   = {BTU Cottbus - Senftenberg},
  year     = {2021},
  abstract = {We cannot not communicate: humans use body language and facial expressions to communicate verbally and non-verbally with others. Humans are experts at deciphering and understanding facial expressions, which makes synthesizing them on a virtual face a very challenging task. Realistic facial animations have many applications, such as movies, games, and affective interfaces. Motion capture recordings of real humans are often used to provide the virtual face with realistic motion, and animation controls must be established to transfer the captured motion onto the virtual face. To preserve the full essence of the captured motion, these controls need to be carefully planned beforehand, which requires considerable time and money. This thesis offers an alternative to common performance-driven facial animation techniques: instead of using motion capture to drive a pre-defined blend-shape rig, it projects motion capture directly onto a facial mesh. Throughout the proposed pipeline, different methods for retargeting, rigging, and skinning are evaluated. Special attention is given to the fact that the source of the motion capture and the appearance of the virtual face do not need to be identical. This approach requires clean motion capture data.
To that end, the thesis presents an automatic way to clean facial motion trajectories and establish stable and coherent motion curves. Perceptual experiments are conducted not only to give insight into the capabilities of the proposed pipeline but also to provide valuable results in the field of virtual avatar perception. These experiments demonstrate the functionality of the pipeline and show that it is possible to re-use motion capture on different virtual faces. Additionally, the proposed pipeline is used as a tool to investigate the perception of non-verbal communication by virtual avatars.},
  subject  = {Facial animation; Motion capture; Retargeting; Skinning; Perception; Face recognition; Facial expressions; Feature extraction},
  language = {en}
}