@techreport{MurruKrugSchmidetal.,
  type     = {Working Paper},
  author   = {Murru, Roberto and Krug, Jonas and Schmid, Tom and Steba, Garri and Giacinto, Giorgio and von Hoffmann, Alexander},
  title    = {AI Generated Music Using Speech Emotion Recognition},
  doi      = {10.34646/thn/ohmdok-1201},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:92-opus4-12016},
  pages    = {1--9},
  abstract = {This study compares two different implementations of speech emotion recognition models. The emphasis is on evaluating their efficacy in capturing and characterizing dialogues performed by actors within a film scene in order to create suitable musical intervals. The overarching research aims to derive indications for enhancing the compositional process of film scores by recognizing the emotion in a particular scene. Based on established deep learning models, the study explores two distinct emotion classification schemes: the Six Emotion Prediction and the Valence/Arousal/Dominance Prediction. To facilitate a comparative analysis, a preliminary study and a subsequent survey are deployed. The preliminary study confirms a significant difference in the generated MIDI data. For this reason, a survey is essential to determine the better-fitting algorithm. Participants are tasked with rating the affective suitability of eight generated interval sequences for the corresponding film scenes. Suitability is verified quantitatively using a bidirectional rating system. Both model assessments are conducted within a uniform sound design, thus ensuring unbiased conditions for evaluation. Upon thorough examination of our analysis, a preference for method A becomes evident.},
  language = {en}
}