{"created":"2024-12-12T07:21:09.625986+00:00","id":2009845,"links":{},"metadata":{"_buckets":{"deposit":"edb265b4-7bfb-41b9-906a-e390ec0680ea"},"_deposit":{"created_by":7,"id":"2009845","owners":[7],"pid":{"revision_id":0,"type":"depid","value":"2009845"},"status":"published"},"_oai":{"id":"oai:tokushima-u.repo.nii.ac.jp:02009845","sets":["1713853213384:1713853296295:1716268429788:1716268708963"]},"author_link":[],"item_10001_alternative_title_1":{"attribute_name":"タイトル別表記","attribute_value_mlt":[{"subitem_alternative_title":"深層学習を用いたビデオ画像における人間の感情認識に関する研究","subitem_alternative_title_language":"ja"}]},"item_10001_biblio_info_7":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2022-03-01","bibliographicIssueDateType":"Issued"}}]},"item_10001_description_5":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"From the beginning of this century, Artificial Intelligence (AI) has evolved to handle problems in image recognition, classification, segmentation, etc. AI learning is categorized by supervised, semi-supervised, unsupervised or reinforcement learning. Some researchers have said that the future of AI is self-awareness, which is based on reinforcement learning by rewards based on task success. Moreover, it is said that the reward would be harvested from human reactions, especially emotion recognition. On the other hand, emotion recognition is a new inspiring field, but the lack of enough amount of data for training an AI system is the major problem. Fortunately, in the near future, it will be necessary to correctly recognize human emotions because image and video dataset availability is rapidly increasing.\nEmotions are mental reactions (such as anger, fear, etc.) marked by relatively strong feelings and usually causing physical reactions to previous actions in a short time duration focused on specific objects. 
In this work, we are focusing on emotion recognition using face, body part, and intonation.\nAs stated earlier, automatic understanding of human emotion in a wild setting using audiovisual signals is extremely challenging. Latent continuous dimensions can be used to accomplish the analysis of human emotional states, behaviors, and reactions displayed in real-world settings. Moreover, Valence and Arousal combinations constitute well-known and effective representations of emotions. In this thesis, a new Non-inertial loss function is proposed to train emotion recognition deep learning models. It is evaluated in wild settings using four types of candidate networks with different pipelines and sequence lengths. It is then compared to the Concordance Correlation Coefficient (CCC) and Mean Squared Error (MSE) losses commonly used for training. To prove its effectiveness on efficiency and stability in continuous or non-continuous input data, experiments were performed using the Aff-Wild dataset. 
Encouraging results were obtained.\nThe contributions of the proposed method Non-Inertial loss function are as follows:\n1.The new loss function allows for Valence and Arousal to be viewed together.\n2.Ability to train on less data.\n3.Better results.\n4.Faster training times.\nThe rest of this thesis explains our motivation, the proposed methods and finally presents our results.","subitem_description_language":"en","subitem_description_type":"Abstract"}]},"item_10001_version_type_20":{"attribute_name":"出版タイプ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_be7fb7dd8ff6fe43","subitem_version_type":"NA"}]},"item_1714461018643":{"attribute_name":"報告番号","attribute_value_mlt":[{"subitem_dissertationnumber":"甲第3579号"}]},"item_1714461102074":{"attribute_name":"学位名","attribute_value_mlt":[{"subitem_degreename":"博士(工学)","subitem_degreename_language":"ja"}]},"item_1714461118377":{"attribute_name":"学位授与年月日","attribute_value_mlt":[{"subitem_dategranted":"2022-03-01"}]},"item_1714461137393":{"attribute_name":"学位授与機関","attribute_value_mlt":[{"subitem_degreegrantor":[{"subitem_degreegrantor_language":"ja","subitem_degreegrantor_name":"徳島大学"}]}]},"item_1715043197608":{"attribute_name":"アクセス権","attribute_value_mlt":[{"subitem_access_right":"open access"}]},"item_1718868208704":{"attribute_name":"備考","attribute_value_mlt":[{"subitem_textarea_language":"ja","subitem_textarea_value":"内容要旨・審査要旨・論文本文の公開\n学位授与者所属 : 徳島大学大学院先端技術科学教育部(システム創生工学専攻)"}]},"item_1718868303842":{"attribute_name":"学位記番号","attribute_value_mlt":[{"subitem_text_language":"ja","subitem_text_value":"甲先第421号"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"ジャルガルサイハン, オリギル","creatorNameLang":"ja"},{"creatorName":"ジャルガルサイハン, オリギル","creatorNameLang":"ja-Kana"},{"creatorName":"Jargalsaikhan, 
Orgil","creatorNameLang":"en"}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_access","date":[{"dateType":"Available","dateValue":"2022-04-28"}],"displaytype":"detail","filename":"k3579_abstract.pdf","filesize":[{"value":"47 KB"}],"format":"application/pdf","mimetype":"application/pdf","url":{"objectType":"abstract","url":"https://tokushima-u.repo.nii.ac.jp/record/2009845/files/k3579_abstract.pdf"},"version_id":"f523c6aa-f479-4cfc-bf1b-76c0f800b8b0"},{"accessrole":"open_access","date":[{"dateType":"Available","dateValue":"2022-04-28"}],"displaytype":"detail","filename":"k3579_review.pdf","filesize":[{"value":"55.5 KB"}],"format":"application/pdf","mimetype":"application/pdf","url":{"objectType":"other","url":"https://tokushima-u.repo.nii.ac.jp/record/2009845/files/k3579_review.pdf"},"version_id":"8fa90c9a-d790-4877-88a2-e12e946120ac"},{"accessrole":"open_access","date":[{"dateType":"Available","dateValue":"2022-07-01"}],"displaytype":"detail","filename":"k3579_fulltext.pdf","filesize":[{"value":"22.7 MB"}],"format":"application/pdf","mimetype":"application/pdf","url":{"objectType":"fulltext","url":"https://tokushima-u.repo.nii.ac.jp/record/2009845/files/k3579_fulltext.pdf"},"version_id":"7c8a9d82-89b1-4464-99a2-d714cbe98af2"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Emotion recognition","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"EVM-Transformer network","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"emotion classification","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"video to sequence","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"facial emotion 
recognition","subitem_subject_language":"en","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"doctoral thesis","resourceuri":"http://purl.org/coar/resource_type/c_db06"}]},"item_title":"A study on Human Emotion Recognition in Video Images using Deep Learning","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"A study on Human Emotion Recognition in Video Images using Deep Learning","subitem_title_language":"en"}]},"item_type_id":"40001","owner":"7","path":["1716268708963"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2022-04-28"},"publish_date":"2022-04-28","publish_status":"0","recid":"2009845","relation_version_is_last":true,"title":["A study on Human Emotion Recognition in Video Images using Deep Learning"],"weko_creator_id":"7","weko_shared_id":-1},"updated":"2024-12-12T07:21:18.905245+00:00"}