<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Theocharous, G.</style></author><author><style face="normal" font="default" size="100%">Philipose, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automated facial affect analysis for one-on-one tutoring applications</style></title><secondary-title><style face="normal" font="default" size="100%">2011 IEEE International Conference on Automatic Face Gesture Recognition and Workshops (FG 2011)</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">automated facial affect analysis</style></keyword><keyword><style  face="normal" font="default" size="100%">automated tutoring system</style></keyword><keyword><style  face="normal" font="default" size="100%">behavioural sciences computing</style></keyword><keyword><style  face="normal" font="default" size="100%">computer vision technique</style></keyword><keyword><style  face="normal" font="default" size="100%">Context</style></keyword><keyword><style  face="normal" font="default" size="100%">decision making</style></keyword><keyword><style  face="normal" font="default" size="100%">education</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">face recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Human</style></keyword><keyword><style  face="normal" font="default" size="100%">human computer interaction</style></keyword><keyword><style  face="normal" font="default" 
size="100%">Labeling</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Mood</style></keyword><keyword><style  face="normal" font="default" size="100%">Histograms</style></keyword><keyword><style  face="normal" font="default" size="100%">one-on-one tutoring application</style></keyword><keyword><style  face="normal" font="default" size="100%">Intelligent tutoring systems</style></keyword><keyword><style  face="normal" font="default" size="100%">student mood analysis</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Santa Barbara, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-9140-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;&lt;span style=&quot;color: rgb(68, 68, 68); font-family: 'Lucida Grande', Verdana, sans-serif; font-size: 14px; background-color: rgba(0, 0, 0, 0.0470588);&quot;&gt;In this paper, we explore the use of computer vision techniques to analyze students' moods during one-on-one teaching interactions. The eventual goal is to create automated tutoring systems that are sensitive to the student's mood and affective state. We find that the problem of accurately determining a child's mood from a single video frame is surprisingly difficult, even for humans. However when the system is allowed to make decisions based on information from 10 to 30 seconds of video, excellent performance may be obtained.&lt;/span&gt;&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">12007758</style></accession-num></record></records></xml>