<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Wu, T.</style></author><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Ruvolo, P.</style></author><author><style face="normal" font="default" size="100%">Bartlett, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning to Make Facial Expressions</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE 8th International Conference on Development and Learning, 2009. ICDL 2009</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Actuators</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">face detection</style></keyword><keyword><style  face="normal" font="default" size="100%">face recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">facial motor parameters</style></keyword><keyword><style  face="normal" font="default" size="100%">Feedback</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">learning (artificial intelligence)</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Magnetic heads</style></keyword><keyword><style  face="normal" font="default" size="100%">Pediatrics</style></keyword><keyword><style  face="normal" font="default" size="100%">real-time facial expression recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot sensing systems</style></keyword><keyword><style  face="normal" font="default" size="100%">robotic head</style></keyword><keyword><style  face="normal" font="default" size="100%">Robots</style></keyword><keyword><style  face="normal" font="default" size="100%">self-guided learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Servomechanisms</style></keyword><keyword><style  face="normal" font="default" size="100%">Servomotors</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">06/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Shanghai</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-4117-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;&lt;span style=&quot;color: rgb(68, 68, 68); font-family: 'Lucida Grande', Verdana, sans-serif; font-size: 14px;&quot;&gt;This paper explores the process of self-guided learning of realistic facial expression production by a robotic head with 31 degrees of freedom. Facial motor parameters were learned using feedback from real-time facial expression recognition from video. The experiments show that the mapping of servos to expressions was learned in under one-hour of training time. We discuss how our work may help illuminate the computational study of how infants learn to make facial expressions.&lt;/span&gt;&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10801981</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Eckhardt, M.</style></author><author><style face="normal" font="default" size="100%">Virnes, M.</style></author><author><style face="normal" font="default" size="100%">Rodriguez, A.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Sociable robot improves toddler vocabulary skills</style></title><secondary-title><style face="normal" font="default" size="100%">2009 4th ACM/IEEE International Conference on Human-Robot Interaction (HRI)</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Algorithms</style></keyword><keyword><style  face="normal" font="default" size="100%">autonomously operated robot</style></keyword><keyword><style  face="normal" font="default" size="100%">Early Childhood Education Center</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational institutions</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Games</style></keyword><keyword><style  face="normal" font="default" size="100%">human factors</style></keyword><keyword><style  face="normal" font="default" size="100%">Human-robot interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">intervention period</style></keyword><keyword><style  face="normal" font="default" size="100%">Pediatrics</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot sensing systems</style></keyword><keyword><style  face="normal" font="default" size="100%">robotics</style></keyword><keyword><style  face="normal" font="default" size="100%">sociable robot</style></keyword><keyword><style  face="normal" font="default" size="100%">social aspects of automation</style></keyword><keyword><style  face="normal" font="default" size="100%">time 2 week</style></keyword><keyword><style  face="normal" font="default" size="100%">toddler vocabulary skills</style></keyword><keyword><style  face="normal" font="default" size="100%">Ubiquitous computing</style></keyword><keyword><style  face="normal" font="default" size="100%">Vocabulary</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">La Jolla, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-60558-404-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;&lt;span style=&quot;color: rgb(68, 68, 68); font-family: 'Lucida Grande', Verdana, sans-serif; font-size: 14px; background-color: rgba(0, 0, 0, 0.0470588);&quot;&gt;We report results of a study in which a low cost sociable robot was immersed at an Early Childhood Education Center for a period of 2 weeks. The study was designed to investigate whether the robot, which operated fully autonomously during the intervention period, could improve target vocabulary skills of 18-24 month of age toddlers. The results showed a 27% improvement in knowledge of the target words taught by the robot when compared to a matched set of control words. 
The results suggest that sociable robots may be an effective and low cost technology to enrich Early Childhood Education environments.&lt;/span&gt;&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">12908586</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A barebones communicative robot based on social contingency and Infomax Control</style></title><secondary-title><style face="normal" font="default" size="100%">The 17th IEEE International Symposium on Robot and Human Interactive Communication, 2008. RO-MAN 2008</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Actuators</style></keyword><keyword><style  face="normal" font="default" size="100%">barebones communicative robot</style></keyword><keyword><style  face="normal" font="default" size="100%">Communication system control</style></keyword><keyword><style  face="normal" font="default" size="100%">Delay</style></keyword><keyword><style  face="normal" font="default" size="100%">Detectors</style></keyword><keyword><style  face="normal" font="default" size="100%">Human robot interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">human-model updating capability</style></keyword><keyword><style  face="normal" font="default" size="100%">humanoid robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Hydrogen</style></keyword><keyword><style  face="normal" font="default" size="100%">Infomax control</style></keyword><keyword><style  face="normal" font="default" size="100%">man-machine systems</style></keyword><keyword><style  face="normal" font="default" size="100%">Pediatrics</style></keyword><keyword><style  face="normal" font="default" size="100%">policy improvement</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot control</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot sensing systems</style></keyword><keyword><style  face="normal" font="default" size="100%">Scheduling</style></keyword><keyword><style  face="normal" font="default" size="100%">social contingency</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Munich</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-2212-8</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;&lt;span style=&quot;color: rgb(68, 68, 68); font-family: 'Lucida Grande', Verdana, sans-serif; font-size: 14px; background-color: rgba(0, 0, 0, 0.0470588);&quot;&gt;In this paper, we present a barebones robot which is capable of interacting with humans based on social contingency. It expands the previous work of a contingency detector into having both human-model updating (developmental capability) and policy improvement (learning capability) based on the framework of Infomax control. The proposed new controller interacts with humans in both active and responsive ways handling the turn-taking between them.&lt;/span&gt;&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10174337</style></accession-num></record></records></xml>