<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Alač, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Malmir, M.</style></author><author><style face="normal" font="default" size="100%">Nakano, Y.</style></author><author><style face="normal" font="default" size="100%">Satoh, K.</style></author><author><style face="normal" font="default" size="100%">Bekki, D.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Grounding a Sociable Robot’s Movements in Multimodal, Situational Engagements</style></title><secondary-title><style face="normal" font="default" size="100%">New Frontiers in Artificial Intelligence</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Artificial Intelligence (incl. Robotics)</style></keyword><keyword><style  face="normal" font="default" size="100%">Computer Appl. in Administrative Data Processing</style></keyword><keyword><style  face="normal" font="default" size="100%">Data Mining and Knowledge Discovery</style></keyword><keyword><style  face="normal" font="default" size="100%">Information Storage and Retrieval</style></keyword><keyword><style  face="normal" font="default" size="100%">Information Systems Applications (incl. Internet)</style></keyword><keyword><style  face="normal" font="default" size="100%">Mathematical Logic and Formal Languages</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://link.springer.com/chapter/10.1007/978-3-319-10061-6_18</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer International Publishing</style></publisher><pages><style face="normal" font="default" size="100%">267-281</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;To deal with the question of what a sociable robot is, we describe how an educational robot is encountered by children, teachers and designers in a preschool. We consider the importance of the robot’s body by focusing on how its movements are contingently embedded in interactional situations. We point out that the effects of agency that these movements generate are inseparable from their grounding in locally coordinated, multimodal actions and interactions.&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Malmir, M.</style></author><author><style face="normal" font="default" size="100%">Forster, D.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">HRI as a Tool to Monitor Socio-Emotional Development in Early Childhood Education</style></title><secondary-title><style face="normal" font="default" size="100%">HRI 2014 2nd Workshop on &quot;Applications for Emotional Robot&quot;</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2014</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Bielefeld</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p class=&quot;p1&quot;&gt;Sociable robots are benefiting from machine perception systems that automatically recognize social behavior (e.g., detect and recognize people, recognize their facial expressions and gestures).&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Malmir, M.</style></author><author><style face="normal" font="default" size="100%">Forster, D.</style></author><author><style face="normal" font="default" size="100%">Youngstrom, K.</style></author><author><style face="normal" font="default" size="100%">Morrison, L.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Home Alone: Social Robots for Digital Ethnography of Toddler Behavior</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the IEEE International Conference on Computer Vision Workshops</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Johnson, D.</style></author><author><style face="normal" font="default" size="100%">Malmir, M.</style></author><author><style face="normal" font="default" size="100%">Forster, D.</style></author><author><style face="normal" font="default" size="100%">Alač, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Design and early evaluation of the RUBI-5 sociable robots</style></title><secondary-title><style face="normal" font="default" size="100%">Development and Learning and Epigenetic Robotics (ICDL), 2012 IEEE International Conference on</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year><pub-dates><date><style  face="normal" font="default" size="100%">11/2012</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">San Diego, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4673-4964-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><accession-num><style face="normal" font="default" size="100%">13220365</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Theocharous, G.</style></author><author><style face="normal" font="default" size="100%">Philipose, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automated facial affect analysis for one-on-one tutoring applications</style></title><secondary-title><style face="normal" font="default" size="100%">2011 IEEE International Conference on Automatic Face Gesture Recognition and Workshops (FG 2011)</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">automated facial affect analysis</style></keyword><keyword><style  face="normal" font="default" size="100%">automated tutoring system</style></keyword><keyword><style  face="normal" font="default" size="100%">behavioural sciences computing</style></keyword><keyword><style  face="normal" font="default" size="100%">computer vision technique</style></keyword><keyword><style  face="normal" font="default" size="100%">Context</style></keyword><keyword><style  face="normal" font="default" size="100%">decision making</style></keyword><keyword><style  face="normal" font="default" size="100%">education</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">face recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Human</style></keyword><keyword><style  face="normal" font="default" size="100%">human computer interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">Labeling</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Mood</style></keyword><keyword><style  face="normal" font="default" size="100%">Histograms</style></keyword><keyword><style  face="normal" font="default" size="100%">one-on-one tutoring application</style></keyword><keyword><style  face="normal" font="default" size="100%">Intelligent tutoring systems</style></keyword><keyword><style  face="normal" font="default" size="100%">student mood analysis</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Santa Barbara, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-9140-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;In this paper, we explore the use of computer vision techniques to analyze students' moods during one-on-one teaching interactions. The eventual goal is to create automated tutoring systems that are sensitive to the student's mood and affective state. We find that the problem of accurately determining a child's mood from a single video frame is surprisingly difficult, even for humans. However, when the system is allowed to make decisions based on information from 10 to 30 seconds of video, excellent performance may be obtained.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">12007758</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Littlewort, G.</style></author><author><style face="normal" font="default" size="100%">Whitehill, J.</style></author><author><style face="normal" font="default" size="100%">Wu, T.</style></author><author><style face="normal" font="default" size="100%">Fasel, I.</style></author><author><style face="normal" font="default" size="100%">Frank, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Bartlett, M.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The computer expression recognition toolbox (CERT)</style></title><secondary-title><style face="normal" font="default" size="100%">2011 IEEE International Conference on Automatic Face Gesture Recognition and Workshops (FG 2011)</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">3D orientation</style></keyword><keyword><style  face="normal" font="default" size="100%">Accuracy</style></keyword><keyword><style  face="normal" font="default" size="100%">automatic real-time facial expression recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">CERT</style></keyword><keyword><style  face="normal" font="default" size="100%">computer expression recognition toolbox</style></keyword><keyword><style  face="normal" font="default" size="100%">Detectors</style></keyword><keyword><style  face="normal" font="default" size="100%">dual core laptop</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Encoding</style></keyword><keyword><style  face="normal" font="default" size="100%">extended Cohn-Kanade</style></keyword><keyword><style  face="normal" font="default" size="100%">Face</style></keyword><keyword><style  face="normal" font="default" size="100%">face recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">facial action unit coding system</style></keyword><keyword><style  face="normal" font="default" size="100%">facial expression dataset</style></keyword><keyword><style  face="normal" font="default" size="100%">Facial features</style></keyword><keyword><style  face="normal" font="default" size="100%">FACS</style></keyword><keyword><style  face="normal" font="default" size="100%">Gold</style></keyword><keyword><style  face="normal" font="default" size="100%">Image coding</style></keyword><keyword><style  face="normal" font="default" size="100%">software tool</style></keyword><keyword><style  face="normal" font="default" size="100%">software tools</style></keyword><keyword><style  face="normal" font="default" size="100%">two-alternative forced choice task</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2011</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Santa Barbara, CA</style></pub-location><isbn><style 
face="normal" font="default" size="100%">978-1-4244-9140-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p class=&quot;p1&quot;&gt;We present the Computer Expression Recognition Toolbox (CERT), a software tool for fully automatic real-time facial expression recognition, and officially release it for free academic use. CERT can automatically code the intensity of 19 different facial actions from the Facial Action Unit Coding System (FACS) and 6 different prototypical facial expressions. It also estimates the locations of 10 facial features as well as the 3-D orientation (yaw, pitch, roll) of the head. On a database of posed facial expressions, Extended Cohn-Kanade (CK+[1]), CERT achieves an average recognition performance (probability of correctness on a two-alternative forced choice (2AFC) task between one positive and one negative example) of 90.1% when analyzing facial actions. On a spontaneous facial expression dataset, CERT achieves an accuracy of nearly 80%. In a standard dual core laptop, CERT can process 320 × 240 video images in real time at approximately 10 frames per second.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">12007742</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Alač, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">When a robot is social: Spatial arrangements and multimodal semiotic engagement in the practice of social robotics</style></title><secondary-title><style face="normal" font="default" size="100%">Social Studies of Science</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">body</style></keyword><keyword><style  face="normal" font="default" size="100%">design</style></keyword><keyword><style  face="normal" font="default" size="100%">gesture</style></keyword><keyword><style  face="normal" font="default" size="100%">human–robot interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">laboratory</style></keyword><keyword><style  face="normal" font="default" size="100%">social agency</style></keyword><keyword><style  face="normal" font="default" size="100%">social robotics</style></keyword><keyword><style  face="normal" font="default" size="100%">spatial organization</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2011</style></year><pub-dates><date><style  face="normal" font="default" size="100%">12/2011</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">41</style></volume><pages><style face="normal" font="default" size="100%">893-926</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p class=&quot;p1&quot;&gt;Social roboticists design their robots to function as social agents in interaction with humans and other robots. Although we do not deny that the robot’s design features are crucial for attaining this aim, we point to the relevance of spatial organization and coordination between the robot and the humans who interact with it. We recover these interactions through an observational study of a social robotics laboratory and examine them by applying a multimodal interactional analysis to two moments of robotics practice. We describe the vital role of roboticists and of the group of preverbal infants, who are involved in a robot’s design activity, and we argue that the robot’s social character is intrinsically related to the subtleties of human interactional moves in laboratories of social robotics. This human involvement in the robot’s social agency is not simply controlled by individual will. Instead, the human–machine couplings are demanded by the situational dynamics in which the robot is lodged.&lt;/p&gt;
</style></abstract><issue><style face="normal" font="default" size="100%">6</style></issue><section><style face="normal" font="default" size="100%">893</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Infomax Control of Eye Movements</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Transactions on Autonomous Mental Development</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">active information gathering</style></keyword><keyword><style  face="normal" font="default" size="100%">autonomous computer program</style></keyword><keyword><style  face="normal" font="default" size="100%">autonomous physical agent</style></keyword><keyword><style  face="normal" font="default" size="100%">Computer vision</style></keyword><keyword><style  face="normal" font="default" size="100%">dynamic tracking task</style></keyword><keyword><style  face="normal" font="default" size="100%">Eye movement</style></keyword><keyword><style  face="normal" font="default" size="100%">eye movement strategy</style></keyword><keyword><style  face="normal" font="default" size="100%">face detection</style></keyword><keyword><style  face="normal" font="default" size="100%">faces</style></keyword><keyword><style  face="normal" font="default" size="100%">Infomax control</style></keyword><keyword><style  face="normal" font="default" size="100%">motor system</style></keyword><keyword><style  face="normal" font="default" size="100%">object detection</style></keyword><keyword><style  face="normal" font="default" size="100%">optimal control</style></keyword><keyword><style  face="normal" font="default" size="100%">optimal eye movement controller</style></keyword><keyword><style  face="normal" font="default" size="100%">policy gradient</style></keyword><keyword><style  face="normal" font="default" size="100%">probabilistic model</style></keyword><keyword><style  face="normal" font="default" size="100%">sensory system</style></keyword><keyword><style  face="normal" font="default" size="100%">static scenes</style></keyword><keyword><style  face="normal" font="default" size="100%">Visual Perception</style></keyword><keyword><style  face="normal" font="default" size="100%">visual search</style></keyword><keyword><style  face="normal" font="default" size="100%">visual system</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><volume><style face="normal" font="default" size="100%">2</style></volume><pages><style face="normal" font="default" size="100%">91-107</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Recently, infomax methods of optimal control have begun to reshape how we think about active information gathering. We show how such methods can be used to formulate the problem of choosing where to look. We show how an optimal eye movement controller can be learned from subjective experiences of information gathering, and we explore in simulation properties of the optimal controller. This controller outperforms other eye movement strategies proposed in the literature. The learned eye movement strategies are tailored to the specific visual system of the learner: we show that agents with different kinds of eyes should follow different eye movement strategies. Then we use these insights to build an autonomous computer program that follows this approach and learns to search for faces in images faster than current state-of-the-art techniques. The context of these results is search in static scenes, but the approach extends easily, and gives further efficiency gains, to dynamic tracking tasks. A limitation of infomax methods is that they require probabilistic models of uncertainty of the sensory system, the motor system, and the external world. In the final section of this paper, we propose future avenues of research by which autonomous physical agents may use developmental experience to subjectively characterize the uncertainties they face.&lt;/p&gt;
</style></abstract><issue><style face="normal" font="default" size="100%">2</style></issue><section><style face="normal" font="default" size="100%">91</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Warning: The author of this document may have no mental states. Read at your own risk</style></title><secondary-title><style face="normal" font="default" size="100%">Interaction Studies</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><volume><style face="normal" font="default" size="100%">11</style></volume><pages><style face="normal" font="default" size="100%">238-245</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">2</style></issue><section><style face="normal" font="default" size="100%">238</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Meltzoff, A.</style></author><author><style face="normal" font="default" size="100%">Kuhl, P.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Sejnowski, T.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Foundations for a New Science of Learning</style></title><secondary-title><style face="normal" font="default" size="100%">Science</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year></dates><volume><style face="normal" font="default" size="100%">325</style></volume><pages><style face="normal" font="default" size="100%">284-288</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Human learning is distinguished by the range and complexity of skills that can be learned and the degree of abstraction that can be achieved compared with those of other species. Homo sapiens is also the only species that has developed formal ways to enhance learning: teachers, schools, and curricula. Human infants have an intense interest in people and their behavior and possess powerful implicit learning mechanisms that are affected by social interaction. Neuroscientists are beginning to understand the brain mechanisms underlying learning and how shared brain systems for perception and action support social learning. Machine learning algorithms are being developed that allow robots and computers to learn autonomously. New insights from many different fields are converging to create a new science of learning that may transform educational practices.&lt;/p&gt;
</style></abstract><issue><style face="normal" font="default" size="100%">5938</style></issue><section><style face="normal" font="default" size="100%">284</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Wu, T.</style></author><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Ruvolo, P.</style></author><author><style face="normal" font="default" size="100%">Bartlett, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning to Make Facial Expressions</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE 8th International Conference on Development and Learning, 2009. ICDL 2009</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Actuators</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">face detection</style></keyword><keyword><style  face="normal" font="default" size="100%">face recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">facial motor parameters</style></keyword><keyword><style  face="normal" font="default" size="100%">Feedback</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">learning (artificial intelligence)</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Magnetic heads</style></keyword><keyword><style  face="normal" font="default" size="100%">Pediatrics</style></keyword><keyword><style  face="normal" font="default" size="100%">real-time facial expression recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot sensing systems</style></keyword><keyword><style  face="normal" font="default" size="100%">robotic head</style></keyword><keyword><style  face="normal" font="default" size="100%">Robots</style></keyword><keyword><style  face="normal" font="default" size="100%">self-guided learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Servomechanisms</style></keyword><keyword><style  face="normal" font="default" size="100%">Servomotors</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">06/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Shanghai</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-4117-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;This paper explores the process of self-guided learning of realistic facial expression production by a robotic head with 31 degrees of freedom. Facial motor parameters were learned using feedback from real-time facial expression recognition from video. The experiments show that the mapping of servos to expressions was learned in under one hour of training time. We discuss how our work may help illuminate the computational study of how infants learn to make facial expressions.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10801981</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Eckhardt, M.</style></author><author><style face="normal" font="default" size="100%">Virnes, M.</style></author><author><style face="normal" font="default" size="100%">Rodriguez, A.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Sociable robot improves toddler vocabulary skills</style></title><secondary-title><style face="normal" font="default" size="100%">2009 4th ACM/IEEE International Conference on Human-Robot Interaction (HRI)</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Algorithms</style></keyword><keyword><style  face="normal" font="default" size="100%">autonomously operated robot</style></keyword><keyword><style  face="normal" font="default" size="100%">Early Childhood Education Center</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational institutions</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Games</style></keyword><keyword><style  face="normal" font="default" size="100%">human factors</style></keyword><keyword><style  face="normal" font="default" size="100%">Human-robot interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">intervention period</style></keyword><keyword><style  face="normal" font="default" size="100%">Pediatrics</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot sensing systems</style></keyword><keyword><style  face="normal" font="default" size="100%">robotics</style></keyword><keyword><style  face="normal" font="default" size="100%">sociable robot</style></keyword><keyword><style  face="normal" font="default" size="100%">social aspects of automation</style></keyword><keyword><style  face="normal" font="default" size="100%">time 2 week</style></keyword><keyword><style  face="normal" font="default" size="100%">toddler vocabulary skills</style></keyword><keyword><style  face="normal" font="default" size="100%">Ubiquitous computing</style></keyword><keyword><style  face="normal" font="default" size="100%">Vocabulary</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">La Jolla, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-60558-404-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We report results of a study in which a low cost sociable robot was immersed at an Early Childhood Education Center for a period of 2 weeks. The study was designed to investigate whether the robot, which operated fully autonomously during the intervention period, could improve target vocabulary skills of 18-24 month old toddlers. The results showed a 27% improvement in knowledge of the target words taught by the robot when compared to a matched set of control words. The results suggest that sociable robots may be an effective and low cost technology to enrich Early Childhood Education environments.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">12908586</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Whitehill, J.</style></author><author><style face="normal" font="default" size="100%">Littlewort, G.</style></author><author><style face="normal" font="default" size="100%">Fasel, I.</style></author><author><style face="normal" font="default" size="100%">Bartlett, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Toward Practical Smile Detection</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Transactions on Pattern Analysis and Machine Intelligence</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Algorithms</style></keyword><keyword><style  face="normal" font="default" size="100%">Artificial intelligence</style></keyword><keyword><style  face="normal" font="default" size="100%">Automated</style></keyword><keyword><style  face="normal" font="default" size="100%">automatic facial expression recognition research</style></keyword><keyword><style  face="normal" font="default" size="100%">Biological Pattern Recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Biometry</style></keyword><keyword><style  face="normal" font="default" size="100%">Computer simulation</style></keyword><keyword><style  face="normal" font="default" size="100%">Computer vision</style></keyword><keyword><style  face="normal" font="default" size="100%">Computer-Assisted</style></keyword><keyword><style  face="normal" font="default" size="100%">Face</style></keyword><keyword><style  face="normal" font="default" size="100%">Face and gesture recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">face recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">feature representation</style></keyword><keyword><style  face="normal" font="default" size="100%">human-level expression recognition accuracy</style></keyword><keyword><style  face="normal" font="default" size="100%">illumination conditions</style></keyword><keyword><style  face="normal" font="default" size="100%">Image databases</style></keyword><keyword><style  face="normal" font="default" size="100%">Image Enhancement</style></keyword><keyword><style  face="normal" font="default" size="100%">Image Interpretation</style></keyword><keyword><style  face="normal" font="default" size="100%">image registration</style></keyword><keyword><style  face="normal" font="default" size="100%">image representation</style></keyword><keyword><style  face="normal" font="default" size="100%">learning (artificial intelligence)</style></keyword><keyword><style  face="normal" font="default" size="100%">machine learning approaches</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning Models</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">object detection</style></keyword><keyword><style  face="normal" font="default" size="100%">practical smile detection</style></keyword><keyword><style  face="normal" font="default" size="100%">Reproducibility of Results</style></keyword><keyword><style  face="normal" font="default" size="100%">Sensitivity and Specificity</style></keyword><keyword><style  face="normal" font="default" size="100%">Smiling</style></keyword><keyword><style  face="normal" font="default" size="100%">Subtraction Technique</style></keyword><keyword><style  face="normal" font="default" size="100%">training data set</style></keyword><keyword><style  face="normal" font="default" size="100%">visual databases</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">11/2009</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">31</style></volume><pages><style face="normal" font="default" size="100%">2106-2111</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Machine learning approaches have produced some of the highest reported performances for facial expression recognition. However, to date, nearly all automatic facial expression recognition research has focused on optimizing performance on a few databases that were collected under controlled lighting conditions on a relatively small number of subjects. This paper explores whether current machine learning methods can be used to develop an expression recognition system that operates reliably in more realistic conditions. We explore the necessary characteristics of the training data set, image registration, feature representation, and machine learning algorithms. A new database, GENKI, is presented which contains pictures, photographed by the subjects themselves, from thousands of different people in many different real-world imaging conditions. Results suggest that human-level expression recognition accuracy in real-life illumination conditions is achievable with machine learning technology. However, the data sets currently used in the automatic expression recognition literature to evaluate progress may be overly constrained and could potentially lead research into locally optimal algorithmic solutions.&lt;/p&gt;
</style></abstract><issue><style face="normal" font="default" size="100%">11</style></issue><section><style face="normal" font="default" size="100%">2106</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ruvolo, P.</style></author><author><style face="normal" font="default" size="100%">Fasel, I.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Auditory mood detection for social and educational robots</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE International Conference on Robotics and Automation, 2008. ICRA 2008</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">auditory mood detection</style></keyword><keyword><style  face="normal" font="default" size="100%">Computer vision</style></keyword><keyword><style  face="normal" font="default" size="100%">educational robot</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">emotional speech database</style></keyword><keyword><style  face="normal" font="default" size="100%">face detection</style></keyword><keyword><style  face="normal" font="default" size="100%">hearing</style></keyword><keyword><style  face="normal" font="default" size="100%">interactive robotic application</style></keyword><keyword><style  face="normal" font="default" size="100%">learning (artificial intelligence)</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Mood</style></keyword><keyword><style  face="normal" font="default" size="100%">Prototypes</style></keyword><keyword><style  face="normal" font="default" size="100%">object recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Robotics and Automation</style></keyword><keyword><style  face="normal" font="default" size="100%">Robots</style></keyword><keyword><style  face="normal" font="default" size="100%">social mood</style></keyword><keyword><style  face="normal" font="default" size="100%">social robot</style></keyword><keyword><style  face="normal" font="default" size="100%">Speech</style></keyword><keyword><style  face="normal" font="default" size="100%">USA Councils</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">05/2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Pasadena, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-1646-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Social robots face the fundamental challenge of detecting and adapting their behavior to the current social mood. For example, robots that assist teachers in early education must choose different behaviors depending on whether the children are crying, laughing, sleeping, or singing songs. Interactive robotic applications require perceptual algorithms that both run in real time and are adaptable to the challenging conditions of daily life. This paper explores a novel approach to auditory mood detection which was born out of our experience immersing social robots in classroom environments. We propose a new set of low-level spectral contrast features that extends a class of features which have proven very successful for object recognition in the modern computer vision literature. Features are selected and combined using machine learning approaches so as to make decisions about the ongoing auditory mood. We demonstrate excellent performance on two standard emotional speech databases (the Berlin Emotional Speech [W. Burkhardt et al., 2005], and the ORATOR dataset [H. Quast, 2001]). In addition, we establish strong baseline performance for mood detection on a database collected from a social robot immersed in a classroom of 18-24 month old children [J. Movellan et al., 2007]. This approach operates in real time at little computational cost. It has the potential to greatly enhance the effectiveness of social robots in daily life environments.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10014826</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ruvolo, P.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automatic cry detection in early childhood education settings</style></title><secondary-title><style face="normal" font="default" size="100%">7th IEEE International Conference on Development and Learning, 2008. ICDL 2008</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Acoustic noise</style></keyword><keyword><style  face="normal" font="default" size="100%">auditory moods</style></keyword><keyword><style  face="normal" font="default" size="100%">automatic cry detection</style></keyword><keyword><style  face="normal" font="default" size="100%">behavioural sciences computing</style></keyword><keyword><style  face="normal" font="default" size="100%">Deafness</style></keyword><keyword><style  face="normal" font="default" size="100%">early childhood education settings</style></keyword><keyword><style  face="normal" font="default" size="100%">education</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotion recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">human coders</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">learning (artificial intelligence)</style></keyword><keyword><style  face="normal" font="default" size="100%">Machine Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Mood</style></keyword><keyword><style  face="normal" font="default" size="100%">preschool classrooms</style></keyword><keyword><style  face="normal" font="default" size="100%">Prototypes</style></keyword><keyword><style  face="normal" font="default" size="100%">Robustness</style></keyword><keyword><style  face="normal" font="default" size="100%">Working environment noise</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Monterey, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-2661-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We present results on applying a novel machine learning approach for learning auditory moods in natural environments [1] to the problem of detecting crying episodes in preschool classrooms. The resulting system achieved levels of performance approaching that of human coders and also significantly outperformed previous approaches to this problem [2].&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10367600</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A barebones communicative robot based on social contingency and Infomax Control</style></title><secondary-title><style face="normal" font="default" size="100%">The 17th IEEE International Symposium on Robot and Human Interactive Communication, 2008. RO-MAN 2008</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Actuators</style></keyword><keyword><style  face="normal" font="default" size="100%">barebones communicative robot</style></keyword><keyword><style  face="normal" font="default" size="100%">Communication system control</style></keyword><keyword><style  face="normal" font="default" size="100%">Delay</style></keyword><keyword><style  face="normal" font="default" size="100%">Detectors</style></keyword><keyword><style  face="normal" font="default" size="100%">Human robot interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">human-model updating capability</style></keyword><keyword><style  face="normal" font="default" size="100%">humanoid robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Hydrogen</style></keyword><keyword><style  face="normal" font="default" size="100%">Infomax control</style></keyword><keyword><style  face="normal" font="default" size="100%">man-machine systems</style></keyword><keyword><style  face="normal" font="default" size="100%">Pediatrics</style></keyword><keyword><style  face="normal" font="default" size="100%">policy improvement</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot control</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot sensing systems</style></keyword><keyword><style  face="normal" font="default" size="100%">Scheduling</style></keyword><keyword><style  face="normal" font="default" size="100%">social contingency</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Munich</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-2212-8</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;In this paper, we present a barebones robot which is capable of interacting with humans based on social contingency. It expands the previous work of a contingency detector into having both human-model updating (developmental capability) and policy improvement (learning capability) based on the framework of Infomax control. The proposed new controller interacts with humans in both active and responsive ways, handling the turn-taking between them.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10174337</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ruvolo, P.</style></author><author><style face="normal" font="default" size="100%">Whitehill, J.</style></author><author><style face="normal" font="default" size="100%">Virnes, M.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Building a more effective teaching robot using apprenticeship learning</style></title><secondary-title><style face="normal" font="default" size="100%">7th IEEE International Conference on Development and Learning, 2008. ICDL 2008</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">apprenticeship learning</style></keyword><keyword><style  face="normal" font="default" size="100%">automated helicopter flight</style></keyword><keyword><style  face="normal" font="default" size="100%">Automatic control</style></keyword><keyword><style  face="normal" font="default" size="100%">Data mining</style></keyword><keyword><style  face="normal" font="default" size="100%">Delay</style></keyword><keyword><style  face="normal" font="default" size="100%">education</style></keyword><keyword><style  face="normal" font="default" size="100%">Educational robots</style></keyword><keyword><style  face="normal" font="default" size="100%">expert teaching</style></keyword><keyword><style  face="normal" font="default" size="100%">Helicopters</style></keyword><keyword><style  face="normal" font="default" size="100%">Human-robot interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">humanoid robots</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">Learning systems</style></keyword><keyword><style  face="normal" font="default" size="100%">mechanical control</style></keyword><keyword><style  face="normal" font="default" size="100%">robot teaching</style></keyword><keyword><style  face="normal" font="default" size="100%">Robotics and Automation</style></keyword><keyword><style  face="normal" font="default" size="100%">RUBI social robot</style></keyword><keyword><style  face="normal" font="default" size="100%">time 18 month to 24 month</style></keyword><keyword><style  face="normal" font="default" size="100%">timing</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Monterey, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-2661-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;What defines good teaching? While attributes such as timing, responsiveness to social cues, and pacing of material clearly play a role, it is difficult to create a comprehensive specification of what it means to be a good teacher. On the other hand, it is relatively easy to obtain examples of expert teaching behavior by observing a real teacher. With this inspiration as our guide, we investigated apprenticeship learning methods [1] that use data recorded from expert teachers as a means of improving the teaching abilities of RUBI, a social robot immersed in a classroom of 18-24 month old children. While this approach has achieved considerable success in mechanical control, such as automated helicopter flight [2], until now there has been little work on applying it to the field of social robotics. This paper explores two particular approaches to apprenticeship learning, and analyzes the models of teaching that each approach learns from the data of the human teacher. Empirical results indicate that the apprenticeship learning paradigm, though still nascent in its use in the social robotics field, holds promise, and that our proposed methods can already extract meaningful teaching models from demonstrations of a human expert.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10367601</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Zhang, L.</style></author><author><style face="normal" font="default" size="100%">Cottrell, G.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Visual saliency model for robot cameras</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE International Conference on Robotics and Automation, 2008. ICRA 2008</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Application software</style></keyword><keyword><style  face="normal" font="default" size="100%">approximation theory</style></keyword><keyword><style  face="normal" font="default" size="100%">Bayes methods</style></keyword><keyword><style  face="normal" font="default" size="100%">Bayesian methods</style></keyword><keyword><style  face="normal" font="default" size="100%">Bayesian model</style></keyword><keyword><style  face="normal" font="default" size="100%">camera control</style></keyword><keyword><style  face="normal" font="default" size="100%">Cameras</style></keyword><keyword><style  face="normal" font="default" size="100%">Central Processing Unit</style></keyword><keyword><style  face="normal" font="default" size="100%">Computational efficiency</style></keyword><keyword><style  face="normal" font="default" size="100%">Computational modeling</style></keyword><keyword><style  face="normal" font="default" size="100%">Explosions</style></keyword><keyword><style  face="normal" font="default" size="100%">fast approximation</style></keyword><keyword><style  face="normal" font="default" size="100%">human visual attention</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">Open loop systems</style></keyword><keyword><style  face="normal" font="default" size="100%">robot cameras</style></keyword><keyword><style  face="normal" font="default" size="100%">robot vision</style></keyword><keyword><style  face="normal" font="default" size="100%">Robot vision systems</style></keyword><keyword><style  face="normal" font="default" size="100%">robotic application</style></keyword><keyword><style  face="normal" font="default" size="100%">task free conditions</style></keyword><keyword><style  face="normal" font="default" size="100%">visual saliency model</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">05/2008</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Pasadena, CA</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-4244-1646-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;
Recent years have seen an explosion of research on the computational modeling of human visual attention in task free conditions, i.e., given an image predict where humans are likely to look. This area of research could potentially provide general purpose mechanisms for robots to orient their cameras. One difficulty is that most current models of visual saliency are computationally very expensive and not suited to real time implementations needed for robotic applications. Here we propose a fast approximation to a Bayesian model of visual saliency recently proposed in the literature. The approximation can run in real time on current computers at very little computational cost, leaving plenty of CPU cycles for other tasks. We empirically evaluate the saliency model in the domain of controlling saccades of a camera in social robotics situations. The goal was to orient a camera as quickly as possible toward human faces. We found that this simple general purpose saliency model doubled the success rate of the camera: it captured images of people 70% of the time, when compared to a 35% success rate when the camera was controlled using an open-loop scheme. After 3 saccades (camera movements), the robot was 96% likely to capture at least one person. The results suggest that visual saliency models may provide a useful front end for camera control in robotics applications.&lt;/p&gt;
</style></abstract><accession-num><style face="normal" font="default" size="100%">10014787</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Butko, N.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning to Learn</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE International Conference on Development and Learning</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Taylor, C.</style></author><author><style face="normal" font="default" size="100%">Ruvolo, P.</style></author><author><style face="normal" font="default" size="100%">Eckhardt, M.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The RUBI Project: A Progress Report</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 2nd ACM/IEEE International Conference on Human-Robot Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Cicourel, A.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Socialization between toddlers and robots at an early childhood education center</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the National Academy of Sciences</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><volume><style face="normal" font="default" size="100%">104</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">46</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Behavior Analysis of Children’s Touch on a Small Humanoid Robot: Long-term Observation at a Daily Classroom over Three Months</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 15th IEEE
International Symposium on Robot and Human Interactive Communication (RO-MAN)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2006</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Hatfield, United Kingdom</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Fortenberry, B.</style></author><author><style face="normal" font="default" size="100%">Aisaka, K.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Daily HRI evaluation at a classroom environment: Reports from dance interaction experiments</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 2006 Conference on Human-Robot Interaction (HRI)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><pub-location><style face="normal" font="default" size="100%">Salt Lake City</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The RUBI Project: Designing Everyday Robots by Immersion</style></title><secondary-title><style face="normal" font="default" size="100%">Fifth International Conference on Development and Learning (ICDL)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">06/2006</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Bloomington, U.S.A.</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Fortenberry, B.</style></author><author><style face="normal" font="default" size="100%">Aisaka, K.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Developing Dance Interaction between QRIO and Toddlers in a Classroom Environment: Plans for the First Steps</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 2005 IEEE International Workshop on Robot and Human Interactive Communication
(RO-MAN)</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Best Paper Award</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2005</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Nashville, U.S.A.</style></pub-location><pages><style face="normal" font="default" size="100%">223-228</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>9</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Fasel, I.</style></author><author><style face="normal" font="default" size="100%">Fortenberry, B.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">MPT: the Machine Perception Toolbox</style></title><alt-title><style face="normal" font="default" size="100%">Computer Vision and Image Understanding</style></alt-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language><orig-pub><style face="normal" font="default" size="100%">A generative framework for boosting with applications to real-time eye coding</style></orig-pub></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Fortenberry, B.</style></author><author><style face="normal" font="default" size="100%">Aisaka, K.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Plans for developing real-time dance interaction between QRIO and toddlers in a classroom environment</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the International Conference on Development and Learning (ICDL05)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><pub-location><style face="normal" font="default" size="100%">Osaka, Japan</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Movellan, J.</style></author><author><style face="normal" font="default" size="100%">Tanaka, F.</style></author><author><style face="normal" font="default" size="100%">Fortenberry, B.</style></author><author><style face="normal" font="default" size="100%">Aisaka, K.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The RUBI project: Origins, principles and first steps</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the International Conference on Development and Learning (ICDL05)</style></secondary-title></titles><dates><year><style  face="normal" font="default"
size="100%">2005</style></year></dates><pub-location><style face="normal" font="default" size="100%">Osaka, Japan</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Littlewort, G.</style></author><author><style face="normal" font="default" size="100%">Bartlett, M.</style></author><author><style face="normal" font="default" size="100%">Fasel, I.</style></author><author><style face="normal" font="default" size="100%">Chenu, J.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Analysis of machine learning methods for real-time recognition of facial expressions from video</style></title><secondary-title><style face="normal" font="default" size="100%">Computer Vision and Pattern Recognition: Face Processing Workshop</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Littlewort, G.</style></author><author><style face="normal" font="default" size="100%">Bartlett, M.</style></author><author><style face="normal" font="default" size="100%">Chenu, J.</style></author><author><style face="normal" font="default" size="100%">Fasel, I.</style></author><author><style face="normal" font="default" size="100%">Kanda, T.</style></author><author><style face="normal" font="default" size="100%">Ishiguro, H.</style></author><author><style face="normal" font="default" size="100%">Movellan, J.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards social robots: Automatic evaluation of human-robot interaction by face detection and expression classification</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Neural Information Processing Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><volume><style face="normal" font="default" size="100%">16</style></volume><pages><style face="normal" font="default" size="100%">1563-1570</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><reprint-edition><style face="normal" font="default" size="100%">MIT Press</style></reprint-edition><section><style face="normal" font="default" size="100%">1563</style></section></record></records></xml>