@inproceedings{73,
  title        = {The Computer Expression Recognition Toolbox ({CERT})},
  booktitle    = {2011 {IEEE} International Conference on Automatic Face Gesture Recognition and Workshops ({FG} 2011)},
  year         = {2011},
  month        = mar,
  publisher    = {IEEE},
  organization = {IEEE},
  address      = {Santa Barbara, CA},
  abstract     = {We present the Computer Expression Recognition Toolbox (CERT), a software tool for fully automatic real-time facial expression recognition, and officially release it for free academic use. CERT can automatically code the intensity of 19 different facial actions from the Facial Action Unit Coding System (FACS) and 6 different prototypical facial expressions. It also estimates the locations of 10 facial features as well as the 3-D orientation (yaw, pitch, roll) of the head. On a database of posed facial expressions, Extended Cohn-Kanade (CK+[1]), CERT achieves an average recognition performance (probability of correctness on a two-alternative forced choice (2AFC) task between one positive and one negative example) of 90.1\% when analyzing facial actions. On a spontaneous facial expression dataset, CERT achieves an accuracy of nearly 80\%. In a standard dual core laptop, CERT can process 320 {\texttimes} 240 video images in real time at approximately 10 frames per second.},
  keywords     = {3D orientation, Accuracy, automatic real-time facial expression recognition, CERT, computer expression recognition toolbox, Detectors, dual core laptop, Emotion recognition, Encoding, extended Cohn-Kanade, Face, face recognition, facial action unit coding system, facial expression dataset, Facial features, FACS, Gold, Image coding, software tool, software tools, two-alternative forced choice task},
  isbn         = {978-1-4244-9140-7},
  author       = {Littlewort, G. and Whitehill, J. and Wu, T. and Fasel, I. and Frank, M. and Movellan, J. and Bartlett, M.},
}

@inproceedings{64,
  title        = {Learning to Make Facial Expressions},
  booktitle    = {{IEEE} 8th International Conference on Development and Learning, 2009. {ICDL} 2009},
  year         = {2009},
  month        = jun,
  publisher    = {IEEE},
  organization = {IEEE},
  address      = {Shanghai},
  abstract     = {This paper explores the process of self-guided learning of realistic facial expression production by a robotic head with 31 degrees of freedom. Facial motor parameters were learned using feedback from real-time facial expression recognition from video. The experiments show that the mapping of servos to expressions was learned in under one-hour of training time. We discuss how our work may help illuminate the computational study of how infants learn to make facial expressions.},
  keywords     = {Actuators, Emotion recognition, face detection, face recognition, facial motor parameters, Feedback, Humans, learning (artificial intelligence), Machine Learning, Magnetic heads, Pediatrics, real-time facial expression recognition, Robot sensing systems, robotic head, Robots, self-guided learning, Servomechanisms, Servomotors},
  isbn         = {978-1-4244-4117-4},
  author       = {Wu, T. and Butko, N. and Ruvulo, P. and Bartlett, M. and Movellan, J.},
}

@article{69,
  title    = {Toward Practical Smile Detection},
  journal  = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
  volume   = {31},
  year     = {2009},
  month    = nov,
  pages    = {2106--2111},
  abstract = {Machine learning approaches have produced some of the highest reported performances for facial expression recognition. However, to date, nearly all automatic facial expression recognition research has focused on optimizing performance on a few databases that were collected under controlled lighting conditions on a relatively small number of subjects. This paper explores whether current machine learning methods can be used to develop an expression recognition system that operates reliably in more realistic conditions. We explore the necessary characteristics of the training data set, image registration, feature representation, and machine learning algorithms. A new database, GENKI, is presented which contains pictures, photographed by the subjects themselves, from thousands of different people in many different real-world imaging conditions. Results suggest that human-level expression recognition accuracy in real-life illumination conditions is achievable with machine learning technology. However, the data sets currently used in the automatic expression recognition literature to evaluate progress may be overly constrained and could potentially lead research into locally optimal algorithmic solutions.},
  keywords = {Algorithms, Artificial intelligence, Automated, automatic facial expression recognition research, Biological Pattern Recognition, Biometry, Computer simulation, Computer vision, Computer-Assisted, Face, Face and gesture recognition, face recognition, feature representation, human-level expression recognition accuracy, illumination conditions, Image databases, Image Enhancement, Image Interpretation, image registration image representation, learning (artificial intelligence), machine learning approaches, Machine Learning Models, n Humans, object detection, practical smile detection, Reproducibility of Results, Sensitivity and Specificity, Smiling, Subtraction Technique, training data set, visual databases},
  issn     = {0162-8828},
  author   = {Whitehill, J. and Littlewort, G. and Fasel, I. and Bartlett, M. and Movellan, J.},
}

@inproceedings{45,
  title     = {Analysis of Machine Learning Methods for Real-Time Recognition of Facial Expressions from Video},
  booktitle = {Computer Vision and Pattern Recognition: Face Processing Workshop},
  year      = {2004},
  author    = {Littlewort, G. and Bartlett, M. and Fasel, I. and Chenu, J. and Movellan, J.},
}

@article{44,
  title   = {Towards Social Robots: Automatic Evaluation of Human-Robot Interaction by Face Detection and Expression Classification},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {16},
  year    = {2004},
  pages   = {1563--1570},
  author  = {Littlewort, G. and Bartlett, M. and Chenu, J. and Fasel, I. and Kanda, T. and Ishiguro, H. and Movellan, J.},
}