Merged
22 commits
36e0a33
Init scene for expression tracking.
UPstartDeveloper Apr 11, 2021
66a0476
Rename folder for expression tracking.
UPstartDeveloper Apr 11, 2021
5c3c385
Add robot model.
UPstartDeveloper Apr 11, 2021
bc43456
Fix text color and add credit for CSS.
UPstartDeveloper Apr 11, 2021
a90f2a4
Separate JS for robot into separate script.
UPstartDeveloper Apr 11, 2021
c2535ed
Init JS for expression tracking.
UPstartDeveloper Apr 11, 2021
6d6d3da
List face-api as dependency.
UPstartDeveloper Apr 11, 2021
3b77d6e
Add weights for face recognition models.
UPstartDeveloper Apr 11, 2021
479f446
Get web camera video.
UPstartDeveloper Apr 11, 2021
8ed96e0
Fix bug in creating the HTML video element.
UPstartDeveloper Apr 11, 2021
de88481
Fix error in expressions not being predicted.
UPstartDeveloper Apr 11, 2021
04da19e
Take out face landmark detection.
UPstartDeveloper Apr 11, 2021
7fd28d0
Add comments to createGUI function.
UPstartDeveloper Apr 12, 2021
c34609d
Fix scope issue in returning face object.
UPstartDeveloper Apr 12, 2021
81e8e47
Init index.js to combine JS for robot and face tracking.
UPstartDeveloper Apr 12, 2021
f765bda
[Broken] Tried to access face from callback via a dict.
UPstartDeveloper Apr 12, 2021
7b6e381
Access face outside of loader.load using a timeout.
UPstartDeveloper Apr 13, 2021
10522f9
Use async call to return face from init() func.
UPstartDeveloper Apr 14, 2021
613df70
Reintegrate face tracking and robot scene.
UPstartDeveloper Apr 14, 2021
b5874d6
Add video debugger.
UPstartDeveloper Apr 14, 2021
8699fd5
Cleanup index.html.
UPstartDeveloper Apr 14, 2021
3e79399
Cleanup robot functions, improve stability of expressionTracker.js.
UPstartDeveloper Apr 14, 2021
55 changes: 55 additions & 0 deletions expressionTrackingDemos/robot/expressionTracker.js
@@ -0,0 +1,55 @@
/* The following script was modified from a project originally
* created by Aniket Eknath Kudale:
* https://www.opensourceforu.com/2020/06/building-a-facial-expression-recognition-app-using-tensorflow-js/
*/

import "https://cdn.jsdelivr.net/gh/justadudewhohacks/face-api.js/dist/face-api.js";

export function startVideo(init, animate) {
/* Turns on the webcam, then passes the video stream
* to the emotion tracking AI.
* @param {function} init: this assembles the robot scene using Three.js.
* @param {function} animate: this enables the rendering loop for the robot.
*/
// A: get the webcam
const constraints = {video: true};
navigator.mediaDevices.getUserMedia(constraints)
// B: pass the video stream, as well as the robot setup functions to the AI
.then(videoStream => {
trackExpressions(videoStream, init, animate);
})
// C: log errors
.catch(error => {
console.error('Error accessing camera devices.', error);
});
}

const trackExpressions = (videoStream, init, animate) => {
/* Continuously animate the robot w/ expressions found on the user
* @param {MediaStream} videoStream: the video of the user's face
* @param {function} init: this assembles the robot scene using Three.js.
* @param {function} animate: this enables the rendering loop for the robot.
*/
// A: make a video from the web cam stream
console.log(videoStream); // debug: inspect the incoming MediaStream
const video = document.getElementById('faceStream');
video.srcObject = videoStream;
// B: detect emotions
video.addEventListener('play', () => {
// C: init the face
init();
animate();
// D: animate the robot's angry, surprised, and sad expressions
setInterval(async() => {
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceExpressions();
// console.log(detections); // The model's confidence in the emotions it detects
if (window.face !== undefined && detections.length > 0) {
window.face.morphTargetInfluences[0] = detections[0].expressions.angry;
window.face.morphTargetInfluences[1] = detections[0].expressions.surprised;
window.face.morphTargetInfluences[2] = detections[0].expressions.sad;
}
}, 100);
})
// E: activate the detections
video.play();
}
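
For reference, a minimal sketch of the face-api.js call that the interval above relies on, isolated into a hypothetical helper (readExpressions is not part of this PR); it assumes the models have already been loaded (as done in index.js below) and that video is the playing #faceStream element:

// sketch: the detection call used above, assuming the face-api.js models are loaded
async function readExpressions(video) {
    const detections = await faceapi
        .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
        .withFaceExpressions();
    if (detections.length === 0) return null;
    // face-api.js reports a confidence in [0, 1] for each emotion
    const { angry, surprised, sad } = detections[0].expressions;
    return { angry, surprised, sad };
}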
33 changes: 33 additions & 0 deletions expressionTrackingDemos/robot/index.html
@@ -0,0 +1,33 @@
<!-- This scene was originally inspired by one of the examples by Don McCurdy on the Three.js website:
https://github.com/mrdoob/three.js/blob/master/examples/webgl_animation_skinning_morph.html

The robot was originally created by Tomás Laulhé: https://www.patreon.com/quaternius
-->

<!DOCTYPE html>
<html lang="en">
<head>
<title>Roberto, the iQ3 Robot</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<!-- CSS for the Robot -->
<link type="text/css" rel="stylesheet" href="styles.css">
</head>

<body>
<!-- Information about the Three.js Scenegraph -->
<div id="info">
<a href="https://threejs.org" target="_blank" rel="noopener">three.js</a> webgl - skinning and morphing<br />
<p>
The animation system allows clips to be played individually, looped, or crossfaded with other clips. This example shows a character looping in one of several base animation states, then transitioning smoothly to one-time actions. Facial expressions are controlled independently with morph targets.
</p>
Model by
<a href="https://www.patreon.com/quaternius" target="_blank" rel="noopener">Tomás Laulhé</a>,
modifications by <a href="https://donmccurdy.com/" target="_blank" rel="noopener">Don McCurdy</a>. CC0.<br />
</div>
<!-- JS for Tracking the User, and Making the Robot Appear -->
<script src="./index.js" type="module"></script>
<!-- Video Where We Capture the User's Expressions -->
<video id="faceStream" height="200px" width="300px"></video>
</body>
</html>
14 changes: 14 additions & 0 deletions expressionTrackingDemos/robot/index.js
@@ -0,0 +1,14 @@
// A: import function for robot setup
import { animate, init } from "./robot.js";
// B: import objects needed for expression tracking
import "https://cdn.jsdelivr.net/gh/justadudewhohacks/face-api.js/dist/face-api.js";
import { startVideo } from "./expressionTracker.js";

// C: Load the neural nets for identifying and analyzing faces
Promise.all([
faceapi.nets.tinyFaceDetector.loadFromUri('./weights'),
faceapi.nets.faceLandmark68Net.loadFromUri('./weights'),
faceapi.nets.faceRecognitionNet.loadFromUri('./weights'),
faceapi.nets.faceExpressionNet.loadFromUri('./weights')
// D: turn on the webcam, and setup the robot
]).then(() => startVideo(init, animate));
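
If the ./weights files are missing or mis-served, the Promise.all above rejects and startVideo never runs, with only an unhandled-rejection message in the console. A hedged sketch of the same bootstrap with an explicit catch (no new APIs, just error reporting):

// sketch: the same bootstrap with explicit error reporting on failed weight loads
Promise.all([
    faceapi.nets.tinyFaceDetector.loadFromUri('./weights'),
    faceapi.nets.faceExpressionNet.loadFromUri('./weights')
])
    .then(() => startVideo(init, animate))
    .catch(error => console.error('Failed to load face-api.js weights:', error));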
230 changes: 230 additions & 0 deletions expressionTrackingDemos/robot/robot.js
@@ -0,0 +1,230 @@
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r125/build/three.module.js'; // same r125 revision as the example modules below, to avoid loading two copies of three.js

import Stats from 'https://threejsfundamentals.org/threejs/resources/threejs/r125/examples/jsm/libs/stats.module.js';
import { GUI } from 'https://threejsfundamentals.org/threejs/resources/threejs/r125/examples/jsm/libs/dat.gui.module.js';

import {GLTFLoader} from 'https://threejsfundamentals.org/threejs/resources/threejs/r125/examples/jsm/loaders/GLTFLoader.js';

let container, stats, clock, gui, mixer, actions, activeAction, previousAction;
let camera, scene, renderer, model, face;

const api = { state: 'Walking' };

export function init() {
/* Assembles the robot on the DOM using Three.js, and adds a GUI to
* manually edit its emotions, trigger specific actions,
* or change its state from walking to sitting, idle, etc.
*
* For face tracking, we initialize the global face variable (declared above),
* in the createGUI function,
* so that it is accessible via window.face.
*
* @return {null}
*/

container = document.createElement( 'div' );
document.body.appendChild( container );

camera = new THREE.PerspectiveCamera( 45, window.innerWidth / window.innerHeight, 0.25, 100 );
camera.position.set( - 5, 3, 10 );
camera.lookAt( new THREE.Vector3( 0, 2, 0 ) );

scene = new THREE.Scene();
scene.background = new THREE.Color( 0xe0e0e0 );
scene.fog = new THREE.Fog( 0xe0e0e0, 20, 100 );

clock = new THREE.Clock();

// A: lights

const hemiLight = new THREE.HemisphereLight( 0xffffff, 0x444444 );
hemiLight.position.set( 0, 20, 0 );
scene.add( hemiLight );

const dirLight = new THREE.DirectionalLight( 0xffffff );
dirLight.position.set( 0, 20, 10 );
scene.add( dirLight );

// B: ground

const mesh = new THREE.Mesh( new THREE.PlaneGeometry( 2000, 2000 ), new THREE.MeshPhongMaterial( { color: 0x999999, depthWrite: false } ) );
mesh.rotation.x = - Math.PI / 2;
scene.add( mesh );

const grid = new THREE.GridHelper( 200, 40, 0x000000, 0x000000 );
grid.material.opacity = 0.2;
grid.material.transparent = true;
scene.add( grid );

// C: model
const loader = new GLTFLoader();
loader.load( '../models/RobotExpressive.glb', ( gltf ) => {
// 1. once the model is loaded, add it to the DOM
model = gltf.scene;
scene.add( model );
// 2. add the GUI & initialize the global face variable
face = createGUI( model, gltf.animations );

},
undefined, // 3. onProgress callback, omitted here
function ( e ) {
// 4. this happens in case there is an error in loading the model
console.error( e );
} );
renderer = new THREE.WebGLRenderer( { antialias: true } );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
renderer.outputEncoding = THREE.sRGBEncoding;
container.appendChild( renderer.domElement );

window.addEventListener( 'resize', onWindowResize );

// D: stats
stats = new Stats();
container.appendChild( stats.dom );
}

function createGUI( model, animations ) {
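/* Builds the dat.GUI panel for the robot's states, emotes, and facial expressions,
 * wires the animation clips into an AnimationMixer, and exposes the head mesh
 * (with its morph targets) as window.face for the expression tracker.
 * @param {THREE.Group} model: the loaded robot scene.
 * @param {Array} animations: the animation clips from the glTF file.
 * @return {THREE.Mesh} the face mesh whose morphTargetInfluences are driven externally.
 */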

const states = [ 'Idle', 'Walking', 'Running', 'Dance', 'Death', 'Sitting', 'Standing' ];
const emotes = [ 'Jump', 'Yes', 'No', 'Wave', 'Punch', 'ThumbsUp' ];

gui = new GUI();

// ANIMATING the robot
mixer = new THREE.AnimationMixer( model );

actions = {};

for ( let i = 0; i < animations.length; i ++ ) {

const clip = animations[ i ];
const action = mixer.clipAction( clip );
actions[ clip.name ] = action;

if ( emotes.indexOf( clip.name ) >= 0 || states.indexOf( clip.name ) >= 4 ) {

action.clampWhenFinished = true;
action.loop = THREE.LoopOnce;

}

}

/* STATES = super IMPORTANT for animating the robot! */
const statesFolder = gui.addFolder( 'States' );

const clipCtrl = statesFolder.add( api, 'state' ).options( states );

clipCtrl.onChange( function () {

fadeToAction( api.state, 0.5 );

} );

statesFolder.open();

// emotes

const emoteFolder = gui.addFolder( 'Emotes' );

function createEmoteCallback( name ) {

api[ name ] = function () {

fadeToAction( name, 0.2 );

mixer.addEventListener( 'finished', restoreState );

};

emoteFolder.add( api, name );

}

function restoreState() {

mixer.removeEventListener( 'finished', restoreState );

fadeToAction( api.state, 0.2 );

}

for ( let i = 0; i < emotes.length; i ++ ) {

createEmoteCallback( emotes[ i ] );

}

emoteFolder.open();

/* Expressions - THIS is what we need to control via web cam */

let face = model.getObjectByName( 'Head_4' );
// console.log(face.parent);
// Lists the expressions that the robot can have on the GUI panel
const expressions = Object.keys( face.morphTargetDictionary );
const expressionFolder = gui.addFolder( 'Expressions' );
// gives the expressions on the panel their initial values
for ( let i = 0; i < expressions.length; i ++ ) {
// params for the .add function: (target object, property/index into it, min, max, step size)
expressionFolder.add( face.morphTargetInfluences, i, 0, 1, 0.01 ).name( expressions[ i ] );

}
// sets the robot to its default action
activeAction = actions[ 'Walking' ];
activeAction.play();
// expands the "Expressions" folder on the GUI panel
expressionFolder.open();
// RETURN THE FACE, so we can manipulate it using the user's expressions
window.face = face;
return face;
}

function fadeToAction( name, duration ) {
// the duration arg is in seconds (passed to Three.js fadeIn/fadeOut)

previousAction = activeAction;
activeAction = actions[ name ];

if ( previousAction !== activeAction ) {

previousAction.fadeOut( duration );

}

activeAction
.reset()
.setEffectiveTimeScale( 1 )
.setEffectiveWeight( 1 )
.fadeIn( duration )
.play();

}

function onWindowResize() {

camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();

renderer.setSize( window.innerWidth, window.innerHeight );

}

export function animate() {
/* Controls the render loop of the robot, in effect
* creating the appearance of movement on the HTML document.
* @return {undefined}
*/

const dt = clock.getDelta();

if ( mixer ) mixer.update( dt );

requestAnimationFrame( animate );

renderer.render( scene, camera );

stats.update();

}
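
The interval in expressionTracker.js assumes morph targets 0, 1, and 2 on window.face correspond to angry, surprised, and sad. A hedged sketch of a name-based lookup that avoids those hard-coded indices, using the mesh's morphTargetDictionary (the setExpression helper name is hypothetical, not part of this PR):

// sketch: set a morph target by name instead of by position
function setExpression(face, name, weight) {
    // morphTargetDictionary maps target names (e.g. 'Angry', 'Surprised', 'Sad') to indices
    const index = face.morphTargetDictionary[name];
    if (index !== undefined) face.morphTargetInfluences[index] = weight;
}

// usage with a face-api.js expressions object:
// setExpression(window.face, 'Angry', expressions.angry);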