I have this code from the GitHub repo WebDevSimplified/Face-Detection-JavaScript, which uses face-api.js.
const video = document.getElementById('video')

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}
video.addEventListener('playing', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)
  const displaySize = {
    width: video.width,
    height: video.height
  }
  faceapi.matchDimensions(canvas, displaySize)
  setInterval(async () => {
    // const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions()
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks()
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    // faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    // faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    landmarks = await faceapi.detectFaceLandmarks(video)
    landmarkPositions = landmarks.positions
    // console.log(landmarkPositions[0]);
  }, 100)
})
// p5.js
function setup() {
  var myCanvas = createCanvas(windowWidth, windowHeight);
  myCanvas.parent("overlay");
  angleMode(DEGREES);
}

function draw() {
  // background(0);
  stroke(255);
  noFill();
  strokeWeight(4);
  beginShape();
  // test
  curveVertex(100, 200);
  curveVertex(150, 50);
  curveVertex(250, 60);
  curveVertex(300, 200);
  curveVertex(300, 200);
  // get landmark positions
  curveVertex(landmarkPositions[0]);
  // curveVertex(landmarkPositions[1]),
  // curveVertex(landmarkPositions[2]),
  // curveVertex(landmarkPositions[3])
  endShape(CLOSE);
}
My aim is to retrieve the 68 facial landmarks and use them to draw a shape with curveVertex().
I’m having trouble retrieving the points; how should I go about this? Any help is much appreciated, thanks in advance!
I’m not sure what your HTML/CSS looks like, but I had to make some tweaks from what was in the GitHub repo:
/* position the overlay div to cover the window */
#overlay {
  position: absolute;
  left: 0;
  top: 0;
  right: 0;
  bottom: 0;
  display: flex;
  justify-content: center;
  align-items: center;
}

/* make sure the p5.js canvas appears on top of the face-api canvas */
#overlay canvas {
  position: relative;
  z-index: 1000;
}
Then I just had to tweak your code a bit:
- Use resizedDetections when getting landmarkPositions.
- Iterate over landmarkPositions using a for…of loop.
- Use the .x and .y properties of each position (you cannot treat these like p5.Vector instances).
- Switch to POINTS, because if you want to draw shapes you need to split the landmarks up into reasonable chunks.
const video = document.getElementById('video')

// declare a global variable shared with the p5.js sketch
let landmarkPositions;

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  // note: navigator.getUserMedia is deprecated in favor of navigator.mediaDevices.getUserMedia
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}

video.addEventListener('playing', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)
  const displaySize = {
    width: video.width,
    height: video.height
  }
  faceapi.matchDimensions(canvas, displaySize)
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks()
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    // faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    // faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    // take the landmark positions from the resized detection results
    landmarkPositions = resizedDetections[0].landmarks.positions;
    // console.log(landmarkPositions[0]);
  }, 100)
});

// p5.js
function setup() {
  // match the p5.js canvas to the video dimensions
  let myCanvas = createCanvas(video.width, video.height);
  myCanvas.parent("overlay");
  angleMode(DEGREES);
}

function draw() {
  clear();
  stroke(255);
  noFill();
  strokeWeight(4);
  // only draw once the first detection has populated landmarkPositions
  if (landmarkPositions) {
    beginShape(POINTS);
    // draw each landmark position as a point
    for (let pos of landmarkPositions) {
      vertex(pos.x, pos.y);
    }
    endShape();
  }
}
Hi, thank you for your help, it’s really working! It successfully draws all the points from face-api, but I get an error message in the console: Uncaught (in promise) TypeError: Cannot read property 'landmarks' of undefined, pointing at this line:
landmarkPositions = resizedDetections[0].landmarks.positions;
What might be the problem here? And one more question: how can I draw only specific points from the face landmarks, rather than looping through all of them? Thank you for your help!
My guess is that the undefined reference error is the result of resizedDetections being an empty array on frames where no face is detected, so you probably need to check for that case before indexing into it.
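For example, a minimal guard inside the setInterval callback (a sketch assuming video, displaySize, and landmarkPositions are defined as in the code above) might look like this:

setInterval(async () => {
  const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks()
  const resizedDetections = faceapi.resizeResults(detections, displaySize)
  // only index into the array when at least one face was detected;
  // otherwise keep the previous landmarkPositions
  if (resizedDetections.length > 0) {
    landmarkPositions = resizedDetections[0].landmarks.positions;
  }
}, 100)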
As far as only drawing specific points, my guess is that face-api always returns the landmarks in the same order. So if you just wanted to draw the eyes, for example, you would just need to identify which range of indices in the positions array corresponds to the eyes.
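For illustration, here is a sketch of that idea in the p5.js draw() function. It assumes the standard 68-point layout commonly used by these models, where indices 36–41 are one eye and 42–47 are the other; please verify the ranges against your own console output before relying on them:

function draw() {
  clear();
  stroke(255);
  noFill();
  strokeWeight(4);
  if (landmarkPositions) {
    // slice out just the eye landmarks from the full 68-point array
    // (assumed ranges: 36–41 and 42–47)
    for (let eye of [landmarkPositions.slice(36, 42), landmarkPositions.slice(42, 48)]) {
      beginShape();
      for (let pos of eye) {
        curveVertex(pos.x, pos.y);
      }
      endShape(CLOSE);
    }
  }
}

The landmarks object on each detection also exposes named getters such as getLeftEye() and getRightEye(), which may be a more readable way to get the same slices than hard-coded indices.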