Hi everyone,
I’m using Dan Shiffman’s example with a Kinect v1 to track the position of two hands. I have been able to track the movement of one hand and get its centre point, but now I want to track both hands for the purpose of a zoom function on an image. I’ve been trying to use blob detection to distinguish one hand from the other, but my code doesn’t seem to be working. If anyone could offer some guidance I would be very grateful. Thanks!
// Code Below
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
//Kinect Library Object
Kinect kinect2;

// Colour the blob tracker searches for (initialised to pure red in setup()).
color trackColor;

// Max colour distance (compared squared, as threshold*threshold) for a pixel
// to count as a match against trackColor.
float threshold = 20;

// NOTE(review): declared but unused in the visible code — presumably intended
// as the max pixel distance for Blob.isNear(); confirm against the Blob class.
float distThreshold = 75;

// Raw Kinect depth band (in sensor units) that counts as "hand distance";
// pixels outside [minThresh, maxThresh] are painted black and ignored.
float minThresh = 502;
float maxThresh = 646;

// img1: the map image to zoom into; img2: buffer for the thresholded depth view.
PImage img1, img2;

// Display size (px) for the zoomed map image (used by the commented-out zoom draw).
int zoom = 1200;

// Blobs detected in the current frame; cleared and rebuilt every draw().
ArrayList<Blob> blobs = new ArrayList<Blob>();
void setup(){
  // Canvas sized to match the Kinect v1 depth image (640x480).
  size(640, 480);

  // Start the Kinect and begin streaming depth frames.
  kinect2 = new Kinect(this);
  kinect2.initDepth();

  // The tracker looks for pure red — the colour we paint in-range pixels.
  trackColor = color(255, 0, 0);

  // Map to zoom into, and a working image for the thresholded depth view.
  img1 = loadImage("LimerickCityMap.jpg");
  img2 = createImage(kinect2.width, kinect2.height, RGB);
}
void draw() {
  // Rebuild the thresholded depth image and the blob list from scratch each frame.
  img2.loadPixels();
  blobs.clear();

  int[] depth = kinect2.getRawDepth();

  // Centroid accumulators over all in-range pixels (average of both hands).
  float sumX = 0;
  float sumY = 0;
  float totalPixels = 0;

  for (int x = 0; x < kinect2.width; x++) {
    for (int y = 0; y < kinect2.height; y++) {
      int offset = x + y * kinect2.width;
      int d = depth[offset];

      if (d > minThresh && d < maxThresh) {
        // Pixel is within the hand depth band: paint it red for display.
        img2.pixels[offset] = trackColor;

        // BUG FIX: these accumulators were never incremented (the increments
        // were commented out), so avgX/avgY evaluated to 0/0 = NaN and the
        // centroid ellipse never appeared.
        sumX += x;
        sumY += y;
        totalPixels++;

        // Grow an existing nearby blob or start a new one.
        // (The original also colour-compared the pixel against trackColor,
        // but the pixel had just been set to trackColor, so that distSq
        // check was always 0 and always passed — removed as dead logic.)
        boolean found = false;
        for (Blob b : blobs) {
          if (b.isNear(x, y)) {
            b.add(x, y);
            found = true;
            break;
          }
        }
        if (!found) {
          blobs.add(new Blob(x, y));
        }
      } else {
        // Out of range: black it out.
        img2.pixels[offset] = color(0);
      }
    }
  }

  img2.updatePixels();
  imageMode(CORNER);
  image(img2, 0, 0);
  imageMode(CENTER);

  // BUG FIX: blobs must be drawn AFTER the image, otherwise image(img2, ...)
  // paints over them every frame and they are never visible.
  // Size filter keeps only blobs big enough to plausibly be a hand; with two
  // hands in range, this list should hold two blobs — one per hand.
  for (Blob b : blobs) {
    if (b.size() > 500) {
      b.show();
    }
  }

  // Overall centroid of all in-range pixels. Guard against frames with
  // nothing in range (would otherwise divide by zero -> NaN ellipse).
  if (totalPixels > 0) {
    float avgX = sumX / totalPixels;
    float avgY = sumY / totalPixels;
    //image(img1, avgX, avgY, zoom, zoom);
    ellipse(avgX, avgY, 64, 64);
    //println(avgX, avgY);
  }
}
// Squared Euclidean distance between two 2D points.
// Returned squared (no sqrt) so callers compare against threshold*threshold.
float distSq(float x1, float y1, float x2, float y2) {
  float dx = x2 - x1;
  float dy = y2 - y1;
  return dx * dx + dy * dy;
}
// Squared Euclidean distance between two 3D points (used here for RGB colour
// distance). Returned squared so callers compare against threshold*threshold.
float distSq(float x1, float y1, float z1, float x2, float y2, float z2) {
  float dx = x2 - x1;
  float dy = y2 - y1;
  float dz = z2 - z1;
  return dx * dx + dy * dy + dz * dz;
}
// Blob Class (the class is named Blob in the code above, not Bubble)