Hi, I'm struggling with my Processing code… parts are taken from the Thomas Sanchez KinectPV2 examples. My objective is to access the Kinect V2 point cloud data and depth image and then to track small moving objects as blobs. I don't need to see the actual point cloud image — I just want the moving objects tracked as white blobs, and to be able to read their depth (so that they have x, y, z coordinates). The intended area will be white with black moving objects, so it's fine if the image from the Kinect v2 is binary (hence the coin-flip code block), which is then fed to the tracker. As these will be inside a box, maybe it will also need some calibration? Could someone possibly have a look and let me know how I can go forward with what I have so far, please?
import java.nio.*;
import KinectPV2.*;
// Kinect V2 wrapper (KinectPV2 library by Thomas Sanchez Lengeling).
private KinectPV2 kinect;
//values for the 3d scene
//rotation angle (radians) applied around the Y axis in draw()
float a = 3;
//z translation of the point-cloud scene
int zval = 350;
//value to scale the point cloud
float scaleVal = 990;
// NOTE(review): rotY/rotZ/rotX/depthVal are declared but never read in the
// visible code — presumably leftovers from the KinectPV2 example; confirm
// before deleting.
float rotY = 0;
float rotZ = 0;
float rotX = PI;
float depthVal = 0;
// shader attribute locations (unused in the visible code)
int vertLoc;
int colorLoc;
//openGL instances
//render to openGL object
PGL pgl;
//create a shader
PShader sh;
//VBO buffer location in the GPU
int vertexVboId;
int colorVboId;
// color sampled in mousePressed() and used as the blob-tracking target
color trackColor;
//Distance Threshold for the point cloud, in millimetres
int maxD = 4500; // 4.5 m upper limit
int minD = 0; // lower limit — comment previously said "50cm" but the value is 0; confirm intended minimum
/**
 * Sketch initialisation: opens the Kinect V2 with depth, color and
 * color-point-cloud streams enabled, loads the point-cloud shader, and
 * reserves two VBO ids on the GPU (vertex positions + colors).
 */
public void setup() {
size(1280, 720, P3D);
kinect = new KinectPV2(this);
kinect.enableDepthImg(true);
kinect.enableColorImg(true);
kinect.enableColorPointCloud(true);
//int[] depth = kinect.getRawDepth();
kinect.init();
//create shader object with a vertex shader and a fragment shader
// (Processing's loadShader takes the fragment file first, then the vertex file)
sh = loadShader("frag.glsl", "vert.glsl");
//create VBO
// BUG FIX: the original declared a LOCAL "PGL pgl = beginPGL();" which
// shadowed the sketch-level pgl field, leaving the field null. Assign the
// field instead so draw() can reuse it.
pgl = beginPGL();
// allocate buffer big enough to get all VBO ids back
IntBuffer intBuffer = IntBuffer.allocate(2);
pgl.genBuffers(2, intBuffer);
//memory location of the VBO
vertexVboId = intBuffer.get(0);
colorVboId = intBuffer.get(1);
endPGL();
// Start off tracking for red
trackColor = color(255, 0, 0);
}
public void draw() {
background(0);
image(kinect.getDepthImage(), 0, 0);
//get Z valule, which si the Point Cloud distance -
// The Point cloud values are mapped from (0 - 4500) mm to gray color format (0 - 255)
image(kinect.getPointCloudDepthImage(), 512, 0);
//obtain the raw depth data in integers from [0 - 4500]
int [] rawData = kinect.getRawDepthData();
//Threshold of the point Cloud.
kinect.setLowThresholdPC(minD);
kinect.setHighThresholdPC(maxD);
//// cache the image first
//PImage kinectColor = kinect.getColorImage ();
//image(kinectColor, 0, 0, 640, 340);
float worldRecord = 500;
// XY coordinate of closest color
int closestX = 0;
int closestY = 0;
PImage myImage = new PImage (640, 480);
for (int x = 0; x < myImage.width; x++ ) {
for (int y = 0; y < myImage.height; y++ ) {
color currentColor = kinectColor.pixels[loc];
int loc = x + y * myImage.width;
loadPixels();
pixels currentDepthPixel = kinectDepth.pixels[loc];
if (pixels.depth < calibrationImage.depth) {
myImage.pixels[loc] = 1;
}
int coinFlip = int(random(2));
if (coinFlip !=0) {
myImage.pixels[loc] = color (255,255,255);
}
else {
myImage.pixels[loc] = color (0,0,0);
}
}
}
image (myImage, 0, 0, 640, 480);
//// The geometric transformations will be automatically passed to the shader
pushMatrix();
translate(width / 2, height / 2, zval);
scale(scaleVal, -1 * scaleVal, scaleVal);
rotate(a, 0.0f, 1.0f, 0.0f);
////render to the openGL object
pgl = beginPGL();
//sh.bind();
////obtain the point cloud positions
//FloatBuffer pointCloudBuffer = kinect.getPointCloudColorPos();
endPGL();
popMatrix();
stroke(255, 0, 0);
text(frameRate, 50, height- 50);
}
/**
 * Samples the color under the mouse cursor from the Kinect color image
 * into trackColor (the blob-tracking target color).
 *
 * BUG FIXES vs. the original: the color image was fetched twice per
 * click, its pixels[] array was read without loadPixels(), and the index
 * was not clamped to the image bounds (the 1280x720 window does not
 * necessarily match the color frame's dimensions).
 */
public void mousePressed() {
println(frameRate);
// saveFrame();
// Save color where the mouse is clicked in trackColor variable
PImage colorImg = kinect.getColorImage();
colorImg.loadPixels(); // ensure pixels[] is populated before reading
int x = constrain(mouseX, 0, colorImg.width - 1);
int y = constrain(mouseY, 0, colorImg.height - 1);
trackColor = colorImg.pixels[x + y * colorImg.width];
}