Does anyone know K2P3Toolkit?

Hello guys! I was researching the K2P3Toolkit library in win10 recently, and I found that it can only be applied to the openkinect library on the mac. I tried to modify the code using the kinectPV2 library, but it runs abnormally slow. I found the slow part but there is no way to modify it. , Does anyone know what’s going on?

//==========================================================
// set resolution of your projector image/second monitor
// and name of your calibration file-to-be
int pWidth = 800;
int pHeight = 600; 
String calibFilename = "calibration.txt";

//Here is most of the modified core code.
//==========================================================
//==========================================================

//==========================================================
// set resolution of your projector image/second monitor
// and name of your calibration file-to-be
int pWidth = 1920;
int pHeight = 1080; 
String calibFilename = "calibration.txt";


//==========================================================
//==========================================================

import javax.swing.JFrame;
import KinectPV2.*;

import gab.opencv.*;
import controlP5.*;
import Jama.*;

KinectPV2 kinect;

OpenCV opencv;
ChessboardFrame frameBoard;
ChessboardApplet ca;
PVector[] depthMap;
PImage src;

ArrayList<PVector> foundPoints = new ArrayList<PVector>();
ArrayList<PVector> projPoints = new ArrayList<PVector>();
ArrayList<PVector> ptsK, ptsP;
PVector testPoint, testPointP;
boolean isSearchingBoard = false;
boolean calibrated = false;
boolean testingMode = false;
int cx, cy, cwidth;

void setup() 
{
 surface.setSize(1920, 1080);
  
  textFont(createFont("Courier", 24));
  frameBoard = new ChessboardFrame();

  // set up kinect 
  kinect = new KinectPV2(this);
  kinect.enableDepthImg(true); 
  kinect.enableColorImg(true);
  kinect.enableInfraredImg(true);
  kinect.enableInfraredLongExposureImg(true);
  kinect.init();
;
  
  opencv = new OpenCV(this, kinect.getDepthImage().width,kinect.getDepthImage().height);

  depthMap = new PVector[kinect.getDepthImage().width*kinect.getDepthImage().height];

  // matching pairs
  ptsK = new ArrayList<PVector>();
  ptsP = new ArrayList<PVector>();
  testPoint = new PVector();
  testPointP = new PVector();
  setupGui();
}

void draw() 
{
  // draw chessboard onto scene
  projPoints = drawChessboard(cx, cy, cwidth);
  println(kinect.getDepthImage().width);
  // update depth map
 depthMap = depthMapRealWorld();

  src = kinect.getColorImage();
  
  // mirroring
  //for (int h = 0; h < kinect.getDepthImage().height; h++)
  //{
  //  for (int r = 0; r < kinect.getDepthImage().width / 2; r++)
  //  {
  //    PVector temp = depthMap[h*kinect.getDepthImage().width + r];
  //    depthMap[h*kinect.getDepthImage().width + r] = depthMap[h*kinect.getDepthImage().width + (kinect.getDepthImage().width - r - 1)];
  //    depthMap[h*kinect.getDepthImage().width + (kinect.getDepthImage().width - r - 1)] = temp;
  //  }
  //}

  // mirroring
  //for (int h = 0; h < src.height; h++)
  //{
  //  for (int r = 0; r < src.width / 2; r++)
  //  {
  //    int temp2 = src.get(r, h);   //h*src.width + r);
  //    src.set(r, h, src.get(src.width - r - 1, h));
  //    src.set(src.width - r - 1, h, temp2);
  //  }
  //}

  opencv.loadImage(src);
  opencv.gray();

  if (isSearchingBoard)
    foundPoints = opencv.findChessboardCorners(4, 3);

  drawGui();
  frameRate(60);
}

void drawGui() 
{

  background(0, 100, 0);

  // draw the RGB image
  pushMatrix();
  translate(30, 120);
  textSize(22);
  fill(255);
  image(src, 0, 0);
  
  // draw chessboard corners, if found
  if (isSearchingBoard) {
    int numFoundPoints = 0;
    for (PVector p : foundPoints) {
      if (getDepthMapAt((int)p.x, (int)p.y).z > 0) {
        fill(0, 255, 0);
        numFoundPoints += 1;
      }
      else  fill(255, 0, 0);
      ellipse(p.x, p.y, 5, 5);
    }
    if (numFoundPoints == 12)  guiAdd.show();
    else                       guiAdd.hide();
  }
  else  guiAdd.hide();
  if (calibrated && testingMode) {
    fill(255, 0, 0);
    ellipse(testPoint.x, testPoint.y, 10, 10);
  }
  popMatrix();

  // draw GUI
  pushMatrix();
  pushStyle();
  translate(kinect.getDepthImage().width+70, 40); // this is black box
  fill(0);
  rect(0, 0, 450, 680); // blackbox size
  fill(255);
  text(ptsP.size()+" pairs", 26, guiPos.y+525);
  popStyle();
  popMatrix();
}

ArrayList<PVector> drawChessboard(int x0, int y0, int cwidth) {
  ArrayList<PVector> projPoints = new ArrayList<PVector>();
  ca.redraw();
  return projPoints;
}


void addPointPair() {
  if (projPoints.size() == foundPoints.size()) {
    println(getDepthMapAt((int) foundPoints.get(1).x, (int) foundPoints.get(1).y));
    for (int i=0; i<projPoints.size(); i++) {
      ptsP.add( projPoints.get(i) );
      ptsK.add( getDepthMapAt((int) foundPoints.get(i).x, (int) foundPoints.get(i).y) );
    }
  }
  guiCalibrate.show();
  guiClear.show();
}

PVector getDepthMapAt(int x, int y) {
  PVector dm = depthMap[kinect.getDepthImage().width * y + x];
  
  return new PVector(dm.x, dm.y, dm.z);
}

void clearPoints() {
  ptsP.clear();
  ptsK.clear();
  guiSave.hide();
}

void saveC() {
  saveCalibration(calibFilename); 
}

void loadC() {
  println("load");
  loadCalibration(calibFilename);
  guiTesting.addItem("Testing Mode", 1);
}

void mousePressed() {
  if (calibrated && testingMode) {
    testPoint = new PVector(constrain(mouseX-30, 0, kinect.getDepthImage().width-1), 
                            constrain(mouseY-120, 0, kinect.getDepthImage().height-1));
    int idx = kinect.getDepthImage().width * (int) testPoint.y + (int) testPoint.x;
    
    testPointP = convertKinectToProjector(depthMap[idx]);
  }
}


// all functions below used to generate depthMapRealWorld point cloud
PVector[] depthMapRealWorld()
{
  int[] depth = kinect.getRawDepthData();
  int skip = 1;
  for (int y = 0; y < kinect.getDepthImage().height; y+=skip) {
    for (int x = 0; x < kinect.getDepthImage().width; x+=skip) {
       println(x,y);
        int offset = x + y * kinect.getDepthImage().width;
        //calculate the x, y, z camera position based on the depth information
        PVector point = depthToPointCloudPos(x, y, depth[offset]);
      
        depthMap[kinect.getDepthImage().width * y + x] = point;
      }
    }
   
    return depthMap;
}

//calculte the xyz camera position based on the depth data
PVector depthToPointCloudPos(int x, int y, float depthValue) {
  PVector point = new PVector();
  point.z = (depthValue);// / (1.0f); // Convert from mm to meters
  point.x = ((x - CameraParams.cx) * point.z / CameraParams.fx);
  point.y = ((y - CameraParams.cy) * point.z / CameraParams.fy);
  return point;
}

//camera information based on the Kinect v2 hardware
static class CameraParams {
  static float cx = 254.878f;
  static float cy = 205.395f;
  static float fx = 365.456f;
  static float fy = 365.456f;
  static float k1 = 0.0905474;
  static float k2 = -0.26819;
  static float k3 = 0.0950862;
  static float p1 = 0.0;
  static float p2 = 0.0;
}

//Before

//==========================================================
// set resolution of your projector image/second monitor
// and name of your calibration file-to-be
int pWidth = 1920;
int pHeight = 1080; 
String calibFilename = "calibration.txt";


//==========================================================
//==========================================================

import javax.swing.JFrame;
import org.openkinect.freenect.*;
import org.openkinect.freenect2.*;
import org.openkinect.processing.*;

import gab.opencv.*;
import controlP5.*;
import Jama.*;

Kinect2 kinect;

OpenCV opencv;
ChessboardFrame frameBoard;
ChessboardApplet ca;
PVector[] depthMap;
PImage src;
ArrayList<PVector> foundPoints = new ArrayList<PVector>();
ArrayList<PVector> projPoints = new ArrayList<PVector>();
ArrayList<PVector> ptsK, ptsP;
PVector testPoint, testPointP;
boolean isSearchingBoard = false;
boolean calibrated = false;
boolean testingMode = false;
int cx, cy, cwidth;

void setup() 
{
  surface.setSize(1920, 1080);
  
  textFont(createFont("Courier", 24));
  frameBoard = new ChessboardFrame();

  // set up kinect 
  kinect = new Kinect2(this);
  kinect.initDepth(); 
  kinect.initVideo();
  kinect.initRegistered();
  kinect.initIR();
  kinect.initDevice();
  
  opencv = new OpenCV(this, kinect.depthWidth, kinect.depthHeight);

  depthMap = new PVector[kinect.depthWidth*kinect.depthHeight];

  // matching pairs
  ptsK = new ArrayList<PVector>();
  ptsP = new ArrayList<PVector>();
  testPoint = new PVector();
  testPointP = new PVector();
  setupGui();
}

void draw() 
{
  // draw chessboard onto scene
  projPoints = drawChessboard(cx, cy, cwidth);

  // update depth map
  depthMap = depthMapRealWorld();

  src = kinect.getRegisteredImage();
  
  // mirroring
  for (int h = 0; h < kinect.depthHeight; h++)
  {
    for (int r = 0; r < kinect.depthWidth / 2; r++)
    {
      PVector temp = depthMap[h*kinect.depthWidth + r];
      depthMap[h*kinect.depthWidth + r] = depthMap[h*kinect.depthWidth + (kinect.depthWidth - r - 1)];
      depthMap[h*kinect.depthWidth + (kinect.depthWidth - r - 1)] = temp;
    }
  }
  
  // mirroring
  for (int h = 0; h < src.height; h++)
  {
    for (int r = 0; r < src.width / 2; r++)
    {
      int temp2 = src.get(r, h);   //h*src.width + r);
      src.set(r, h, src.get(src.width - r - 1, h));
      src.set(src.width - r - 1, h, temp2);
    }
  }

  opencv.loadImage(src);
  opencv.gray();

  if (isSearchingBoard)
    foundPoints = opencv.findChessboardCorners(4, 3);

  drawGui();
}

void drawGui() 
{
  background(0, 100, 0);

  // draw the RGB image
  pushMatrix();
  translate(30, 120);
  textSize(22);
  fill(255);
  image(src, 0, 0);
  
  // draw chessboard corners, if found
  if (isSearchingBoard) {
    int numFoundPoints = 0;
    for (PVector p : foundPoints) {
      if (getDepthMapAt((int)p.x, (int)p.y).z > 0) {
        fill(0, 255, 0);
        numFoundPoints += 1;
      }
      else  fill(255, 0, 0);
      ellipse(p.x, p.y, 5, 5);
    }
    if (numFoundPoints == 12)  guiAdd.show();
    else                       guiAdd.hide();
  }
  else  guiAdd.hide();
  if (calibrated && testingMode) {
    fill(255, 0, 0);
    ellipse(testPoint.x, testPoint.y, 10, 10);
  }
  popMatrix();

  // draw GUI
  pushMatrix();
  pushStyle();
  translate(kinect.depthWidth+70, 40); // this is black box
  fill(0);
  rect(0, 0, 450, 680); // blackbox size
  fill(255);
  text(ptsP.size()+" pairs", 26, guiPos.y+525);
  popStyle();
  popMatrix();
}

ArrayList<PVector> drawChessboard(int x0, int y0, int cwidth) {
  ArrayList<PVector> projPoints = new ArrayList<PVector>();
  ca.redraw();
  return projPoints;
}


void addPointPair() {
  if (projPoints.size() == foundPoints.size()) {
    println(getDepthMapAt((int) foundPoints.get(1).x, (int) foundPoints.get(1).y));
    for (int i=0; i<projPoints.size(); i++) {
      ptsP.add( projPoints.get(i) );
      ptsK.add( getDepthMapAt((int) foundPoints.get(i).x, (int) foundPoints.get(i).y) );
    }
  }
  guiCalibrate.show();
  guiClear.show();
}

PVector getDepthMapAt(int x, int y) {
  PVector dm = depthMap[kinect.depthWidth * y + x];
  
  return new PVector(dm.x, dm.y, dm.z);
}

void clearPoints() {
  ptsP.clear();
  ptsK.clear();
  guiSave.hide();
}

void saveC() {
  saveCalibration(calibFilename); 
}

void loadC() {
  println("load");
  loadCalibration(calibFilename);
  guiTesting.addItem("Testing Mode", 1);
}

void mousePressed() {
  if (calibrated && testingMode) {
    testPoint = new PVector(constrain(mouseX-30, 0, kinect.depthWidth-1), 
                            constrain(mouseY-120, 0, kinect.depthHeight-1));
    int idx = kinect.depthWidth * (int) testPoint.y + (int) testPoint.x;
    
    testPointP = convertKinectToProjector(depthMap[idx]);
  }
}


// all functions below used to generate depthMapRealWorld point cloud
PVector[] depthMapRealWorld()
{
  int[] depth = kinect.getRawDepth();
  int skip = 1;
  for (int y = 0; y < kinect.depthHeight; y+=skip) {
    for (int x = 0; x < kinect.depthWidth; x+=skip) {
        int offset = x + y * kinect.depthWidth;
        //calculate the x, y, z camera position based on the depth information
        PVector point = depthToPointCloudPos(x, y, depth[offset]);
        depthMap[kinect.depthWidth * y + x] = point;
      }
    }
    return depthMap;
}

//calculte the xyz camera position based on the depth data
PVector depthToPointCloudPos(int x, int y, float depthValue) {
  PVector point = new PVector();
  point.z = (depthValue);// / (1.0f); // Convert from mm to meters
  point.x = ((x - CameraParams.cx) * point.z / CameraParams.fx);
  point.y = ((y - CameraParams.cy) * point.z / CameraParams.fy);
  return point;
}

//camera information based on the Kinect v2 hardware
static class CameraParams {
  static float cx = 254.878f;
  static float cy = 205.395f;
  static float fx = 365.456f;
  static float fy = 365.456f;
  static float k1 = 0.0905474;
  static float k2 = -0.26819;
  static float k3 = 0.0950862;
  static float p1 = 0.0;
  static float p2 = 0.0;
}

//Associated github address

I don’t exactly know but the codes look quite different from each other (but maybe you edited later) which is not a “fair” comparison. What was the framerate before and after? What are you exactly trying to do? Do the both libraries see same images (is the color/white balance the same? are the resolutions the same?)

Hi micuat. I don’t know what’s different. I think everything I changed is correct.But when I printlnln(i) in the depthMapRealWorld function, it grows very slowly and increases by one value per second.

can you point to the part which is actually slow? of course if you go through every pixel in kinect that is slow. If you make skip to 4, for example, does that make framerate better?

This is my changed code.

PVector[] depthMapRealWorld()
{
  int[] depth = kinect.getRawDepthData();
  int skip = 1;
  for (int y = 0; y < kinect.getDepthImage().height; y+=skip) {
    for (int x = 0; x < kinect.getDepthImage().width; x+=skip) {
       println(x,y);
        int offset = x + y * kinect.getDepthImage().width;
        //calculate the x, y, z camera position based on the depth information
        PVector point = depthToPointCloudPos(x, y, depth[offset]);
      
        depthMap[kinect.getDepthImage().width * y + x] = point;
      }
    }
   
    return depthMap;
}

This is the original code.

PVector[] depthMapRealWorld()
{
  int[] depth = kinect.getRawDepth();
  int skip = 1;
  for (int y = 0; y < kinect.depthHeight; y+=skip) {
    for (int x = 0; x < kinect.depthWidth; x+=skip) {
        int offset = x + y * kinect.depthWidth;
        //calculate the x, y, z camera position based on the depth information
        PVector point = depthToPointCloudPos(x, y, depth[offset]);
        depthMap[kinect.depthWidth * y + x] = point;
      }
    }
    return depthMap;
}

Their for loop times are the same, but the value in the changed code may only increase by one per second or even a few seconds.Maybe something else caused this problem? I can’t find the problem.

I guess they are essentially the same. My suggestion was, what if you change step value to 4 or even bigger to speed up the loop. This technique is often used because going through all the pixels may not be possible in one frame (1/60 seconds). You will get a sparse map, which may not be what you want, but at least it will give you an idea of which part of the code is slowing down.

The effect is not great. I don’t think this is the problem, because it runs abnormally slow, and it may take tens of seconds to wait for each frame. The original code can maintain sixty frames per second without any problems. This is the modified code, you can run the code test.code

Ok then what I pointed out seems irrelevant. Sorry about that.

What makes it very difficult here is that without actual hardware (kinect/projector) it’s very challenging to find out what is causing the issue. Last thing I would do is to comment out everything in the draw loop, put them back in one by one and see which line is actually causing the frame drop.

1 Like

This can run without kinect, But need to install kinectPV2 library.I have tried to annotate one by one to find the problem, and finally found that when the depthMapRealWorld() method is used, It will run very slowly.

sorry I’m lazy to set up all the libraries to test it anyways. Could you add

if(frameCount%60==0) println(frameRate);

at the end of the draw function and tell us what the actual frame rate? before doing so you should comment out all the other println functions because this makes it run slow and also floods the console.

I tried to modify some things myself, put kinect.getDepthImage().width and kinect.getDepthImage().height into variables. The running speed seems to be faster. According to what you said, println is now about 25, but it is far from the original The running speed is still far behind, I think there are still some things I haven’t optimized.

The following is the latest modified code

//==========================================================
// set resolution of your projector image/second monitor
// and name of your calibration file-to-be
int pWidth = 800;
int pHeight = 400 ; 
String calibFilename = "calibration.txt";


//==========================================================
//==========================================================

import javax.swing.JFrame;
import KinectPV2.*;

import gab.opencv.*;
import controlP5.*;
import Jama.*;

KinectPV2 kinect;

OpenCV opencv;
ChessboardFrame frameBoard;
ChessboardApplet ca;
PVector[] depthMap;
PImage src;

ArrayList<PVector> foundPoints = new ArrayList<PVector>();
ArrayList<PVector> projPoints = new ArrayList<PVector>();
ArrayList<PVector> ptsK, ptsP;
PVector testPoint, testPointP;
boolean isSearchingBoard = false;
boolean calibrated = false;
boolean testingMode = false;
int cx, cy, cwidth;
int kinectWidth;
int kinectHeight;
void setup() 
{
  surface.setSize(1200, 768);
  
  textFont(createFont("Courier", 24));
  frameBoard = new ChessboardFrame();

  // set up kinect 
  kinect = new KinectPV2(this);
  kinect.enableDepthImg(true); 
  kinect.enableColorImg(true);
  kinect.enableInfraredImg(true);
  kinect.enableInfraredLongExposureImg(true);
  kinect.init();

  kinectWidth = kinect.getDepthImage().width;
  kinectHeight = kinect.getDepthImage().height;
  opencv = new OpenCV(this, kinectWidth,kinectHeight);

  depthMap = new PVector[kinectWidth*kinectHeight];
  src = createImage(512,424,ARGB);
  //src.resize(512,0);
  // matching pairs
  ptsK = new ArrayList<PVector>();
  ptsP = new ArrayList<PVector>();
  testPoint = new PVector();
  testPointP = new PVector();
  setupGui();
}

void draw() 
{
  if(frameCount%60==0) println(frameRate);
  // draw chessboard onto scene
  projPoints = drawChessboard(cx, cy, cwidth);
 // println(kinect.getDepthImage().width);
  // update depth map
  depthMap = depthMapRealWorld();

  src = kinect.getColorImage();
  
  // mirroring
  //for (int h = 0; h < kinect.getDepthImage().height; h++)
  //{
  //  for (int r = 0; r < kinect.getDepthImage().width / 2; r++)
  //  {
  //    PVector temp = depthMap[h*kinect.getDepthImage().width + r];
  //    depthMap[h*kinect.getDepthImage().width + r] = depthMap[h*kinect.getDepthImage().width + (kinect.getDepthImage().width - r - 1)];
  //    depthMap[h*kinect.getDepthImage().width + (kinect.getDepthImage().width - r - 1)] = temp;
  //  }
  //}

  // mirroring
  for (int h = 0; h < src.height; h++)
  {
    for (int r = 0; r < src.width / 2; r++)
    {
      int temp2 = src.get(r, h);   //h*src.width + r);
      src.set(r, h, src.get(src.width - r - 1, h));
      src.set(src.width - r - 1, h, temp2);
    }
  }

  opencv.loadImage(src);
  opencv.gray();

  if (isSearchingBoard)
    foundPoints = opencv.findChessboardCorners(4, 3);

  drawGui();
}

void drawGui() 
{
  background(0, 100, 0);

  // draw the RGB image
  pushMatrix();
  translate(30, 120);
  textSize(22);
  fill(255);
  image(src, 0, 0);
  
  // draw chessboard corners, if found
  if (isSearchingBoard) {
    int numFoundPoints = 0;
    for (PVector p : foundPoints) {
      if (getDepthMapAt((int)p.x, (int)p.y).z > 0) {
        fill(0, 255, 0);
        numFoundPoints += 1;
      }
      else  fill(255, 0, 0);
      ellipse(p.x, p.y, 5, 5);
    }
    if (numFoundPoints == 12)  guiAdd.show();
    else                       guiAdd.hide();
  }
  else  guiAdd.hide();
  if (calibrated && testingMode) {
    fill(255, 0, 0);
    ellipse(testPoint.x, testPoint.y, 10, 10);
  }
  popMatrix();

  // draw GUI
  pushMatrix();
  pushStyle();
  translate(512+70, 40); // this is black box
  fill(0);
  rect(0, 0, 450, 680); // blackbox size
  fill(255);
  text(ptsP.size()+" pairs", 26, guiPos.y+525);
  popStyle();
  popMatrix();
}

ArrayList<PVector> drawChessboard(int x0, int y0, int cwidth) {
  ArrayList<PVector> projPoints = new ArrayList<PVector>();
  ca.redraw();
  return projPoints;
}


void addPointPair() {
  if (projPoints.size() == foundPoints.size()) {
   // println(getDepthMapAt((int) foundPoints.get(1).x, (int) foundPoints.get(1).y));
    for (int i=0; i<projPoints.size(); i++) {
      ptsP.add( projPoints.get(i) );
      ptsK.add( getDepthMapAt((int) foundPoints.get(i).x, (int) foundPoints.get(i).y) );
    }
  }
  guiCalibrate.show();
  guiClear.show();
}

PVector getDepthMapAt(int x, int y) {
  PVector dm = depthMap[512 * y + x];
  
  return new PVector(dm.x, dm.y, dm.z);
}

void clearPoints() {
  ptsP.clear();
  ptsK.clear();
  guiSave.hide();
}

void saveC() {
  saveCalibration(calibFilename); 
}

void loadC() {
  println("load");
  loadCalibration(calibFilename);
  guiTesting.addItem("Testing Mode", 1);
}

void mousePressed() {
  if (calibrated && testingMode) {
    testPoint = new PVector(constrain(mouseX-30, 0, kinectWidth-1), 
                            constrain(mouseY-120, 0, kinectHeight-1));
    int idx = 512 * (int) testPoint.y + (int) testPoint.x;
    
    testPointP = convertKinectToProjector(depthMap[idx]);
  }
}


// all functions below used to generate depthMapRealWorld point cloud
PVector[] depthMapRealWorld()
{
  int[] depth = kinect.getRawDepthData();
  int skip = 1;
  for (int y = 0; y < kinectHeight; y+=skip) {
    for (int x = 0; x < kinectWidth; x+=skip) {
     //  println(x,y);
        int offset = x + y * kinectWidth;
        //calculate the x, y, z camera position based on the depth information
        PVector point = depthToPointCloudPos(x, y, depth[offset]);
      
        depthMap[kinectWidth * y + x] = point;
      }
    }
   
    return depthMap;
}

//calculte the xyz camera position based on the depth data
PVector depthToPointCloudPos(int x, int y, float depthValue) {
  PVector point = new PVector();
  point.z = (depthValue);// / (1.0f); // Convert from mm to meters
  point.x = ((x - CameraParams.cx) * point.z / CameraParams.fx);
  point.y = ((y - CameraParams.cy) * point.z / CameraParams.fy);
  return point;
}

//camera information based on the Kinect v2 hardware
static class CameraParams {
  static float cx = 254.878f;
  static float cy = 205.395f;
  static float fx = 365.456f;
  static float fy = 365.456f;
  static float k1 = 0.0905474;
  static float k2 = -0.26819;
  static float k3 = 0.0950862;
  static float p1 = 0.0;
  static float p2 = 0.0;
}