Pixel position synchronization error with face detector

Hello, I am trying to create code that pixelates a video following the movement of the face detector. I managed to create the pixelated effect on the video, but it stays in the upper corner and does not follow the face detector. I ran many tests changing the variables in this area, thinking the problem was there, but I could not find the cause.


cols= faces[i].x/cuadraditos;
rows= faces[i].y/ cuadraditos;
 for (faces[i].x = 0; faces[i].x < cols; faces[i].x++) {
    // Begin loop for rows
    for ( int j = 0; j < rows; j++) {

  int     x = faces[i].x  *cuadraditos;
 int    y = j * cuadraditos;
      // Looking up the appropriate color in the pixel array
      color c = film.pixels[x + j * film.width];
      fill(c);
      stroke(0);
      rect(x, y, cuadraditos, cuadraditos);

    if (frameCount%15==0) println(faces[i].x, faces[i].y, d);
  }
}

I think the problem is that I made the pixelated effect start in the upper-left corner, and I should change those coordinates to faces[i].x and faces[i].y, but I don't know exactly where. Please, I would appreciate your guidance, thank you.
I put the entire code of what I have so far.

import gab.opencv.*;

import processing.video.*;
import java.awt.*;
Capture cam;
OpenCV opencv;
Movie film;
PImage cara;
int cuadraditos = 8;
// Number of columns and rows in our system
int cols, rows;

void settings() {
  size(640, 480);
}

void setup() {  
  cara= loadImage("cara2.png");
  colorMode(HSB, 256, 256, 256);

  film= new Movie(this, "Secuencia 01_2.mp4");
  cam = new Capture(this, width, height);  

  opencv = new OpenCV(this, width, height);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);  

  cam.start();
  film.loop();


  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
}

void draw() {
  background(250, 150, 0);

  //scale(2);
  opencv.loadImage(cam);

  //video.loadPixels();  
  image(film, 0, 0);
  loadPixels();

  Rectangle[] faces = opencv.detect();



  if (frameCount%15==0) println(faces.length);

  for (int x = 0; x < film.width; x++) {    
    for (int y = 0; y < film.height; y++) {      
      // Calculate the 1D location from a 2D grid
      int loc = x + y * film.width;      

      float h= hue(film.pixels[loc]);
      float s= saturation(film.pixels[loc]);
      float br= brightness(film.pixels[loc]);



      for (int i = 0; i < faces.length; i++) {
        //println(faces[i].x + "," + faces[i].y);
        float d = dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);      
        float adjustbrightness = map(d, 50, 200, 0, 255);


        color c = color(h, s, br);      
     
      }
    }
  }
  updatePixels();

  for (int i = 0; i < faces.length; i++) {
    //println(faces[i].x + "," + faces[i].y);
    float d = dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);


cols= faces[i].x/cuadraditos;
rows= faces[i].y/ cuadraditos;
 for (faces[i].x = 0; faces[i].x < cols; faces[i].x++) {
    // Begin loop for rows
    for ( int j = 0; j < rows; j++) {

  int     x = faces[i].x  *cuadraditos;
 int    y = j * cuadraditos;
      // Looking up the appropriate color in the pixel array
      color c = film.pixels[x + j * film.width];
      fill(c);
      stroke(0);
      rect(x, y, cuadraditos, cuadraditos);

    if (frameCount%15==0) println(faces[i].x, faces[i].y, d);
  }
}
  }}

void captureEvent(Capture video) {  
  video.read();
}

void movieEvent(Movie m) {
  m.read();
}
1 Like

This isn’t testable without your film file. Is that film file required, or could you remove it and provide a test sketch that only uses the camera?

1 Like

Video is required in addition to the camera

Can you share the file? Or could it be any video file?

Any video can be used; the goal is to be able to pixelate any video over your face. I made changes and managed to make the pixels appear where the face detector is, but I want the effect to cover the whole face — right now it only occupies one column. I still don't know what the problem is or what I should change so the pixels cover the entire face detector. Here are the changes I made:
specifically I changed this part:

 for (int i = 0; i < faces.length; i++) {
    //println(faces[i].x + "," + faces[i].y);
    float d = dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  

cols= faces[i].x/cuadraditos;
rows= faces[i].y/ cuadraditos;
 for (faces[i].x = 0; faces[i].x < cols; faces[i].x++) {
    // Begin loop for rows
    for ( int j = 0; j < rows; j++) {

      // Where are we, pixel-wise?
      
 int  x= faces[i].x * cuadraditos;
 
int y= j * cuadraditos;
      // Looking up the appropriate color in the pixel array
      color c = film.pixels[x + y * film.width];
      fill(c);
      stroke(0);
      dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
      rect(d,y, cuadraditos,cuadraditos);

full code

import gab.opencv.*;

import processing.video.*;
import java.awt.*;
Capture cam;
OpenCV opencv;
Movie film;
PImage cara;
int cuadraditos = 15;
// Number of columns and rows in our system
int cols, rows;

void settings() {
  size(640, 480);
}

void setup() {  
  cara= loadImage("cara2.png");
  colorMode(HSB, 256, 256, 256);

  film= new Movie(this, "Secuencia 01_2.mp4");
  cam = new Capture(this, width, height);  

  opencv = new OpenCV(this, width, height);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);  

  cam.start();
  film.loop();


  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
}

void draw() {
  background(250, 150, 0);

  //scale(2);
  opencv.loadImage(cam);

  //video.loadPixels();  
  image(film, 0, 0);
  loadPixels();

  Rectangle[] faces = opencv.detect();



  if (frameCount%15==0) println(faces.length);

  for (int x = 0; x < film.width; x++) {    
    for (int y = 0; y < film.height; y++) {      
      // Calculate the 1D location from a 2D grid
      int loc = x + y * film.width;      

      float h= hue(film.pixels[loc]);
      float s= saturation(film.pixels[loc]);
      float br= brightness(film.pixels[loc]);



      for (int i = 0; i < faces.length; i++) {
        //println(faces[i].x + "," + faces[i].y);
        float d = dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);      
        float adjustbrightness = map(d, 50, 200, 0, 255);


        color c = color(h, s, br);      
     
      }
    }
  }
  updatePixels();

  for (int i = 0; i < faces.length; i++) {
    //println(faces[i].x + "," + faces[i].y);
    float d = dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  

cols= faces[i].x/cuadraditos;
rows= faces[i].y/ cuadraditos;
 for (faces[i].x = 0; faces[i].x < cols; faces[i].x++) {
    // Begin loop for rows
    for ( int j = 0; j < rows; j++) {

      // Where are we, pixel-wise?
      
 int  x= faces[i].x * cuadraditos;
 
int y= j * cuadraditos;
      // Looking up the appropriate color in the pixel array
      color c = film.pixels[x + y * film.width];
      fill(c);
      stroke(0);
      dist(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
      rect(d,y, cuadraditos,cuadraditos);


    if (frameCount%15==0) println(faces[i].x, faces[i].y, d);
  }
}
  }}

void captureEvent(Capture video) {  
  video.read();
}

void movieEvent(Movie m) {
  m.read();
}