Python help? Export OSC data to Processing

Hey I’m relatively new to all of this, especially Python. But I essentially need some data from a Python program. The full code is in this github repo: https://github.com/omar178/Emotion-recognition#p1.

But essentially I think the values I’m looking for are stored in a variable called ‘prob’ - the probability value of a particular face emotion/expression at that frame. I tried printing this and since it’s in a for loop, it prints a value for each of the 7 expressions at each frame (continuously). My question is how would I store this prob value into data packets (float vectors) of 7 (a prob value for each of the 7 expressions) for each frame, and then how would I send this to port 8338? (I know how to receive the data from port 8338 in processing using the osc library).
PYTHON MAIN CODE BELOW

from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np

# parameters for loading data and images
# Paths to the pre-trained Haar cascade (face detection) and the
# mini-XCEPTION CNN weights (emotion classification).
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'

# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
# compile=False: inference only, no training configuration needed.
emotion_classifier = load_model(emotion_model_path, compile=False)
# Class labels in the order the classifier emits its 7 probabilities.
EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised",
 "neutral"]


#feelings_faces = []
#for index, emotion in enumerate(EMOTIONS):
   # feelings_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))

# starting video streaming
# Main loop: grab a frame, detect the largest face, classify its emotion,
# then draw per-emotion probability bars on a side canvas.
cv2.namedWindow('your_face')
camera = cv2.VideoCapture(0)
while True:
    frame = camera.read()[1]
    # reading the frame; shrink for faster detection, grayscale for the
    # cascade and the CNN (both operate on single-channel input).
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)

    canvas = np.zeros((250, 300, 3), dtype="uint8")
    frameClone = frame.copy()
    if len(faces) > 0:
        # detectMultiScale returns (x, y, w, h) rectangles, so a face's
        # area is w * h.  The original key (x[2]-x[0])*(x[3]-x[1]) treated
        # the tuple as corner coordinates and ranked faces incorrectly.
        faces = sorted(faces, reverse=True,
                       key=lambda rect: rect[2] * rect[3])[0]
        (fX, fY, fW, fH) = faces
        # Extract the ROI of the face from the grayscale image, resize it
        # to the 64x64 input the CNN expects, scale to [0, 1], and add a
        # batch dimension for prediction.
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (64, 64))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

        preds = emotion_classifier.predict(roi)[0]
        emotion_probability = np.max(preds)
        label = EMOTIONS[preds.argmax()]
    else:
        # No face in this frame: skip drawing and grab the next frame.
        continue

    for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
        print(prob)
        # construct the label text, e.g. "happy: 42.00%"
        text = "{}: {:.2f}%".format(emotion, prob * 100)

        # draw the label + probability bar on the canvas
        # (bar width proportional to the probability)
        # emoji_face = feelings_faces[np.argmax(preds)]
        w = int(prob * 300)
        cv2.rectangle(canvas, (7, (i * 35) + 5),
                      (w, (i * 35) + 35), (0, 0, 255), -1)
        cv2.putText(canvas, text, (10, (i * 35) + 23),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                    (255, 255, 255), 2)
        # annotate the live frame with the winning label and face box
        cv2.putText(frameClone, label, (fX, fY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                      (0, 0, 255), 2)

#    for c in range(0, 3):
#        frame[200:320, 10:130, c] = emoji_face[:, :, c] * \
#        (emoji_face[:, :, 3] / 255.0) + frame[200:320,
#        10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)

    cv2.imshow('your_face', frameClone)
    cv2.imshow("Probabilities", canvas)
    # press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()

1 Like

Hello, I am sure I have seen another OSC question about Python recently. I have not attempted to communicate between Python and Processing using OSC myself, but it should be doable. Here, I would like to suggest you explore a recent example I shared in this post, where I send data from Python and Processing listens to it:

This is an alternative to an OSC solution.

Kf

1 Like

Hey, thanks that was an interesting way too but I actually got OSC working! Just having some issues with the data type, can you please help with this?

So my python code basically sends

  # Send the whole per-frame probability vector as ONE OSC message: python-osc
  # accepts a list of arguments, so the receiver gets all 7 floats together as
  # a single /filter packet instead of 7 separate one-float messages.
  client.send_message("/filter", [float(v) for v in data])

My processing program receives:

public void rawReceived(float[] allPoints) {
  // Copy every incoming value into the global rawPoints buffer,
  // echoing each one to the console as it is stored.
  int idx = 0;
  while (idx < allPoints.length) {
    float v = allPoints[idx];
    rawPoints[idx] = v;
    println(v);
    idx++;
  }
}

It prints each number on a new line (as shown below), but I just need to store them as packets of 7.

0.11
0.40
0.33
0.60
0.40
0.20
0.50
0.12
0.41
0.3
0.63
0.41
0.21
0.50
...

I want:

array 1 = {0.11, 0.12,...}  //1st,7th,14th,21st....variable printed
array 2  = {0.40, 0.41,...} //2nd, 8th, 15th, 22nd....
 .....
array 7 = {0.50,0.50,...}
1 Like

You have done the hardest part so now the question is, do you want to handle your case logic (data grouping) in Python side or Processing side?

I will do it on the Processing side, and I will apply a slight modification to your Python code. First:

  • I will send client.send_message("/filter", str(index)+","+str(data[x]) ). Adding an index/counter to your data will allow you to check for data integrity on the receiving end if needed. I find this will make your life easier when it comes to debugging.
  • In Processing, you process your data as it arrives adding it to the right category.

This demo runs for about 10 seconds. You can press any key to see the content of each category.

Note: If you are not concerned with data integrity, I would drop the TreeMap and use a FloatList instead.

Kf

//===========================================================================
// IMPORTS:
import java.util.Map;
import java.util.TreeMap;

//===========================================================================
// FINAL FIELDS:
final int N=7;  // number of categories (one per emotion probability stream)
final char SEPARATOR=',';  // separator used in the "index,value" tokens

//===========================================================================
// GLOBAL VARIABLES:
ArrayList<TreeMap<Integer, Float>> categories;  // per-category ordered map: sample counter -> value
String indata;  // most recently received/generated raw token

//===========================================================================
// PROCESSING DEFAULT FUNCTIONS:

void settings() {
  // Fixed 400x600 sketch window.
  size(400, 600);
}

void setup() {
  // Allocate one ordered (index -> value) map per category so samples
  // stay sorted by their arrival counter.
  categories = new ArrayList<TreeMap<Integer, Float>>();
  int created = 0;
  while (created < N) {
    categories.add(new TreeMap<Integer, Float>());
    created++;
  }
}


void draw() {
  background(0);
  // Fake one incoming OSC token per frame: "<counter>,<random value>".
  String sample = str(frameCount) + SEPARATOR + str(random(MAX_INT));
  indata = sample;
  processData(sample);

  // End the demo after roughly ten seconds of runtime.
  if (millis() > 10000) {
    exit();
  }
}

void keyReleased() {
  // Any key press dumps every category collected so far to the console.
  printData();
}

//===========================================================================
// OTHER FUNCTIONS:
// Parse one "index,value" token and file the value into category index % N.
void processData(String data) {
  // String.split takes a String (regex), not a char: passing SEPARATOR
  // directly does not compile in Java, so convert it explicitly.
  // (',' has no special regex meaning, so the conversion is safe.)
  String[] tokens = data.split(String.valueOf(SEPARATOR));
  if (tokens.length == 2) {
    int idx = int(tokens[0]);
    float val = float(tokens[1]);
    int whichCategory = idx % N;  // assuming there are N interleaved categories
    categories.get(whichCategory).put(idx, val);
  }
}

// Dump every (counter -> value) entry of every category to the console.
void printData() {
  println("=======\nDATA DUMP\n=======");
  int cat = 0;
  while (cat < N) {
    println("CATEGORY #" + cat);
    for (Map.Entry me : categories.get(cat).entrySet()) {
      println("\t" + me.getKey() + " => " + me.getValue());
    }
    cat++;
  }
}
2 Likes