Gimbals, quaternions, and manual rotation

The long answer:

If a PShape is a GROUP that contains other PShapes, then you need to either get its children first, or try to tessellate it. PShape's methods can be quite stingy, so I’ve found it’s better to store important shape data separately: namely, the transform matrix and a collection of vertices.

It’s possible for getVertex to return redundant information. Processing’s UV sphere is a good illustration of this: the same vertex at the north and south poles of a sphere is repeated for every longitude. If you know about collections you can use a Set to hold only unique instances of a vertex coordinate.

Another consideration: the vertices set when you create a shape are in object space. For example, in a sphere with a radius of 0.5, the north pole is at (0.0, 0.5, 0.0) and the south pole is at (0.0, -0.5, 0.0). They will not necessarily change as you translate, rotate or scale the shape in draw. To change from object space to world space, you multiply a vertex with the shape’s transform.

0287

import java.util.*;

// A shape's points can be retrieved with getVertex . However, a shape
// may return multiple copies of the same point -- for example -- the
// same two points at the north and south pole of a sphere. A set
// prevents duplicates.
// A shape's points can be retrieved with getVertex . However, a shape
// may return multiple copies of the same point -- for example -- the
// same two points at the north and south pole of a sphere. A set
// prevents duplicates.
// NOTE(review): de-duplication relies on PVector providing value-based
// equals/hashCode -- confirm this holds for your Processing version.
Set<PVector> vertices = new HashSet<PVector>();

// A shape's transform can only be indirectly set with resetMatrix
// and applyMatrix , so a copy of it is tracked here instead.
PMatrix3D shapeMat = new PMatrix3D();

// The renderer is needed to read the modelview and projection
// matrices used by the screen function below.
PGraphics3D rndr;

// Hit-test radius around the mouse, in pixels. To avoid the sqrt
// incurred by calculating the distance, square the radius.
float radius = 3.0;
float radsq = radius * radius;

void setup() {
  size(256, 256, P3D);
  ellipseMode(RADIUS);
  perspective();

  // Place the eye on the positive z-axis, looking at the origin.
  camera(
    0.0, 0.0, height * 0.86602,
    0.0, 0.0, 0.0,
    0.0, 1.0, 0.0);

  // sphereDetail governs how many latitudes and longitudes any
  // sphere -- including one baked into a PShape -- is built with.
  sphereDetail(27, 9);
  PShape shape = createShape(SPHERE, 0.5);

  // Copy the shape's vertices into the set; repeats collapse.
  int count = shape.getVertexCount();
  for (int idx = 0; idx < count; ++idx) {
    vertices.add(shape.getVertex(idx));
  }

  // Compare the raw vertex count against the de-duplicated total.
  println(count, vertices.size());

  rndr = (PGraphics3D)getGraphics();
  shapeMat.scale(200.0);
}

void draw() {
  // Nudge the model matrix each frame about a diagonal axis.
  shapeMat.rotate(0.002, 0.7071, 0.7071, 0.0);
  background(#fff7d5);

  // Scratch vectors reused across the loop below.
  PVector worldVert = new PVector();   // vertex after the shape transform
  PVector screenVert = new PVector();  // that vertex projected to the screen
  PVector cursor = new PVector(mouseX, mouseY);
  PVector delta = new PVector();       // screenVert minus cursor

  for (PVector vert : vertices) {

    // Two transformations per point: object space to world space,
    // then world space to screen space.
    shapeMat.mult(vert, worldVert);
    screen(rndr, worldVert, screenVert);

    // Squared Euclidean distance to the mouse; comparing squares
    // against the squared radius avoids a square root.
    PVector.sub(screenVert, cursor, delta);
    float distSq = PVector.dot(delta, delta);

    // Points near the mouse draw big and red; the rest, small and
    // near-black.
    if (distSq < radsq) {
      stroke(#ff2828);
      strokeWeight(10.0);
    } else {
      strokeWeight(4.0);
      stroke(#202020);
    }

    point(worldVert.x, worldVert.y, worldVert.z);
  }
}

It may help to do some research on how screen and model functions work. I’ve never had any luck with those two functions, but I think others have worked on the issue you’re exploring now. This forum thread is one place to start P3D Formula : opposite of screenX and screenY is ...? . The source code for screen is opaque; just know that – just like changing vertices from object space to world space above – multiplying points and transforms is involved.

static Vec4 promote(PVector src, float w, Vec4 trg) {

  // Lift a 3D point into homogeneous coordinates by appending w.
  return trg.set(src.x, src.y, src.z, w);
}

static Vec4 mul(PMatrix3D m, Vec4 src, Vec4 trg) {

  // Multiply a 4x4 matrix by a column vector. Each component of the
  // result is the dot product of one matrix ROW with the vector:
  //
  // [ m00, m01, m02, m03,     [ x,        [ dot(row0, v),
  //   m10, m11, m12, m13,  x    y,    :=    dot(row1, v),
  //   m20, m21, m22, m23,       z,          dot(row2, v),
  //   m30, m31, m32, m33 ]      w ]         dot(row3, v) ]
  //
  // where v = (x, y, z, w).

  return trg.set(
    m.m00 * src.x + m.m01 * src.y + m.m02 * src.z + m.m03 * src.w,
    m.m10 * src.x + m.m11 * src.y + m.m12 * src.z + m.m13 * src.w,
    m.m20 * src.x + m.m21 * src.y + m.m22 * src.z + m.m23 * src.w,
    m.m30 * src.x + m.m31 * src.y + m.m32 * src.z + m.m33 * src.w);
}

static PVector demote(Vec4 source, PVector target) {

  // Demote a homogeneous coordinate to a 3D point by dividing
  // the x, y and z components by w (the perspective divide).
  // The exact-zero check guards against division by zero; any
  // nonzero w, however small, is still divided through.
  if (source.w == 0.0) {
    return target.set(0.0, 0.0, 0.0);
  }
  float wInv = 1.0 / source.w;
  return target.set(
    source.x * wInv,
    source.y * wInv,
    source.z * wInv);
}

static PVector screen(
  PGraphicsOpenGL rndr,
  PVector src,
  PVector trg) {

  // Project a world-space point to 2D screen coordinates using the
  // renderer's current modelview and projection matrices.
  // Promote to homogeneous coordinates with w = 1 so the matrix
  // multiplications below can apply translation and perspective.
  Vec4 point = promote(src, 1.0, new Vec4());

  // Multiply by the modelview, then by the projection.
  Vec4 mvmul = mul(rndr.modelview, point, new Vec4());
  Vec4 projmul = mul(rndr.projection, mvmul, new Vec4());

  // Perspective divide: clip space to normalized device coordinates.
  demote(projmul, trg);

  // Shift range from [-1.0, 1.0] to [0.0, 1.0] .
  trg.add(1.0, 1.0, 1.0).mult(0.5);  

  // Multiply by screen dimensions.
  trg.x *= rndr.width;
  trg.y *= rndr.height;

  // Flip the y-axis because Processing's y grows downward while
  // normalized device coordinates grow upward.
  trg.y = rndr.height - trg.y;
  return trg;
}

static class Vec4 {
  // A minimal homogeneous-coordinate holder: x, y, z plus w.
  float x, y, z, w;

  // Zero vector by default.
  Vec4() { }

  Vec4(float x, float y, float z, float w) {
    set(x, y, z, w);
  }

  // Assign all four components at once; returns this for chaining.
  Vec4 set(float x, float y, float z, float w) {
    this.x = x;
    this.y = y;
    this.z = z;
    this.w = w;
    return this;
  }
}

Regards,

1 Like

Just a note that you might also be interested in looking at the Picking Library implementation of picking. It is P2D/P3D only – not Java2D – but since you are using P3D anyway, it might be a good fit. Previous discussion:

Courtesy update.
The project had gotten like a library of libraries in a garden of forking paths. So, I guess I was asking for directions having started in the wrong place.
To make things more manageable I have retraced my steps and started again. This time without the tori. It is much easier to track the rotation of a sphere and when it comes time to have three orthogonal rings I can do this with a texture, or a height map, or both!
Here is the code for rotation of a sphere around three axes. Now to replace the key presses with a mouse event…

// The sphere shape rotated in response to key presses.
PShape sphera;


// Axis around which sphera rotates
PVector axis = new PVector();

// Record of whether or not a key is pressed
boolean xPressed;
boolean yPressed;
boolean zPressed;

// Rotation step applied each frame while an axis key is held.
float rotateSpeed = radians(0.5);

void setup() {
  size(600, 600, P3D);

  // Look at the origin from a point on the positive z-axis.
  camera(
    0.0, 0.0, height * 0.86602, 
    0.0, 0.0, 0.0, 
    0.0, 1.0, 0.0);

  // See https://processing.org/reference/createShape_.html ;
  // sphereDetail() could tune the mesh resolution if desired.
  sphera = createShape(SPHERE, 150);
}

void draw() {
  // Build the rotation axis from whichever of the x/y/z keys are
  // currently held, then normalize it to unit length.
  bvec(xPressed, yPressed, zPressed, axis);
  axis.normalize();
  if (keyPressed) { 
    println(axis);
  }

  // Guard against a zero-length axis (no key held): the underlying
  // axis-angle rotation does not normalize its input, so rotating
  // about (0, 0, 0) degenerates -- it uniformly shrinks the shape a
  // little each frame instead of rotating it. Only rotate when the
  // axis is non-degenerate.
  if (axis.magSq() > 0.0) {
    sphera.rotate(rotateSpeed, axis.x, axis.y, axis.z);
  }

  background(#fff7d5);
  shape(sphera);
}

void keyPressed() {
  // Raise the flag for whichever axis key went down (either case).
  if (key == 'x' || key == 'X') { xPressed = true; }
  if (key == 'y' || key == 'Y') { yPressed = true; }
  if (key == 'z' || key == 'Z') { zPressed = true; }
}

void keyReleased() {
  // Clear the flag for whichever axis key came up (either case).
  if (key == 'x' || key == 'X') { xPressed = false; }
  if (key == 'y' || key == 'Y') { yPressed = false; }
  if (key == 'z' || key == 'Z') { zPressed = false; }
}


PVector bvec(boolean x, boolean y, boolean z) {
  // Convenience overload that allocates the result vector.
  return bvec(x, y, z, new PVector());
}

PVector bvec(
  boolean x, 
  boolean y, 
  boolean z, 
  PVector target) {
  // Write 1.0 or 0.0 per component into target, allocating a fresh
  // vector when the caller passes null.
  PVector out = (target == null) ? new PVector() : target;
  return out.set(
    boolToFloat(x),
    boolToFloat(y),
    boolToFloat(z));
}

float boolToFloat(boolean bool) {
  // Map true to 1.0 and false to 0.0.
  if (bool) {
    return 1.0;
  }
  return 0.0;
}
2 Likes