Hello everyone,
I am still exploring the world of shaders in Processing, even though I keep running into problems.
My latest problem: when I try to pass a texture (for example a PGraphics or a PImage) as a parameter from Processing to the shader, it keeps telling me that the uniform was removed during compilation.
To pass the texture from the CPU to the GPU (from Processing to the shader) I do it this way:
shader.set("texture", myTexture);
Intuitively this should work, but it keeps giving me the error:
The shader doesn't have a uniform called "texture" OR the uniform was removed during compilation because it was unused.
If I then try to access the texture through the sampler2D uniform anyway, the sketch crashes.
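For reference, this is the minimal pattern I am trying to follow (the file names and the uniform name myTexture are just placeholders). My understanding is that the sampler2D uniform has to actually be sampled somewhere in the GLSL, otherwise the compiler strips it as unused and set() reports exactly this error:
PShader sh;
PGraphics myTexture;

void setup() {
  size(640, 360, P3D);
  // Placeholder shader files; the fragment shader is expected to declare
  // "uniform sampler2D myTexture;" and to actually sample it somewhere.
  sh = loadShader("texFrag.glsl", "texVert.glsl");
  myTexture = createGraphics(512, 512);
  myTexture.beginDraw();
  myTexture.background(100);
  myTexture.endDraw();
  // The name passed here has to match the sampler2D uniform declared in the shader.
  sh.set("myTexture", myTexture);
}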
Here is the full code:
// Import PeasyCam, for easy camera controls
import peasy.*;
PeasyCam cam;
// 2D arrays for the points and for the noise offsets, so we don't recalculate them every time
PVector[][] points;
PVector[][] noise;
// Radius of the sphere
float radius = 200;
// Total number of points
// 2D array means 250*250 = 62,500 points in total
int total = 250;
// This creates the animation for the sphere
float updateNoise = 0;
// offscreen render to use as texture in the shader
PGraphics txt;
void settings() {
size(1280, 720, P3D);
}
void setup() {
// Maximum frame rate, for testing purposes
frameRate(1000);
txt = createGraphics(1024, 1024);
txt.beginDraw();
txt.background(100);
txt.endDraw();
// Initialize the shader, check the shader tab
initShader();
// Disable the depth test to not have weird shading on the colors
hint(DISABLE_DEPTH_TEST);
// field of view and perspective of the camera
float fov = PI/3.0;
float cameraZ = (height/2.0) / tan(fov/2.0);
perspective(fov, float(width)/float(height),
cameraZ/10.0, cameraZ*10000.0);
// Initialize the points and noise arrays
points = new PVector[total][total];
noise = new PVector[total][total];
// Initialize the camera
cam = new PeasyCam(this, 500);
// Variable for the 2D noise, x coordinate
float nx = 0;
for (int i = 0; i < total; i++) {
// Calculate the latitude
float lat = map(i, 0, total-1, 0, PI);
// Second variable for the noise
float ny = 0;
for (int j = 0; j < total; j++) {
// Longitude
float lon = map(j, 0, total-1, 0, TWO_PI);
// Radius with noise applied
float r = radius * noise(nx, ny);
// Spherical coordinates
float x = r * sin(lat) * cos(lon);
float y = r * sin(lat) * sin(lon);
float z = r * cos(lat);
points[i][j] = new PVector(x, y, z);
noise[i][j] = new PVector(nx, ny);
ny += 0.01;
}
nx += 0.02;
}
}
void draw() {
background(0);
update(); // Update the coordinates
updateShader(); // Update the buffer in the openGL shader
showShader(); // Display the shader
}
void update() {
// TODO: optimize the update method
for (int i = 0; i < total; i++) {
float lat = map(i, 0, total-1, 0, PI);
for (int j = 0; j < total; j++) {
float lon = map(j, 0, total-1, 0, TWO_PI);
float r = radius * noise(noise[i][j].x + updateNoise, noise[i][j].y + updateNoise);
points[i][j].x = r * sin(lat) * cos(lon);
points[i][j].y = r * sin(lat) * sin(lon);
points[i][j].z = r * cos(lat);
}
}
updateNoise += 0.01;
}
OpenGL:
// Import NIO from Java, for I/O operations and to exchange data between the Java sketch and the GLSL shader
import java.nio.*;
import com.jogamp.opengl.*;
// Declare shader variables
float[] shaderPoints; // The points to share with openGL
int vertLoc; // Keeps the vertex location for the buffer
PGL pgl; // PGL, Processing openGL
PShader sh; // Vertex and Fragment shader
FloatBuffer pointCloudBuffer; // FloatBuffer of the openGL implementation
ByteBuffer byteBuf; // ByteBuffer to share floats between Processing sketch and OpenGL
int vertexVboId; // Vertex Buffer Object ID
void initShader() {
// Load the vertex and fragment shader
sh = loadShader("frag.glsl", "vert.glsl");
// OpenGL expects a flat float array rather than vectors:
// every 3 consecutive values are the x, y and z of one vertex.
shaderPoints = new float[total*total*3];
// Start a new PGL context
pgl = beginPGL();
// Declare an int buffer, it will identify our vertex buffer
IntBuffer intBuffer = IntBuffer.allocate(1);
// Generate 1 buffer and put the resulting identifier in the int buffer
pgl.genBuffers(1, intBuffer);
// Get the ID from the int buffer
vertexVboId = intBuffer.get(0);
// Allocate the float array in a byte buffer
byteBuf = ByteBuffer.allocateDirect(shaderPoints.length * Float.BYTES); //4 bytes per float
// End the PGL
endPGL();
}
void updateShader() {
// This updates the values in our shader by sharing the buffer from Processing to OpenGL.
// The float array holds the vertex coordinates: each value is one of the x, y or z
// coordinates of a vertex. The index keeps track of the current position in the array.
int index = 0;
for (int i = 0; i < total; i++) {
for (int j = 0; j < total; j++) {
shaderPoints[index+0] = points[i][j].x; // X
shaderPoints[index+1] = points[i][j].y; // Y
shaderPoints[index+2] = points[i][j].z; // Z
index += 3; // index increments by 3
}
}
// Allocate the float array in a byte buffer
// This has been moved to the Shader setup because it was draining the memory, thanks to Neil Smith for the fix
// byteBuf = ByteBuffer.allocateDirect(shaderPoints.length * Float.BYTES); //4 bytes per float
// Set the byte order of the byte buffer
byteBuf.order(ByteOrder.nativeOrder());
// View the byte buffer as a float buffer
pointCloudBuffer = byteBuf.asFloatBuffer();
// Put the values in the float buffer
pointCloudBuffer.put(shaderPoints);
// Set the position to 0, starting point of the buffer
pointCloudBuffer.position(0);
}
void showShader() {
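// Pass the offscreen PGraphics to the shader; "txtr" has to match the sampler2D uniform name in the GLSL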
sh.set("txtr", txt);
// Begin PGL
pgl = beginPGL();
// Get the GL Context and enable gl_PointSize
GLContext.getCurrentGL().getGL3().glEnable(GL3.GL_PROGRAM_POINT_SIZE);
// Bind the Shader
sh.bind();
// Set the vertex location, from the shader
vertLoc = pgl.getAttribLocation(sh.glProgram, "vertex");
// Enable the generic vertex attribute array specified by vertLoc
pgl.enableVertexAttribArray(vertLoc);
// Number of float values in the buffer
int vertData = shaderPoints.length;
// Binds the buffer object
pgl.bindBuffer(PGL.ARRAY_BUFFER, vertexVboId);
// Give our vertices to OpenGL.
pgl.bufferData(PGL.ARRAY_BUFFER, Float.BYTES * vertData, pointCloudBuffer, PGL.DYNAMIC_DRAW);
pgl.vertexAttribPointer(vertLoc, // Vertex attribute location, must match the shader
3, // Size: 3 values (x, y, z) per vertex
PGL.FLOAT, // Type of the values in the buffer
false, // Normalized?
Float.BYTES * 3, // Stride: bytes between consecutive vertices (3 floats)
0 // Offset of the first value in the buffer
);
// Here I try to enable gl_PointSize
//pgl.Enable( PGL.VERTEX_POINT_SIZE );
// Unbind the vertex buffer object
pgl.bindBuffer(PGL.ARRAY_BUFFER, 0);
// Draw the sphere
pgl.drawArrays(PGL.POINTS, // Type of draw, in this case POINTS
0, // Starting from vertex 0
vertData / 3 // Number of vertices (the buffer holds 3 floats per vertex)
);
// Disable the generic vertex attribute array specified by vertLoc
pgl.disableVertexAttribArray(vertLoc);
// Unbind the shader
sh.unbind();
// End the PGL
endPGL();
}
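One thing I am not sure about: since I bind the shader myself through PGL instead of letting Processing draw the geometry, maybe I also need to bind the texture manually. I was thinking of something like this inside showShader(), right after sh.bind() (getTexture() on the main renderer and the texture unit index are just my guess at the right approach):
// My guess, not sure this is correct: bind the PGraphics' underlying GL texture
// to texture unit 0 and point the sampler uniform at that unit.
Texture tex = ((PGraphicsOpenGL) g).getTexture(txt);
pgl.activeTexture(PGL.TEXTURE0);
pgl.bindTexture(PGL.TEXTURE_2D, tex.glName);
sh.set("txtr", 0); // sampler2D uniforms take the texture unit index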
Vertex Shader:
uniform mat4 transform;
uniform sampler2D txtr;
attribute vec4 vertex;
attribute vec4 color;
varying vec4 vertColor;
void main() {
gl_Position = transform * vertex;
float st = texture2D(txt_in, vec2(0.5, 0.5)).r;
gl_PointSize = st * 10.0;
vertColor = color;
}
Fragment Shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor;
void main() {
//outputColor
gl_FragColor = vec4(vertColor.xyzw);
}
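For what it is worth, this is how I checked that the uniform really is gone from the compiled program (my assumption is that a location of -1 means it was removed or never existed). I put it inside showShader(), right after sh.bind():
// Quick diagnostic: ask OpenGL for the uniform location directly.
// -1 should mean the uniform does not exist in the compiled program.
int txtrLoc = pgl.getUniformLocation(sh.glProgram, "txtr");
println("txtr uniform location: " + txtrLoc);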