Hello everyone,
Does anyone know where I can find information on applying shaders to video in Processing? I've successfully ported many Shadertoy examples before, except for those that involve video or webcam inputs. Nothing in the console points to a shader error as such, but the effects don't behave the way they do on Shadertoy. Is there a more straightforward way to handle video textures in Processing or its GStreamer-based video library? Do I need to update the GStreamer version, or modify the GLSL code for this to work properly?
In this case, the Shadertoy effect runs visibly differently in Processing.
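For reference, this is the minimal pattern I'm following to feed a Movie into a PShader, stripped of the reaction-diffusion part (passthrough.glsl here is a hypothetical shader that just samples iChannel0):

import processing.video.*;

Movie video;
PShader passthrough;  // hypothetical shader that only samples iChannel0

void setup() {
  size(1280, 720, P2D);
  passthrough = loadShader("passthrough.glsl");
  video = new Movie(this, "Insects.mp4");
  video.loop();
}

void movieEvent(Movie m) {
  m.read();  // grab each decoded frame as it arrives
}

void draw() {
  passthrough.set("iResolution", float(width), float(height));
  passthrough.set("iChannel0", video);  // Movie extends PImage, so it can be bound as a texture
  shader(passthrough);
  rect(0, 0, width, height);
}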
This is my Processing console output:
"can't load library openh264 (openh264|libopenh264|libopenh264-0) with -Djna.library.path=/x/x/x/x/x/x/linux-amd64. Last error:java.lang.UnsatisfiedLinkError: Unable to load library 'openh264':
/x/x/x/x/x/x/linux-amd64/libopenh264.so: file too short
/x/x/x/x/x/x/linux-amd64/libopenh264.so: file too short
/x/x/x/x/x/x/linux-amd64/libopenh264.so.4: file too short
Native library (linux-x86-64/libopenh264.so) not found in resource path (/tmp/processing/ReactionDiffusionAT15576625539041171247temp:/opt/processing-4.3-linux-x64/processing-4.3/core/library/core.jar:/opt/processing-4.3-linux-x64/processing-4.3/core/library/jogl-all.jar:/opt/processing-4.3-linux-x64/processing-4.3/core/library/gluegen-rt.jar:/home/andre/sketchbook/libraries/video/library/video.jar:/x/x/x/x/x/x/jna.jar:/x/x/x/x/x/x/gst1-java-core-1.4.0.jar)
(Processing core video:12748): GStreamer-WARNING **: 18:13:37.060: Failed to load plugin '/home/x/x/x/x/x/linux-amd64/gstreamer-1.0/libgstopenh264.so': libopenh264.so.4: cannot open shared object file: No such file or directory
Processing video library using bundled GStreamer 1.16.3"
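Those openh264 warnings make me wonder whether the frames are decoding at all. A bare decode test with no shaders should show it, something like:

import processing.video.*;

Movie video;

void setup() {
  size(1280, 720, P2D);
  video = new Movie(this, "Insects.mp4");
  video.loop();
}

void draw() {
  if (video.available()) {
    video.read();
  }
  image(video, 0, 0, width, height);
  // If the H.264 plugin is really broken, I'd expect this to stay 0x0 or the frame to stay black.
  println(video.width + "x" + video.height);
}

If the video never decodes, that might explain the shader output differing from Shadertoy even without any console errors from the shader itself.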
Shadertoy reference: Shader - Shadertoy BETA
Thanks in advance!
My code:
import processing.video.*;

PShader reactionDiffusionShader;  // simulation pass (ping-pong)
Movie video;
PShader renderShader;             // final color-mapping pass
PGraphics buffer1, buffer2;       // ping-pong buffers for the simulation state
void setup() {
  size(1280, 720, P2D);
  buffer1 = createGraphics(width, height, P2D);
  buffer2 = createGraphics(width, height, P2D);
  reactionDiffusionShader = loadShader("reactionDiffusion.glsl");
  renderShader = loadShader("render.glsl");
  video = new Movie(this, "Insects.mp4");
  video.loop(); // Start playing the video and loop it

  // Initialize buffer1 with random grayscale noise.
  // Note: color() expects 0-255 in the default color mode, so the random value
  // must be scaled up; color(r, r, r, 1) with r in 0..1 is nearly black and
  // almost fully transparent, which breaks the noise seed.
  buffer1.beginDraw();
  buffer1.loadPixels();
  for (int i = 0; i < buffer1.pixels.length; i++) {
    float r = random(255);
    buffer1.pixels[i] = color(r);
  }
  buffer1.updatePixels();
  buffer1.endDraw();

  buffer2.beginDraw();
  buffer2.background(0);
  buffer2.endDraw();
}
void draw() {
  if (video.available()) {
    video.read();
  }

  // Reaction-diffusion pass
  reactionDiffusionShader.set("iResolution", float(width), float(height));
  reactionDiffusionShader.set("iTime", millis() / 1000.0);
  reactionDiffusionShader.set("iFrame", frameCount);
  reactionDiffusionShader.set("iChannel0", buffer1);
  reactionDiffusionShader.set("iChannel1", video);
  buffer2.beginDraw();
  buffer2.shader(reactionDiffusionShader);
  buffer2.rect(0, 0, width, height);
  buffer2.endDraw();

  // Swap buffers
  PGraphics temp = buffer1;
  buffer1 = buffer2;
  buffer2 = temp;

  // Render pass
  renderShader.set("iResolution", float(width), float(height));
  renderShader.set("iTime", millis() / 1000.0);
  renderShader.set("iChannel0", buffer1);
  shader(renderShader);
  rect(0, 0, width, height);
}
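One workaround I'm considering (not sure it's necessary) is to copy each decoded frame into an intermediate PGraphics and bind that to the shader instead of the Movie object directly. videoBuffer is my own name for it, created once in setup() with createGraphics(width, height, P2D):

// in draw(), after video.read():
videoBuffer.beginDraw();
videoBuffer.image(video, 0, 0, videoBuffer.width, videoBuffer.height);
videoBuffer.endDraw();
reactionDiffusionShader.set("iChannel1", videoBuffer);

Drawing through image() would also let me resize or flip the frame on the Processing side instead of flipping the y-coordinate inside the GLSL.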
// reactionDiffusion.glsl:
#ifdef GL_ES
precision mediump float;
#endif

uniform vec2 iResolution;
uniform float iTime;
uniform int iFrame;
uniform sampler2D iChannel0;
uniform sampler2D iChannel1;

// Cheap hash: pseudo-random vec3 in [0, 1) from a 2D coordinate.
vec3 hash33(in vec2 p){
  float n = sin(dot(p, vec2(41, 289)));
  return fract(vec3(2097152, 262144, 32768)*n);
}

vec4 tx(in vec2 p){ return texture2D(iChannel0, p); }

// 3x3 Gaussian blur of the .x channel.
float blur(in vec2 p){
  vec3 e = vec3(1, 0, -1);
  vec2 px = 1./iResolution.xy;
  float res = 0.0;
  res += tx(p + e.xx*px).x + tx(p + e.xz*px).x + tx(p + e.zx*px).x + tx(p + e.zz*px).x;
  res += (tx(p + e.xy*px).x + tx(p + e.yx*px).x + tx(p + e.yz*px).x + tx(p + e.zy*px).x)*2.;
  res += tx(p + e.yy*px).x*4.;
  return res/16.;
}

void main() {
  vec2 uv = gl_FragCoord.xy/iResolution.xy;
  vec2 pw = 1./iResolution.xy;
  float avgReactDiff = blur(uv);
  vec3 noise = hash33(uv + vec2(53, 43)*iTime)*.6 + .2;
  vec3 e = vec3(1, 0, -1);
  vec2 pwr = pw*1.5;
  // Central-difference gradient of the .y channel, used to advect the sample position.
  vec2 lap = vec2(tx(uv + e.xy*pwr).y - tx(uv - e.xy*pwr).y,
                  tx(uv + e.yx*pwr).y - tx(uv - e.yx*pwr).y);
  uv = uv + lap*pw*3.0;
  float newReactDiff = tx(uv).x + (noise.z - 0.5)*0.0025 - 0.002;
  newReactDiff += dot(tx(uv + (noise.xy - 0.5)*pw).xy, vec2(1, -1))*0.145;
  float ifr = min(0.97, float(iFrame) / 100.0);
  // Flip the y-coordinate when sampling from iChannel1 (video texture)
  vec2 flippedUV = vec2(uv.x, 1.0 - uv.y);
  gl_FragColor.xy = mix(texture2D(iChannel1, flippedUV).xz,
                        clamp(vec2(newReactDiff, avgReactDiff/.98), 0., 1.), ifr);
  gl_FragColor.zw = vec2(0, 1);
}
// render.glsl:
#ifdef GL_ES
precision mediump float;
#endif

uniform vec2 iResolution;
uniform float iTime;
uniform sampler2D iChannel0;

void main() {
  vec2 uv = gl_FragCoord.xy/iResolution.xy;
  float c = 1. - texture2D(iChannel0, uv).y;
  float c2 = 1. - texture2D(iChannel0, uv + .5/iResolution.xy).y;
  // Large-scale cosine pattern used to mix between two palettes.
  float pattern = -cos(uv.x*0.75*3.14159 - 0.9)*cos(uv.y*1.5*3.14159 - 0.75)*0.5 + 0.5;
  vec3 col = vec3(c*1.5, pow(c, 2.25), pow(c, 6.));
  col = mix(col, col.zyx, clamp(pattern - .2, 0., 1.));
  col += vec3(.6, .85, 1.)*max(c2*c2 - c*c, 0.)*12.;
  // Vignette, then fade in over the first couple of seconds.
  col *= pow(16.0*uv.x*uv.y*(1.0 - uv.x)*(1.0 - uv.y), .125)*1.15;
  col *= smoothstep(0., 1., iTime/2.);
  gl_FragColor = vec4(min(col, vec3(1.0)), 1.0);
}