Color Grading using LUT and PShader

Hi everyone,

I am trying to implement a color-grading shader using a LUT, as described here by Matt DesLauriers and here by Lev Zelensky, but I get a strange result when I apply the shader with a standard LUT:

On the left is the result I get when applying the LUT shader; on the right is the desired result.

Here is my implementation in Processing:

PImage source;
PShader PP_LUT;
PGraphics buffer;
PGraphics lut;
PImage lutsrc;

void setup() {
  size(512, 512, P2D);
  source = loadImage("test.png");
  lutsrc = loadImage("_LUT/lookup.png");
  
  // draw the LUT into an offscreen buffer so its texture sampling can be changed
  lut = createGraphics(lutsrc.width, lutsrc.height, P2D);
  ((PGraphicsOpenGL)lut).textureSampling(2); // 2 = POINT (nearest-neighbour)

  lut.beginDraw();
  lut.image(lutsrc, 0, 0);
  lut.endDraw();

  buffer = createGraphics(source.width, source.height, P3D);

  PP_LUT = loadShader("PP_LUT.glsl");
  PP_LUT.set("resolution", (float) buffer.width, (float) buffer.height);  
  PP_LUT.set("lut", lut);
}

void draw() {
  buffer.beginDraw();
  buffer.background(0);
  buffer.shader(PP_LUT);
  buffer.image(source, 0, 0);
  buffer.endDraw();

  image(buffer, 0, 0, width, height);
  image(lut, 0, 0, width * 0.25, height * 0.25);
}
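
For context, the lookup texture follows the layout from mattdesl's glsl-lut: a 512×512 image arranged as an 8×8 grid of 64×64 tiles, so the blue channel selects one of 64 slices and red/green address a texel inside the selected tile. A small CPU-side sanity check of that mapping could look like this (a hypothetical helper, not from the linked articles; it assumes an identity lookup.png, so the fetched texel should roughly match the input color):

// hypothetical helper: for an identity LUT, the texel addressed by
// (r, g, b) should contain approximately that same color
void checkIdentityLUT(PImage lutImg, float r, float g, float b) {
  float b63 = b * 63.0;              // blue selects one of 64 slices
  int tile = round(b63);             // nearest slice, ignoring interpolation
  int tileX = tile % 8;              // column of the 64x64 tile
  int tileY = tile / 8;              // row of the 64x64 tile
  int x = tileX * 64 + round(r * 63.0);
  int y = tileY * 64 + round(g * 63.0);
  color c = lutImg.get(x, y);
  println(r, g, b, "->", red(c) / 255.0, green(c) / 255.0, blue(c) / 255.0);
}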

And here is the shader part:

#version 150
#ifdef GL_ES
precision mediump float;
#endif

uniform sampler2D texture;
uniform sampler2D lut;

in vec4 vertTexCoord;
out vec4 fragColor;

//https://github.com/mattdesl/glsl-lut
vec4 lookup(vec4 color_, sampler2D lut_){
	color_ = clamp(color_, vec4(0), vec4(1));
	//scale blue into the 64 slices of the LUT
	mediump float blue = color_.b * 63.0;

	//tile containing the floor(blue) slice
	mediump vec2 quaduv1;
	quaduv1.y = floor(floor(blue) / 8.0); //row of the tile (8 tiles per row)
	quaduv1.x = floor(blue) - (quaduv1.y * 8.0); //column of the tile

	//tile containing the ceil(blue) slice
	mediump vec2 quaduv2;
	quaduv2.y = floor(ceil(blue) / 8.0); //row of the tile (8 tiles per row)
	quaduv2.x = ceil(blue) - (quaduv2.y * 8.0); //column of the tile

	//uv inside tile 1: tile offset + half-texel inset + r/g scaled into the tile
	highp vec2 coloruv1;
	coloruv1.x = (quaduv1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
	coloruv1.y = (quaduv1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
	
	//uv inside tile 2, same mapping
	highp vec2 coloruv2;
	coloruv2.x = (quaduv2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
	coloruv2.y = (quaduv2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);

	//Processing needs the y uv flipped
	//coloruv1.y = 1.0 - coloruv1.y;
	//coloruv2.y = 1.0 - coloruv2.y;

	//define new color 1 & 2
	lowp vec4 ncolor1 = texture2D(lut_, coloruv1);
	lowp vec4 ncolor2 = texture2D(lut_, coloruv2);

	//interpolate between the two blue slices, keeping the original alpha
	lowp vec4 lookedcolor = mix(ncolor1, ncolor2, fract(blue));
	return vec4(lookedcolor.rgb, color_.w); 
}

void main()
{
	vec2 uv = vertTexCoord.xy;
	vec4 color = texture2D(texture, uv);
	vec4 lutColor = lookup(color, lut);
	
	fragColor = lutColor;
}
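
As a side note, since the shader targets #version 150, the LUT fetches could also use texelFetch, which addresses texels by integer coordinates and bypasses the sampler's filtering state altogether, so the nearest-vs-linear question disappears. A minimal sketch of the lookup written that way (my own simplification, not from the linked articles: it only interpolates across the two blue slices, not in red/green):

vec4 lookupTexel(vec4 c, sampler2D lut_) {
	c = clamp(c, vec4(0.0), vec4(1.0));
	float blue = c.b * 63.0;
	int t0 = int(floor(blue));
	int t1 = int(ceil(blue));
	//top-left texel of each 64x64 tile, 8 tiles per row
	ivec2 tile0 = ivec2(t0 % 8, t0 / 8) * 64;
	ivec2 tile1 = ivec2(t1 % 8, t1 / 8) * 64;
	//red/green rounded to the nearest texel inside the tile
	ivec2 rg = ivec2(round(c.rg * 63.0));
	vec4 c0 = texelFetch(lut_, tile0 + rg, 0);
	vec4 c1 = texelFetch(lut_, tile1 + rg, 0);
	return vec4(mix(c0, c1, fract(blue)).rgb, c.a);
}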

As I understand it, the problem seems to be related to texture filtering, so I tried writing my LUT into an offscreen buffer and setting its texture sampling mode to nearest, as described on the Processing wiki.
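
For reference, that part boils down to the following (the same code as in setup() above, but with the named constant instead of the literal 2; I am assuming processing.opengl.Texture still defines POINT as that sampling mode):

import processing.opengl.*;

lut = createGraphics(lutsrc.width, lutsrc.height, P2D);
//POINT sampling = nearest-neighbour, so texels from adjacent LUT tiles are never blended
((PGraphicsOpenGL) lut).textureSampling(Texture.POINT);
lut.beginDraw();
lut.image(lutsrc, 0, 0);
lut.endDraw();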

I don’t know what I am missing here. Does anyone have an idea?

Thanks