Making the dive into the wonderfully confusing world of OpenGL.
Can anyone point to an example of using multiple textures within an GLSL based Filter?
I am trying to create a 1D Lookup Table, and it is proving to be a big pain. There doesn't appear to be any way to debug what is going on.
From within my FxPlug Filter's renderOutput method..
// Render on the hardware
double left, right, top, bottom;
double tLeft, tRight, tTop, tBottom;
FxTexture *inTex = (FxTexture *)inputImage;
FxTexture *outTex = (FxTexture *)outputImage;
//glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE );
[inTex getTextureCoords:&tLeft
right:&tRight
bottom:&tBottom
top:&tTop];
[outTex getTextureCoords:&left
right:&right
bottom:&bottom
top:&top];
// Get Linear Texture
glGenTextures(1, &_lutID[0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_1D, _lutID[0]);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA, 65536, 0, GL_RGBA, GL_FLOAT, [[colorObj preLUTObj] glLUTFData]);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Tell GL which program we want to use
glUseProgram(_programID_v1);
// Get the location of the parameters in the shader and
// set them to the appropriate values
GLint srcTexLoc;
GLint linLutTexLoc;
// The actual image as a texture.
srcTexLoc = glGetUniformLocation(_programID_v1,"ImageTexture");
glUniform1i(srcTexLoc,0);
// Set Linearization LUT
linLutTexLoc = glGetUniformLocation(_programID_v1,"LinearLUT");
glUniform1f(linLutTexLoc,1);
// Bind Image Texture
glActiveTexture(GL_TEXTURE0);
[inTex bind];
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_1D, _lutID[0]);
// Draw a textured quad with our shader program applied
glBegin(GL_QUADS);
{
glTexCoord2f( (GLfloat)tLeft, (GLfloat)tBottom );
glVertex2f( (GLfloat)left, (GLfloat)bottom );
glTexCoord2f( (GLfloat)tRight, (GLfloat)tBottom );
glVertex2f( (GLfloat)right, (GLfloat)bottom );
glTexCoord2f( (GLfloat)tRight, (GLfloat)tTop );
glVertex2f( (GLfloat)right, (GLfloat)top );
glTexCoord2f( (GLfloat)tLeft, (GLfloat)tTop );
glVertex2f( (GLfloat)left, (GLfloat)top );
}
glEnd();
glUseProgram(0);
[inTex disable];
glBindTexture ([inTex target], 0);
retval = YES;
Here is my vertex and fragment shader..
uniform sampler2DRect ImageTexture;
uniform sampler1D LinearLUT;
void main()
{
vec4 inputColor = texture2DRect(ImageTexture, gl_TexCoord[0].st);
// Linearize Image Data with LUT
inputColor.r = texture1D(LinearLUT, inputColor.r).r;
inputColor.g = texture1D(LinearLUT, inputColor.g).g;
inputColor.b = texture1D(LinearLUT, inputColor.b).b;
gl_FragColor = inputColor;
}
I can't see what I am doing wrong here.
The LUT has been converted from 16-bit RGB values, to normalized 32-bit floats before being placed into the _lutID[0] texture. I am assuming that 32-bit floats are what is being used by the shaders. (No way to verify this.. as i can't step through the GLSL code)
Currently, I get a black frame.
bob.