9 Replies Latest reply on Jun 23, 2011 9:52 PM by Meteorhead

    CL-GL interop segfault at init

    Meteorhead
      urgent!

      Hi all!

I have a problem which is very urgent, so a little help would be appreciated. I am writing a rather simple CL-GL interop application, but the program crashes at init. Let me copy-paste the relevant lines of the code. The point is that I do not see "GL context acquired" in the console, even if I comment out the rest of the setupCL_GL() function. It breaks at creating cpsGL[] but I do not know why.

      Help would be very much appreciated!

      ... // OpenCL and OpenGL includes #include <GL/glew.h> #include <GL/glxew.h> #include <GL/glut.h> #include <CL/cl.h> #include <CL/cl_gl.h> // OpenCL-OpenGL variables GLint GL_err; cl_int numDevices, my_device, CL_err; cl_uint numPlatforms; cl_platform_id platform = NULL; cl_device_id* devices; cl_context_properties* cprops; cl_context context; GLXContext glCtx; ... void setupCL_GL() { // Begin platform layer std::cout << "Accessing platform layer\n"; CL_err = clGetPlatformIDs(0, NULL, &numPlatforms); checkErr(CL_err, "clGetPlatformIDs(numPlatforms)"); if (0 < numPlatforms) { cl_platform_id* platforms = new cl_platform_id[numPlatforms]; CL_err = clGetPlatformIDs(numPlatforms, platforms, NULL); checkErr(CL_err, "clGetPlatformIDs(platforms)"); for (unsigned i = 0; i < numPlatforms; ++i) { char pbuf[100]; CL_err = clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, sizeof(pbuf), pbuf, NULL); checkErr(CL_err, "clGetPlatformInfo()"); platform = platforms[i]; if (!strcmp(pbuf, "Advanced Micro Devices, Inc.")) {break;} } delete[] platforms; } // If we could find our platform, use it. Otherwise pass a NULL and get whatever the // implementation thinks we should be using. std::cout << "Using platform: "; char vendor[64]; clGetPlatformInfo(platform, CL_PLATFORM_VENDOR, sizeof(vendor), vendor, NULL); printf(vendor); std::cout << "\n"; glCtx = glXGetCurrentContext(); cl_context_properties cpsGL[] = {CL_CONTEXT_PLATFORM, (cl_context_properties)platform, CL_GLX_DISPLAY_KHR, (intptr_t) glXGetCurrentDisplay(), CL_GL_CONTEXT_KHR, (intptr_t) glCtx, 0}; std::cout << "GL context acquired\n"; ... } int main() { setupCL_GL(); ... }

        • CL-GL interop segfault at init
          laobrasuca

          since you are simply creating an array of fixed size, the only thing I can see that could make it fail is the call to glXGetCurrentDisplay(). Now, why it is failing, I can't tell you from these few lines of code.

            • CL-GL interop segfault at init
              Meteorhead

              I suspect something that I will check "today". I downloaded libglewmx-1.5 and freeglut3, but they seem not to work with interop.

              glXGetCurrentDisplay() seems to be undefined in the dev packages of these two, so I had to use glxew.h from the SDK, but that only contains defines for glew 1.4, and not for 1.5. The SDK has its own libs, but how can I tell my application to use those instead of the ones installed into /usr/lib? (Ubuntu 10.04, Catalyst 11.5, SDK 2.4)

                • CL-GL interop segfault at init
                  Meteorhead

                  Someone tell me: where on Earth does the SimpleGL example in the SDK get its glXGetCurrentDisplay function? It simply should not be able to compile with the given set of includes found in the example.

                  The problem was not a version mismatch: I tried with glx.h instead of glxew.h, and instead of segfaulting it went on and threw an error while creating the context. Output is:

                  Accessing platform layer
                  Using platform: Advanced Micro Devices, Inc.
                  Context found
                  GL context acquired
                  Querying number of GPU devices
                  Number of GPU devices queried
                  Devices queried
                  Creating context
                  ERROR: clCreateContext() (-1000)

                  Current code is attached:

                   

                  // OpenCL és OpenGL include #include <CL/cl.h> #include <GL/glew.h> #include <CL/cl_gl.h> #include <GL/glx.h> #include <GL/glut.h> // Sztenderd C++ include #include <iostream> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <memory.h> #include <assert.h> #include <string.h> #include <malloc.h> // OpenCL-OpenGL variables GLint GL_err; cl_int numDevices, my_device, CL_err; cl_uint numPlatforms; cl_platform_id platform = NULL; cl_device_id* devices; cl_context_properties* cprops; cl_context context; GLXContext glCtx; void setupCL_GL() { // Begin platform layer std::cout << "Accessing platform layer\n"; CL_err = clGetPlatformIDs(0, NULL, &numPlatforms); checkErr(CL_err, "clGetPlatformIDs(numPlatforms)"); if (0 < numPlatforms) { cl_platform_id* platforms = new cl_platform_id[numPlatforms]; CL_err = clGetPlatformIDs(numPlatforms, platforms, NULL); checkErr(CL_err, "clGetPlatformIDs(platforms)"); for (unsigned i = 0; i < numPlatforms; ++i) { char pbuf[100]; CL_err = clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, sizeof(pbuf), pbuf, NULL); checkErr(CL_err, "clGetPlatformInfo()"); platform = platforms[i]; if (!strcmp(pbuf, "Advanced Micro Devices, Inc.")) {break;} } delete[] platforms; } // If we could find our platform, use it. Otherwise pass a NULL and get whatever the // implementation thinks we should be using. std::cout << "Using platform: "; char vendor[64]; clGetPlatformInfo(platform, CL_PLATFORM_VENDOR, sizeof(vendor), vendor, NULL); printf(vendor); std::cout << "\n"; glCtx = glXGetCurrentContext(); std::cout << "Context found\n"; cl_context_properties cpsGL[] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform, CL_GLX_DISPLAY_KHR, (intptr_t)glXGetCurrentDisplay(), CL_GL_CONTEXT_KHR, (intptr_t) glCtx, 0 }; std::cout << "GL context acquired\n"; // Use NULL for backward compatibility cl_context_properties* cprops = NULL; cprops = (NULL == platform) ? 
NULL : cpsGL; std::cout << "Querying number of GPU devices\n"; CL_err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, 0, (cl_uint*)&numDevices); checkErr(CL_err, "clGetDeviceIDs"); std::cout << "Number of GPU devices queried\n"; devices = (cl_device_id*)malloc(numDevices * sizeof(cl_device_id)); CL_err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices, devices, 0); checkErr(CL_err, "clGetDeviceIDs"); std::cout << "Devices queried\n"; my_device = (0) % numDevices; // trying to avoid using default adapter. std::cout << "Creating context\n"; context = clCreateContext(cprops, 1, &devices[my_device], NULL, NULL, &CL_err); checkErr(CL_err, "clCreateContext()"); std::cout << "Creating command queue\n"; commandQueue = clCreateCommandQueue(context, devices[my_device], 0, &CL_err); checkErr(CL_err, "clCreateCommandQueue(commandQueue)"); } int main() { setupCL_GL(); ... }

                    • CL-GL interop segfault at init
                      nou

                      all glX functions are declared in <GL/glx.h>

                      check what value returns glXGetCurrentDisplay() and glXGetCurrentContext()

                      do you have active OpenGL context before creating a OpenCL one?

                        • CL-GL interop segfault at init [PARTIAL SOLVE]
                          Meteorhead

                          Thanks nou, the problem was that I was missing the initializeGL() part of the SDK sample. :) Alright... I'm rather new to OpenGL.

                          Anyhow, I was looking through the web about a problem I encountered after, namely that after linking the program and calling

                          glGetProgramiv(glProgram, GL_LINK_STATUS, &GL_err);

                          it returns it being failed to link. I found a thread related to this:

                          http://www.gamedev.net/topic/372356-very-peculiar-glsl-bug/

                          How come OpenGL links a working program, but if queried for status, it kept telling me that:

                          Building program... Failed to link program: Vertex shader(s) failed to link, fragment shader(s) failed to link.
                          ERROR: error(#280) Not all shaders have valid object code
                          ERROR: error(#280) Not all shaders have valid object code

                          I do not quite understand the concept of OpenGL states and how this strange behaviour arises. The complete OpenGL state differs from SUCCESS, thus checking the program will print out errors, but apart from that the shaders seem operational.

                            • CL-GL interop segfault at init
                              nou

                              well do you check build log of shader compiling? most likely it cant compile so it fail at link stage. check glGetShaderInfoLog()

                              and maybe post vertex, pixel shader code.

                                • CL-GL interop segfault at init
                                  Meteorhead

                                  It compiles alright and the app now runs (although vertexes are most likely in the wrong place, as I only see one mesh point). So the program must link if it places vertexes and paints at least one of them red.

                                  Shaders are from the SDK example, but I do not load a texture from file, instead I shade every vertex through a texture based on y coord, but upon drawing I project into x-z plane.

                                  Here is the relevant part of host code:

                                  void loadShaders() { std::cout << "Loading shader code\n"; std::ifstream vertex_shader_file("vizsga_Shaders_vertex.gl"); checkErr(vertex_shader_file.is_open() ? CL_SUCCESS : -1, "ifstream(vertex) cannot access file"); std::ifstream pixel_shader_file("vizsga_Shaders_pixel.gl"); checkErr(vertex_shader_file.is_open() ? CL_SUCCESS : -1, "ifstream(pixel) cannot access file"); std::string vertex_string( std::istreambuf_iterator<char>(vertex_shader_file), (std::istreambuf_iterator<char>())); std::string pixel_string( std::istreambuf_iterator<char>(vertex_shader_file), (std::istreambuf_iterator<char>())); const char* vertexShaderSrc = vertex_string.c_str(); const char* pixelShaderSrc = pixel_string.c_str(); vertexShaderObj = glCreateShader(GL_VERTEX_SHADER); pixelShaderObj = glCreateShader(GL_FRAGMENT_SHADER); checkErr(!vertexShaderObj, "Failed to create vertex shader handle"); checkErr(!pixelShaderObj, "Failed to create pixel shader handle"); glShaderSource(vertexShaderObj, 1, &vertexShaderSrc, NULL); glShaderSource(pixelShaderObj, 1, &pixelShaderSrc, NULL); std::cout << "Building program... 
"; glCompileShader(vertexShaderObj); glGetShaderiv(vertexShaderObj, GL_COMPILE_STATUS, &GL_err); if(!GL_err) { char temp[256]; glGetShaderInfoLog(vertexShaderObj, 256, 0, temp); std::cout << "Failed to compile shader: " << temp << std::endl; exit(EXIT_FAILURE); } glCompileShader(pixelShaderObj); glGetShaderiv(pixelShaderObj, GL_COMPILE_STATUS, &GL_err); if(!GL_err) { char temp[256]; glGetShaderInfoLog(pixelShaderObj, 256, 0, temp); std::cout << "Failed to compile shader: " << temp << std::endl; exit(EXIT_FAILURE); } glProgram = glCreateProgram(); glAttachShader(glProgram, vertexShaderObj); glAttachShader(glProgram, pixelShaderObj); glLinkProgram(glProgram); // check if program linked (disabled for now) /*GL_err = 0; glGetProgramiv(glProgram, GL_LINK_STATUS, &GL_err); if(!GL_err) { char temp[256]; glGetProgramInfoLog(glProgram, 256, 0, temp); std::cout << "Failed to link program: " << temp << std::endl; glDeleteProgram(glProgram); exit(EXIT_FAILURE); }*/ std::cout << "done.\n"; } VERTEX SHADER void main() { gl_TexCoord[0].st = vec2(gl_Vertex.xy); gl_Vertex.y = 0.0; //gl_TexCoord[0].st = vec2(gl_Position.x, 1.0); gl_Position = ftransform(); } PIXEL SHADER void main() { vec3 color = vec3(gl_TexCoord[0].t * 1, gl_TexCoord[0].t * (-1), 0.0); //vec3 color = vec3(texture2D(tex,gl_TexCoord[0].st)); gl_FragColor = vec4(color, 1.0); }