Hello everyone, my programme crashes when I run it. The error message is given in the title. The strange thing is that I never use the memset function in the kernel, yet the error points at clGetProgramBuildInfo. And even when I delete the memset calls in the host programme, the same error still occurs.
I use AMD APP SDK 3.0, Visual Studio 2017, Windows 10 x64, and an AMD FirePro W7100.
This is my host programme source code.
#include "stdafx.h"
#include<stdio.h>
#include <string.h>
#include <stdlib.h>
#include<CL/cl.h>
#include<setjmp.h>
#include<windows.h>
#define MATRIX_ROW 5888
#define MATRIX_COLUM 64
#define LINE_NUM 65
#define FILTER_ORDER 512
#define PRO_FILE "SA_DMAS.cl"
#define KERNEL_NAME "SA"
#define KERNEL_FILTER "FT"
#pragma warning( disable : 4996 )
#pragma comment (lib,"OpenCL.lib")
double dataTime = 0;
double computeTime1 = 0;
double computeTime2 = 0;
/*
 * Create an OpenCL context on the first GPU of the first available platform.
 * On success *device receives the chosen device id and the new context is
 * returned.  Any failure prints a message and terminates the process.
 */
cl_context createContext(cl_device_id *device) {
	cl_platform_id *platforms;
	cl_context context;
	cl_uint num_platforms;
	cl_int error;
	/* BUG FIX: num_entries must be 0 when the platforms pointer is NULL;
	 * passing 1 here is CL_INVALID_VALUE per the OpenCL spec. */
	error = clGetPlatformIDs(0, NULL, &num_platforms);
	if (error != CL_SUCCESS || num_platforms == 0) {
		perror("Create platform failed!\n");
		exit(EXIT_FAILURE);
	}
	platforms = (cl_platform_id*)malloc(sizeof(cl_platform_id)*num_platforms);
	if (platforms == NULL) {                 /* was unchecked */
		perror("Create platform failed!\n");
		exit(EXIT_FAILURE);
	}
	error = clGetPlatformIDs(num_platforms, platforms, NULL);
	if (error != CL_SUCCESS) {               /* was unchecked */
		perror("Create platform failed!\n");
		exit(EXIT_FAILURE);
	}
	error = clGetDeviceIDs(platforms[0], CL_DEVICE_TYPE_GPU, 1,
		device, NULL);
	if (error != CL_SUCCESS) {
		perror("Create device failed!\n");
		exit(EXIT_FAILURE);
	}
	context = clCreateContext(NULL, 1, device, NULL, NULL,
		&error);
	if (error != CL_SUCCESS) {
		perror("Create context failed!\n");
		exit(EXIT_FAILURE);
	}
	free(platforms);
	return context;
}
/*
 * Create a profiling-enabled command queue on the given context/device.
 * Terminates the process if the queue cannot be created.
 */
cl_command_queue createQueue(cl_context context,
	cl_device_id device) {
	cl_int status = 0;
	cl_command_queue cmd_queue =
		clCreateCommandQueue(context, device,
			CL_QUEUE_PROFILING_ENABLE, &status);
	if (status != CL_SUCCESS) {
		perror("Create queue failed!\n");
		exit(EXIT_FAILURE);
	}
	return cmd_queue;
}
/*
 * Read the kernel source file PRO_FILE, create a program object from it and
 * build it for the given device (with -Dsize=32).  On a build failure the
 * build log is printed before the process terminates.
 */
cl_program createProgram(cl_context context,
	cl_device_id device) {
	cl_program program;
	FILE *program_handle;
	long file_len;
	size_t program_size;
	size_t nread;
	char *program_buffer;
	char *program_log;
	size_t log_size;
	cl_int error = 0;   /* cl_int to match the OpenCL API (was int) */
	program_handle = fopen(PRO_FILE, "rb");
	if (program_handle == NULL) {
		perror("Open kernel file failed!\n");
		exit(EXIT_FAILURE);
	}
	fseek(program_handle, 0, SEEK_END);
	file_len = ftell(program_handle);
	if (file_len < 0) {                  /* BUG FIX: ftell can fail */
		perror("Read kernel failed!\n");
		exit(EXIT_FAILURE);
	}
	program_size = (size_t)file_len;
	rewind(program_handle);
	program_buffer = (char*)malloc(program_size + 1);
	if (program_buffer == NULL) {        /* BUG FIX: malloc was unchecked */
		perror("Read kernel failed!\n");
		exit(EXIT_FAILURE);
	}
	nread = fread(program_buffer, sizeof(char), program_size,
		program_handle);
	fclose(program_handle);
	/* BUG FIX: a short read left garbage at the tail; require the full file. */
	if (nread != program_size) {
		perror("Read kernel failed!\n");
		exit(EXIT_FAILURE);
	}
	program_buffer[program_size] = '\0';
	program = clCreateProgramWithSource(context, 1,
		(const char **)&program_buffer, &program_size, &error);
	/* Safe to free here: clCreateProgramWithSource copies the source. */
	free(program_buffer);
	if (error != CL_SUCCESS) {
		perror("Create program failed!\n");
		exit(EXIT_FAILURE);
	}
	error = clBuildProgram(program, 1, &device, "-Dsize=32",
		NULL, NULL);
	if (error != CL_SUCCESS) {
		clGetProgramBuildInfo(program, device,
			CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
		program_log = (char*)malloc(log_size + 1);
		if (program_log != NULL) {       /* BUG FIX: malloc was unchecked */
			program_log[log_size] = '\0';
			clGetProgramBuildInfo(program, device,
				CL_PROGRAM_BUILD_LOG, log_size, program_log,
				NULL);
			printf("%s\n", program_log);
			free(program_log);
		}
		getchar();
		exit(EXIT_FAILURE);
	}
	return program;
}
/*
 * Load an entire binary file of floats.  Allocates a buffer sized to the
 * file and stores it in *data_buffer (ownership passes to the caller).
 * NOTE(review): any previous value of *data_buffer is overwritten, not
 * freed — callers must not pass an already-allocated buffer.
 * Returns 0 on success, -1 on any failure (no allocation is leaked).
 */
int getData(const char *fileName, float **data_buffer) {
	FILE *data_file;
	long file_len;
	size_t data_size;
	size_t nread;
	data_file = fopen(fileName, "rb");
	if (!data_file) {
		printf("Open data file failed!\n");
		return -1;
	}
	fseek(data_file, 0, SEEK_END);
	file_len = ftell(data_file);
	if (file_len < 0) {                  /* BUG FIX: ftell can fail */
		printf("Read data file failed!\n");
		fclose(data_file);
		return -1;
	}
	data_size = (size_t)file_len;
	rewind(data_file);
	*data_buffer = (float *)malloc(data_size);
	if (*data_buffer == NULL) {
		printf("malloc failed!\n");
		fclose(data_file);               /* BUG FIX: file leaked on this path */
		return -1;
	}
	/* The memset that used to be here was redundant: fread overwrites the
	 * whole buffer, and a short read is treated as a hard error below. */
	nread = fread(*data_buffer, sizeof(float),
		data_size / sizeof(float), data_file);
	fclose(data_file);
	if (nread != data_size / sizeof(float)) {
		printf("Read data file failed!\n");
		free(*data_buffer);              /* BUG FIX: buffer leaked on failure */
		*data_buffer = NULL;
		return -1;
	}
	return 0;
}
/*
 * Write MATRIX_ROW * LINE_NUM floats from `data` to a binary file.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): the name "siveResult" (presumably "saveResult") is kept
 * unchanged so existing callers keep working.
 */
int siveResult(const char *fileName, float *data) {
	FILE *writeFile;
	/* fwrite returns size_t, not cl_int — comparing a narrowed value could
	 * mask a short write on very large counts. */
	size_t written;
	writeFile = fopen(fileName, "wb");
	if (!writeFile) {
		printf("Open the file to write failed!\n");
		return -1;
	}
	written = fwrite(data, sizeof(float), (size_t)MATRIX_ROW * LINE_NUM, writeFile);
	if (written != (size_t)MATRIX_ROW * LINE_NUM) {
		printf("Write data failed!\n");
		fclose(writeFile);               /* BUG FIX: file leaked on this path */
		return -1;
	}
	/* fclose flushes buffered data; a failure here means data was lost. */
	if (fclose(writeFile) != 0) {
		printf("Write data failed!\n");
		return -1;
	}
	return 0;
}
int main()
{
DWORD dwStart0 = GetTickCount();
cl_device_id device = NULL;
cl_context context = NULL;
cl_command_queue queue = NULL;
cl_program program = NULL;
cl_kernel sa_kernel = NULL;
cl_kernel FT_kernel = NULL;
const char *fileName = "data.bin";
const char *filterName = "filterCoe.bin";
const char *outputName = "writeData.bin";
float *data_buffer = NULL;
size_t data_size = 0;
cl_mem memObjects[4];
size_t global_size = 0;
size_t work_dims = 0;
size_t global_work_size[2] = { 0 };
size_t local_work_size[2] = { 0 };
// cl_int lineSize = 0;
cl_int error = 0;
// cl_event evt1;
// cl_event evt2;
cl_event evt3;
cl_int status;
cl_ulong timeStart = 0;
cl_ulong timeEnd = 0;
// DWORD c_cut_star;
// DWORD c_cut_end;
double c_cut_time = 0;
context = createContext(&device);
if (!device) {
perror("Get device failed!\n");
exit(EXIT_FAILURE);
}
if (!context) {
perror("Create context failed!\n");
exit(EXIT_FAILURE);
}
queue = createQueue(context, device);
if (!queue) {
perror("Create queue failed!\n");
exit(EXIT_FAILURE);;
}
program = createProgram(context, device);
if (!program) {
perror("Create program failed!\n");
exit(EXIT_FAILURE);
}
sa_kernel = clCreateKernel(program, KERNEL_NAME, &error);
FT_kernel = clCreateKernel(program, KERNEL_FILTER, &error);
if (error != CL_SUCCESS) {
//printf("%d\n", error);
perror("Create SA kernel failed!\n");
exit(EXIT_FAILURE);
}
/* clGetDeviceInfo(device, CL_DEVICE_MAX_COMPUTE_UNITS,
sizeof(global_size), &global_size, NULL);
clGetDeviceInfo(device, CL_DEVICE_MAX_WORK_GROUP_SIZE,
sizeof(local_size), &local_size, NULL);
global_size *= local_size;*/
float *input = (float*)malloc(MATRIX_ROW*MATRIX_COLUM*LINE_NUM * sizeof(float));
if (input == NULL) {
perror("Cannot allocate memory for input in host.");
exit(EXIT_FAILURE);
}
memset(input, 0, sizeof(input));
float *output = (float*)malloc(MATRIX_ROW*LINE_NUM * sizeof(float));
if (output == NULL) {
perror("Cannot allocate memory for output in host.");
exit(EXIT_FAILURE);
}
memset(output, 0, sizeof(output));
float *filter_coe = (float*)malloc(FILTER_ORDER * sizeof(float));
if (filter_coe == NULL) {
perror("Cannot allocate memory for filter_coe in host.");
exit(EXIT_FAILURE);
}
memset(filter_coe, 0, sizeof(filter_coe));
DWORD dwStart1 = GetTickCount();
error = getData(fileName, &input);
error = getData(filterName, &filter_coe);
//error = getData("filterCoe.bin", &filter_coe);
if (error != 0) {
perror("Get data failed!\n");
exit(EXIT_FAILURE);
}
memObjects[0] = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
sizeof(float)*MATRIX_ROW*LINE_NUM*MATRIX_COLUM, input, &error);
memObjects[1] = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
sizeof(float)*MATRIX_ROW*LINE_NUM, NULL, &error);
memObjects[2] = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
sizeof(float)*FILTER_ORDER, filter_coe, &error);
// memObjects[3] = clCreateBuffer(context, CL_MEM_READ_WRITE,
// sizeof(float)*MATRIX_ROW, NULL, &error);
if (memObjects[0] == NULL || memObjects[1] == NULL
|| memObjects[2] == NULL) {
printf("%d\n", error);
perror("Error creating memory objects!\n");
exit(EXIT_FAILURE);
}
error = clSetKernelArg(sa_kernel, 0, sizeof(cl_mem),
&memObjects[0]);
error |= clSetKernelArg(sa_kernel, 1, sizeof(cl_mem),
&memObjects[1]);
if (error != CL_SUCCESS) {
perror("Error setting sa_kernel arguments!\n");
exit(EXIT_FAILURE);
}
//global_size = MATRIX_DIM * FILETOTAL;//4597*65
work_dims = 2;
global_work_size[0] = MATRIX_ROW;
global_work_size[1] = LINE_NUM;
local_work_size[0] = 256;
local_work_size[1] = 1;
error = clEnqueueNDRangeKernel(queue, sa_kernel, work_dims, 0,
global_work_size, NULL, 0, NULL, &evt3);
if (error != CL_SUCCESS) {
perror("Error queuing sa_kernel for execution!\n");
getchar();
exit(EXIT_FAILURE);
}
clWaitForEvents(1, &evt3);
clGetEventProfilingInfo(evt3,
CL_PROFILING_COMMAND_START, sizeof(timeStart),
&timeStart, NULL);
clGetEventProfilingInfo(evt3,
CL_PROFILING_COMMAND_END, sizeof(timeEnd),
&timeEnd, NULL);
computeTime1 += (timeEnd - timeStart);
if (computeTime1 > 3.14*1e+38) {
printf("溢出2\n");
}
clReleaseEvent(evt3);
error |= clSetKernelArg(FT_kernel, 0, sizeof(cl_mem),
&memObjects[1]);
error |= clSetKernelArg(FT_kernel, 1, sizeof(cl_mem),
&memObjects[2]);
if (error != CL_SUCCESS) {
perror("Error setting FT_kernel arguments!\n");
exit(EXIT_FAILURE);
}
size_t FT_work_dims = 1;
size_t FT_global_work_size = 65;
error = clEnqueueNDRangeKernel(queue, FT_kernel, FT_work_dims, 0,
&FT_global_work_size, NULL, 0, NULL, &evt3);
clWaitForEvents(1, &evt3);
error = clGetEventInfo(evt3, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(status), &status, NULL);
if (error != CL_SUCCESS || status != CL_COMPLETE) {
printf("Running sa_kernel failed!\n");
getchar();
return -1;
}
clGetEventProfilingInfo(evt3,
CL_PROFILING_COMMAND_START, sizeof(timeStart),
&timeStart, NULL);
clGetEventProfilingInfo(evt3,
CL_PROFILING_COMMAND_END, sizeof(timeEnd),
&timeEnd, NULL);
computeTime2 += (timeEnd - timeStart);
if (computeTime2 > 3.14*1e+38) {
printf("溢出2\n");
}
clReleaseEvent(evt3);
error = clEnqueueReadBuffer(queue, memObjects[1],
CL_TRUE, 0, MATRIX_ROW * LINE_NUM * sizeof(float),
output, 0, NULL, &evt3);
if (error != CL_SUCCESS) {
perror("Error reading result buffer!\n");
exit(EXIT_FAILURE);
}
clWaitForEvents(1, &evt3);
clGetEventProfilingInfo(evt3,
CL_PROFILING_COMMAND_START, sizeof(timeStart),
&timeStart, NULL);
clGetEventProfilingInfo(evt3,
CL_PROFILING_COMMAND_END, sizeof(timeEnd),
&timeEnd, NULL);
dataTime += (timeEnd - timeStart);
clReleaseEvent(evt3);
error = siveResult(outputName, output);
if (error == 0)
printf("File %s calculated successed!\n",
fileName);
else
printf("File %s calculated failed!\n", fileName);
free(data_buffer);
DWORD dwEnd = GetTickCount();
printf("GPU->数据传输时间: %f\n", dataTime / 1000 / 1000);
printf("GPU->计算时间: %f\n", (computeTime1 + computeTime2) / 1000 / 1000);
printf("CPU->总共时间: %d\n", dwEnd - dwStart0);
printf("CPU->运行时间: %d\n", dwEnd - dwStart1);
printf("CPU->C运行时间: %f\n", c_cut_time); //这条语句并没有起到作用
free(input);
free(output);
clReleaseProgram(program);
clReleaseContext(context);
clReleaseCommandQueue(queue);
clReleaseKernel(sa_kernel);
clReleaseMemObject(memObjects[0]);
clReleaseMemObject(memObjects[1]);
clReleaseMemObject(memObjects[2]);
getchar();
return 0;
}
Solved! Go to Solution.
After a quick check with CodeXL, here are my findings :
float temp[5888] = { 0 };
>>> Modify the code as shown below:
float temp[5888];
...
// initialize it later, like this:
for (i = 0; i < 5888; i++)
{
temp[i] = 0;
for (j = i, k = 0; j >= 0 && k < 512; k++, j--)
>>> Build the kernel without optimization i.e. pass build flag "-O0".
I'll check it further and report to the appropriate team.
[Note: (5888 x 4) bytes space is much bigger than the available register (VGPR) size. So, the kernel is using a lot of scratch registers that might slow down the performance.]
Please provide the kernel code that produces LLVM error. Also share the driver detail and clinfo output.
Btw, please try to build the kernel using CodeXL and let us know your observation.
P.S. I've whitelisted you.
Thanks for your advice.
OK. Here is the kernel.
#ifdef FP_64
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#endif
// Kernel SA: one work-item per (gidx, gidy) element of a 5888 x 65 output.
// NOTE(review): this listing is garbled — the forum software stripped the
// bracketed array subscripts, leaving bare "R", "M", "linshi" statements
// below.  The code as shown cannot compile; the subscripted expressions
// must be restored from the original source file.  Comments only are added
// here; the code text is reproduced exactly as posted.
__kernel void SA(__global float *input1, __global float *output1) {
int gidx = get_global_id(0);
int gidy = get_global_id(1);
//int lidx = get_local_id(0);
//int groupIDx = get_group_id(0);
//int groupIDy = get_group_id(1);
int i, j, k;
// size is supplied at build time via the "-Dsize=32" build option.
float R[size][size] = { 0 };
float M[size][size] = { 0 };
float trace = 0.0;
float sum = 0.0;
//__local float result[256];
//result[256] = { 0 };
float linshi[64] = { 0 };
output1[gidx * 65 + gidy] = 0;
//barrier(CLK_LOCAL_MEM_FENCE);
//if (gidx == 1023 && gidy == 0) {
// Gather one 64-element column into private memory.
// NOTE(review): "linshi" presumably should be "linshi[i]" — subscript lost.
for (i = 0; i < 64; i++)
{
linshi = input1[gidy * 5888 * 64 + gidx + i * 5888];
}
// NOTE(review): the bodies of the loops below lost their statements
// ("R", "M" with no subscripts/expressions) — unrecoverable from this post.
for (i = 0; i < 33; i++)
{
for (j = 0; j < size; j++)
{
for (k = 0; k < size; k++)
{
//R
//R
R
// if(R
// printf("%f\t", R
}
}
}
// printf("%f\n", R[4][8]);
// }
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
{
R
}
}
for (i = 0; i < size; i++)
{
for (j = 0; j < size; j++)
{
M
}
}
// Accumulate the trace and the full element sum of M.
// NOTE(review): "trace += M" / "sum += M" presumably were "M[i][i]" and
// "M[i][j]" — subscripts lost.
for (i = 0; i < size; i++)
{
trace += M;
for (j = 0; j < size; j++)
{
sum += M
}
}
output1[gidx * 65 + gidy] = (sum + trace) / 2;
//temp[lidx] = (sum + trace) / 2;
//async_work_group_copy(output1,temp,256,0);
}
// Kernel FT: one work-item per column (gidx in [0, 65)).  Applies a
// 512-tap FIR filter down each 5888-sample column of input1, then writes
// the filtered column back into input1 (transposed layout: row-major
// gidx * 5888 + i).
// NOTE(review): the posted listing lost its array subscripts ("temp +=",
// bare "filterCoe") to forum formatting; they are reconstructed here from
// the surrounding loop structure — verify against the original source.
__kernel void FT(__global float *input1, __constant float *filterCoe)
{
	int gidx = get_global_id(0);
	float temp[5888] = { 0 };
	int i, j, k;
	for (i = 0; i < 5888; i++)
	{
		// Convolution: output sample i sums up to 512 previous samples.
		for (j = i, k = 0; j >= 0 && k < 512; k++, j--)
		{
			temp[i] += input1[gidx + j * 65] * filterCoe[k];
		}
	}
	for (i = 0; i < 5888; i++)
	{
		input1[gidx * 5888 + i] = temp[i];
	}
}
clinfo just as follows:
C:\Users\baifeng>clinfo
Number of platforms: 2
Platform Profile: FULL_PROFILE
Platform Version: OpenCL 2.0 AMD-APP (2348.3)
Platform Name: AMD Accelerated Parallel Processing
Platform Vendor: Advanced Micro Devices, Inc.
Platform Extensions: cl_khr_icd cl_khr_d3d10_sharing cl_khr_d3d11_sharing cl_khr_dx9_media_sharing cl_amd_event_callback cl_amd_offline_devices
Platform Profile: FULL_PROFILE
Platform Version: OpenCL 2.0 AMD-APP (2348.3)
Platform Name: AMD Accelerated Parallel Processing
Platform Vendor: Advanced Micro Devices, Inc.
Platform Extensions: cl_khr_icd cl_khr_d3d10_sharing cl_khr_d3d11_sharing cl_khr_dx9_media_sharing cl_amd_event_callback cl_amd_offline_devices
Platform Name: AMD Accelerated Parallel Processing
Number of devices: 2
Device Type: CL_DEVICE_TYPE_GPU
Vendor ID: 1002h
Board name: AMD FirePro W7100
Device Topology: PCI[ B#3, D#0, F#0 ]
Max compute units: 28
Max work items dimensions: 3
Max work items[0]: 256
Max work items[1]: 256
Max work items[2]: 256
Max work group size: 256
Preferred vector width char: 4
Preferred vector width short: 2
Preferred vector width int: 1
Preferred vector width long: 1
Preferred vector width float: 1
Preferred vector width double: 1
Native vector width char: 4
Native vector width short: 2
Native vector width int: 1
Native vector width long: 1
Native vector width float: 1
Native vector width double: 1
Max clock frequency: 920Mhz
Address bits: 64
Max memory allocation: 6241124352
Image support: Yes
Max number of images read arguments: 128
Max number of images write arguments: 64
Max image 2D width: 16384
Max image 2D height: 16384
Max image 3D width: 2048
Max image 3D height: 2048
Max image 3D depth: 2048
Max samplers within kernel: 16
Max size of kernel argument: 1024
Alignment (bits) of base address: 2048
Minimum alignment (bytes) for any datatype: 128
Single precision floating point capability
Denorms: No
Quiet NaNs: Yes
Round to nearest even: Yes
Round to zero: Yes
Round to +ve and infinity: Yes
IEEE754-2008 fused multiply-add: Yes
Cache type: Read/Write
Cache line size: 64
Cache size: 16384
Global memory size: 8589934592
Constant buffer size: 6241124352
Max number of constant args: 8
Local memory type: Scratchpad
Local memory size: 32768
Max pipe arguments: 16
Max pipe active reservations: 16
Max pipe packet size: 1946157056
Max global variable size: 5617011712
Max global variable preferred total size: 8589934592
Max read/write image args: 64
Max on device events: 1024
Queue on device max size: 8388608
Max on device queues: 1
Queue on device preferred size: 262144
SVM capabilities:
Coarse grain buffer: Yes
Fine grain buffer: Yes
Fine grain system: No
Atomics: No
Preferred platform atomic alignment: 0
Preferred global atomic alignment: 0
Preferred local atomic alignment: 0
Kernel Preferred work group size multiple: 64
Error correction support: 0
Unified memory for Host and Device: 0
Profiling timer resolution: 1
Device endianess: Little
Available: Yes
Compiler available: Yes
Execution capabilities:
Execute OpenCL kernels: Yes
Execute native function: No
Queue on Host properties:
Out-of-Order: No
Profiling : Yes
Queue on Device properties:
Out-of-Order: Yes
Profiling : Yes
Platform ID: 00007FFCF2940188
Name: Tonga
Vendor: Advanced Micro Devices, Inc.
Device OpenCL C version: OpenCL C 2.0
Driver version: 2348.3
Profile: FULL_PROFILE
Version: OpenCL 2.0 AMD-APP (2348.3)
Extensions: cl_khr_fp64 cl_amd_fp64 cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_int64_base_atomics cl_khr_int64_extended_atomics cl_khr_3d_image_writes cl_khr_byte_addressable_store cl_khr_fp16 cl_khr_gl_sharing cl_khr_gl_depth_images cl_amd_device_attribute_query cl_amd_vec3 cl_amd_printf cl_amd_media_ops cl_amd_media_ops2 cl_amd_popcnt cl_khr_d3d10_sharing cl_khr_d3d11_sharing cl_khr_dx9_media_sharing cl_khr_image2d_from_buffer cl_khr_spir cl_khr_subgroups cl_khr_gl_event cl_khr_depth_images cl_khr_mipmap_image cl_khr_mipmap_image_writes cl_amd_liquid_flash
Device Type: CL_DEVICE_TYPE_CPU
Vendor ID: 1002h
Board name:
Max compute units: 32
Max work items dimensions: 3
Max work items[0]: 1024
Max work items[1]: 1024
Max work items[2]: 1024
Max work group size: 1024
Preferred vector width char: 16
Preferred vector width short: 8
Preferred vector width int: 4
Preferred vector width long: 2
Preferred vector width float: 8
Preferred vector width double: 4
Native vector width char: 16
Native vector width short: 8
Native vector width int: 4
Native vector width long: 2
Native vector width float: 8
Native vector width double: 4
Max clock frequency: 2095Mhz
Address bits: 64
Max memory allocation: 8569505792
Image support: Yes
Max number of images read arguments: 128
Max number of images write arguments: 64
Max image 2D width: 8192
Max image 2D height: 8192
Max image 3D width: 2048
Max image 3D height: 2048
Max image 3D depth: 2048
Max samplers within kernel: 16
Max size of kernel argument: 4096
Alignment (bits) of base address: 1024
Minimum alignment (bytes) for any datatype: 128
Single precision floating point capability
Denorms: Yes
Quiet NaNs: Yes
Round to nearest even: Yes
Round to zero: Yes
Round to +ve and infinity: Yes
IEEE754-2008 fused multiply-add: Yes
Cache type: Read/Write
Cache line size: 64
Cache size: 32768
Global memory size: 34278023168
Constant buffer size: 65536
Max number of constant args: 8
Local memory type: Global
Local memory size: 32768
Max pipe arguments: 16
Max pipe active reservations: 16
Max pipe packet size: 4274538496
Max global variable size: 1879048192
Max global variable preferred total size: 1879048192
Max read/write image args: 64
Max on device events: 0
Queue on device max size: 0
Max on device queues: 0
Queue on device preferred size: 0
SVM capabilities:
Coarse grain buffer: No
Fine grain buffer: No
Fine grain system: No
Atomics: No
Preferred platform atomic alignment: 0
Preferred global atomic alignment: 0
Preferred local atomic alignment: 0
And I am just a beginner, cannot use CodeXL fluently...
Thanks for providing the kernel code and other information. We'll check and get back to you.
And I am just a beginner, cannot use CodeXL fluently.
Building or analyzing a kernel in CodeXL is very easy. It's just an offline compilation. First you need to load the kernel file or copy paste the kernel code. Then set the build flags (no flag is needed for default case) and select the devices for which you want to build/analyze the kernel. Then build. That's all.
For more information, please check "Analyze Mode" section of the CodeXL_Quick_Start_Guide available here: https://gpuopen.com/compute-product/codexl/
After a quick check with CodeXL, here are my findings :
float temp[5888] = { 0 };
>>> Modify the code as shown below:
float temp[5888];
...
// initialize it later, like this:
for (i = 0; i < 5888; i++)
{
temp[i] = 0;
for (j = i, k = 0; j >= 0 && k < 512; k++, j--)
>>> Build the kernel without optimization i.e. pass build flag "-O0".
I'll check it further and report to the appropriate team.
[Note: (5888 x 4) bytes space is much bigger than the available register (VGPR) size. So, the kernel is using a lot of scratch registers that might slow down the performance.]
Thank you for your advice and the problem has been solved.
I will try my best to optimize my programme. I find a lot to learn, thanks.
Update:
The above issue has been fixed in the latest internal builds. The fix will be available soon in the public version.