0 Replies Latest reply on Apr 14, 2010 6:32 PM by Raistmer

    Why does the listed kernel overwrite the first elements of the memory buffer?

    Raistmer
      What have I missed?

      This test kernel should read data from the beginning of the buffer (the data is expected to be laid out as 32*6 float arrays, 16 k floats each, stored one after another in memory), perform the coadd, and write the resulting 8 k float arrays one after another immediately after the initial data, in the same shared memory buffer.
      But after kernel execution the initial data is corrupted. That is, the writes do not go after the initial data but right over it.
      I can't see what is wrong with the offsets, though.

      Kernel: __kernel void find_single_pulse_kernel_test(__global float* gpu_power){ //R: each work-item will handle separate data_chunk/dm/sign power array through all needed coadds //R: to avoid host to device data transfers (very costly for current implementation) uint tid=get_global_id(0); uint dchunk=get_global_id(1); uint len4 = 32768 >> 3; uint4 coadd_offsets=(uint4)(0); uint4 coadd_begins=(uint4)(0); uint next_begin,next_offset; //R: will do unroll for coadds to save reads from global memory float4 d0,d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15; //R: no arrays in registers still (AMD still had to implement it) so store data in separate registers coadd_begins.x=0;//R:initial array - no need to write, in gpu_power domain coadd_begins.y=6*32*(32768>>3); coadd_begins.z=coadd_begins.y+6*(32768>>4)*32; coadd_begins.w=coadd_begins.z+6*(32768>>5)*32; next_begin=coadd_begins.w+6*(32768>>6)*32; coadd_offsets=coadd_begins; coadd_offsets.x+=(dchunk*32+tid)*(32768>>3);//l==0 coadd_offsets.y+=(dchunk*32+tid)*(32768>>4); coadd_offsets.z+=(dchunk*32+tid)*(32768>>5); coadd_offsets.w+=(dchunk*32+tid)*(32768>>6); next_offset=next_begin+(dchunk*32+tid)*(32768>>7); for(int m=0;m<len4;m+=16){//R:loading data piece of 16*4 samples int m1=coadd_offsets.x+m; d0=vload4(m1,gpu_power);d1=vload4(m1+1,gpu_power);d2=vload4(m1+2,gpu_power);d3=vload4(m1+3,gpu_power); d4=vload4(m1+4,gpu_power);d5=vload4(m1+5,gpu_power);d6=vload4(m1+6,gpu_power);d7=vload4(m1+7,gpu_power); d8=vload4(m1+8,gpu_power);d9=vload4(m1+9,gpu_power);d10=vload4(m1+10,gpu_power);d11=vload4(m1+11,gpu_power); d12=vload4(m1+12,gpu_power);d13=vload4(m1+13,gpu_power);d14=vload4(m1+14,gpu_power);d15=vload4(m1+15,gpu_power); //R: now do coadd and save coadded piece in memory d0.xy=d0.xz+d0.yw;d0.zw=d1.xz+d1.yw;d1.xy=d2.xz+d2.yw;d1.zw=d3.xz+d3.yw; d2.xy=d4.xz+d4.yw;d2.zw=d5.xz+d5.yw;d3.xy=d6.xz+d6.yw;d3.zw=d7.xz+d7.yw; d4.xy=d8.xz+d8.yw;d4.zw=d9.xz+d9.yw;d5.xy=d10.xz+d10.yw;d5.zw=d11.xz+d11.yw; 
d6.xy=d12.xz+d12.yw;d6.zw=d13.xz+d13.yw;d7.xy=d14.xz+d14.yw;d7.zw=d15.xz+d15.yw; m1=m>>1+coadd_offsets.y; vstore4(d0,m1,gpu_power);vstore4(d1,m1+1,gpu_power); vstore4(d2,m1+2,gpu_power);vstore4(d3,m1+3,gpu_power); vstore4(d4,m1+4,gpu_power);vstore4(d5,m1+5,gpu_power); vstore4(d6,m1+6,gpu_power);vstore4(d7,m1+7,gpu_power); } } Calling code: #define DATA_CHUNK_UNROLL 6 ..... err = clSetKernelArg(find_single_pulse_kernel,0,sizeof(cl_mem),(void *)&gpu_power); /* err |= clSetKernelArg(find_single_pulse_kernel,1,sizeof(cl_mem),(void *)&gpu_thresholds); err |= clSetKernelArg(find_single_pulse_kernel,2,sizeof(cl_mem),(void *)&gpu_best_pulses); err |= clSetKernelArg(find_single_pulse_kernel,3,sizeof(cl_mem),(void *)&gpu_best_pulses_new); err |= clSetKernelArg(find_single_pulse_kernel,4,sizeof(cl_mem),(void *)&gpu_results); err |= clSetKernelArg(find_single_pulse_kernel,5,sizeof(cl_int),(void *)&num_dchunks); err |= clSetKernelArg(find_single_pulse_kernel,6,sizeof(cl_int),(void *)&need_small_FFA_data); err |= clSetKernelArg(find_single_pulse_kernel,7,sizeof(cl_int),(void *)&need_large_FFA_data); err |= clSetKernelArg(find_single_pulse_kernel,8,sizeof(cl_mem),(void *)&gpu_fold_small_neg); err |= clSetKernelArg(find_single_pulse_kernel,9,sizeof(cl_mem),(void *)&gpu_fold_small_pos); err |= clSetKernelArg(find_single_pulse_kernel,10,sizeof(cl_mem),(void *)&gpu_fold_large_neg); err |= clSetKernelArg(find_single_pulse_kernel,11,sizeof(cl_mem),(void *)&gpu_fold_large_pos); */ if(err != CL_SUCCESS)fprintf(stderr,"ERROR: Setting kernel argument:find_single_pulse_kernel: %d\n",err); size_t globalThreads[2]; globalThreads[0] = 32;//R: each work item works with complete data chunk globalThreads[1] = DATA_CHUNK_UNROLL; err = clEnqueueNDRangeKernel( cq, find_single_pulse_kernel, 2,//R: 2D execution domain used NULL, globalThreads, NULL,//R: no workgroups requred 0, NULL,//R: synching between kernels not actually needed cause queue in-order one. 
NULL); Buffer allocation: ocl_global_buf1=clCreateBuffer( context, CL_MEM_READ_WRITE, sizeof(cl_float4) * (32768/*fft_len*//2/*2 complex elements per float4 item*/)*2/*signs*/*16/*small DM chunk size*/*DATA_CHUNK_UNROLL, NULL, &err);