dave_fournier

slow performance of test code for mixtures of normal distributions

I'm just learning OpenCL and trying tests on the kind of problems I have, to see if I can get speedups in my code.

The problem is to calculate the expected probabilities for a large number of mixtures of normal distributions. In this example there are 50,000 samples; each one has a mixture of 20 normal distributions evaluated at 100 points. Approximate time in the CPU norm_mix function is 4.76 seconds. Approximate time in the GPU kernel is 3.49 seconds. This seems to be pretty poor performance. Advice would be appreciated.
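In symbols, this is what norm_mix computes for each sample i and length bin l (a restatement of the code below, in my notation):

    q_{i,\ell} \;\propto\; \sum_{j=1}^{m} P_{i,j}\,\frac{1}{\sigma_j}\,
        \exp\!\left(-\frac{1}{2}\left(\frac{x_\ell - \mu_j}{\sigma_j}\right)^{2}\right),
    \qquad \sum_{\ell=1}^{\mathrm{nlen}} q_{i,\ell} = 1

with m = 20 components, nlen = 100 evaluation points, and n = 50,000 samples.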

Below is my code, kernel, and output from clinfo.

#include <admodel.h>
#include <stdio.h>
#include <stdlib.h>
 
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
 
#define MAX_SOURCE_SIZE (0x100000)
 
#pragma OPENCL EXTENSION cl_amd_fp64: enable
 
void norm_mix(double *P,
       double *q,
       double *mu,
       double *sd,
       double *x,
     int m,int nlen,int n)
{
  // shift pointers so the 1-based inner indexing below works
  P--;
  q--;
  mu--;
  sd--;
  x--;
  int Poffset,qoffset,ii,j;
  double tmp,d;
  int i;

  for (i=0;i<n;i++)   // one pass per sample
  {
    Poffset=i*m;
    qoffset=i*nlen;
    double qsum=0;
    for (ii=1;ii<=nlen;ii++)
    {
      tmp=0;
      for (j=1;j<=m;j++)
      {
        d=(x[ii]-mu[j])/sd[j];
        tmp+=P[Poffset+j]*exp(-0.5*d*d)/sd[j];
      }
      q[qoffset+ii]=tmp;
      qsum+=tmp;
    }
    for (ii=1;ii<=nlen;ii++)   // normalize so each mixture sums to 1
    {
      q[qoffset+ii]/=qsum;
    }
  }
}
 
dmatrix unstack(int lr,int ur,int lc ,int uc,dvector & v)
{
  dmatrix M(lr,ur);
  int offset=0;
  for (int i=lr;i<=ur;i++)
  {
    M(i)=v(lc+offset,uc+offset).shift(lc);
    offset+=uc-lc+1;
  }
  return M;
}
 
int main(void) {
    // Create the two input vectors
    int i;
    const int n = 50000;   // number of samples
    const int m = 20;
    const int nlen = 100;
    dvector P(1,n*m);   // proportions at age
    dvector q(1,n*nlen);   // proportions at length
    dvector mu(1,m);    // mean lengths
    dvector sd(1,m);    // standard deviations
    dvector x(1,nlen);     // middle of length intervals
    random_number_generator rng(101);
    x.fill_seqadd(10,1);
    P.fill_randu(rng);
    double Linf=90;
    double K=0.15;
    sd.fill_seqadd(3,0.25);
    dmatrix PM=unstack(1,n,1,m,P);
    dmatrix qM=unstack(1,n,1,nlen,q);
    //cout << PM(1) << endl;
    //cout << sum(PM(1)) << endl;
    for (i=1;i<=m;i++)
    {
      mu(i)=Linf*(1-exp(-K*i));
    }
    for (i=1;i<=n;i++)
    {
      //cout << " i = " << i << endl;
      PM(i)/=sum(PM(i));
    }
    clock_t tt1=clock();
    norm_mix(&(P(1)),&(q(1)),&(mu(1)),&(sd(1)),&(x(1)),m,nlen,n);  
    clock_t tt2=clock();
    cout <<  "time = " << tt2-tt1 << "  " << double(tt2-tt1)/CLOCKS_PER_SEC  << endl;
    cout <<  "q(1)  = " << qM(1) << endl;
    cout <<  "q(n)  = " << qM(n) << endl;
    cout <<  "sum(q(n))  = " << sum(qM(n)) << endl;
    // Display the result to the screen
    //cout << PM << endl;
 
    // Load the kernel source code into the array source_str
    FILE *fp;
    char *source_str;
    size_t source_size;
 
    fp = fopen("norm_mix_kernel.cl", "r");
    if (!fp) {
        fprintf(stderr, "Failed to load kernel.\n");
        exit(1);
    }
    source_str = (char*)malloc(MAX_SOURCE_SIZE);
    source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
    fclose( fp );
 
    // Get platform and device information
    cl_platform_id platform_id = NULL;
    cl_device_id device_id = NULL;    
    cl_uint ret_num_devices;
    cl_uint ret_num_platforms;
    cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
    ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_GPU, 1,  
            &device_id, &ret_num_devices);
 
    // Create an OpenCL context
    cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
 
    // Create a command queue
    cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
 
    // Create memory buffers on the device for each vector  
    //  memory for P
    /*
    dvector P(1,n*m);   // proportions at age
 
    dvector q(1,n*nlen);   // proportions at length
    dvector mu(1,m);    // mean lengths
    dvector x(1,nlen);     // middle of length intervals
    */
    clock_t t1=clock();
    cl_mem P_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,  
            n*m * sizeof(double), NULL, &ret);
    cl_mem sd_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
            m * sizeof(double), NULL, &ret);
    cl_mem mu_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
            m * sizeof(double), NULL, &ret);
    cl_mem x_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
            nlen * sizeof(double), NULL, &ret);
    cl_mem q_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,  
            n*nlen * sizeof(double), NULL, &ret);
    //cl_mem m_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,  
      //      sizeof(int), NULL, &ret);
 
    // Copy the input data to the device buffers (q is output only)
    ret = clEnqueueWriteBuffer(command_queue, P_mem_obj, CL_TRUE, 0,
            n*m * sizeof(double), &(P.elem(1)), 0, NULL, NULL);
    ret = clEnqueueWriteBuffer(command_queue, mu_mem_obj, CL_TRUE, 0,  
            m * sizeof(double), &(mu.elem(1)), 0, NULL, NULL);
    ret = clEnqueueWriteBuffer(command_queue, sd_mem_obj, CL_TRUE, 0,  
            m * sizeof(double), &(sd.elem(1)), 0, NULL, NULL);
    ret = clEnqueueWriteBuffer(command_queue, x_mem_obj, CL_TRUE, 0,  
            nlen * sizeof(double), &(x.elem(1)), 0, NULL, NULL);
 
    // Create a program from the kernel source
    cl_program program = clCreateProgramWithSource(context, 1,  
            (const char **)&source_str, (const size_t *)&source_size, &ret);
    cout << "A ret = " << ret << endl;
 
    // Build the program
    ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
    cout << "B ret = " << ret << endl;
 
    // Create the OpenCL kernel
    cl_kernel kernel = clCreateKernel(program, "norm_mix", &ret);
    cout << "C ret = " << ret << endl;
 
    // Set the arguments of the kernel
    ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&P_mem_obj);
    ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&q_mem_obj);
    ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&mu_mem_obj);
    ret = clSetKernelArg(kernel, 3, sizeof(cl_mem), (void *)&sd_mem_obj);
    ret = clSetKernelArg(kernel, 4, sizeof(cl_mem), (void *)&x_mem_obj);
    ret = clSetKernelArg(kernel, 5, sizeof(cl_int),(void*)&m);
    ret = clSetKernelArg(kernel, 6, sizeof(cl_int),(void*)&nlen);
     
    // Execute the OpenCL kernel on the list
    size_t global_item_size = n; // Process the entire lists
    size_t local_item_size = 1; // Process one item at a time
    ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,  
            &global_item_size, &local_item_size, 0, NULL, NULL);
 
    clock_t t3=clock();
    // Read the memory buffer q on the device to the local variable q
    ret = clEnqueueReadBuffer(command_queue, q_mem_obj, CL_TRUE, 0,  
            n*nlen * sizeof(double), &(q(1)), 0, NULL, NULL);
    clock_t t2=clock();
 
    cout <<  "time = " << t2-t1 << "  " << double(t2-t1)/CLOCKS_PER_SEC  << endl;
    cout <<  "time = " << t2-t3 << "  " << double(t2-t3)/CLOCKS_PER_SEC  << endl;
    cout <<  "q(1)  = " << qM(1) << endl;
    cout <<  "q(n)  = " << qM(n) << endl;
    cout <<  "sum(q(n))  = " << sum(qM(n)) << endl;
    // Display the result to the screen
 
    // Clean up
    ret = clFlush(command_queue);
    ret = clFinish(command_queue);
    ret = clReleaseKernel(kernel);
    ret = clReleaseProgram(program);
    ret = clReleaseMemObject(P_mem_obj);
    ret = clReleaseMemObject(q_mem_obj);
    ret = clReleaseMemObject(x_mem_obj);
    ret = clReleaseMemObject(mu_mem_obj);
    ret = clReleaseMemObject(sd_mem_obj);
    ret = clReleaseCommandQueue(command_queue);
    ret = clReleaseContext(context);
    return 0;
}

 

 

Here is my kernel

#pragma OPENCL EXTENSION cl_amd_fp64: enable

__kernel void norm_mix(__global double *P,
      __global double *q,
      __global double *mu,
      __global double *sd,
      __global double *x,
     int m,int nlen)
{
  // shift pointers so the 1-based inner indexing below works
  P--;
  q--;
  mu--;
  sd--;
  x--;
  int Poffset,qoffset,ii,j;
  double tmp,d;
  // Get the index of the current element (one work-item per sample)
  int i = get_global_id(0);

  Poffset=i*m;
  qoffset=i*nlen;
  double qsum=0;
  for (ii=1;ii<=nlen;ii++)
  {
    tmp=0;
    for (j=1;j<=m;j++)
    {
      d=(x[ii]-mu[j])/sd[j];
      tmp+=P[Poffset+j]*exp(-0.5*d*d)/sd[j];
    }
    q[qoffset+ii]=tmp;
    qsum+=tmp;
  }
  for (ii=1;ii<=nlen;ii++)   // normalize so each mixture sums to 1
  {
    q[qoffset+ii]=q[qoffset+ii]/qsum;
  }
}

here is the output from clinfo.

Number of platforms:                 1
  Platform Profile:                 FULL_PROFILE
  Platform Version:                 OpenCL 1.1 AMD-APP-SDK-v2.5 (684.213)
  Platform Name:                 AMD Accelerated Parallel Processing
  Platform Vendor:                 Advanced Micro Devices, Inc.
  Platform Extensions:                 cl_khr_icd cl_amd_event_callback cl_amd_offline_devices


  Platform Name:                 AMD Accelerated Parallel Processing
Number of devices:                 2
  Device Type:                     CL_DEVICE_TYPE_GPU
  Device ID:                     4098
  Device Topology:                 PCI[ B#2, D#0, F#0 ]
  Max compute units:                 22
  Max work items dimensions:             3
    Max work items[0]:                 256
    Max work items[1]:                 256
    Max work items[2]:                 256
  Max work group size:                 256
  Preferred vector width char:             16
  Preferred vector width short:             8
  Preferred vector width int:             4
  Preferred vector width long:             2
  Preferred vector width float:             4
  Preferred vector width double:         0
  Native vector width char:             16
  Native vector width short:             8
  Native vector width int:             4
  Native vector width long:             2
  Native vector width float:             4
  Native vector width double:             0
  Max clock frequency:                 0Mhz
  Address bits:                     32
  Max memory allocation:             268435456
  Image support:                 Yes
  Max number of images read arguments:         128
  Max number of images write arguments:         8
  Max image 2D width:                 8192
  Max image 2D height:                 8192
  Max image 3D width:                 2048
  Max image 3D height:                 2048
  Max image 3D depth:                 2048
  Max samplers within kernel:             16
  Max size of kernel argument:             1024
  Alignment (bits) of base address:         32768
  Minimum alignment (bytes) for any datatype:     128
  Single precision floating point capability
    Denorms:                     No
    Quiet NaNs:                     Yes
    Round to nearest even:             Yes
    Round to zero:                 Yes
    Round to +ve and infinity:             Yes
    IEEE754-2008 fused multiply-add:         Yes
  Cache type:                     None
  Cache line size:                 0
  Cache size:                     0
  Global memory size:                 1073741824
  Constant buffer size:                 65536
  Max number of constant args:             8
  Local memory type:                 Scratchpad
  Local memory size:                 32768
  Kernel Preferred work group size multiple:     64
  Error correction support:             0
  Unified memory for Host and Device:         0
  Profiling timer resolution:             1
  Device endianess:                 Little
  Available:                     Yes
  Compiler available:                 Yes
  Execution capabilities:                
    Execute OpenCL kernels:             Yes
    Execute native function:             No
  Queue properties:                
    Out-of-Order:                 No
    Profiling :                     Yes
  Platform ID:                     0x7f15ee31f060
  Name:                         Cayman
  Vendor:                     Advanced Micro Devices, Inc.
  Device OpenCL C version:             OpenCL C 1.1
  Driver version:                 CAL 1.4.1523
  Profile:                     FULL_PROFILE
  Version:                     OpenCL 1.1 AMD-APP-SDK-v2.5 (684.213)
  Extensions:                     cl_amd_fp64 cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_3d_image_writes cl_khr_byte_addressable_store cl_khr_gl_sharing cl_ext_atomic_counters_32 cl_amd_device_attribute_query cl_amd_vec3 cl_amd_printf cl_amd_media_ops cl_amd_popcnt


  Device Type:                     CL_DEVICE_TYPE_CPU
  Device ID:                     4098
  Max compute units:                 6
  Max work items dimensions:             3
    Max work items[0]:                 1024
    Max work items[1]:                 1024
    Max work items[2]:                 1024
  Max work group size:                 1024
  Preferred vector width char:             16
  Preferred vector width short:             8
  Preferred vector width int:             4
  Preferred vector width long:             2
  Preferred vector width float:             4
  Preferred vector width double:         0
  Native vector width char:             16
  Native vector width short:             8
  Native vector width int:             4
  Native vector width long:             2
  Native vector width float:             4
  Native vector width double:             0
  Max clock frequency:                 2800Mhz
  Address bits:                     64
  Max memory allocation:             2147483648
  Image support:                 Yes
  Max number of images read arguments:         128
  Max number of images write arguments:         8
  Max image 2D width:                 8192
  Max image 2D height:                 8192
  Max image 3D width:                 2048
  Max image 3D height:                 2048
  Max image 3D depth:                 2048
  Max samplers within kernel:             16
  Max size of kernel argument:             4096
  Alignment (bits) of base address:         1024
  Minimum alignment (bytes) for any datatype:     128
  Single precision floating point capability
    Denorms:                     Yes
    Quiet NaNs:                     Yes
    Round to nearest even:             Yes
    Round to zero:                 Yes
    Round to +ve and infinity:             Yes
    IEEE754-2008 fused multiply-add:         No
  Cache type:                     Read/Write
  Cache line size:                 64
  Cache size:                     65536
  Global memory size:                 8390160384
  Constant buffer size:                 65536
  Max number of constant args:             8
  Local memory type:                 Global
  Local memory size:                 32768
  Kernel Preferred work group size multiple:     1
  Error correction support:             0
  Unified memory for Host and Device:         1
  Profiling timer resolution:             1
  Device endianess:                 Little
  Available:                     Yes
  Compiler available:                 Yes
  Execution capabilities:                
    Execute OpenCL kernels:             Yes
    Execute native function:             Yes
  Queue properties:                
    Out-of-Order:                 No
    Profiling :                     Yes
  Platform ID:                     0x7f15ee31f060
  Name:                         AMD Phenom(tm) II X6 1055T Processor
  Vendor:                     AuthenticAMD
  Device OpenCL C version:             OpenCL C 1.1
  Driver version:                 2.0
  Profile:                     FULL_PROFILE
  Version:                     OpenCL 1.1 AMD-APP-SDK-v2.5 (684.213)
  Extensions:                     cl_khr_fp64 cl_amd_fp64 cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_int64_base_atomics cl_khr_int64_extended_atomics cl_khr_byte_addressable_store cl_khr_gl_sharing cl_ext_device_fission cl_amd_device_attribute_query cl_amd_vec3 cl_amd_media_ops cl_amd_popcnt cl_amd_printf

 

 

genaganna

Originally posted by: dave_fournier "[...] Approximate time in the CPU norm_mix function is 4.76 seconds. Approximate time in the GPU kernel is 3.49 seconds. This seems to be pretty poor performance. Advice would be appreciated."

Is the 4.76 seconds t2 - t1? If that is the case, did you include compilation and buffer creation in the time calculation as well?

The relevant timing code is

    clock_t tt1=clock();
    norm_mix(&(P(1)),&(q(1)),&(mu(1)),&(sd(1)),&(x(1)),m,nlen,n);
    clock_t tt2=clock();
    cout <<  "time = " << tt2-tt1 << "  " << double(tt2-tt1)/CLOCKS_PER_SEC  << endl;

tt2-tt1 is the time in the CPU version, which is 4.72 seconds.

    clock_t t3=clock();
    // Read the memory buffer q on the device to the local variable q
    ret = clEnqueueReadBuffer(command_queue, q_mem_obj, CL_TRUE, 0,
            n*nlen * sizeof(double), &(q(1)), 0, NULL, NULL);
    clock_t t2=clock();

t2-t3 is the time spent waiting for the GPU to finish, which is 3.49 seconds. So the GPU is only slightly faster.
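A more precise alternative is to time the kernel itself with OpenCL event profiling, which excludes transfer and setup overhead; a minimal sketch, assuming the same context, kernel, and work-size variables as in the host code above:

    // Sketch: time just the kernel via event profiling. Requires a queue
    // created with CL_QUEUE_PROFILING_ENABLE.
    cl_command_queue pq = clCreateCommandQueue(context, device_id,
            CL_QUEUE_PROFILING_ENABLE, &ret);
    cl_event ev;
    ret = clEnqueueNDRangeKernel(pq, kernel, 1, NULL,
            &global_item_size, &local_item_size, 0, NULL, &ev);
    clWaitForEvents(1, &ev);          // block until the kernel finishes
    cl_ulong t_start, t_end;          // device timestamps in nanoseconds
    clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_START,
            sizeof(t_start), &t_start, NULL);
    clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_END,
            sizeof(t_end), &t_end, NULL);
    cout << "kernel time = " << (t_end - t_start)*1e-9 << " s" << endl;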

 


Unfortunately, you can't just port CPU code onto a GPU and expect huge performance benefits. 95% of the time you need to modify your code to get a significant speedup.

There are a few things you can do to improve the code:

I noticed that the code "d=(x[ii]-mu[j])/sd[j]" and "exp(-0.5*d*d)" will give the same result for every thread at the same execution point. It would therefore be better to pre-calculate all the possible values of exp(...d*d) in a separate loop (20*100 values), splitting the work between the threads of a shared workgroup (i.e. each thread of a 64-thread workgroup calculates 1/64th of the 20*100 values), storing them in shared memory, and reading them back from an array (e.g. d[ii*100 + j]). GPUs have a lot of bandwidth, but not accessing memory in a particular fashion can cripple things.

This could give a HUGE performance improvement, as you could replace

    d=(x[ii]-mu[j])/sd[j];
    tmp+=P[Poffset+j]*exp(-0.5*d*d)/sd[j];

with

    tmp+=P[Poffset+j]*d[ii*100+j]/sd[j];

The next thing I would do is vectorise all your code for the GPU, even if it means grouping consecutive elements together in a for() loop (e.g. for(x=0;x<whatever;x+=4)). This will improve bandwidth considerably, as 9 times out of 10 you will be accessing more than 128 bits at once. Most array access in your code is consecutive within a for() loop, so this should be relatively straightforward.
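For illustration, a sketch of what the vectorised inner loops might look like (my sketch, not the reply author's: 0-based indexing, Poffset/qoffset as in the kernel above, and it assumes nlen is a multiple of 4 and that the double-precision vector overloads of exp, vload4, and vstore4 are available on the device):

    // Sketch: process four consecutive length bins per iteration so each
    // global read/write moves 256 bits at once (nlen % 4 == 0 assumed).
    double qsum = 0;
    for (int ii = 0; ii < nlen; ii += 4) {
        double4 xx  = vload4(0, &x[ii]);      // x[ii..ii+3] in one load
        double4 acc = (double4)(0.0);
        for (int j = 0; j < m; j++) {
            double4 d = (xx - mu[j]) / sd[j];
            acc += P[Poffset + j] * exp(-0.5 * d * d) / sd[j];
        }
        vstore4(acc, 0, &q[qoffset + ii]);    // q[ii..ii+3] in one store
        qsum += acc.x + acc.y + acc.z + acc.w;
    }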

Also, try defining the 'm' and 'nlen' values instead of passing them as kernel arguments, by writing them into the source just before compilation. The compiler can then unroll your loops if it sees fit, which can give significant performance benefits, as conditional statements aren't well suited to the current line of GPUs.
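One way to do that without editing the kernel source file is through build options (a minimal host-side sketch using the same program and device variables as above; the kernel would then use M and NLEN in place of its m and nlen arguments):

    // Sketch: bake the problem sizes into the kernel at build time so
    // the compiler can fully unroll the inner loops.
    char options[64];
    sprintf(options, "-D M=%d -D NLEN=%d", m, nlen);
    ret = clBuildProgram(program, 1, &device_id, options, NULL, NULL);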

If you can cope with the small loss in precision, try precalculating the reciprocal instead of dividing, e.g. with native_recip() for the '/qsum'. I believe it's still faster on GPUs.
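One caveat: the native_* functions are single precision in the OpenCL spec, so with the doubles used here the closest equivalent is to divide once and multiply; a minimal sketch using the kernel's variables:

    // Sketch: one division plus nlen multiplications instead of nlen
    // divisions by qsum.
    double rqsum = 1.0 / qsum;
    for (int ii = 1; ii <= nlen; ii++)
        q[qoffset + ii] *= rqsum;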


In the "real" code the mu(j) are going to vary. To keep it simple here I purposefully recalculated using a fixed value. Since both the CPU and GPU do the extra calculations it shouldn't affect the comparison. I'll try the vectorization. I need double precision here. These are big, ill conditioned nonlinear optimization problems which require double precision. Thanks for the advice.
0 Likes

In the "real" code the mu(j) are going to vary. To keep it simple here I purposefully recalculated using a fixed value. Since both the CPU and GPU do the extra calculations it shouldn't affect the comparison. I'll try the vectorization. I need double precision here. These are big, ill conditioned nonlinear optimization problems which require double precision. Thanks for the advice.
0 Likes

notzed

You have a lot of problems with your code; I'm surprised it's even beating a CPU.

1: Using a single local work item means you're using approximately 1/64th of the ALU capability of the processor, just to start with.

2: Using a per-work-item memory access stride of 1 means you're not coalescing memory accesses: potentially another 1/64th (or at least in that range) of overhead.

 

The local work size should be at least 64 (and a multiple of 64) for a GPU: try to re-arrange your code *and* data around that.

Since your code looks like it processes independent values, I suggest you simply change the local work size to some multiple of 64 (and set the global size to some multiple of that) as an absolute worst-case baseline; see the sketch below.
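A minimal host-side sketch of that change (my sketch; since n = 50,000 is not a multiple of 64, the global size is rounded up, so the kernel would also need an if (i < n) guard that the original kernel lacks):

    size_t local_item_size  = 64;   // one full wavefront per workgroup
    size_t global_item_size = ((n + 63) / 64) * 64;  // round n up to a multiple of 64
    ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
            &global_item_size, &local_item_size, 0, NULL, NULL);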

After that there are plenty of other things you can do to optimise memory access patterns to add extra sparkle; I would guess those changes could add at least an order of magnitude on top of the change suggested above, e.g. changing the data stride, using multiple work-items to process the j=1 to m loop concurrently, and so on.

For such a simple kernel with very regular access patterns you should be expecting 100-200x speedup over a CPU of the same vintage.

0 Likes

Wow, I missed the bit where you set the local work size to 1! It would be silly to use a value of 1 even on the CPU, as it would be unlikely to utilise all cores and hardware threads. And don't worry: setting the workgroup size to more than 6 on your CPU won't hurt performance.

Yes, as notzed said, with anything less than 64 on Cayman you're not using all the available hardware performance.

Well, there you have it... 1/64th of a Cayman is faster than 1/6th of a Phenom II X6 1055T. Pretty good going if you ask me.


OK, thanks to all of you. Changing the local_work_size to NULL increased the relative speed to about 30 to 1 for the GPU over the CPU.

After some reading, I think it is a general principle that if all the work items in a work group access the same global memory, that memory should be copied to local memory. Ideally, each work item copies a small fraction of the total. So for example, if there are 6400 doubles in global memory and 64 work items in the work group, then each work item would copy 100 of the doubles, with a barrier after the copy before proceeding. Is that right?


Well, more or less.

It depends somewhat on the problem, whether the data will all fit, and how often you need to access it. There is also constant memory.

Pretty much the main rule of thumb (for global and local memory alike) is that adjacent threads access sequential memory addresses. So loops like your P loop are not ideal, since each thread is accessing elements 20 apart. A move to local memory can re-arrange such data.
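To illustrate, a sketch of the staging pattern being discussed (my sketch, with hypothetical names tile, a __local buffer, and src, a __global pointer): each work-item copies a strided share rather than a contiguous block of 100 doubles, so consecutive work-items touch consecutive addresses, which is the coalesced layout.

    // Sketch: a 64-item workgroup cooperatively stages `count` doubles
    // into local memory; work-item lid copies elements lid, lid+64, ...
    int lid = get_local_id(0);
    for (int k = lid; k < count; k += get_local_size(0))
        tile[k] = src[k];             // adjacent work-items, adjacent addresses
    barrier(CLK_LOCAL_MEM_FENCE);     // whole tile ready before anyone reads it

(As an aside, 6400 doubles is 50 KB, which exceeds the 32 KB of local memory reported by clinfo above, so a table that size would need to be split.)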

Chapter 4 of the AMD OpenCL programming guide is pretty much a must-read once you start worrying about speed, but it's pretty big.

For starters though, sections 4.6 through to 4.11 cover the more interesting bits.
