OpenCL固定内存与堆内存

时间:2016-02-10 13:55:52

标签: opencl

我编写了一个示例程序来了解GPU/CPU固定内存和堆内存的影响,以下代码说明了这一点。我分配了三个尺寸为1280x720的缓冲区:先用一些数据填充缓冲区1和缓冲区2,再依次使用这两个缓冲区来填充缓冲区3。填充缓冲区3所涉及的数学运算本身无关紧要。在情况1中,这些缓冲区的内存来自堆(malloc调用);在情况2中,这些缓冲区的内存由OpenCL API调用(clCreateBuffer())分配。这两种情况之间存在性能差异。我在英特尔集成GPU上进行了测试,但无法解释这种性能差异。这是否与CPU/GPU固定内存与堆内存的可缓存属性有关?

您之前是否遇到过此类行为或我做错了什么?

#include <stdio.h>
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>

#define OPENCL

#if defined(_WIN32)
/*
 * Win32 specific includes
 */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#else
#include <sys/time.h>

/* timersub is not provided by msys at this time. */
#ifndef timersub
#define timersub(a, b, result) \
    do { \
      (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
      (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
      if ((result)->tv_usec < 0) { \
        --(result)->tv_sec; \
        (result)->tv_usec += 1000000; \
      } \
    } while (0)
#endif
#endif


/*
 * Microsecond-resolution interval timer.
 *
 * Holds a begin/end pair of raw platform timestamps; the elapsed
 * interval is derived on demand by usec_timer_elapsed().  On Win32 the
 * timestamps are QueryPerformanceCounter ticks, elsewhere they are
 * gettimeofday() values.
 */
struct usec_timer {
#if defined(_WIN32)
  LARGE_INTEGER  begin, end;
#else
  struct timeval begin, end;
#endif
};


/* Record the starting timestamp of the interval being measured. */
static void usec_timer_start(struct usec_timer *t) {
#if defined(_WIN32)
  QueryPerformanceCounter(&t->begin);
#else
  gettimeofday(&t->begin, NULL);
#endif
}


/* Record the ending timestamp; call after usec_timer_start(). */
static void usec_timer_mark(struct usec_timer *t) {
#if defined(_WIN32)
  QueryPerformanceCounter(&t->end);
#else
  gettimeofday(&t->end, NULL);
#endif
}


/*
 * Return the elapsed time between the recorded begin and end
 * timestamps, in microseconds.  Requires that usec_timer_start() and
 * usec_timer_mark() have both been called on t.
 */
static int64_t usec_timer_elapsed(struct usec_timer *t) {
#if defined(_WIN32)
  LARGE_INTEGER freq, diff;

  diff.QuadPart = t->end.QuadPart - t->begin.QuadPart;

  QueryPerformanceFrequency(&freq);
  return diff.QuadPart * 1000000 / freq.QuadPart;
#else
  struct timeval diff;

  timersub(&t->end, &t->begin, &diff);
  /* Widen tv_sec before multiplying: where time_t is 32-bit, the
   * product tv_sec * 1000000 would overflow after roughly 35 minutes
   * of elapsed time, before the implicit conversion to int64_t. */
  return (int64_t)diff.tv_sec * 1000000 + diff.tv_usec;
#endif
}


#ifdef OPENCL
/* Use forward slashes: the original ".\CL\cl.h" backslash path only
 * resolves on Windows.  The quoted form still searches the local CL/
 * directory first on every platform. */
#include "CL/cl.h"

/*
 * Set up a minimal OpenCL execution environment: the first available
 * platform, its first GPU device, one context and one in-order
 * command queue.
 *
 * On success writes *context and *cmd_queue and returns 0.
 * On failure returns 1; any partially created objects are released,
 * so the outputs must not be used.
 */
int opencl_init(cl_context *context, cl_command_queue *cmd_queue) {
  cl_int status;
  cl_uint num_platforms = 0;
  cl_platform_id platform;
  cl_uint num_devices = 0;
  cl_device_id device;
  cl_command_queue_properties command_queue_properties = 0;

  // Get the number of platforms in the system.
  status = clGetPlatformIDs(0, NULL, &num_platforms);
  if (status != CL_SUCCESS || num_platforms == 0)
    goto fail;

  // Get the platform ID for one platform
  status = clGetPlatformIDs(1, &platform, NULL);
  if (status != CL_SUCCESS)
    goto fail;

  // Get the number of devices available on the platform
  status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices);
  if (status != CL_SUCCESS || num_devices == 0)
    goto fail;

  // Get the device ID for one device
  status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
  if (status != CL_SUCCESS)
    goto fail;

  // Create OpenCL context for one device
  *context = clCreateContext(NULL, 1, &device, NULL, NULL, &status);
  if (status != CL_SUCCESS || *context == NULL)
    goto fail;

  // Create command queues for the device
  *cmd_queue = clCreateCommandQueue(*context, device, command_queue_properties, &status);
  if (status != CL_SUCCESS || *cmd_queue == NULL) {
    // Don't leak the context created above when queue creation fails.
    clReleaseContext(*context);
    *context = NULL;
    goto fail;
  }
  return 0;

fail:
  return 1;
}
#endif

/*
 * Benchmark driver: allocates three width*height byte buffers, fills
 * buffers 0 and 1 with constants, then runs 600 passes of
 * buffer2 += buffer0 + buffer1 on the CPU and prints the elapsed time
 * in microseconds.
 *
 * argv[1] selects the allocator:
 *   0        - plain heap memory (malloc)
 *   nonzero  - OpenCL pinned buffers (CL_MEM_ALLOC_HOST_PTR) mapped
 *              into the host address space via clEnqueueMapBuffer.
 *
 * Returns 0 on success, 1 on any allocation/initialization failure.
 */
int main(int argc, char **argv) {
  int x, y, z;
  int width = 1280, height = 720;
  unsigned char *buffer[3];
  int use_gpu;
  cl_mem opencl_mem[3];
  cl_context context;
  cl_command_queue cmd_queue;
  cl_int status;

  if (argc != 2) {
    printf("Usage: %s <use_gpu: 0|1>\n", argv[0]);
    return 1;
  }

  use_gpu = atoi(argv[1]);

  if (use_gpu) {
    if (opencl_init(&context, &cmd_queue)) {
      printf("OpenCL init failure");
      // Fix: the original fell through and used the uninitialized
      // context/cmd_queue after a failed init.
      return 1;
    }
  }

  if (use_gpu) {
    for (x = 0; x < 3; x++) {
      // Pinned, host-visible allocation owned by the OpenCL runtime.
      opencl_mem[x] = clCreateBuffer(context,
                                     CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR,
                                     width * height * sizeof(*buffer[x]), NULL,
                                     &status);
      if (status != CL_SUCCESS)
        return 1;
      // Blocking map (CL_TRUE) so the returned host pointer is valid
      // as soon as the call returns.
      buffer[x] = clEnqueueMapBuffer(cmd_queue, opencl_mem[x], CL_TRUE,
                                     CL_MAP_READ | CL_MAP_WRITE, 0,
                                     width * height * sizeof(*buffer[x]), 0,
                                     NULL, NULL, &status);
      if (status != CL_SUCCESS) {
        clReleaseMemObject(opencl_mem[x]);
        opencl_mem[x] = NULL;
        return 1;
      }
    }
  } else {
    for (x = 0; x < 3; x++) {
      buffer[x] = malloc(width * height * sizeof(*buffer[x]));
      if (buffer[x] == NULL) {
        printf("Unable to alloc memory");
        // Fix: the original kept going and memset a NULL pointer.
        return 1;
      }
    }
  }

  memset(buffer[0], 1, width * height * sizeof(*buffer[0]));
  memset(buffer[1], 2, width * height * sizeof(*buffer[1]));
  memset(buffer[2], 0, width * height * sizeof(*buffer[2]));

  {
    struct usec_timer emr_timer;
    usec_timer_start(&emr_timer);
    for (z = 0; z < 600; z++) {
      for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
          // don't worry about overflows
          buffer[2][y * width + x] += buffer[0][y * width + x]
                                     + buffer[1][y * width + x];
        }
      }
    }
    usec_timer_mark(&emr_timer);
    printf("Elapsed time %"PRIu64"\n", usec_timer_elapsed(&emr_timer));
  }

  if (use_gpu) {
    // Fix: the original indexed opencl_mem[0]/buffer[0] on every
    // iteration, so buffers 1 and 2 were never unmapped or released.
    for (x = 0; x < 3; x++) {
      if (buffer[x] != NULL) {
        status = clEnqueueUnmapMemObject(cmd_queue, opencl_mem[x], buffer[x], 0,
                                         NULL, NULL);
        status |= clFinish(cmd_queue);
        if (status != CL_SUCCESS)
          return 1;
        buffer[x] = NULL;
      }

      if (opencl_mem[x] != NULL) {
        status = clReleaseMemObject(opencl_mem[x]);
        if (status != CL_SUCCESS)
          return 1;
        opencl_mem[x] = NULL;
      }
    }

    clReleaseCommandQueue(cmd_queue);
    clReleaseContext(context);
  } else {
    for (x = 0; x < 3; x++) {
      free(buffer[x]);
      buffer[x] = NULL;
    }
  }
  return 0;
}

1 个答案:

答案 0 :(得分:2)

如果使用malloc + operation + free,则仅使用CPU资源。

如果您使用OpenCL,则使用CPU + GPU,并且涉及同步和数据复制处罚。

  • GPU中的Alloc
  • 映射到CPU空间(在CPU中分配另一个缓冲区)
  • 操作CPU缓冲区
  • 取消映射(把数据从固定内存复制回GPU缓冲区 + 释放CPU端的映射缓冲区)。
  • 销毁GPU缓冲区

是什么让你认为它应该具有相同的速度?当然更昂贵,而且永远都是。您正在执行相同的CPU操作+一些额外的OpenCL操作。

固定内存在数据传输时确实比非固定内存更快,但它永远不会比"完全不复制"更快——在malloc的情况下你根本没有复制任何数据!

同样对于内存基准测试,使用3 * 1280 * 720 = 2.6MB进行操作是完全愚蠢的。在普通系统中只需几微秒。无论如何,对于这两种情况,该部分应该是相同的。

开销将主导您的结果,而不是吞吐量。