Does accessing mapped pinned host (or peer device) memory require the GPU copy engine?

Posted: 2014-10-11 01:45:50

Tags: cuda gpu gpgpu pci-e

Suppose the GPU has one execution engine and one copy engine.

  1. When a thread accesses host memory from within a CUDA kernel, does it keep the copy engine busy? Does it block all asynchronous memory copies to and from the device in other streams? (A minimal zero-copy allocation sketch follows this list.)
  2. If a thread inside a CUDA kernel accesses peer-device memory, does it keep the copy engines on both devices busy?
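
For context, question 1 is about zero-copy access: the kernel dereferences a device pointer that maps onto pinned host memory, so each access travels over the PCIe bus instead of going through a cudaMemcpy. A minimal sketch of how such a mapping is typically set up (the kernel and variable names here are illustrative, not from the question):

#include <cstdio>

#include "cuda_runtime.h"

// Illustrative kernel: every read of mapped[] is served from host memory over PCIe.
__global__ void readHost(const int *mapped, int *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = mapped[i];
}

int main()
{
    const int n = 1024;

    // Must be called before the CUDA context is created.
    cudaSetDeviceFlags(cudaDeviceMapHost);

    int *h_buf = 0, *d_buf = 0, *d_out = 0;
    cudaHostAlloc((void**)&h_buf, n * sizeof(int), cudaHostAllocMapped);  // pinned + mapped
    cudaHostGetDevicePointer((void**)&d_buf, h_buf, 0);                   // device alias of h_buf
    cudaMalloc((void**)&d_out, n * sizeof(int));

    for (int i = 0; i < n; i++) h_buf[i] = i;

    readHost<<<(n + 255) / 256, 256>>>(d_buf, d_out, n);
    cudaDeviceSynchronize();

    cudaFree(d_out);
    cudaFreeHost(h_buf);
    return 0;
}

On 64-bit platforms with unified virtual addressing, pointers from cudaMallocHost/cudaHostAlloc can also be passed to kernels directly, which is what the test code in the answer below relies on.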

1 Answer:

Answer 0 (score: 1)

I will address only the first question:

  When a thread accesses host memory from within a CUDA kernel, does it keep the copy engine busy? Does it block all asynchronous memory copies to and from the device in other streams?

I wrote the simple code below. It contains two kernels: kernel2 explicitly uses mapped pinned host memory, while kernel1 does not. The code uses three streams to check whether the use of mapped pinned host memory disrupts concurrency.

Here is the code:

#include <iostream>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <math.h>

using namespace std;

#define NUM_THREADS 32
#define NUM_BLOCKS 16
#define NUM_STREAMS 3

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess) 
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

/*******************************/
/* KERNEL FUNCTION - VERSION 1 */
/*******************************/
__global__ void kernel1(const int *in, int *out, int dataSize)
{
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int end =  dataSize;
    for (int i = start; i < end; i += blockDim.x * gridDim.x)
    {
        out[i] = in[i] * in[i];
    }
}

/*******************************/
/* KERNEL FUNCTION - VERSION 2 */
/*******************************/
__global__ void kernel2(const int *in, int *out, int* cnt, int dataSize)
{
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int end =  dataSize;
    for (int i = start; i < end; i += blockDim.x * gridDim.x)
    {
        out[i] = cnt[i] * in[i] * in[i];
    }
}

/********/
/* MAIN */
/********/
int main()
{
    const int dataSize = 6000000;

    // --- Host side memory allocations
    int *h_in = new int[dataSize];
    int *h_out = new int[dataSize];

    // --- Host side memory initialization
    for(int i = 0; i < dataSize; i++) h_in[i] = 5;
    for(int i = 0; i < dataSize; i++) h_out[i] = 0;

    // --- Register host memory as page-locked (required for cudaMemcpyAsync)
    gpuErrchk(cudaHostRegister(h_in, dataSize * sizeof(int), cudaHostRegisterPortable));
    gpuErrchk(cudaHostRegister(h_out, dataSize * sizeof(int), cudaHostRegisterPortable));

    // --- Device side memory allocations
    int *d_in = 0;  gpuErrchk(cudaMalloc((void**)&d_in, dataSize * sizeof(int)));
    int *d_out = 0; gpuErrchk(cudaMalloc((void**)&d_out, dataSize * sizeof(int)));

    // --- Mapped pinned host memory, accessed directly by kernel2 (zero-copy)
    int *cnt; gpuErrchk(cudaMallocHost((void**)&cnt, dataSize * sizeof(int)));
    for(int i = 0; i < dataSize; i++) cnt[i] = 2;

    int streamSize = dataSize / NUM_STREAMS;
    size_t streamMemSize = dataSize * sizeof(int) / NUM_STREAMS;

    // --- Setting kernel launch config
    dim3 nThreads = dim3(NUM_THREADS,1,1);
    dim3 nBlocks = dim3(NUM_BLOCKS,1,1);

    // --- Create CUDA streams
    cudaStream_t streams[NUM_STREAMS];
    for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamCreate(&streams[i]));

    /**********/
    /* CASE 1 */
    /**********/
    for(int i = 0; i < NUM_STREAMS; i++) {
        int offset = i * streamSize;
        gpuErrchk(cudaMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, cudaMemcpyHostToDevice, streams[i]));
    }

    for(int i = 0; i < NUM_STREAMS; i++)
    {
        int offset = i * streamSize;

        dim3 subKernelBlock = dim3((int)ceil((float)nBlocks.x / 2));

        kernel1<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset], &d_out[offset], streamSize/2);
        kernel1<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2);
    }

    for(int i = 0; i < NUM_STREAMS; i++) {
        int offset = i * streamSize;
        gpuErrchk(cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, cudaMemcpyDeviceToHost, streams[i]));
    }

    for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamSynchronize(streams[i]));

    /**********/
    /* CASE 2 */
    /**********/
    for(int i = 0; i < NUM_STREAMS; i++) {
        int offset = i * streamSize;
        gpuErrchk(cudaMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, cudaMemcpyHostToDevice, streams[i]));
    }

    for(int i = 0; i < NUM_STREAMS; i++)
    {
        int offset = i * streamSize;

        dim3 subKernelBlock = dim3((int)ceil((float)nBlocks.x / 2));

        kernel2<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset], &d_out[offset], cnt, streamSize/2);
        kernel2<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset + streamSize/2], &d_out[offset + streamSize/2], cnt, streamSize/2);
    }

    for(int i = 0; i < NUM_STREAMS; i++) {
        int offset = i * streamSize;
        gpuErrchk(cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, cudaMemcpyDeviceToHost, streams[i]));
    }

    for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamSynchronize(streams[i]));

    // --- Release resources
    gpuErrchk(cudaHostUnregister(h_in));
    gpuErrchk(cudaHostUnregister(h_out));
    gpuErrchk(cudaFreeHost(cnt));
    gpuErrchk(cudaFree(d_in));
    gpuErrchk(cudaFree(d_out));

    for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamDestroy(streams[i]));

    delete[] h_in;
    delete[] h_out;

    gpuErrchk(cudaDeviceReset());

    return 0;
}

From the timeline below, the use of mapped pinned host memory in kernel2 does not appear to disrupt concurrency. The code was tested on a GT540M card, which has a single copy engine.

[Profiler timeline image showing overlapping execution across the three streams]
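
Incidentally, the number of copy engines on a card can be checked at runtime through the asyncEngineCount field of cudaDeviceProp; a minimal sketch:

#include <cstdio>

#include "cuda_runtime.h"

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // asyncEngineCount: 1 = copies in one direction can overlap kernel execution;
    // 2 = host-to-device and device-to-host copies can additionally overlap each other.
    printf("%s: %d copy engine(s)\n", prop.name, prop.asyncEngineCount);
    return 0;
}

A GT540M should report 1 here, consistent with the single copy engine mentioned above.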