How to reduce cudaMemcpy overhead

Time: 2014-03-07 10:25:20

Tags: memory cuda gpu-programming

I have an array of size 3000 that contains only 0s and 1s. I want to find the first position in the array, starting from index 0, at which a 1 is stored. The array is computed on the device and transferred to the host, and I then find that index sequentially on the host. In my program I want to repeat this computation 4000 times or more, and I want to reduce the time this process takes. Is there any other way to do this? Since the array is actually computed on the GPU, I have to transfer it every time.

int main()
{
    for(int i=0;i<4000;i++)
    {
        cudaMemcpy(A,dev_A,sizeof(int)*3000,cudaMemcpyDeviceToHost);
        int k;
        for(k=0;k<3000;k++)
        {
            if(A[k]==1)
            {
                break;
            }
        }
        printf("got k is %d",k);
    }
}

The full code is like this:

#include "cuda.h"
#include <stdio.h>
#include <limits.h>   /* for INT_MIN / INT_MAX */
#define SIZE 2688
#define BLOCKS 14
#define THREADS 192

__global__ void kernel(int *A,int *d_pos)
{
    int thread_id=threadIdx.x+blockIdx.x*blockDim.x;
    while(thread_id<SIZE)
    {
        if(A[thread_id]==INT_MIN)
        {
            *d_pos=thread_id;
            return;
        }
        thread_id+=1;
    }
}

__global__ void kernel1(int *A,int *d_pos)
{
    int thread_id=threadIdx.x+blockIdx.x*blockDim.x;
    if(A[thread_id]==INT_MIN)
    {
        atomicMin(d_pos,thread_id);
    }
}

int main()
{
    int pos=INT_MAX,i;
    int *d_pos;
    int A[SIZE];
    int *d_A;
    for(i=0;i<SIZE;i++)
    {
        A[i]=78;
    }
    A[SIZE-1]=INT_MIN;
    cudaMalloc((void**)&d_pos,sizeof(int));
    cudaMemcpy(d_pos,&pos,sizeof(int),cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_A,sizeof(int)*SIZE);
    cudaMemcpy(d_A,A,sizeof(int)*SIZE,cudaMemcpyHostToDevice);

    cudaEvent_t start_cp1,stop_cp1;
    cudaEventCreate(&stop_cp1);
    cudaEventCreate(&start_cp1);
    cudaEventRecord(start_cp1,0);

    kernel1<<<BLOCKS,THREADS>>>(d_A,d_pos);

    cudaEventRecord(stop_cp1,0);
    cudaEventSynchronize(stop_cp1);
    float elapsedTime_cp1;
    cudaEventElapsedTime(&elapsedTime_cp1,start_cp1,stop_cp1);
    cudaEventDestroy(start_cp1);
    cudaEventDestroy(stop_cp1);
    printf("\nTime taken by kernel is  %f\n",elapsedTime_cp1);
    cudaDeviceSynchronize();

    cudaEvent_t start_cp,stop_cp;
    cudaEventCreate(&stop_cp);
    cudaEventCreate(&start_cp);
    cudaEventRecord(start_cp,0);

    cudaMemcpy(A,d_A,sizeof(int)*SIZE,cudaMemcpyDeviceToHost);

    cudaEventRecord(stop_cp,0);
    cudaEventSynchronize(stop_cp);
    float elapsedTime_cp;
    cudaEventElapsedTime(&elapsedTime_cp,start_cp,stop_cp);
    cudaEventDestroy(start_cp);
    cudaEventDestroy(stop_cp);
    printf("\ntime taken by copy of an array is  %f\n",elapsedTime_cp);

    cudaEvent_t start_cp2,stop_cp2;
    cudaEventCreate(&stop_cp2);
    cudaEventCreate(&start_cp2);
    cudaEventRecord(start_cp2,0);

    cudaMemcpy(&pos,d_pos,sizeof(int),cudaMemcpyDeviceToHost);

    cudaEventRecord(stop_cp2,0);
    cudaEventSynchronize(stop_cp2);
    float elapsedTime_cp2;
    cudaEventElapsedTime(&elapsedTime_cp2,start_cp2,stop_cp2);
    cudaEventDestroy(start_cp2);
    cudaEventDestroy(stop_cp2);
    printf("\ntime taken by copy of a variable is  %f\n",elapsedTime_cp2);

    cudaMemcpy(&pos,d_pos,sizeof(int),cudaMemcpyDeviceToHost);
    printf("\nminimum index is %d\n",pos);
    return 0;
}

How can I reduce the total time taken by this code? Are there any other performance options?

1 answer:

Answer 0: (score: 1)

If you are running the kernel 4000 times on the GPU, you probably want to use asynchronous execution of the kernels via different streams. It may also be faster to use cudaMemcpyAsync, which is non-blocking for the host (if you are executing the kernel M times).
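
To illustrate that suggestion, here is a minimal sketch (an assumption-laden rewrite, not the poster's code) of how the repeated search could be structured with pinned host memory, a few streams, and cudaMemcpyAsync, reusing the atomicMin idea from kernel1 so that only one int per iteration is copied back instead of the whole array. NSTREAMS, ITERATIONS and find_min_index are illustrative names and the counts are placeholders.

#include "cuda.h"
#include <stdio.h>
#include <limits.h>

#define SIZE 2688
#define BLOCKS 14
#define THREADS 192
#define ITERATIONS 4000   /* illustrative: the "4000 or more" repetitions from the question */
#define NSTREAMS 4        /* illustrative stream count */

/* Same idea as kernel1: reduce the first matching index on the device with atomicMin,
   so only a single int has to cross the PCIe bus per iteration. */
__global__ void find_min_index(const int *A, int *d_pos)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < SIZE && A[tid] == INT_MIN)
        atomicMin(d_pos, tid);
}

int main()
{
    int *d_A, *d_pos;
    int *h_pos, *h_init;
    cudaMalloc((void**)&d_A, SIZE * sizeof(int));
    cudaMalloc((void**)&d_pos, NSTREAMS * sizeof(int));       /* one result slot per stream */
    cudaMallocHost((void**)&h_pos, ITERATIONS * sizeof(int)); /* pinned, so async copies can overlap */
    cudaMallocHost((void**)&h_init, sizeof(int));
    *h_init = INT_MAX;

    /* Fill d_A once for the sketch; in the real program it is recomputed on the GPU each iteration. */
    int A[SIZE];
    for (int i = 0; i < SIZE; i++) A[i] = 78;
    A[SIZE - 1] = INT_MIN;
    cudaMemcpy(d_A, A, SIZE * sizeof(int), cudaMemcpyHostToDevice);

    cudaStream_t streams[NSTREAMS];
    for (int s = 0; s < NSTREAMS; s++) cudaStreamCreate(&streams[s]);

    for (int i = 0; i < ITERATIONS; i++) {
        int s = i % NSTREAMS;
        /* Reset this stream's result slot, search, and copy one int back -- all asynchronously,
           so work issued to different streams can overlap. */
        cudaMemcpyAsync(&d_pos[s], h_init, sizeof(int), cudaMemcpyHostToDevice, streams[s]);
        find_min_index<<<BLOCKS, THREADS, 0, streams[s]>>>(d_A, &d_pos[s]);
        cudaMemcpyAsync(&h_pos[i], &d_pos[s], sizeof(int), cudaMemcpyDeviceToHost, streams[s]);
    }
    cudaDeviceSynchronize();   /* wait for every stream before reading the results */

    printf("first index found in the last iteration: %d\n", h_pos[ITERATIONS - 1]);

    for (int s = 0; s < NSTREAMS; s++) cudaStreamDestroy(streams[s]);
    cudaFreeHost(h_init);
    cudaFreeHost(h_pos);
    cudaFree(d_pos);
    cudaFree(d_A);
    return 0;
}

Because the host buffer is pinned, the small device-to-host copies can overlap with kernels running in other streams, so the per-iteration transfer is largely hidden; in the original version every iteration pays for a blocking 3000-element copy plus a sequential host-side scan.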

A quick introduction to streams and asynchronous execution: https://devblogs.nvidia.com/parallelforall/how-overlap-data-transfers-cuda-cc/

Streams and concurrency: http://on-demand.gputechconf.com/gtc-express/2011/presentations/StreamsAndConcurrencyWebinar.pdf

Hope this helps...