Solving tridiagonal linear systems in CUDA

Date: 2013-10-23 12:25:16

Tags: cuda

I am trying to implement a tridiagonal system solver based on the Cyclic Reduction method on my GTS450.

Cyclic Reduction is illustrated in the paper

Y. Zhang, J. Cohen, J.D. Owens, "Fast Tridiagonal Solvers on GPU"

However, no matter what I do, my CUDA code is much slower than the sequential one. The result for a total of 512 x 512 points is 7 ms, whereas on my i7 3.4 GHz it takes 5 ms. The GPU gives no speedup!

What could be the problem?

#include "cutrid.cuh"
 __global__ void cutrid_RC_1b(double *a,double *b,double *c,double *d,double *x)
{
 int idx_global=blockIdx.x*blockDim.x+threadIdx.x;
 int idx=threadIdx.x;

 __shared__ double asub[512];
 __shared__ double bsub[512];
 __shared__ double csub[512];
 __shared__ double dsub[512];

 double at=0;
 double bt=0;
 double ct=0;
 double dt=0;

 asub[idx]=a[idx_global];
 bsub[idx]=b[idx_global];
 csub[idx]=c[idx_global];
 dsub[idx]=d[idx_global];


 for(int stride=1;stride<N;stride*=2)
  {
    int margin_left,margin_right;
    margin_left=idx-stride;
    margin_right=idx+stride;


    at=(margin_left>=0)?(-csub[idx-stride]*asub[idx]/bsub[idx-stride]):0.f; 

    bt=bsub[idx]+((margin_left>=0)?(-csub[idx-stride]*asub[idx]/bsub[idx-stride]):0.f)
    -((margin_right<512)?asub[idx+stride]*csub[idx]/bsub[idx+stride]:0.f); 

    ct=(margin_right<512)?(-csub[idx+stride]*asub[idx]/bsub[idx+stride]):0.f; 

    dt=dsub[idx]+((margin_left>=0)?(-dsub[idx-stride]*asub[idx]/bsub[idx-stride]):0.f)
    -((margin_right<512)?dsub[idx+stride]*csub[idx]/bsub[idx+stride]:0.f); 

    __syncthreads();
    asub[idx]=at;
    bsub[idx]=bt;
    csub[idx]=ct;
    dsub[idx]=dt;
    __syncthreads();
  }


x[idx_global]=dsub[idx]/bsub[idx];

}/*}}}*/

I launch this kernel as cutrid_RC_1b<<<512,512>>>(d_a,d_b,d_c,d_d,d_x), and I reach 100% device occupancy. This result has puzzled me for days.
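A minimal sketch of how the kernel time can be measured with CUDA events (assuming the device arrays d_a..d_x are already allocated and filled; error checking omitted for brevity):

    cudaEvent_t start, stop;
    cudaEventCreate(&start); cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    cutrid_RC_1b<<<512, 512>>>(d_a, d_b, d_c, d_d, d_x);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);                  // wait for the kernel to finish

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);      // elapsed time in milliseconds
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start); cudaEventDestroy(stop);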

An improved version of my code follows:

    #include "cutrid.cuh"
    __global__ void cutrid_RC_1b(float *a,float *b,float *c,float *d,float *x)
    {/*{{{*/
     int idx_global=blockIdx.x*blockDim.x+threadIdx.x;
     int idx=threadIdx.x;

     __shared__ float asub[512];
     __shared__ float bsub[512];
     __shared__ float csub[512];
     __shared__ float dsub[512];

    asub[idx]=a[idx_global];
    bsub[idx]=b[idx_global];
    csub[idx]=c[idx_global];
    dsub[idx]=d[idx_global];
 __syncthreads();
   //Reduction  
    for(int stride=1;stride<512;stride*=2)
    {
        int margin_left=(idx-stride);
        int margin_right=(idx+stride);
        if(margin_left<0) margin_left=0;
        if(margin_right>=512) margin_right=511;
        float tmp1 = asub[idx] / bsub[margin_left];
        float tmp2 = csub[idx] / bsub[margin_right];
        float tmp3 = dsub[margin_right];
        float tmp4 = dsub[margin_left];
        __syncthreads();

        dsub[idx] = dsub[idx] - tmp4*tmp1-tmp3*tmp2;
        bsub[idx] = bsub[idx]-csub[margin_left]*tmp1-asub[margin_right]*tmp2;

        tmp3 = -csub[margin_right]; 
        tmp4 = -asub[margin_left];

        __syncthreads();
        asub[idx] = tmp3*tmp1;
        csub[idx] = tmp4*tmp2;
        __syncthreads();
     }

        x[idx_global]=dsub[idx]/bsub[idx];

    }/*}}}*/

The speed improved to 0.73 ms for 512 x 512 systems on a Quadro K4000, but the code from the above-mentioned paper runs in 0.5 ms on a GTX280.

3 Answers:

Answer 0 (score: 6)

Solving a tridiagonal system of equations is a challenging parallel problem, since the classical solution scheme, i.e., Gaussian elimination, is inherently sequential.
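This is a minimal sketch of that classical scheme (the Thomas algorithm, a specialization of Gaussian elimination to tridiagonal matrices; the function and variable names here are my own): every forward step needs the result of the previous row, so the loop cannot be parallelized directly.

    #include <stdlib.h>

    // Solves a tridiagonal system: lower diagonal a (a[0] unused), main
    // diagonal b, upper diagonal c (c[n-1] unused), right-hand side d.
    // The solution is written to x; d and the scratch array cp are overwritten.
    void thomas(const double *a, const double *b, const double *c,
                double *d, double *x, int n)
    {
        double *cp = (double *)malloc(n * sizeof(double));
        cp[0] = c[0] / b[0];
        d[0]  = d[0] / b[0];
        for (int i = 1; i < n; i++) {
            double m = 1.0 / (b[i] - a[i] * cp[i - 1]);   // depends on row i-1
            cp[i] = c[i] * m;
            d[i]  = (d[i] - a[i] * d[i - 1]) * m;
        }
        x[n - 1] = d[n - 1];
        for (int i = n - 2; i >= 0; i--)                  // backward sweep
            x[i] = d[i] - cp[i] * x[i + 1];
        free(cp);
    }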

Cyclic Reduction consists of two phases:

  1. Forward reduction. The original system is split into two independent tridiagonal systems for two sets of unknowns, those with odd index and those with even index. Each such system can be solved independently, so this step can be regarded as the first of a divide et impera scheme. The two smaller systems are split again in the same way into two subsystems each, and the process is repeated until a system of only 2 equations is reached. The update formulas for a single reduction step are sketched right after this list.
  2. Backward substitution. The 2-equation system is solved first. Then the divide et impera structure is climbed back up by solving the subsystems independently on different cores.
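In formulas, one forward-reduction step at stride $s$ updates equation $i$ as follows (this is the standard CR update; the factors $\alpha_i$ and $\gamma_i$ appear as tmp1 and tmp2 in the code further below):

$$\alpha_i = \frac{a_i}{b_{i-s}}, \qquad \gamma_i = \frac{c_i}{b_{i+s}}$$

$$a_i' = -a_{i-s}\,\alpha_i, \qquad c_i' = -c_{i+s}\,\gamma_i$$

$$b_i' = b_i - c_{i-s}\,\alpha_i - a_{i+s}\,\gamma_i, \qquad d_i' = d_i - d_{i-s}\,\alpha_i - d_{i+s}\,\gamma_i$$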
I'm not sure (but correct me if I'm wrong) that your code will return consistent results. N does not appear to be defined. Also, you are accessing csub[idx-stride], but I'm not sure what that means when idx==0 and stride>1. Furthermore, you are using several conditional statements, essentially for boundary checking. Finally, your code lacks a proper thread structure capable of dealing with the mentioned divide et impera scheme, conceptually much like the one used in the CUDA SDK reduction samples.

As mentioned in one of the comments above, I remembered that at tridiagonalsolvers you can find an implementation of the Cyclic Reduction scheme for solving tridiagonal equation systems. Browsing the related Google pages, it seems to me that the code is maintained by, among others, the first author of the above paper (Yao Zhang). The code is copied and pasted below. Note that the boundary check is done only once (if (iRight >= systemSize) iRight = systemSize - 1;), thus limiting the number of conditional statements involved. Note also the thread structure capable of dealing with the divide et impera scheme.

The code by Zhang, Cohen, and Owens

    template <class T>   // element type (e.g., float or double)
    __global__ void crKernel(T *d_a, T *d_b, T *d_c, T *d_d, T *d_x)
    {
       int thid = threadIdx.x;
       int blid = blockIdx.x;
    
       int stride = 1;
    
       int numThreads = blockDim.x;
       const unsigned int systemSize = blockDim.x * 2;
    
       int iteration = (int)log2(T(systemSize/2));
       #ifdef GPU_PRINTF 
        if (thid == 0 && blid == 0) printf("iteration = %d\n", iteration);
       #endif
    
       __syncthreads();
    
       extern __shared__ char shared[];
    
       T* a = (T*)shared;
       T* b = (T*)&a[systemSize];
       T* c = (T*)&b[systemSize];
       T* d = (T*)&c[systemSize];
       T* x = (T*)&d[systemSize];
    
       a[thid] = d_a[thid + blid * systemSize];
       a[thid + blockDim.x] = d_a[thid + blockDim.x + blid * systemSize];
    
       b[thid] = d_b[thid + blid * systemSize];
       b[thid + blockDim.x] = d_b[thid + blockDim.x + blid * systemSize];
    
       c[thid] = d_c[thid + blid * systemSize];
       c[thid + blockDim.x] = d_c[thid + blockDim.x + blid * systemSize];
    
       d[thid] = d_d[thid + blid * systemSize];
       d[thid + blockDim.x] = d_d[thid + blockDim.x + blid * systemSize];
    
       __syncthreads();
    
       //forward elimination
       for (int j = 0; j <iteration; j++)
       {
           __syncthreads();
           stride *= 2;
           int delta = stride/2;
    
        if (threadIdx.x < numThreads)
        {
            int i = stride * threadIdx.x + stride - 1;
            int iLeft = i - delta;
            int iRight = i + delta;
            if (iRight >= systemSize) iRight = systemSize - 1;
            T tmp1 = a[i] / b[iLeft];
            T tmp2 = c[i] / b[iRight];
            b[i] = b[i] - c[iLeft] * tmp1 - a[iRight] * tmp2;
            d[i] = d[i] - d[iLeft] * tmp1 - d[iRight] * tmp2;
            a[i] = -a[iLeft] * tmp1;
            c[i] = -c[iRight] * tmp2;
        }
           numThreads /= 2;
       }
    
       if (thid < 2)
       {
         int addr1 = stride - 1;
         int addr2 = 2 * stride - 1;
         T tmp3 = b[addr2]*b[addr1]-c[addr1]*a[addr2];
         x[addr1] = (b[addr2]*d[addr1]-c[addr1]*d[addr2])/tmp3;
         x[addr2] = (d[addr2]*b[addr1]-d[addr1]*a[addr2])/tmp3;
       }
    
       // backward substitution
       numThreads = 2;
       for (int j = 0; j <iteration; j++)
       {
           int delta = stride/2;
           __syncthreads();
           if (thid < numThreads)
           {
               int i = stride * thid + stride/2 - 1;
               if(i == delta - 1)
                     x[i] = (d[i] - c[i]*x[i+delta])/b[i];
               else
                     x[i] = (d[i] - a[i]*x[i-delta] - c[i]*x[i+delta])/b[i];
            }
            stride /= 2;
            numThreads *= 2;
         }
    
       __syncthreads();
    
       d_x[thid + blid * systemSize] = x[thid];
       d_x[thid + blockDim.x + blid * systemSize] = x[thid + blockDim.x];
    

    }
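A sketch of how this kernel might be launched (the grid/block sizing follows the indexing inside the kernel; numSystems and the d_* array names are my assumptions): each block solves one system using systemSize/2 threads, and the five shared arrays a, b, c, d, x need 5 * systemSize * sizeof(T) bytes of dynamic shared memory.

    const int systemSize = 512;                        // size of each tridiagonal system
    const int numSystems = 512;                        // one system per block
    size_t smemBytes = 5 * systemSize * sizeof(float); // shared arrays a, b, c, d, x
    crKernel<float><<<numSystems, systemSize / 2, smemBytes>>>(d_a, d_b, d_c, d_d, d_x);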

Answer 1 (score: 2)

I would like to add that tridiagonal systems can be easily solved in the framework of the cuSPARSE library by means of the function

cusparse<t>gtsv()

cuSPARSE also provides

cusparse<t>gtsv_nopivot()

which, at variance with the routine mentioned first, does not perform pivoting. Both of the above functions solve the same linear system with multiple right-hand sides. The batched routine

cusparse<t>gtsvStridedBatch()

also exists to solve several linear systems at once.

For all the above routines, the system matrix is fixed by simply specifying the lower diagonal, the main diagonal, and the upper diagonal.
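For instance, a batched call might look like this (a sketch with my own names, assuming double precision and batchCount systems of size m stored one after another, batchStride elements apart):

    const int m = 512;              // size of each system
    const int batchCount  = 512;    // number of systems to solve
    const int batchStride = m;      // distance between consecutive systems
    // d_dl, d_d, d_du, d_x are device arrays of batchCount * batchStride doubles
    cusparseSafeCall(cusparseDgtsvStridedBatch(handle, m, d_dl, d_d, d_du,
                                               d_x, batchCount, batchStride));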

Below, I report a fully worked example using cusparse<t>gtsv() to solve a tridiagonal linear system.

#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>

#include <cuda_runtime.h>
#include <cusparse_v2.h>

/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) { exit(code); }
   }
}

extern "C" void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }

/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(cusparseStatus_t error)
{
    switch (error)
    {

        case CUSPARSE_STATUS_SUCCESS:
            return "CUSPARSE_STATUS_SUCCESS";

        case CUSPARSE_STATUS_NOT_INITIALIZED:
            return "CUSPARSE_STATUS_NOT_INITIALIZED";

        case CUSPARSE_STATUS_ALLOC_FAILED:
            return "CUSPARSE_STATUS_ALLOC_FAILED";

        case CUSPARSE_STATUS_INVALID_VALUE:
            return "CUSPARSE_STATUS_INVALID_VALUE";

        case CUSPARSE_STATUS_ARCH_MISMATCH:
            return "CUSPARSE_STATUS_ARCH_MISMATCH";

        case CUSPARSE_STATUS_MAPPING_ERROR:
            return "CUSPARSE_STATUS_MAPPING_ERROR";

        case CUSPARSE_STATUS_EXECUTION_FAILED:
            return "CUSPARSE_STATUS_EXECUTION_FAILED";

        case CUSPARSE_STATUS_INTERNAL_ERROR:
            return "CUSPARSE_STATUS_INTERNAL_ERROR";

        case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
            return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";

        case CUSPARSE_STATUS_ZERO_PIVOT:
            return "CUSPARSE_STATUS_ZERO_PIVOT";
    }

    return "<unknown>";
}

inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line)
{
    if(CUSPARSE_STATUS_SUCCESS != err) {
        fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n",
                file, line, err, _cusparseGetErrorEnum(err));
        cudaDeviceReset(); assert(0);
    }
}

extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }

/********/
/* MAIN */
/********/
int main()
{
    // --- Initialize cuSPARSE
    cusparseHandle_t handle;    cusparseSafeCall(cusparseCreate(&handle));

    const int N     = 5;        // --- Size of the linear system

    // --- Lower diagonal, diagonal and upper diagonal of the system matrix
    double *h_ld = (double*)malloc(N * sizeof(double));
    double *h_d  = (double*)malloc(N * sizeof(double));
    double *h_ud = (double*)malloc(N * sizeof(double));

    h_ld[0]     = 0.;
    h_ud[N-1]   = 0.;
    for (int k = 0; k < N - 1; k++) {
        h_ld[k + 1] = -1.;
        h_ud[k]     = -1.;
    }

    for (int k = 0; k < N; k++) h_d[k] = 2.;

    double *d_ld;   gpuErrchk(cudaMalloc(&d_ld, N * sizeof(double)));
    double *d_d;    gpuErrchk(cudaMalloc(&d_d,  N * sizeof(double)));
    double *d_ud;   gpuErrchk(cudaMalloc(&d_ud, N * sizeof(double)));

    gpuErrchk(cudaMemcpy(d_ld, h_ld, N * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_d,  h_d,  N * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_ud, h_ud, N * sizeof(double), cudaMemcpyHostToDevice));

    // --- Allocating and defining dense host and device data vectors
    double *h_x = (double *)malloc(N * sizeof(double)); 
    h_x[0] = 100.0;  h_x[1] = 200.0; h_x[2] = 400.0; h_x[3] = 500.0; h_x[4] = 300.0;

    double *d_x;        gpuErrchk(cudaMalloc(&d_x, N * sizeof(double)));   
    gpuErrchk(cudaMemcpy(d_x, h_x, N * sizeof(double), cudaMemcpyHostToDevice));

    // --- cusparseDgtsv works in place: on exit, d_x is overwritten with the solution
    cusparseSafeCall(cusparseDgtsv(handle, N, 1, d_ld, d_d, d_ud, d_x, N));

    gpuErrchk(cudaMemcpy(h_x, d_x, N * sizeof(double), cudaMemcpyDeviceToHost));
    for (int k=0; k<N; k++) printf("%f\n", h_x[k]);
}
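For reference, solving this 5 x 5 system by hand with the Thomas algorithm gives approximately x = (633.33, 1166.67, 1500.00, 1433.33, 866.67), which the printed output should match up to floating-point rounding.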

At this gitHub repository, a comparison of different CUDA routines available in the cuSOLVER library for the solution of tridiagonal linear systems is reported.

Answer 2 (score: 0)

Things I see:

  1. The first __syncthreads() seems redundant.

  2. There are repeated sets of operations in your code, e.g., (-csub[idx-stride]*asub[idx]/bsub[idx-stride]). Use intermediate variables to hold the results and reuse them, instead of making the GPU compute those sets each time; see the sketch after this list.

  3. Use the NVIDIA profiler to see where the problems are.
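For point 2, this is a sketch of the hoisting applied to the first kernel (my variable names; the updates follow the standard CR formulas as in the Zhang, Cohen, and Owens kernel above, with the boundary indices clamped as in the improved version, which relies on asub[0] and csub[511] staying zero):

    // clamp the neighbor indices once instead of testing margins four times
    int left  = max(idx - stride, 0);
    int right = min(idx + stride, 511);

    double alpha = asub[idx] / bsub[left];    // computed once...
    double gamma = csub[idx] / bsub[right];   // ...and reused below

    at = -asub[left]  * alpha;
    ct = -csub[right] * gamma;
    bt = bsub[idx] - csub[left] * alpha - asub[right] * gamma;
    dt = dsub[idx] - dsub[left] * alpha - dsub[right] * gamma;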