CUDA batched Cholesky factorization

Date: 2019-03-17 06:17:42

Tags: cuda

I more or less understand how to handle 2D problems in CUDA, but batched Cholesky factorization has a 4D iteration space towards the end of the algorithm (sketched after the code below). I am attaching the Cholesky algorithm and my CUDA starting point in case anyone can give me hints.


CUDA:


Starting point:

int i, k, m, n;

// Batched Cholesky factorization.
for (i = 0; i < batch; i++) {
    float *pA = &dA[i*N*N];

    // Single Cholesky factorization.
    for (k = 0; k < N; k++) {
        // Panel factorization.
        pA[k*N+k] = sqrtf(pA[k*N+k]);

        for (m = k+1; m < N; m++)
            pA[k*N+m] /= pA[k*N+k];

        // Update of the trailing submatrix.
        for (n = k+1; n < N; n++)
            for (m = n; m < N; m++)
                pA[n*N+m] -= (pA[k*N+n]*pA[k*N+m]);
    }
}
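
For concreteness, the "4D" is the four nested loop indices (i, k, n, m). A small sketch of the flattened offset a CUDA version has to compute from its block and thread indices (the helper name is mine, for illustration only):

__host__ __device__ int flatIndex(int i, int n, int m, int N)
{
    // Element (row n, col m) of batched matrix i in the flat dA array;
    // the step index k orders the work but is not a storage dimension.
    return i*N*N + n*N + m;
}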

1 answer:

Answer 0 (score: 2):

I will leave this here without much comment; the code is relatively self-explanatory. This implementation is completely faithful to your serial version, with the following features:

  1. Each block performs exactly one factorization in the batch. Run as many blocks as there are batched matrices to factorize.
  2. Because the factorization is done entirely at block scope, synchronization between the parallel operations is possible, so the operation order of the factorization is respected.
  3. The only parallelism the algorithm exposes is within the row operations of the factorization and update phases.
  4. The block size should be an integer multiple of the warp size (32 on all CUDA-capable devices to date), rounded up from the number of rows of the batched matrices (see the sketch after this list).
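
For example, a minimal sketch of that block sizing, rounding the row count up to the next warp multiple (the helper name is my own, not part of the original code):

// Hypothetical helper: round N up to the next multiple of the warp
// size, so one thread can cover each row of a batched matrix.
int roundUpToWarp(int N)
{
    const int warp = 32;
    return ((N + warp - 1) / warp) * warp;   // e.g. N = 10 -> 32, N = 40 -> 64
}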

The code below has been only very lightly tested and is not guaranteed to work or be correct. Use at your own risk:

#include <iostream>
#include <algorithm>
#include <cmath>

__global__ 
void batchkernel(float** batches, int nbatches, int N, int LDA)
{
    if (blockIdx.x < nbatches) {
        float* pA = batches[blockIdx.x];
        for (int k = 0; k < N; k++) {

            // Panel factorization.
            if (threadIdx.x == 0)  {
                pA[k*LDA+k] = sqrtf(pA[k*LDA+k]);
            }
            __syncthreads();

            // One thread per row; the guard is on threadIdx.x, so this
            // relies on blockDim.x >= N (see point 4 above).
            for (int m = threadIdx.x; ((m < N) && (threadIdx.x > k)); m+=blockDim.x) {
                pA[k*LDA+m] /= pA[k*LDA+k];
            }
            __syncthreads();

            // Update of the trailing submatrix.
            for (int n = k+1; (n < N); n++) {
                for (int m = threadIdx.x; ((m < N) && (threadIdx.x >= n)); m+=blockDim.x) {
                    pA[n*LDA+m] -= pA[k*LDA+n] * pA[k*LDA+m];
                }
            }
            __syncthreads();
        }
    }
}
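
Note that both row loops guard on threadIdx.x rather than on the row index m, so the kernel relies on blockDim.x >= N (point 4 above). If blockDim.x < N were ever needed, a sketch (untested) of guarding on m itself, so the strided loops cover rows beyond blockDim.x:

// Panel division with the guard on m, not threadIdx.x:
for (int m = threadIdx.x; m < N; m += blockDim.x) {
    if (m > k) pA[k*LDA+m] /= pA[k*LDA+k];
}

// ...and analogously inside the trailing-submatrix update over n:
for (int m = threadIdx.x; m < N; m += blockDim.x) {
    if (m >= n) pA[n*LDA+m] -= pA[k*LDA+n] * pA[k*LDA+m];
}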

void refCholesky(float* pA, int N)
{
    int k, m, n;

    // Single Cholesky factorization.
    for (k = 0; k < N; k++) {
        // Panel factorization.
        pA[k*N+k] = sqrtf(pA[k*N+k]);

        for (m = k+1; m < N; m++)
            pA[k*N+m] /= pA[k*N+k];

        // Update of the trailing submatrix.
        for (n = k+1; n < N; n++)
            for (m = n; m < N; m++)
                pA[n*N+m] -= (pA[k*N+n]*pA[k*N+m]);
    }
}


int main()
{
    // B = np.random.random((10,10))
    // SPDmatrix = (0.5*(B+B.T)) + B.shape[0]*np.eye(B.shape[0])
    const int N = 10;
    const int LDA = 10;
    float SPDmatrix[LDA*N] = {
    10.22856331,   0.17380577,   0.61779525,   0.66592082,   0.46915566,
     0.09946502,   0.69386511,   0.35224291,   0.53155506,   0.51441469,
     0.17380577,  10.67971161,   0.34481401,   0.64766522,   0.22372943,
     0.55896022,   0.59083588,   0.48872497,   0.54049871,   0.74764959,
     0.61779525,   0.34481401,    10.229388,   0.40904432,    0.5015491,
     0.52152334,   0.19684814,   0.28262256,   0.04384535,   0.61919751,
     0.66592082,   0.64766522,   0.40904432,  10.78410647,   0.12708693,
      0.3241063,    0.6984497,   0.65074097,   0.08027563,   0.56332844,
     0.46915566,   0.22372943,    0.5015491,   0.12708693,  10.52234091,
     0.76346103,   0.80932473,    0.8234331,   0.52737611,   0.65777357,
     0.09946502,   0.55896022,   0.52152334,    0.3241063,   0.76346103,
    10.54906761,   0.32865411,   0.32467483,   0.80720007,   0.36287463,
     0.69386511,   0.59083588,   0.19684814,    0.6984497,   0.80932473,
     0.32865411,  10.29729551,   0.34707933,   0.69379356,   0.87612982,
     0.35224291,   0.48872497,   0.28262256,   0.65074097,    0.8234331,
     0.32467483,   0.34707933,  10.42929929,   0.78849458,     0.159371,  
     0.53155506,   0.54049871,   0.04384535,   0.08027563,   0.52737611,
     0.80720007,   0.69379356,   0.78849458,  10.49604818,   0.43871288,
     0.51441469,   0.74764959,   0.61919751,   0.56332844,   0.65777357,
     0.36287463,   0.87612982,     0.159371,   0.43871288,  10.94535485 };

    const int nbatches = 8;
    float** batches;
    cudaMallocManaged((void **)&batches, nbatches * sizeof(float*));

    for(int i=0; i<nbatches; i++) {
        cudaMallocManaged((void **)&batches[i], N * LDA * sizeof(float));
        cudaMemcpy(batches[i], SPDmatrix, N * LDA * sizeof(float), cudaMemcpyDefault);
    }

    int blocksz = 32;   // N = 10 rounded up to one full warp
    int nblocks = nbatches;

    batchkernel<<<nblocks, blocksz>>>(batches, nbatches, N, LDA);
    refCholesky(SPDmatrix, N);
    cudaDeviceSynchronize();

    float maxabsrelerror = 0.0f;
    for(int i = 0; i < N*N; i++) {
        float absrelerror = std::fabs(SPDmatrix[i] - batches[0][i]) / std::fabs(SPDmatrix[i]);
        maxabsrelerror = std::max(absrelerror, maxabsrelerror);
    }
    std::cout << "Maximum absolute relative error = " << maxabsrelerror << std::endl;

    cudaDeviceReset();

    return 0;
}
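
As an independent sanity check, the same batch could also be factored with cuSOLVER's batched Cholesky. A minimal sketch, untested, assuming cuSOLVER is available and linked (-lcusolver): cusolverDnSpotrfBatched factors column-major matrices in place, and with CUBLAS_FILL_MODE_LOWER it writes the column-major lower triangle, which should occupy the same flat elements as the row-major upper triangle the kernel above produces.

#include <cusolverDn.h>

// Sketch: factor the batch with cuSOLVER and inspect the per-matrix
// status codes. "batches", "nbatches", "N" and "LDA" are assumed to be
// set up as in main() above.
cusolverDnHandle_t handle;
cusolverDnCreate(&handle);

int *dInfo;   // dInfo[i] == 0 means matrix i factored successfully
cudaMallocManaged((void **)&dInfo, nbatches * sizeof(int));

cusolverDnSpotrfBatched(handle, CUBLAS_FILL_MODE_LOWER, N,
                        batches, LDA, dInfo, nbatches);
cudaDeviceSynchronize();

cusolverDnDestroy(handle);
cudaFree(dInfo);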