Intel Xeon Phi上的快速popcount

时间:2013-04-23 08:26:47

标签: c vectorization xeon-phi intel-mic hammingweight

我正在英特尔®至强融合™协处理器(Xeon Phi)上实现超快速的 popcount(位计数),因为它是各种生物信息学软件的性能热点。

我已经实现了五段代码,

#if defined(__MIC__)
#include <zmmintrin.h>
/* 16-entry lookup table: POPCOUNT_4bit[v] = number of set bits in the 4-bit
 * value v. 64-byte aligned so it loads with a single _mm512_load_epi32. */
__attribute__((align(64))) static const uint32_t POPCOUNT_4bit[16] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
/* 0x0000000F replicated across all 16 dword lanes; used to isolate one
 * nibble of every lane before indexing into POPCOUNT_4bit. */
__attribute__((align(64))) static const uint32_t MASK_4bit[16] = {0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF};
/* vpu_popcount1: nibble-LUT popcount over n uint64_t words.
 * Per iteration it loads 512 bits (16 dword lanes = 8 qwords), looks up the
 * bit count of each of the eight 4-bit nibbles of every lane via a
 * permute against POPCOUNT_4bit, and horizontally reduces the 16 lane sums
 * into the scalar accumulator.
 * Assumes n is a multiple of 8 and buf is 64-byte aligned (required by
 * _mm512_load_epi32) -- TODO confirm at call sites.
 * Returns the total number of set bits in buf[0..n-1].
 */
inline uint64_t vpu_popcount1(uint64_t* buf, size_t n)  {
    register size_t result = 0;
    size_t i;
    register const __m512i popcnt = _mm512_load_epi32((void*)POPCOUNT_4bit); // nibble->count LUT
    register const __m512i mask = _mm512_load_epi32((void*)MASK_4bit);       // 0xF per lane
    register __m512i total;
    register __m512i shuf;

#pragma unroll(8)
    for (i = 0; i < n; i+=8) {
        shuf = _mm512_load_epi32(&buf[i]);
        /* NOTE(review): prefetch distances are in uint64_t elements
         * (i+256 -> 2 KiB ahead, i+64 -> 512 B ahead); the answer below
         * suggests these may have been intended as byte offsets -- verify. */
        _mm_prefetch((const char *)&buf[i+256], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
        total = _mm512_setzero_epi32();

        /* Accumulate the LUT result for each of the eight nibble positions. */
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(shuf, mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 4),  mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 8),  mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 12), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 16), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 20), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 24), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 28), mask), popcnt), total);

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, thus
           emulated by multiple instructions. Reducing every iteration
           avoids 32-bit lane overflow but pays the multi-instruction
           horizontal-sum cost once per 64 bytes of input. */
        result += _mm512_reduce_add_epi32(total);
    }

    return result;
}

/* 64-byte-aligned broadcast masks for the "Hacker's Delight" parallel bit
 * count. Each run of 16 identical dwords fills one 512-bit VPU register:
 *   magic+ 0: 0x55555555   magic+16: 0x33333333   magic+32: 0x0F0F0F0F
 *   magic+48: 0x00FF00FF   magic+64: 0x0000FFFF   magic+80: 0x000000FF
 * (Line-continuation backslashes are unnecessary inside an initializer
 * list and have been dropped.) */
__attribute__((align(64))) static const unsigned magic[] = {
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF
    };

/* vpu_popcount2: vectorized "Hacker's Delight" parallel bit count over n
 * uint64_t words. Each 512-bit load is treated as 16 independent 32-bit
 * lanes; the classic shift/mask network reduces every lane to its popcount,
 * which is then horizontally summed into the scalar accumulator.
 * Assumes n is a multiple of 8 and buf is 64-byte aligned -- TODO confirm.
 * Returns the total number of set bits in buf[0..n-1].
 */
inline uint64_t vpu_popcount2(uint64_t* buf, size_t n)  {
    register size_t result = 0;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));  // 0x55555555 per lane
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16)); // 0x33333333 per lane
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32)); // 0x0F0F0F0F per lane
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48)); // 0x00FF00FF per lane
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64)); // 0x0000FFFF per lane
    register __m512i total;
    register __m512i shuf;

#pragma unroll(8)
    for (i = 0; i < n; i+=8) {
        shuf = _mm512_load_epi32(&buf[i]);
        _mm_prefetch((const char *)&buf[i+512], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
        /* x -= (x >> 1) & 0x5555...: sums of adjacent bit pairs */
        total = _mm512_sub_epi32(shuf, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf,1)));
        /* sums within 4-bit fields */
        total = _mm512_add_epi32(_mm512_and_epi32(B1, total), _mm512_and_epi32(B1,_mm512_srli_epi32(total,2)));
        /* sums within 8-bit fields */
        total = _mm512_and_epi32(B2, _mm512_add_epi32(total, _mm512_srli_epi32(total,4)));
        /* sums within 16-bit fields */
        total = _mm512_and_epi32(B3, _mm512_add_epi32(total, _mm512_srli_epi32(total,8)));
        /* full popcount of each 32-bit lane */
        total = _mm512_and_epi32(B4, _mm512_add_epi32(total, _mm512_srli_epi32(total,16)));

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, thus
           emulated by multiple instructions */
        result += _mm512_reduce_add_epi32(total);
    }

    return result;
}

/* vpu_popcount3: hybrid vector + scalar popcount over n uint64_t words.
 * Per iteration of 16 qwords: buf[i..i+7] go through the vectorized
 * Hacker's-Delight network while buf[i+8..i+15] are counted with the scalar
 * popcnt intrinsic. The scalar ops are deliberately interleaved between
 * vector ops so the in-order core can pair them (one vector + one scalar
 * issue per cycle, per the discussion below) -- do not reorder casually.
 * Assumes n is a multiple of 16 and buf is 64-byte aligned -- TODO confirm.
 */
inline uint64_t vpu_popcount3(uint64_t* buf, size_t n)  {
    register size_t result = 0;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));  // 0x55555555 per lane
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16)); // 0x33333333 per lane
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32)); // 0x0F0F0F0F per lane
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48)); // 0x00FF00FF per lane
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64)); // 0x0000FFFF per lane
    register __m512i total;
    register __m512i shuf;

#pragma unroll(4)
    for (i = 0; i < n; i+=16) {
        shuf = _mm512_load_epi32(&buf[i]);
        result += _mm_countbits_64(buf[i+8]);
        _mm_prefetch((const char *)&buf[i+512], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+576], _MM_HINT_T1); // vprefetch1
        result += _mm_countbits_64(buf[i+9]);
        _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+128], _MM_HINT_T0); // vprefetch0
        total = _mm512_sub_epi32(shuf, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf,1)));
        result += _mm_countbits_64(buf[i+10]);
        total = _mm512_add_epi32(_mm512_and_epi32(B1, total), _mm512_and_epi32(B1,_mm512_srli_epi32(total,2)));
        result += _mm_countbits_64(buf[i+11]);
        total = _mm512_and_epi32(B2, _mm512_add_epi32(total, _mm512_srli_epi32(total,4)));
        result += _mm_countbits_64(buf[i+12]);
        total = _mm512_and_epi32(B3, _mm512_add_epi32(total, _mm512_srli_epi32(total,8)));
        result += _mm_countbits_64(buf[i+13]);
        total = _mm512_and_epi32(B4, _mm512_add_epi32(total, _mm512_srli_epi32(total,16)));
        result += _mm_countbits_64(buf[i+14]);

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, thus
           emulated by multiple instructions */
        result += _mm512_reduce_add_epi32(total);
        result += _mm_countbits_64(buf[i+15]);
    }

    return result;
}

/* Scalar popcount via the VPU/SSE machine intrinsic; CPUs without such an
 * instruction fall back to the compiler's implementation, whose speed
 * depends on the compiler. */
/* Count the set bits in an array of n 32-bit words with the scalar popcnt
 * intrinsic; explicit prefetches keep the streaming reads ahead of use. */
static inline size_t scalar_popcountu(unsigned *buf, size_t n) {
  register size_t total = 0;
  size_t idx;
#pragma vector always
#pragma unroll(8)
  for (idx = 0; idx < n; idx++) {
    _mm_prefetch((const char *)&buf[idx+512], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[idx+64], _MM_HINT_T0); // vprefetch0
    total += _mm_countbits_32(buf[idx]);
  }
  return total;
}

/* Count the set bits in an array of n 64-bit words with the scalar popcnt
 * intrinsic; explicit prefetches keep the streaming reads ahead of use. */
static inline size_t scalar_popcountlu(uint64_t *buf, size_t n) {
  register size_t total = 0;
  size_t idx;
#pragma vector always
#pragma unroll(8)
  for (idx = 0; idx < n; idx++) {
    _mm_prefetch((const char *)&buf[idx+512], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[idx+64], _MM_HINT_T0); // vprefetch0
    total += _mm_countbits_64(buf[idx]);
  }
  return total;
}
#endif

可以从https://www.dropbox.com/sh/b3sfqps19wa2oi4/iFQ9wQ1NTg

下载包含OpenMP支持的代码

使用命令:

使用英特尔C / C ++编译器XE 13编译代码
icc -debug inline-debug-info -O3 -mmic -fno-alias -ansi-alias -opt-streaming-stores always -ipo popcnt-mmic.cpp -o popcnt-mmic -vec-report=2 -openmp

代码在协处理器(61 个核心)上以 122 个线程本地(native)运行,线程亲和性通过环境变量设置为 “balanced”:

export OMP_NUM_THREADS=122;export KMP_AFFINITY=balanced

我使用的是 Xeon Phi SE10p(B1 步进)和 CentOS 6.4,测试 28 MB 由 rand() 填充的数据并迭代 10000 次,性能如下:

Buffer allocated at: 0x7f456b000000
OpenMP scalar_popcountu       4310169 us; cnt = 28439328
OpenMP scalar_popcountlu      1421139 us; cnt = 28439328
OpenMP vpu_popcount           1489992 us; cnt = 28439328
OpenMP vpu_popcount2          1109530 us; cnt = 28439328
OpenMP vpu_popcount3           951122 us; cnt = 28439328

“scalar_popcountu”和“scalar_popcountlu”分别使用“_mm_countbits_32”和“_mm_countbits_64”内在函数,它们使用标量“popcnt”指令。设置“#pragma vector always”要求编译器将加载和求和向量化为16个无符号整数或8个无符号长整数,尽管popcount本身仍然是标量指令。

vpu_popcount1的实现类似于SSSE3 popcount实现http://wm.ite.pl/articles/sse-popcount.html。但是,1)Xeon Phi不支持整数上的压缩字节操作(最小值是双字,又称32位)和2)它不实现“绝对差值的压缩总和”指令(如SSSE3中的_mm_sad_epu8),因此减少添加是由四组“vpermf32x4”,“vpaddd”和“movslq”的组合执行的。因此,实现生成的指令比原始SSSE3版本多得多。

vpu_popcount2的实现类似于SSE2 popcount实现(可以参考“Hacker's Delight”)。该实现生成的指令少于vpu_popcount1,并且速度提高了约30%。然而,繁琐的“减少添加”仍然是无法避免的。

vpu_popcount3的实现是非常针对Xeon Phi的。通过向量和标量运算的混合,它比vpu_popcount2快15%(标量运算穿插在向量运算之间的位置在我的实现中是随意的,可以根据编译器生成的汇编代码重新安排标量运算,但就我的尝试而言,预期的改进是有限的)。该改进基于以下观察:1)Xeon Phi是顺序(in-order)调度,2)每个时钟周期可以发射两条标量指令或“1条向量+1条标量”指令。我将展开从8减少到4以避免寄存器文件饱和。

在每个函数中,提前 8 次循环迭代把数据从内存显式预取到 L2,并提前 1 次循环迭代从 L2 显式预取到 L1,这把 L1 命中率从 0.38 提高到了 0.994。

展开会使性能提高约15%。这是违反直觉的,因为Xeon Phi是有序调度。但unroll使icc编译器能够尽可能多地进行编译时调度。

我们是否有更多技术来提升表现?

Brian Nickerson的两段更快的代码,

OpenMP vpu_popcount2          1110737 us; cnt = 28439328
OpenMP vpu_popcount3           951459 us; cnt = 28439328
OpenMP vpu_popcount3_r         815126 us; cnt = 28439328
OpenMP vpu_popcount5           746852 us; cnt = 28439328

vpu_popcount3_revised:

/* vpu_popcount3_revised: all-vector popcount with two independent
 * accumulator chains (total0/total1, result0/result1) so consecutive vector
 * ops don't stall the in-order pipeline, and a single horizontal reduction
 * after the loop instead of one per iteration.
 * The prefetch prologue warms L1 with the first two cache lines and L2 with
 * the next fourteen before the steady-state loop takes over.
 * Assumes n is a multiple of 16 and buf is 64-byte aligned -- TODO confirm.
 * NOTE(review): the 32-bit lane accumulators only overflow for extremely
 * large inputs (each lane gains at most 32 per iteration) -- confirm
 * intended buffer sizes stay well below that.
 */
inline uint64_t vpu_popcount3_revised(uint64_t* buf, size_t n) {
  _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
  _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
  _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
  register size_t result;
  size_t i;

  register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));  // 0x55555555 per lane
  register const __m512i B1 = _mm512_load_epi32((void*)(magic+16)); // 0x33333333 per lane
  register const __m512i B2 = _mm512_load_epi32((void*)(magic+32)); // 0x0F0F0F0F per lane
  register const __m512i B3 = _mm512_load_epi32((void*)(magic+48)); // 0x00FF00FF per lane
  register const __m512i B4 = _mm512_load_epi32((void*)(magic+64)); // 0x0000FFFF per lane
  register __m512i total0;
  register __m512i total1;
  register __m512i shuf0;
  register __m512i shuf1;
  register __m512i result0;
  register __m512i result1;

  result0 = _mm512_setzero_epi32();
  result1 = _mm512_setzero_epi32();

  for (i = 0; i < n; i+=16) {
      shuf0 = _mm512_load_epi32(&buf[i  ]);
      shuf1 = _mm512_load_epi32(&buf[i+8]);
      _mm_prefetch((const char *)&buf[i+128], _MM_HINT_T1); // vprefetch1
      _mm_prefetch((const char *)&buf[i+136], _MM_HINT_T1); // vprefetch1
      _mm_prefetch((const char *)&buf[i+16], _MM_HINT_T0); // vprefetch0
      _mm_prefetch((const char *)&buf[i+24], _MM_HINT_T0); // vprefetch0
      /* Hacker's-Delight network, run on both halves in lockstep. */
      total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));
      total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
      total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2)));
      total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
      total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));
      total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
      total0 = _mm512_and_epi32(B3, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8)));
      total1 = _mm512_and_epi32(B3, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8)));
      total0 = _mm512_and_epi32(B4, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));
      total1 = _mm512_and_epi32(B4, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
      /* Per-lane accumulation only; the expensive reduce happens once below. */
      result0 = _mm512_add_epi32(result0,total0);
      result1 = _mm512_add_epi32(result1,total1);

  }

  result0 = _mm512_add_epi32(result0,result1);
  result  = _mm512_reduce_add_epi32(result0);

  return result;
}

vpu_popcount5:

/* vpu_popcount5: four-way unrolled vector popcount (32 qwords / iteration).
 * After the 4-bit stage the four partial vectors are merged pairwise, which
 * is safe because byte fields hold at most 8+8=16, then 8/16-bit shift-adds
 * finish the per-lane count (max 64 per element, masked with B6=0x000000FF).
 * A single horizontal reduction runs after the loop.
 * Assumes n is a multiple of 32 and buf is 64-byte aligned -- TODO confirm.
 *
 * Fix: the original loaded B3 (0x00FF00FF) and B4 (0x0000FFFF) but never
 * used them -- two vector registers wasted on a core where the text above
 * explicitly worries about register-file saturation. The unused loads are
 * removed; only B0, B1, B2 and B6 are referenced by the loop body.
 */
inline uint64_t vpu_popcount5(uint64_t* buf, size_t n)  {
    /* Prefetch prologue: first two lines into L1, the next lines into L2. */
    _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[128], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[136], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[144], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[152], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[160], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[168], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[176], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[184], _MM_HINT_T1); // vprefetch1
    register size_t result;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));  // 0x55555555 per lane
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16)); // 0x33333333 per lane
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32)); // 0x0F0F0F0F per lane
    register const __m512i B6 = _mm512_load_epi32((void*)(magic+80)); // 0x000000FF per lane
    register __m512i total0;
    register __m512i total1;
    register __m512i total2;
    register __m512i total3;
    register __m512i shuf0;
    register __m512i shuf1;
    register __m512i shuf2;
    register __m512i shuf3;
    register __m512i result0;
    register __m512i result1;

    result0 = _mm512_setzero_epi32();
    result1 = _mm512_setzero_epi32();

    for (i = 0; i < n; i+=32) {
        shuf0 = _mm512_load_epi32(&buf[i   ]);
        shuf1 = _mm512_load_epi32(&buf[i+ 8]);
        shuf2 = _mm512_load_epi32(&buf[i+16]);
        shuf3 = _mm512_load_epi32(&buf[i+24]);
        _mm_prefetch((const char *)&buf[i+192], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+200], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+208], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+216], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+32], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+40], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+48], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+56], _MM_HINT_T0); // vprefetch0
        total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));                        //  max value in nn is 10
        total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
        total2 = _mm512_sub_epi32(shuf2, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf2,1)));
        total3 = _mm512_sub_epi32(shuf3, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf3,1)));
        total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2))); //  max value in nnnn is 0100
        total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
        total2 = _mm512_add_epi32(_mm512_and_epi32(B1, total2), _mm512_and_epi32(B1,_mm512_srli_epi32(total2,2)));
        total3 = _mm512_add_epi32(_mm512_and_epi32(B1, total3), _mm512_and_epi32(B1,_mm512_srli_epi32(total3,2)));
        total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));                      //  max value in 0000nnnn is 00001000
        total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
        total2 = _mm512_and_epi32(B2, _mm512_add_epi32(total2, _mm512_srli_epi32(total2,4)));
        total3 = _mm512_and_epi32(B2, _mm512_add_epi32(total3, _mm512_srli_epi32(total3,4)));
        total0 = _mm512_add_epi32(total0, total1);                                                                 //  max value in 000nnnnn is 00010000
        total1 = _mm512_add_epi32(total2, total3);
        total0 = _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8));                                            //  max value in xxxxxxxx00nnnnnn is 00100000
        total1 = _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8));
        total0 = _mm512_and_epi32(B6, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));                     //  max value in each element is 01000000, i.e. 64
        total1 = _mm512_and_epi32(B6, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
        result0 = _mm512_add_epi32(result0,total0);
        result1 = _mm512_add_epi32(result1,total1);
    }

    /* Single horizontal reduction outside the hot loop. */
    result0 = _mm512_add_epi32(result0,result1);
    result  = _mm512_reduce_add_epi32(result0);

    return result;
}

2 个答案:

答案 0 :(得分:2)

自昨天发布以来,我已经能够在自己的卡上运行您的代码和我的建议。我没有得到与你完全相同的时序,可能是由于我的硬件步进,也许与我的编译器版本有关。但这种趋势仍然存在,我的建议似乎提升了大约15%的性能提升。

我得到了额外的小性能提升,在5%到10%之间,稍微调整一下,如下面的代码所示。请注意,在以下代码段中,B6将每个元素设置为0x000000FF。在这一点上,我认为该算法可能非常接近从GDDR到L2缓存的最大可持续带宽。

(补充说明:支持这一论断的证据是:如果我把 popcount5 函数的主体改成在一个循环中连续重复十次——注意这是对同一份 “chunk_size” 大小的输入数据快速重复十次,因此后九次数据在 L2 中是热的——测试总时间只增加了大约五倍,而不是十倍。我提出这一点是因为:我理解你的目标是调优位计数逻辑本身的速度,但也许你打算部署它的应用程序实际上工作集更小且/或更热。如果是这样,DRAM→L2 带宽带来的限制会让测试结果失真。还要注意,缩小测试输入使其在 L2 中保持热状态,似乎会让其他开销——可能是 OpenMP 的开销——变得相对更显著。)

/* vpu_popcount5 (tuned): four-way unrolled vector popcount, 32 qwords per
 * iteration. Note that B6 holds 0x000000FF in every element (see the text
 * above) and that this version only loads the masks it actually uses
 * (B0, B1, B2, B6). After the 4-bit stage the four partials are merged
 * pairwise -- byte fields hold at most 8, so sums fit -- then 8/16-bit
 * shift-adds finish each lane (max 64 per element, masked by B6).
 * Assumes n is a multiple of 32 and buf is 64-byte aligned -- TODO confirm.
 */
inline uint64_t vpu_popcount5(uint64_t* buf, size_t n)  {
    _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[128], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[136], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[144], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[152], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[160], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[168], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[176], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[184], _MM_HINT_T1); // vprefetch1
    register size_t result;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));  // 0x55555555 per lane
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16)); // 0x33333333 per lane
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32)); // 0x0F0F0F0F per lane
    register const __m512i B6 = _mm512_load_epi32((void*)(magic+80)); // 0x000000FF per lane
    register __m512i total0;
    register __m512i total1;
    register __m512i total2;
    register __m512i total3;
    register __m512i shuf0;
    register __m512i shuf1;
    register __m512i shuf2;
    register __m512i shuf3;
    register __m512i result0;
    register __m512i result1;

    result0 = _mm512_setzero_epi32();
    result1 = _mm512_setzero_epi32();

    for (i = 0; i < n; i+=32) {
        shuf0 = _mm512_load_epi32(&buf[i   ]);
        shuf1 = _mm512_load_epi32(&buf[i+ 8]);
        shuf2 = _mm512_load_epi32(&buf[i+16]);
        shuf3 = _mm512_load_epi32(&buf[i+24]);
        _mm_prefetch((const char *)&buf[i+192], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+200], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+208], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+216], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+32], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+40], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+48], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+56], _MM_HINT_T0); // vprefetch0
        total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));                        //  max value in nn is 10
        total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
        total2 = _mm512_sub_epi32(shuf2, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf2,1)));
        total3 = _mm512_sub_epi32(shuf3, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf3,1)));
        total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2))); //  max value in nnnn is 0100
        total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
        total2 = _mm512_add_epi32(_mm512_and_epi32(B1, total2), _mm512_and_epi32(B1,_mm512_srli_epi32(total2,2)));
        total3 = _mm512_add_epi32(_mm512_and_epi32(B1, total3), _mm512_and_epi32(B1,_mm512_srli_epi32(total3,2)));
        total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));                      //  max value in 0000nnnn is 00001000
        total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
        total2 = _mm512_and_epi32(B2, _mm512_add_epi32(total2, _mm512_srli_epi32(total2,4)));
        total3 = _mm512_and_epi32(B2, _mm512_add_epi32(total3, _mm512_srli_epi32(total3,4)));
        total0 = _mm512_add_epi32(total0, total1);                                                                 //  max value in 000nnnnn is 00010000
        total1 = _mm512_add_epi32(total2, total3);
        total0 = _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8));                                            //  max value in xxxxxxxx00nnnnnn is 00100000
        total1 = _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8));
        total0 = _mm512_and_epi32(B6, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));                     //  max value in each element is 01000000, i.e. 64
        total1 = _mm512_and_epi32(B6, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
        result0 = _mm512_add_epi32(result0,total0);
        result1 = _mm512_add_epi32(result1,total1);

        /* Only cheap per-lane vector adds happen in the loop; the
           multi-instruction horizontal reduction (VPUv1 has no single
           PSADBW-like instruction) is deferred until after the loop. */
    }

    result0 = _mm512_add_epi32(result0,result1);
    result  = _mm512_reduce_add_epi32(result0);

    return result;
}

答案 1 :(得分:1)

请您尝试以下变体,并报告这是否会改善您的表现?我正在解决几个我认为在你的编码中不是最优的问题:

  • 我不认为你的预取距离是正确的。在我看来,你可能一直按字节偏移来考虑预取距离,而索引实际上是以 uint64 为单位的。
  • 我认为没有理由在循环的每次迭代中进行缩减操作。您可以在16个SIMD元素中进行部分位计数累加,然后在循环外进行一次缩减。
  • 我不认为做标量端popcount指令和真正获得最佳VPU调度一样有利。专注于优秀的VPU时间表是最重要的。我也不认为标量popcount指令实际上与向量运算配对;即我认为它只在U型管道中得到支持。

/* vpu_popcount3_revised (answer's version): all-vector popcount with two
 * independent total/result chains so back-to-back vector ops don't stall
 * the in-order pipeline, corrected element-unit prefetch distances, and a
 * single horizontal reduction after the loop (addressing the three points
 * listed above). Assumes n is a multiple of 16 and buf is 64-byte aligned
 * -- TODO confirm at call sites.
 */
inline uint64_t vpu_popcount3_revised(uint64_t* buf, size_t n) {
    /* Prologue: first two cache lines into L1, the rest of the first 1 KiB
       into L2, before the steady-state loop prefetches take over. */
    _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
    register size_t result;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));  // 0x55555555 per lane
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16)); // 0x33333333 per lane
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32)); // 0x0F0F0F0F per lane
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48)); // 0x00FF00FF per lane
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64)); // 0x0000FFFF per lane
    register __m512i total0;
    register __m512i total1;
    register __m512i shuf0;
    register __m512i shuf1;
    register __m512i result0;
    register __m512i result1;

    result0 = _mm512_setzero_epi32();
    result1 = _mm512_setzero_epi32();

    for (i = 0; i < n; i+=16) {
        shuf0 = _mm512_load_epi32(&buf[i  ]);
        shuf1 = _mm512_load_epi32(&buf[i+8]);
        _mm_prefetch((const char *)&buf[i+128], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+136], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+16], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+24], _MM_HINT_T0); // vprefetch0
        /* Hacker's-Delight network on both 512-bit halves in lockstep. */
        total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));
        total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
        total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2)));
        total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
        total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));
        total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
        total0 = _mm512_and_epi32(B3, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8)));
        total1 = _mm512_and_epi32(B3, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8)));
        total0 = _mm512_and_epi32(B4, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));
        total1 = _mm512_and_epi32(B4, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
        result0 = _mm512_add_epi32(result0,total0);
        result1 = _mm512_add_epi32(result1,total1);

    }

    /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
       is not implemented as a single instruction in VPUv1, thus
       emulated by multiple instructions -- done once, outside the loop. */

    result0 = _mm512_add_epi32(result0,result1);
    result  = _mm512_reduce_add_epi32(result0);

    return result;
}