Fast memory transpose with SSE, AVX, and OpenMP

Problem description

I need a fast memory transpose algorithm for my Gaussian convolution function in C/C++. What I do now is

convolute_1D
transpose
convolute_1D
transpose

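For context, a minimal sketch of that pass structure (the prototypes are hypothetical stand-ins for my real routines, which aren't shown here): each 1-D pass filters along rows, and the transposes turn the column pass into another cache-friendly row pass.

// Hypothetical prototypes standing in for the real routines (not shown here).
void convolute_1D(const float *in, float *out, int n, int m,
                  const float *kernel, int ksize);
void transpose(const float *in, float *out, int n, int m);

// One possible driver for the steps above: two row-wise 1-D convolutions,
// each followed by a transpose so the next pass also streams along rows.
void gaussian_2D(float *img, float *tmp, int w, int h,
                 const float *kernel, int ksize) {
    convolute_1D(img, tmp, w, h, kernel, ksize);  // blur along rows
    transpose(tmp, img, w, h);                    // columns become rows
    convolute_1D(img, tmp, h, w, kernel, ksize);  // blur along former columns
    transpose(tmp, img, h, w);                    // restore orientation
}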

It turns out that with this method the filter size has to be large (or larger than I expected) or the transpose takes longer than the convolution (e.g. for a 1920x1080 matrix the convolution takes the same time as the transpose for a filter size of 35). The current transpose algorithm I am using uses loop blocking/tiling along with SSE and OpenMP. I have tried a version using AVX but it's no faster. Any suggestions on how I can speed this up?

#include <immintrin.h>  // SSE/AVX intrinsics; compile with e.g. -mavx -fopenmp

// Transpose one 4x4 tile of A (row stride lda) into B (row stride ldb).
// _mm_load_ps and _mm_store_ps require 16-byte-aligned addresses.
inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb) {
    __m128 row1 = _mm_load_ps(&A[0*lda]);
    __m128 row2 = _mm_load_ps(&A[1*lda]);
    __m128 row3 = _mm_load_ps(&A[2*lda]);
    __m128 row4 = _mm_load_ps(&A[3*lda]);
    _MM_TRANSPOSE4_PS(row1, row2, row3, row4);
    _mm_store_ps(&B[0*ldb], row1);
    _mm_store_ps(&B[1*ldb], row2);
    _mm_store_ps(&B[2*ldb], row3);
    _mm_store_ps(&B[3*ldb], row4);
}
// Blocked transpose of the n x m matrix A into B, in 4x4 SSE tiles.
// block_size = 16 works best; n and m are assumed to be multiples of 4.
inline void transpose_block_SSE4x4(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) {
    #pragma omp parallel for
    for(int i=0; i<n; i+=block_size) {
        for(int j=0; j<m; j+=block_size) {
            int max_i2 = i+block_size < n ? i + block_size : n;
            int max_j2 = j+block_size < m ? j + block_size : m;
            for(int i2=i; i2<max_i2; i2+=4) {
                for(int j2=j; j2<max_j2; j2+=4) {
                    transpose4x4_SSE(&A[i2*lda +j2], &B[j2*ldb + i2], lda, ldb);
                }
            }
        }
    }
}
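
As a usage sketch (the dimensions here are illustrative): because transpose4x4_SSE uses aligned loads and stores, A and B must be 16-byte aligned and lda/ldb must be multiples of 4 floats.

int main(void) {
    const int n = 1080, m = 1920;      // A is n x m; B becomes m x n
    const int lda = 1920, ldb = 1080;  // row strides in floats, multiples of 4
    float *A = (float*)_mm_malloc((size_t)n * lda * sizeof(float), 16);
    float *B = (float*)_mm_malloc((size_t)m * ldb * sizeof(float), 16);
    for (int i = 0; i < n * lda; i++) A[i] = (float)i;  // test data
    transpose_block_SSE4x4(A, B, n, m, lda, ldb, 16);   // B = transpose of A
    _mm_free(A);
    _mm_free(B);
    return 0;
}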

Transposing an 8x8 float matrix using AVX. It's no faster than four 4x4 transposes.

// In-register 8x8 transpose: the unpacks interleave adjacent rows, the
// shuffles merge 64-bit pairs, and permute2f128 swaps the 128-bit lanes.
inline void transpose8_ps(__m256 &row0, __m256 &row1, __m256 &row2, __m256 &row3, __m256 &row4, __m256 &row5, __m256 &row6, __m256 &row7) {
    __m256 __t0, __t1, __t2, __t3, __t4, __t5, __t6, __t7;
    __m256 __tt0, __tt1, __tt2, __tt3, __tt4, __tt5, __tt6, __tt7;
    __t0 = _mm256_unpacklo_ps(row0, row1);
    __t1 = _mm256_unpackhi_ps(row0, row1);
    __t2 = _mm256_unpacklo_ps(row2, row3);
    __t3 = _mm256_unpackhi_ps(row2, row3);
    __t4 = _mm256_unpacklo_ps(row4, row5);
    __t5 = _mm256_unpackhi_ps(row4, row5);
    __t6 = _mm256_unpacklo_ps(row6, row7);
    __t7 = _mm256_unpackhi_ps(row6, row7);
    __tt0 = _mm256_shuffle_ps(__t0,__t2,_MM_SHUFFLE(1,0,1,0));
    __tt1 = _mm256_shuffle_ps(__t0,__t2,_MM_SHUFFLE(3,2,3,2));
    __tt2 = _mm256_shuffle_ps(__t1,__t3,_MM_SHUFFLE(1,0,1,0));
    __tt3 = _mm256_shuffle_ps(__t1,__t3,_MM_SHUFFLE(3,2,3,2));
    __tt4 = _mm256_shuffle_ps(__t4,__t6,_MM_SHUFFLE(1,0,1,0));
    __tt5 = _mm256_shuffle_ps(__t4,__t6,_MM_SHUFFLE(3,2,3,2));
    __tt6 = _mm256_shuffle_ps(__t5,__t7,_MM_SHUFFLE(1,0,1,0));
    __tt7 = _mm256_shuffle_ps(__t5,__t7,_MM_SHUFFLE(3,2,3,2));
    row0 = _mm256_permute2f128_ps(__tt0, __tt4, 0x20);
    row1 = _mm256_permute2f128_ps(__tt1, __tt5, 0x20);
    row2 = _mm256_permute2f128_ps(__tt2, __tt6, 0x20);
    row3 = _mm256_permute2f128_ps(__tt3, __tt7, 0x20);
    row4 = _mm256_permute2f128_ps(__tt0, __tt4, 0x31);
    row5 = _mm256_permute2f128_ps(__tt1, __tt5, 0x31);
    row6 = _mm256_permute2f128_ps(__tt2, __tt6, 0x31);
    row7 = _mm256_permute2f128_ps(__tt3, __tt7, 0x31);
}

// Transpose one 8x8 tile of A (row stride lda) into B (row stride ldb).
// _mm256_load_ps and _mm256_store_ps require 32-byte-aligned addresses.
inline void transpose8x8_avx(float *A, float *B, const int lda, const int ldb) {
    __m256 row0 = _mm256_load_ps(&A[0*lda]);
    __m256 row1 = _mm256_load_ps(&A[1*lda]);
    __m256 row2 = _mm256_load_ps(&A[2*lda]);
    __m256 row3 = _mm256_load_ps(&A[3*lda]);
    __m256 row4 = _mm256_load_ps(&A[4*lda]);
    __m256 row5 = _mm256_load_ps(&A[5*lda]);
    __m256 row6 = _mm256_load_ps(&A[6*lda]);
    __m256 row7 = _mm256_load_ps(&A[7*lda]);
    transpose8_ps(row0, row1, row2, row3, row4, row5, row6, row7);
    _mm256_store_ps(&B[0*ldb], row0);
    _mm256_store_ps(&B[1*ldb], row1);
    _mm256_store_ps(&B[2*ldb], row2);
    _mm256_store_ps(&B[3*ldb], row3);
    _mm256_store_ps(&B[4*ldb], row4);
    _mm256_store_ps(&B[5*ldb], row5);
    _mm256_store_ps(&B[6*ldb], row6);
    _mm256_store_ps(&B[7*ldb], row7);
}
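
For completeness, a sketch of the matching blocked driver for the 8x8 kernel (hypothetical, mirroring transpose_block_SSE4x4 above; it assumes 32-byte-aligned buffers, lda/ldb multiples of 8 floats, and n, m multiples of 8):

inline void transpose_block_AVX8x8(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) {
    #pragma omp parallel for
    for(int i=0; i<n; i+=block_size) {
        for(int j=0; j<m; j+=block_size) {
            int max_i2 = i+block_size < n ? i + block_size : n;
            int max_j2 = j+block_size < m ? j + block_size : m;
            for(int i2=i; i2<max_i2; i2+=8) {
                for(int j2=j; j2<max_j2; j2+=8) {
                    transpose8x8_avx(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
                }
            }
        }
    }
}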

Answer

I'd guess that your best bet would be to try to combine the convolution and the transpose - i.e. write out the results of the convolution transposed as you go. You're almost certainly memory-bandwidth limited on the transpose, so reducing the number of instructions used for the transpose isn't really going to help (hence the lack of improvement from using AVX). Reducing the number of passes over your data is going to give you the best performance improvement.
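
A hedged scalar sketch of that idea (a hypothetical fused pass, not code from the question): convolve along each row, but store each result column-wise so the output comes out already transposed and the separate transpose pass disappears. The border handling and names here are purely illustrative.

// Hypothetical fused pass: 1-D convolution along the rows of `in` (h rows of
// w floats, row stride ldi), with results stored transposed into `out`
// (w rows of h floats, row stride ldo). Edges are clamped for illustration.
void convolve_rows_store_transposed(const float *in, float *out,
                                    int w, int h, int ldi, int ldo,
                                    const float *kernel, int ksize) {
    int r = ksize / 2;  // ksize is assumed odd
    #pragma omp parallel for
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            float sum = 0.0f;
            for (int k = -r; k <= r; k++) {
                int xx = x + k;
                if (xx < 0)  xx = 0;       // clamp at the left edge
                if (xx >= w) xx = w - 1;   // clamp at the right edge
                sum += kernel[k + r] * in[y*ldi + xx];
            }
            out[x*ldo + y] = sum;  // transposed store
        }
    }
}

Calling a pass like this twice (swapping w/h and the strides) gives the full separable Gaussian with both passes reading rows sequentially; in real code the transposed stores would themselves be tiled, as in the blocked transposes above, so the writes stay within a small set of cache lines.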
