Improve SSE (SSSE3) YUV to RGB code

Question

I am looking to optimise some SSE code I wrote for converting YUV to RGB (both planar and packed YUV functions).

I am using SSSE3 at the moment, but if there are useful functions from later SSE versions, that's OK.

I am mainly interested in how I would work out processor stalls and the like.

Anyone know of any tools that do static analysis of SSE code?

;
; Copyright (C) 2009-2010 David McPaul
;
; All rights reserved. Distributed under the terms of the MIT License.
;

; A rather unoptimised set of ssse3 yuv to rgb converters
; does 8 pixels per loop

; inputer:
; reads 128 bits of yuv 8 bit data and puts
; the y values converted to 16 bit in xmm0
; the u values converted to 16 bit and duplicated into xmm1
; the v values converted to 16 bit and duplicated into xmm2

; conversion:
; does the yuv to rgb conversion using 16 bit integer and the
; results are placed into the following registers as 8 bit clamped values
; r values in xmm3
; g values in xmm4
; b values in xmm5

; outputer:
; writes out the rgba pixels as 8 bit values with 0 for alpha

; xmm6 used for scratch
; xmm7 used for scratch

%macro cglobal 1
global _%1
%define %1 _%1
align 16
%1:
%endmacro

; conversion code
%macro yuv2rgbsse2 0
; u = u - 128
; v = v - 128
; r = y + v + v >> 2 + v >> 3 + v >> 5
; g = y - (u >> 2 + u >> 4 + u >> 5) - (v >> 1 + v >> 3 + v >> 4 + v >> 5)
; b = y + u + u >> 1 + u >> 2 + u >> 6
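; (for reference, these shift sums approximate the usual Rec.601-style
; coefficients: 1 + 1/4 + 1/8 + 1/32 = 1.40625 ~ 1.402 for v in r;
; 1/4 + 1/16 + 1/32 = 0.34375 ~ 0.344 (u) and
; 1/2 + 1/8 + 1/16 + 1/32 = 0.71875 ~ 0.714 (v) in g;
; 1 + 1/2 + 1/4 + 1/64 = 1.765625 ~ 1.772 for u in b)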
; subtract 16 from y
movdqa xmm7, [Const16] ; loads a constant using data cache (slower on first fetch but then cached)
psubsw xmm0,xmm7 ; y = y - 16
; subtract 128 from u and v
movdqa xmm7, [Const128] ; loads a constant using data cache (slower on first fetch but then cached)
psubsw xmm1,xmm7 ; u = u - 128
psubsw xmm2,xmm7 ; v = v - 128
; load r,b with y
movdqa xmm3,xmm0 ; r = y
pshufd xmm5,xmm0, 0xE4 ; b = y

; r = y + v + v >> 2 + v >> 3 + v >> 5
paddsw xmm3, xmm2 ; add v to r
movdqa xmm7, xmm1 ; move u to scratch
pshufd xmm6, xmm2, 0xE4 ; move v to scratch

psraw xmm6,2 ; divide v by 4
paddsw xmm3, xmm6 ; and add to r
psraw xmm6,1 ; divide v by 2
paddsw xmm3, xmm6 ; and add to r
psraw xmm6,2 ; divide v by 4
paddsw xmm3, xmm6 ; and add to r

; b = y + u + u >> 1 + u >> 2 + u >> 6
paddsw xmm5, xmm1 ; add u to b
psraw xmm7,1 ; divide u by 2
paddsw xmm5, xmm7 ; and add to b
psraw xmm7,1 ; divide u by 2
paddsw xmm5, xmm7 ; and add to b
psraw xmm7,4 ; divide by 16, giving u >> 6 in total
paddsw xmm5, xmm7 ; and add to b

; g = y - u >> 2 - u >> 4 - u >> 5 - v >> 1 - v >> 3 - v >> 4 - v >> 5
movdqa xmm7,xmm2 ; move v to scratch
pshufd xmm6,xmm1, 0xE4 ; move u to scratch
movdqa xmm4,xmm0 ; g = y

psraw xmm6,2 ; divide u by 4
psubsw xmm4,xmm6 ; subtract from g
psraw xmm6,2 ; divide u by 4
psubsw xmm4,xmm6 ; subtract from g
psraw xmm6,1 ; divide u by 2
psubsw xmm4,xmm6 ; subtract from g

psraw xmm7,1 ; divide v by 2
psubsw xmm4,xmm7 ; subtract from g
psraw xmm7,2 ; divide v by 4
psubsw xmm4,xmm7 ; subtract from g
psraw xmm7,1 ; divide v by 2
psubsw xmm4,xmm7 ; subtract from g
psraw xmm7,1 ; divide v by 2
psubsw xmm4,xmm7 ; subtract from g
%endmacro

; outputer
%macro rgba32sse2output 0
; clamp values
pxor xmm7,xmm7
packuswb xmm3,xmm7 ; clamp to 0,255 and pack R to 8 bit per pixel
packuswb xmm4,xmm7 ; clamp to 0,255 and pack G to 8 bit per pixel
packuswb xmm5,xmm7 ; clamp to 0,255 and pack B to 8 bit per pixel
; convert to bgra32 packed
punpcklbw xmm5,xmm4 ; bgbgbgbgbgbgbgbg
movdqa xmm0, xmm5 ; save bg values
punpcklbw xmm3,xmm7 ; r0r0r0r0r0r0r0r0
punpcklwd xmm5,xmm3 ; lower half bgr0bgr0bgr0bgr0
punpckhwd xmm0,xmm3 ; upper half bgr0bgr0bgr0bgr0
; write to output ptr
movntdq [edi], xmm5 ; output first 4 pixels bypassing cache
movntdq [edi+16], xmm0 ; output second 4 pixels bypassing cache
%endmacro

SECTION .data align=16

Const16 dw 16
dw 16
dw 16
dw 16
dw 16
dw 16
dw 16
dw 16

Const128 dw 128
dw 128
dw 128
dw 128
dw 128
dw 128
dw 128
dw 128

UMask db 0x01
db 0x80
db 0x01
db 0x80
db 0x05
db 0x80
db 0x05
db 0x80
db 0x09
db 0x80
db 0x09
db 0x80
db 0x0d
db 0x80
db 0x0d
db 0x80

VMask db 0x03
db 0x80
db 0x03
db 0x80
db 0x07
db 0x80
db 0x07
db 0x80
db 0x0b
db 0x80
db 0x0b
db 0x80
db 0x0f
db 0x80
db 0x0f
db 0x80

YMask db 0x00
db 0x80
db 0x02
db 0x80
db 0x04
db 0x80
db 0x06
db 0x80
db 0x08
db 0x80
db 0x0a
db 0x80
db 0x0c
db 0x80
db 0x0e
db 0x80

; void Convert_YUV422_RGBA32_SSSE3(void *fromPtr, void *toPtr, int width)
width equ ebp+16
toPtr equ ebp+12
fromPtr equ ebp+8

; void Convert_YUV420P_RGBA32_SSSE3(void *fromYPtr, void *fromUPtr, void *fromVPtr, void *toPtr, int width)
width1 equ ebp+24
toPtr1 equ ebp+20
fromVPtr equ ebp+16
fromUPtr equ ebp+12
fromYPtr equ ebp+8

SECTION .text align=16

cglobal Convert_YUV422_RGBA32_SSSE3
; reserve variables
push ebp
mov ebp, esp
push edi
push esi
push ecx

mov esi, [fromPtr]
mov edi, [toPtr]
mov ecx, [width]
; loop width / 8 times
shr ecx,3
test ecx,ecx
jng ENDLOOP
REPEATLOOP: ; loop over width / 8
; YUV422 packed inputer
movdqa xmm0, [esi] ; should have yuyv yuyv yuyv yuyv
pshufd xmm1, xmm0, 0xE4 ; copy to xmm1
movdqa xmm2, xmm0 ; copy to xmm2
; extract both y giving y0y0
pshufb xmm0, [YMask]
; extract u and duplicate so each u in yuyv becomes u0u0
pshufb xmm1, [UMask]
; extract v and duplicate so each v in yuyv becomes v0v0
pshufb xmm2, [VMask]

yuv2rgbsse2

rgba32sse2output

; endloop
add edi,32
add esi,16
sub ecx, 1 ; apparently sub is better than dec
jnz REPEATLOOP
ENDLOOP:
; Cleanup
pop ecx
pop esi
pop edi
mov esp, ebp
pop ebp
ret

cglobal Convert_YUV420P_RGBA32_SSSE3
; reserve variables
push ebp
mov ebp, esp
push edi
push esi
push ecx
push eax
push ebx

mov esi, [fromYPtr]
mov eax, [fromUPtr]
mov ebx, [fromVPtr]
mov edi, [toPtr1]
mov ecx, [width1]
; loop width / 8 times
shr ecx,3
test ecx,ecx
jng ENDLOOP1
REPEATLOOP1: ; loop over width / 8
; YUV420 Planar inputer
movq xmm0, [esi] ; fetch 8 y values (8 bit) yyyyyyyy00000000
movd xmm1, [eax] ; fetch 4 u values (8 bit) uuuu000000000000
movd xmm2, [ebx] ; fetch 4 v values (8 bit) vvvv000000000000

; extract y
pxor xmm7,xmm7 ; 00000000000000000000000000000000
punpcklbw xmm0,xmm7 ; interleave xmm7 into xmm0 y0y0y0y0y0y0y0y0
; extract u and duplicate so each becomes 0u0u
punpcklbw xmm1,xmm7 ; interleave xmm7 into xmm1 u0u0u0u000000000
punpcklwd xmm1,xmm7 ; interleave again u000u000u000u000
pshuflw xmm1,xmm1, 0xA0 ; copy u values
pshufhw xmm1,xmm1, 0xA0 ; to get u0u0
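; (0xA0 picks word lanes 0,0,2,2 of each qword, turning u0 0 u1 0 ... into
; u0 u0 u1 u1 ... so each u is shared by the two pixels it covers)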
; extract v
punpcklbw xmm2,xmm7 ; interleave xmm7 into xmm2 v0v0v0v000000000
punpcklwd xmm2,xmm7 ; interleave again v000v000v000v000
pshuflw xmm2,xmm2, 0xA0 ; copy v values
pshufhw xmm2,xmm2, 0xA0 ; to get v0v0

yuv2rgbsse2

rgba32sse2output

; endloop
add edi,32
add esi,8
add eax,4
add ebx,4
sub ecx, 1 ; apparently sub is better than dec
jnz REPEATLOOP1
ENDLOOP1:
; Cleanup
pop ebx
pop eax
pop ecx
pop esi
pop edi
mov esp, ebp
pop ebp
ret

SECTION .note.GNU-stack noalloc noexec nowrite progbits

Solution

If you keep u & v interleaved in one register, and use 'pmaddwd' and precomputed constants instead of your shift-and-add approach, you can compress the conversion code to about a third, and get rid of most stalls at the same time:

; xmm0 = y y y y y y y y
; xmm3 = u v u v u v u v

psubsw xmm3, [Const128]
psubsw xmm0, [Const16] 
movdqa xmm4, xmm3
movdqa xmm5, xmm3
pmaddwd xmm3, [const_1]
pmaddwd xmm4, [const_2]
pmaddwd xmm5, [const_3]
psrad xmm3, 14
psrad xmm4, 14
psrad xmm5, 14
pshufb xmm3, [const_4] ; or pshuflw & pshufhw
pshufb xmm4, [const_4]
pshufb xmm5, [const_4]
paddsw xmm3, xmm0
paddsw xmm4, xmm0
paddsw xmm5, xmm0
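For reference, the four constants could look something like this (a minimal sketch, assuming full-range Rec.601-style coefficients scaled by 2^14 to match the psrad by 14, and 16-byte-aligned data; the exact values and rounding are up to you):

const_1: times 4 dw 0, 22970      ; r: 0*u + 1.402*v (1.402 * 2^14 ~ 22970)
const_2: times 4 dw -5636, -11698 ; g: -0.344*u - 0.714*v
const_3: times 4 dw 29032, 0      ; b: 1.772*u + 0*v
const_4: db 0,1,0,1, 4,5,4,5, 8,9,8,9, 12,13,12,13 ; duplicate the low word of each dword result

pmaddwd computes c_u*u + c_v*v for each (u,v) word pair into one dword, and the pshufb then copies each dword's low word into the two lanes of the pixel pair that shares that chroma sample.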

If you want it to work even faster, playing with PMADDUBSW should allow you to work on 16 pixels at a time with a small increase in complexity.

Most processors (particularly non-Intel ones, which are notorious for not having a well-working hardware prefetcher, but to a lesser extent Intel's too) will benefit from a prefetchnta [esi+256] thrown inside the loop.
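In the YUV422 loop above that would look something like this (a sketch; the best prefetch distance varies by machine):

REPEATLOOP:
prefetchnta [esi+256] ; hint the source data ~16 iterations ahead, minimising cache pollution
movdqa xmm0, [esi] ; loop body continues as before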

EDIT: the code that uses PMADDUBSW could look like this (correctness not guaranteed):

const_a:
times 4 db 1,3
times 4 db 5,7
const_b:
times 4 db 9,11
times 4 db 13,15
const_c: times 8 dw 0x00ff
const_d: times 4 dd 0x00ffffff

const_uv_to_rgb_mul:
...
const_uv_to_rgb_add:
...

movdqa xmm4, [esi]
movdqa xmm0, xmm4
movdqa xmm1, xmm4
pshufb xmm0, [const_a] 
pshufb xmm1, [const_b]
pand xmm4, [const_c] 

; xmm0: uv0 uv0 uv0 uv0 uv2 uv2 uv2 uv2
; xmm1: uv4 uv4 uv4 uv4 ...
; xmm4: y0 0 y1 0 y2 0 y3 0 y4 0 y5 0 y6 0 y7 0

pmaddubsw xmm0, [const_uv_to_rgb_mul]
pmaddubsw xmm1, [const_uv_to_rgb_mul]
paddsw xmm0, [const_uv_to_rgb_add]
paddsw xmm1, [const_uv_to_rgb_add]
psraw xmm0, 6
psraw xmm1, 6
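; (the shift by 6 suggests the multipliers in const_uv_to_rgb_mul are
; assumed to be scaled by 2^6, which keeps them inside pmaddubsw's
; signed-byte operand range)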

; r01 g01 b01 0 r23 g23 b23 0

pshufd xmm2, xmm0, 2+3*4+2*16+3*64
pshufd xmm0, xmm0, 0+1*4+0*16+1*64
pshufd xmm3, xmm1, 2+3*4+2*16+3*64
pshufd xmm1, xmm1, 0+1*4+0*16+1*64

; xmm0: r01 g01 b01 0 r01 g01 b01 0
; xmm2: r23 g23 b23 0 r23 g23 b23 0
; xmm1: r45 g45 b45 0 r45 g45 b45 0

paddsw xmm0, xmm4 ; add y
paddsw xmm1, xmm4 
paddsw xmm2, xmm4
paddsw xmm3, xmm4

packuswb xmm0, xmm2  ; pack with saturation into 0-255 range
packuswb xmm1, xmm3
pand xmm0, [const_d] ; zero out the alpha byte
pand xmm1, [const_d]
movntdq [edi], xmm0
movntdq [edi+16], xmm1
