Concat字节数组 [英] Concat byte arrays
问题描述
b := make([]byte, 0, sizeTotal)
b = append(b, size...)
b = append(b, contentType...)
b = append(b, lenCallbackid...)
b = append(b, lenTarget...)
b = append(b, lenAction...)
b = append(b, lenContent...)
b = append(b, callbackid...)
b = append(b, target...)
b = append(b, action...)
b = append(b, content...)
除 sizeTotal(各部分长度之和)以外,上面的每个变量都是一个字节切片。
代码:
// Message is the unit being marshalled: a 4-byte total size, a 1-byte
// content type, four 4-byte little-endian string lengths, then the four
// string payloads back to back.
type Message struct {
	size        uint32
	contentType uint8
	callbackId  string
	target      string
	action      string
	content     string
}

// res keeps the last marshalled packet reachable (forces heap allocation
// in benchmarks); b is the shared buffer ToByte reuses between calls.
var res []byte
var b []byte = make([]byte, 0, 4096)
func(m * Message)ToByte()[] byte {
callbackIdIntLen:= len(m.callbackId)
targetIntLen:= len(m.target)
actionIntLen:= len(m.action)
contentIntLen:= len(m.content)
lenCallbackid := make([] byte,4)
binary.LittleEndian.PutUint32(lenCallbackid,uint32(callbackIdIntLen))
callbackid:= [] byte(m.callbackId)
lenTarget:= make ([] byte,4)
binary.LittleEndian.PutUint32(lenTarget,uint32(targetIntLen))
target:= [] byte(m.target)
lenAction:= make([]字节,4)
binary.LittleEndian.PutUint32(lenAction,uint32(actionIntLen))
action:= [] byte(m.action)
lenContent:= make([] byte,4 )
binary.LittleEndian.PutUint32(lenContent,uint32(contentIntLen))
content:= [] byte(m.content)
sizeTotal:= 21 + callbackIdIntLen + targetIntLen + actionIntLen + contentIntLen $
$ b $ = b
b = append(b,bint)大小...)
b = append(b,byte(m.contentType))
b = append(b,lenCallbackid ...)
b = append(b,lenTarget ...)
b = append(b,lenAction ...)
b = append(b ,lenContent ...)
b = append(b,callbackid ...)
b = append(b,target ...)
b = append(b,action ...)
b = append(b,content ...)
res = b
return b
}
func FromByte(bytes [] byte)(* Message) {
size:= binary.LittleEndian.Uint32(bytes [0:4])
contentType:= bytes [4:5] [0]
lenCallbackid:= binary.LittleEndian.Uint32(字节[5:9])
lenTarget:= binary.LittleEndian.Uint32(bytes [9:13])
lenAction:= binary.LittleEndian.Uint32(bytes [13:17])
lenContent:= binary.LittleEndian.Uint32(bytes [17:21])$ b $ b callbackid:= string(bytes [21:21 + lenCallbackid])
target:= string(bytes [21 + lenCallbackid: 21 + lenCallbackid + lenTarget])
action:= string(bytes [21 + lenCallbackid + lenTarget:21 + lenCallbackid + lenTarget + lenAction])
content:= string(bytes [size-lenConten t:size])
return& Message {size,contentType,callbackid,target,action,content}
}
基准测试:
func BenchmarkMessageToByte(b * testing.B){
m:= NewMessage(uint8(3),agsdggsdasagdsdgsgddggds,sometarSFAFFget,somFSAFSAFFSeaction,somfasfsasfafsejsonzhit)
for n:= 0; n< b.N; n ++ {
m.ToByte()
}
}
func BenchmarkMessageFromByte(b * testing.B){
m:= NewMessage (uint8(1),sagdsgaasdg,soSASFASFASAFSFASFAGmetarget,adsgdgsagdssgdsgd,agsdsdgsagdsdgasdg)ToByte()
for n:= 0; n< b.N; n ++ {
FromByte(m)
}
}
// BenchmarkStringToByte measures a string -> []byte conversion (one copy).
func BenchmarkStringToByte(b *testing.B) {
	for n := 0; n < b.N; n++ {
		_ = []byte("abcdefghijklmnoqrstuvwxyz")
	}
}

// BenchmarkStringFromByte measures a []byte -> string conversion (one copy).
func BenchmarkStringFromByte(b *testing.B) {
	s := []byte("abcdefghijklmnoqrstuvwxyz")
	for n := 0; n < b.N; n++ {
		_ = string(s)
	}
}

// BenchmarkUintToByte measures encoding a uint32 into a fresh 4-byte slice.
func BenchmarkUintToByte(b *testing.B) {
	for n := 0; n < b.N; n++ {
		i := make([]byte, 4)
		binary.LittleEndian.PutUint32(i, uint32(99))
	}
}

// BenchmarkUintFromByte measures decoding a uint32 from a prepared slice.
func BenchmarkUintFromByte(b *testing.B) {
	i := make([]byte, 4)
	binary.LittleEndian.PutUint32(i, uint32(99))
	for n := 0; n < b.N; n++ {
		binary.LittleEndian.Uint32(i)
	}
}
结果:
BenchmarkMessageToByte 10000000 280 ns / op
BenchmarkMessageFromByte 10000000 293 ns / op
BenchmarkStringToByte 50000000 55.1 ns / op
BenchmarkStringFromByte 50000000 49.7 ns / op
BenchmarkUintToByte 1000000000 2.14 ns / op
BenchmarkUintFromByte 2000000000 1.71 ns / op
解决方案:只要内存已经分配好,一系列 x = append(x, a...) 在 Go 中是相当高效的。在您的示例中,初始分配(make)的开销可能比这一系列 append 更大,这取决于各字段的大小。考虑以下基准测试:
package main

import (
	"testing"
)

// sizeTotal is the exact concatenated length: five 5-byte chunks.
const sizeTotal = 25

var res []byte // To enforce heap allocation

// BenchmarkWithAlloc allocates a fresh destination buffer on every iteration.
func BenchmarkWithAlloc(b *testing.B) {
	a := []byte("abcde")
	for i := 0; i < b.N; i++ {
		x := make([]byte, 0, sizeTotal)
		x = append(x, a...)
		x = append(x, a...)
		x = append(x, a...)
		x = append(x, a...)
		x = append(x, a...)
		res = x // Make sure x escapes, and is therefore heap allocated
	}
}

// BenchmarkWithoutAlloc allocates once and resets the buffer with x = x[:0].
func BenchmarkWithoutAlloc(b *testing.B) {
	a := []byte("abcde")
	x := make([]byte, 0, sizeTotal)
	for i := 0; i < b.N; i++ {
		x = x[:0]
		x = append(x, a...)
		x = append(x, a...)
		x = append(x, a...)
		x = append(x, a...)
		x = append(x, a...)
		res = x
	}
}
在我的机器上,结果是:
testing:warning:无测试运行
PASS
BenchmarkWithAlloc 10000000 116 ns / op 32 B / op 1 allocs / op
BenchmarkWithoutAlloc 50000000 24.0 ns / op 0 B / op 0 allocs / op
系统地重新分配缓冲区(即使是小的缓冲区)基准测试的速度至少慢5倍。所以,你最好希望优化这个代码,以确保你不会为你构建的每个数据包重新分配一个缓冲区。相反,您应该保留缓冲区,并为每个编组操作重新使用它。
您可以在使用以下语句分配底层缓冲区的同时重置切片:
x = x [:0]
Can someone please point at a more efficient version of the following
b:=make([]byte,0,sizeTotal)
b=append(b,size...)
b=append(b,contentType...)
b=append(b,lenCallbackid...)
b=append(b,lenTarget...)
b=append(b,lenAction...)
b=append(b,lenContent...)
b=append(b,callbackid...)
b=append(b,target...)
b=append(b,action...)
b=append(b,content...)
every variable is a byte slice apart from size sizeTotal
Update
:
Code:
// Message is a length-prefixed wire format: a 4-byte total size, a 1-byte
// content type, four 4-byte little-endian string lengths, then the four
// string payloads (callbackId, target, action, content) back to back.
type Message struct {
	size        uint32
	contentType uint8
	callbackId  string
	target      string
	action      string
	content     string
}

// res keeps the last marshalled packet reachable (forces heap allocation
// in benchmarks); b is the shared marshalling buffer reused by ToByte.
var res []byte
var b []byte = make([]byte, 0, 4096)

// appendUint32 appends v to dst in little-endian order. The scratch array
// lives on the stack, so no per-call heap allocation is needed (the
// original code allocated a fresh make([]byte, 4) for every field).
func appendUint32(dst []byte, v uint32) []byte {
	var scratch [4]byte
	binary.LittleEndian.PutUint32(scratch[:], v)
	return append(dst, scratch[:]...)
}

// ToByte marshals m into the shared buffer b and returns it.
//
// NOTE(review): because the buffer is a package-level variable, ToByte is
// not safe for concurrent use, and the returned slice is invalidated by
// the next call — callers must copy it if they need to keep it.
func (m *Message) ToByte() []byte {
	// 21 = 4 (size) + 1 (contentType) + 4*4 (string lengths).
	sizeTotal := 21 + len(m.callbackId) + len(m.target) + len(m.action) + len(m.content)

	b = b[:0] // reset length, keep capacity: no per-packet allocation
	b = appendUint32(b, uint32(sizeTotal))
	b = append(b, byte(m.contentType))
	b = appendUint32(b, uint32(len(m.callbackId)))
	b = appendUint32(b, uint32(len(m.target)))
	b = appendUint32(b, uint32(len(m.action)))
	b = appendUint32(b, uint32(len(m.content)))
	// Appending a string to a []byte copies the bytes directly; the
	// original []byte(m.callbackId) conversions allocated needlessly.
	b = append(b, m.callbackId...)
	b = append(b, m.target...)
	b = append(b, m.action...)
	b = append(b, m.content...)
	res = b // make sure the result escapes
	return b
}

// FromByte unmarshals a packet produced by ToByte.
//
// NOTE(review): the input is trusted — malformed or truncated packets
// panic on the slice expressions below.
func FromByte(data []byte) *Message {
	size := binary.LittleEndian.Uint32(data[0:4])
	contentType := data[4]
	lenCallbackid := binary.LittleEndian.Uint32(data[5:9])
	lenTarget := binary.LittleEndian.Uint32(data[9:13])
	lenAction := binary.LittleEndian.Uint32(data[13:17])
	lenContent := binary.LittleEndian.Uint32(data[17:21])

	// Payloads follow the 21-byte header back to back. The original sliced
	// content as data[size-lenContent:size]; for well-formed packets the
	// two agree, and a running offset keeps all four fields consistent.
	off := uint32(21)
	callbackid := string(data[off : off+lenCallbackid])
	off += lenCallbackid
	target := string(data[off : off+lenTarget])
	off += lenTarget
	action := string(data[off : off+lenAction])
	off += lenAction
	content := string(data[off : off+lenContent])
	return &Message{size, contentType, callbackid, target, action, content}
}
Benchs
:
// BenchmarkMessageToByte measures marshalling one representative Message
// into the shared package-level buffer.
func BenchmarkMessageToByte(b *testing.B) {
	msg := NewMessage(uint8(3), "agsdggsdasagdsdgsgddggds", "sometarSFAFFget", "somFSAFSAFFSeaction", "somfasfsasfafsejsonzhit")
	for i := 0; i < b.N; i++ {
		msg.ToByte()
	}
}
// BenchmarkMessageFromByte measures unmarshalling a packet built once
// before the timed loop.
func BenchmarkMessageFromByte(b *testing.B) {
	packet := NewMessage(uint8(1), "sagdsgaasdg", "soSASFASFASAFSFASFAGmetarget", "adsgdgsagdssgdsgd", "agsdsdgsagdsdgasdg").ToByte()
	for i := 0; i < b.N; i++ {
		FromByte(packet)
	}
}
// BenchmarkStringToByte measures one string -> []byte conversion, which
// copies the 26 bytes each iteration.
func BenchmarkStringToByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = []byte("abcdefghijklmnoqrstuvwxyz")
	}
}
// BenchmarkStringFromByte measures one []byte -> string conversion, which
// copies the 26 bytes each iteration.
func BenchmarkStringFromByte(b *testing.B) {
	raw := []byte("abcdefghijklmnoqrstuvwxyz")
	for i := 0; i < b.N; i++ {
		_ = string(raw)
	}
}
// BenchmarkUintToByte measures encoding a uint32 into a freshly allocated
// 4-byte slice on each iteration.
func BenchmarkUintToByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buf := make([]byte, 4)
		binary.LittleEndian.PutUint32(buf, uint32(99))
	}
}
// BenchmarkUintFromByte measures decoding a uint32 from a slice prepared
// once before the timed loop.
func BenchmarkUintFromByte(b *testing.B) {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(99))
	for i := 0; i < b.N; i++ {
		binary.LittleEndian.Uint32(buf)
	}
}
Bench results:
BenchmarkMessageToByte 10000000 280 ns/op
BenchmarkMessageFromByte 10000000 293 ns/op
BenchmarkStringToByte 50000000 55.1 ns/op
BenchmarkStringFromByte 50000000 49.7 ns/op
BenchmarkUintToByte 1000000000 2.14 ns/op
BenchmarkUintFromByte 2000000000 1.71 ns/op
解决方案 Provided memory is already allocated, a sequence of x=append(x,a...) is rather efficient in Go.
In your example, the initial allocation (make) probably costs more than the sequence of appends. It depends on the size of the fields. Consider the following benchmark:
package main
import (
"testing"
)
// sizeTotal is the exact concatenated length: five 5-byte chunks.
const sizeTotal = 25
var res []byte // To enforce heap allocation: a package-level sink keeps the result reachable
// BenchmarkWithAlloc concatenates five 5-byte chunks into a destination
// buffer that is freshly allocated on every iteration.
func BenchmarkWithAlloc(b *testing.B) {
	chunk := []byte("abcde")
	for i := 0; i < b.N; i++ {
		dst := make([]byte, 0, sizeTotal)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		res = dst // keep dst reachable so it escapes to the heap
	}
}
// BenchmarkWithoutAlloc performs the same concatenation but allocates the
// destination once and resets it with dst = dst[:0] each iteration,
// reusing the backing array.
func BenchmarkWithoutAlloc(b *testing.B) {
	chunk := []byte("abcde")
	dst := make([]byte, 0, sizeTotal)
	for i := 0; i < b.N; i++ {
		dst = dst[:0]
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		dst = append(dst, chunk...)
		res = dst
	}
}
On my box, the result is:
testing: warning: no tests to run
PASS
BenchmarkWithAlloc 10000000 116 ns/op 32 B/op 1 allocs/op
BenchmarkWithoutAlloc 50000000 24.0 ns/op 0 B/op 0 allocs/op
Systematically reallocating the buffer (even a small one) makes this benchmark at least 5 times slower.
So your best hope to optimize this code it to make sure you do not reallocate a buffer for each packet you build. On the contrary, you should keep your buffer, and reuse it for each marshalling operation.
You can reset a slice while keeping its underlying buffer allocated with the following statement:
x = x[:0]
这篇关于Concat字节数组的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!