diff --git a/gridbuf/readbuf.go b/gridbuf/readbuf.go new file mode 100644 index 0000000..a3b9b1b --- /dev/null +++ b/gridbuf/readbuf.go @@ -0,0 +1,124 @@ +// Copyright 2025 CloudWeGo Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gridbuf + +import ( + "errors" + "sync" + + "github.com/bytedance/gopkg/lang/mcache" +) + +var ( + errReadBufferNotEnough = errors.New("error grid read buffer not enough") + readBufferPool = sync.Pool{ + New: func() interface{} { + return &ReadBuffer{ + pool: make([][]byte, 0, 16), + } + }, + } +) + +type ReadBuffer struct { + off int + chunk []byte + chunks [][]byte + pool [][]byte +} + +func NewReadBuffer(bufs [][]byte) *ReadBuffer { + rb := readBufferPool.Get().(*ReadBuffer) + rb.chunk = bufs[0] + rb.chunks = bufs[1:] + return rb +} + +// ReadN read n bytes from chunk, if chunk is not enough, it will read from next chunks. 
+// +// MAKE SURE IT CAN BE INLINE: +// `can inline (*XReadBuffer).ReadN with cost 80` +func (b *ReadBuffer) ReadN(n int) (buf []byte) { + buf = b.chunk[b.off:] + if len(buf) < n { + buf = b.readSlow(n) + } else { + b.off += n + } + return +} + +func (b *ReadBuffer) readSlow(n int) (buf []byte) { + buf = mcache.Malloc(n) + b.pool = append(b.pool, buf) + var l, m int + if len(b.chunk)-b.off > 0 { + m = copy(buf[l:], b.chunk[b.off:]) + l += m + } + for l < n { + if len(b.chunks) == 0 { + panic(errReadBufferNotEnough.Error()) + } + b.chunk = b.chunks[0] + b.off = 0 + b.chunks = b.chunks[1:] + m = copy(buf[l:], b.chunk) + l += m + } + b.off += m + return +} + +// CopyBytes copy bytes from chunk, if chunk is not enough, it will copy from next chunks. +// +// MAKE SURE IT CAN BE INLINE: +// `can inline (*XReadBuffer).CopyBytes with cost 80` +func (b *ReadBuffer) CopyBytes(buf []byte) { + n := copy(buf, b.chunk[b.off:]) + if len(buf) > n { + b.copySlow(buf) + } else { + b.off += n + } +} + +func (b *ReadBuffer) copySlow(buf []byte) { + m := len(b.chunk) - b.off + l := m + for l < len(buf) { + if len(b.chunks) == 0 { + panic(errReadBufferNotEnough.Error()) + } + b.chunk = b.chunks[0] + b.off = 0 + b.chunks = b.chunks[1:] + m = copy(buf[l:], b.chunk) + l += m + } + b.off += m +} + +func (b *ReadBuffer) Free() { + b.off = 0 + b.chunk = nil + b.chunks = nil + for i := range b.pool { + mcache.Free(b.pool[i]) + b.pool[i] = nil + } + b.pool = b.pool[:0] + readBufferPool.Put(b) +} diff --git a/gridbuf/readbuf_test.go b/gridbuf/readbuf_test.go new file mode 100644 index 0000000..9d0c9f1 --- /dev/null +++ b/gridbuf/readbuf_test.go @@ -0,0 +1,171 @@ +// Copyright 2025 CloudWeGo Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gridbuf
+
+import (
+	"runtime/debug"
+	"strings"
+	"testing"
+)
+
+// TestReadBuf_Inline checks (via the panic stack of a nil-receiver call) that
+// ReadN and CopyBytes are inlined: an inlined frame shows up as "Fn(...)".
+// The ReadN case runs in its own closure because a deferred recover only
+// fires while its own function unwinds — in the original flow the first
+// panic unwound the test before the CopyBytes defer was ever registered,
+// so the CopyBytes inline check never executed.
+func TestReadBuf_Inline(t *testing.T) {
+	var x *ReadBuffer
+	func() {
+		defer func() {
+			r := recover()
+			if r == nil {
+				t.Fatal("should panic")
+			}
+			stack := string(debug.Stack())
+			if !strings.Contains(stack, "ReadN(...)") {
+				t.Fatal("should inline ReadN")
+			}
+		}()
+		x.ReadN(10)
+	}()
+	defer func() {
+		r := recover()
+		if r == nil {
+			t.Fatal("should panic")
+		}
+		stack := string(debug.Stack())
+		if !strings.Contains(stack, "CopyBytes(...)") {
+			t.Fatal("should inline CopyBytes")
+		}
+	}()
+
+	x.CopyBytes(make([]byte, 1))
+}
+
+// TestReadBuf_CrossPad reads spans that cross the boundary between two
+// backing chunks, for both the zero-copy (ReadN) and copying (CopyBytes)
+// access paths.
+func TestReadBuf_CrossPad(t *testing.T) {
+	tf := func(getBuf func(x *ReadBuffer, n int) []byte) {
+		ori1 := make([]byte, padLength)
+		for i := range ori1 {
+			ori1[i] = 'a'
+		}
+		ori2 := make([]byte, padLength)
+		for i := range ori2 {
+			ori2[i] = 'b'
+		}
+		ori := [][]byte{ori1, ori2}
+		x := NewReadBuffer(ori)
+		defer x.Free()
+		buf := getBuf(x, padLength-1)
+		if len(buf) < padLength-1 {
+			t.Fatal("buf length should be great or equal to padLength-1")
+		}
+		for _, byt := range buf[:padLength-1] {
+			if byt != 'a' {
+				t.Fatal("byt should be 'a'")
+			}
+		}
+		buf = getBuf(x, 2)
+		if len(buf) < 2 {
+			t.Fatal("buf length should be great or equal to 2")
+		}
+		if buf[0] != 'a' {
+			t.Fatal("buf[0] should be 'a'")
+		}
+		if buf[1] != 'b' {
+			t.Fatal("buf[1] should be 'b'")
+		}
+		buf = getBuf(x, padLength-1)
+		if len(buf) < padLength-1 {
+			t.Fatal("buf length should be great or equal to padLength-1")
+		}
+		for _, byt := range buf[:padLength-1] {
+			if byt != 'b' {
+
t.Fatal("byt should be 'b'") + } + } + } + tf(func(x *ReadBuffer, n int) []byte { + return x.ReadN(n) + }) + tf(func(x *ReadBuffer, n int) []byte { + buf := make([]byte, n) + x.CopyBytes(buf) + return buf + }) +} + +func TestReadBuf_NoCrossPad(t *testing.T) { + tf := func(getBuf func(x *ReadBuffer, n int) []byte) { + ori1 := make([]byte, padLength/2) + for i := range ori1 { + ori1[i] = 'a' + } + ori2 := make([]byte, padLength/2) + for i := range ori2 { + ori2[i] = 'b' + } + ori := append(ori1, ori2...) + x := NewReadBuffer([][]byte{ori}) + defer x.Free() + + buf := getBuf(x, padLength/2) + if len(buf) < padLength/2 { + t.Fatal("buf length should be great or equal to padLength/2") + } + for _, byt := range buf[:padLength/2] { + if byt != 'a' { + t.Fatal("byt should be 'a'") + } + } + buf = getBuf(x, padLength/2) + if len(buf) < padLength/2 { + t.Fatal("buf length should be great or equal to padLength/2") + } + for _, byt := range buf[:padLength/2] { + if byt != 'b' { + t.Fatal("byt should be 'b'") + } + } + } + tf(func(x *ReadBuffer, n int) []byte { + return x.ReadN(n) + }) + tf(func(x *ReadBuffer, n int) []byte { + buf := make([]byte, n) + x.CopyBytes(buf) + return buf + }) +} + +func BenchmarkReadBuf_ReadN(b *testing.B) { + bytes := make([]byte, b.N) + buf := NewReadBuffer([][]byte{bytes}) + defer buf.Free() + + var tmp []byte + b.ResetTimer() + for i := 0; i < b.N; i++ { + tmp = buf.ReadN(1) + } + _ = tmp +} + +func BenchmarkBytes_Read(b *testing.B) { + bytes := make([]byte, b.N) + + var off int + var tmp []byte + b.ResetTimer() + for i := 0; i < b.N; i++ { + tmp = bytes[off : off+1] + off++ + } + _ = tmp +} diff --git a/gridbuf/writebuf.go b/gridbuf/writebuf.go new file mode 100644 index 0000000..eb2a8cf --- /dev/null +++ b/gridbuf/writebuf.go @@ -0,0 +1,103 @@ +// Copyright 2025 CloudWeGo Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gridbuf + +import ( + "sync" + + "github.com/bytedance/gopkg/lang/mcache" +) + +const padLength = 1 << 13 + +var writeBufferPool = sync.Pool{ + New: func() interface{} { + return &WriteBuffer{ + chunks: make([][]byte, 0, 16), + pool: make([][]byte, 0, 16), + } + }, +} + +type WriteBuffer struct { + chunks [][]byte + pool [][]byte +} + +func NewWriteBuffer() *WriteBuffer { + return writeBufferPool.Get().(*WriteBuffer) +} + +func (b *WriteBuffer) NewBuffer(old []byte, n int) []byte { + if b == nil { + return old + } + if len(old) > 0 { + b.chunks = append(b.chunks, old) + } + if n < 0 { + // n < 0 means no need to malloc + return nil + } + // refresh chunk + if n < padLength { + n = padLength + } + buf := mcache.Malloc(n) + buf = buf[:0] + b.pool = append(b.pool, buf) + return buf +} + +func (b *WriteBuffer) Free() { + if b == nil { + return + } + for i := range b.chunks { + b.chunks[i] = nil + } + b.chunks = b.chunks[:0] + for i := range b.pool { + mcache.Free(b.pool[i]) + b.pool[i] = nil + } + b.pool = b.pool[:0] + writeBufferPool.Put(b) +} + +func (b *WriteBuffer) WriteDirect(old, buf []byte) []byte { + if b == nil { + return append(old, buf...) 
+	}
+	// relink chunks
+	if len(old) > 0 {
+		b.chunks = append(b.chunks, old)
+	}
+
+	// write directly
+	b.chunks = append(b.chunks, buf)
+
+	// buf was stored verbatim above, so its spare capacity is irrelevant here;
+	// the next active chunk handed back to the caller is old's unused tail.
+	// FIX: the original tested cap(buf)-len(buf) while returning old's tail,
+	// which could return an empty window (or skip a valid one) and stall writes.
+	if cap(old)-len(old) > 0 {
+		return old[len(old):cap(old)]
+	}
+	return b.NewBuffer(nil, 0)
+}
+
+// Bytes returns the accumulated chunks; nil receiver yields nil.
+func (b *WriteBuffer) Bytes() [][]byte {
+	if b == nil {
+		return nil
+	}
+	return b.chunks
+}
diff --git a/gridbuf/writebuf_test.go b/gridbuf/writebuf_test.go
new file mode 100644
index 0000000..bff7eba
--- /dev/null
+++ b/gridbuf/writebuf_test.go
@@ -0,0 +1,153 @@
+// Copyright 2025 CloudWeGo Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+ +package gridbuf + +import ( + "runtime/debug" + "strings" + "testing" +) + +func TestWriteBuffer_Inline(t *testing.T) { + var b *WriteBuffer + + defer func() { + r := recover() + if r == nil { + t.Fatal("should panic") + } + stack := string(debug.Stack()) + if !strings.Contains(stack, "MallocN(...)") { + t.Fatal("should inline MallocN") + } + }() + + b.MallocN(10) +} + +func TestWriteBuffer_CrossPad(t *testing.T) { + b := NewWriteBuffer() + defer b.Free() + buf := b.MallocN(padLength - 1) + for i := range buf { + buf[i] = 'a' + } + buf = b.MallocN(2) + for i := range buf { + buf[i] = 'b' + } + bytes := b.Bytes() + if len(bytes) != 2 { + t.Fatal("bytes length should be 2") + } + if len(bytes[0]) != padLength-1 { + t.Fatal("bytes[0] length should be padLength-1") + } + for i := range bytes[0] { + if bytes[0][i] != 'a' { + t.Fatal("bytes[0][i] should be 'a'") + } + } + if len(bytes[1]) != 2 { + t.Fatal("bytes[1] length should be 2") + } + for i := range bytes[1] { + if bytes[1][i] != 'b' { + t.Fatal("bytes[1][i] should be 'b'") + } + } +} + +func TestWriteBuffer_NoCrossPad(t *testing.T) { + b := NewWriteBuffer() + defer b.Free() + buf := b.MallocN(1024) + for i := range buf { + buf[i] = 'a' + } + buf = b.MallocN(1024) + for i := range buf { + buf[i] = 'b' + } + bytes := b.Bytes() + if len(bytes) != 1 { + t.Fatal("bytes length should be 1") + } + if len(bytes[0]) != 2048 { + t.Fatal("bytes[0] length should be 2048") + } + for i := range bytes[0] { + if i < 1024 && bytes[0][i] != 'a' { + t.Fatal("bytes[0][i] should be 'a'") + } + if i >= 1024 && bytes[0][i] != 'b' { + t.Fatal("bytes[0][i] should be 'b'") + } + } +} + +func TestWriteBuffer_WriteDirect(t *testing.T) { + b := NewWriteBuffer() + defer b.Free() + buf := b.MallocN(1024) + for i := range buf { + buf[i] = 'a' + } + b.WriteDirect([]byte{'b', 'c'}) + bytes := b.Bytes() + if len(bytes) != 2 { + t.Fatal("bytes length should be 2") + } + if len(bytes[0]) != 1024 { + t.Fatal("bytes[0] length should be 1024") + } + 
if len(bytes[1]) != 2 { + t.Fatal("bytes[1] length should be 2") + } + for i := range bytes[0] { + if bytes[0][i] != 'a' { + t.Fatal("bytes[0][i] should be 'a'") + } + } + for i := range bytes[1] { + if bytes[1][i] != byte('b'+i) { + t.Fatal("bytes[1][i] should be 'b'+i") + } + } +} + +func BenchmarkWriteBuf_MallocN(b *testing.B) { + x := NewWriteBuffer() + defer x.Free() + + var tmp []byte + b.ResetTimer() + for i := 0; i < b.N; i++ { + tmp = x.MallocN(1) + } + _ = tmp +} + +func BenchmarkBytes_Write(b *testing.B) { + bytes := make([]byte, b.N) + var off int + var tmp []byte + b.ResetTimer() + for i := 0; i < b.N; i++ { + tmp = bytes[off : off+1] + off++ + } + _ = tmp +} diff --git a/protocol/thrift/gridbuf.go b/protocol/thrift/gridbuf.go new file mode 100644 index 0000000..6946921 --- /dev/null +++ b/protocol/thrift/gridbuf.go @@ -0,0 +1,187 @@ +/* + * Copyright 2025 CloudWeGo Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package thrift + +import ( + "encoding/binary" + "fmt" + + "github.com/cloudwego/gopkg/gridbuf" +) + +var GridBuffer GridBufferProtocol + +type GridBufferProtocol struct{} + +// Skip skips over the value for the given type using Go implementation. 
+func (p GridBufferProtocol) Skip(b *gridbuf.ReadBuffer, t TType, unknownFields []byte, receiveUnknownFields bool) ([]byte, error) { + return p.skipType(b, t, defaultRecursionDepth, unknownFields, receiveUnknownFields) +} + +func (p GridBufferProtocol) skipType(b *gridbuf.ReadBuffer, t TType, maxdepth int, unknownFields []byte, receiveUnknownFields bool) ([]byte, error) { + if maxdepth == 0 { + return unknownFields, errDepthLimitExceeded + } + if n := typeToSize[t]; n > 0 { + buf := b.ReadN(int(n))[:n] + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + return unknownFields, nil + } + var err error + switch t { + case STRING: + tmp := b.ReadN(4)[:4] + n := binary.BigEndian.Uint32(tmp) + s := b.ReadN(int(n))[:n] + if receiveUnknownFields { + unknownFields = append(unknownFields, tmp...) + unknownFields = append(unknownFields, s...) + } + return unknownFields, nil + case MAP: + buf := b.ReadN(6)[:6] + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + kt, vt, sz := TType(buf[0]), TType(buf[1]), binary.BigEndian.Uint32(buf[2:]) + ksz, vsz := int(typeToSize[kt]), int(typeToSize[vt]) + if ksz > 0 && vsz > 0 { // fast path, fast skip + mapkvsize := (int(sz) * (ksz + vsz)) + buf = b.ReadN(mapkvsize)[:mapkvsize] + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + return unknownFields, nil + } + for j := int32(0); j < int32(sz); j++ { + if ksz > 0 { + kbuf := b.ReadN(ksz)[:ksz] + if receiveUnknownFields { + unknownFields = append(unknownFields, kbuf...) + } + } else if kt == STRING { + tmp := b.ReadN(4)[:4] + n := binary.BigEndian.Uint32(tmp) + s := b.ReadN(int(n))[:n] + if receiveUnknownFields { + unknownFields = append(unknownFields, tmp...) + unknownFields = append(unknownFields, s...) 
+ } + } else { + unknownFields, err = p.skipType(b, kt, maxdepth-1, unknownFields, receiveUnknownFields) + if err != nil { + return unknownFields, err + } + } + if vsz > 0 { + vbuf := b.ReadN(vsz)[:vsz] + if receiveUnknownFields { + unknownFields = append(unknownFields, vbuf...) + } + } else if vt == STRING { + tmp := b.ReadN(4)[:4] + n := binary.BigEndian.Uint32(tmp) + s := b.ReadN(int(n))[:n] + if receiveUnknownFields { + unknownFields = append(unknownFields, tmp...) + unknownFields = append(unknownFields, s...) + } + } else { + unknownFields, err = p.skipType(b, vt, maxdepth-1, unknownFields, receiveUnknownFields) + if err != nil { + return unknownFields, err + } + } + } + return unknownFields, nil + case LIST, SET: + buf := b.ReadN(5)[:5] + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + vt, sz := TType(buf[0]), binary.BigEndian.Uint32(buf[1:]) + vsz := int(typeToSize[vt]) + if vsz > 0 { // fast path, fast skip + listvsize := int(sz) * vsz + buf = b.ReadN(listvsize)[:listvsize] + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + return unknownFields, nil + } + for j := int32(0); j < int32(sz); j++ { + if vsz > 0 { + vbuf := b.ReadN(vsz)[:vsz] + if receiveUnknownFields { + unknownFields = append(unknownFields, vbuf...) + } + } else if vt == STRING { + tmp := b.ReadN(4)[:4] + n := binary.BigEndian.Uint32(tmp) + s := b.ReadN(int(n))[:n] + if receiveUnknownFields { + unknownFields = append(unknownFields, tmp...) + unknownFields = append(unknownFields, s...) + } + } else { + unknownFields, err = p.skipType(b, vt, maxdepth-1, unknownFields, receiveUnknownFields) + if err != nil { + return unknownFields, err + } + } + } + return unknownFields, nil + case STRUCT: + for { + buf := b.ReadN(1)[:1] // TType + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) 
+ } + ft := TType(buf[0]) + if ft == STOP { + return unknownFields, nil + } + buf = b.ReadN(2)[:2] // Field ID + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + if sz := typeToSize[ft]; sz > 0 { + buf = b.ReadN(int(sz))[:sz] + if receiveUnknownFields { + unknownFields = append(unknownFields, buf...) + } + } else if ft == STRING { + tmp := b.ReadN(4)[:4] + n := binary.BigEndian.Uint32(tmp) + s := b.ReadN(int(n))[:n] + if receiveUnknownFields { + unknownFields = append(unknownFields, tmp...) + unknownFields = append(unknownFields, s...) + } + } else { + unknownFields, err = p.skipType(b, ft, maxdepth-1, unknownFields, receiveUnknownFields) + if err != nil { + return unknownFields, err + } + } + } + default: + return unknownFields, NewProtocolException(INVALID_DATA, fmt.Sprintf("unknown data type %d", t)) + } +} diff --git a/protocol/thrift/gridbuf_test.go b/protocol/thrift/gridbuf_test.go new file mode 100644 index 0000000..7b489f4 --- /dev/null +++ b/protocol/thrift/gridbuf_test.go @@ -0,0 +1,146 @@ +/* + * Copyright 2025 CloudWeGo Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package thrift + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cloudwego/gopkg/gridbuf" +) + +func TestGridBufferSkip(t *testing.T) { + // byte + b := Binary.AppendByte([]byte(nil), 1) + + // string + b = Binary.AppendString(b, "hello") + + // list + b = Binary.AppendListBegin(b, I32, 1) + b = Binary.AppendI32(b, 1) + + // list + b = Binary.AppendListBegin(b, STRING, 1) + b = Binary.AppendString(b, "hello") + + // list> + b = Binary.AppendListBegin(b, LIST, 1) + b = Binary.AppendListBegin(b, I32, 1) + b = Binary.AppendI32(b, 1) + + // map + b = Binary.AppendMapBegin(b, I32, I64, 1) + b = Binary.AppendI32(b, 1) + b = Binary.AppendI64(b, 2) + + // map + b = Binary.AppendMapBegin(b, I32, STRING, 1) + b = Binary.AppendI32(b, 1) + b = Binary.AppendString(b, "hello") + + // map + b = Binary.AppendMapBegin(b, STRING, I64, 1) + b = Binary.AppendString(b, "hello") + b = Binary.AppendI64(b, 2) + + // map> + b = Binary.AppendMapBegin(b, I32, LIST, 1) + b = Binary.AppendI32(b, 1) + b = Binary.AppendListBegin(b, I32, 1) + b = Binary.AppendI32(b, 1) + + // map, i32> + b = Binary.AppendMapBegin(b, LIST, I32, 1) + b = Binary.AppendListBegin(b, I32, 1) + b = Binary.AppendI32(b, 1) + b = Binary.AppendI32(b, 1) + + // struct i32, list + b = Binary.AppendFieldBegin(b, I32, 1) + b = Binary.AppendI32(b, 1) + b = Binary.AppendFieldBegin(b, LIST, 1) + b = Binary.AppendListBegin(b, I32, 1) + b = Binary.AppendI32(b, 1) + b = Binary.AppendFieldStop(b) + + tf := func(gbuf *gridbuf.ReadBuffer) { + var ufs []byte + + ufs, err := GridBuffer.Skip(gbuf, BYTE, ufs, true) + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, STRING, ufs, true) + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, LIST, ufs, true) // list + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, LIST, ufs, true) // list + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, LIST, ufs, true) // list> + require.NoError(t, err) + + ufs, err = 
GridBuffer.Skip(gbuf, MAP, ufs, true) // map + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, MAP, ufs, true) // map + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, MAP, ufs, true) // map + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, MAP, ufs, true) // map> + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, MAP, ufs, true) // map, i32> + require.NoError(t, err) + + ufs, err = GridBuffer.Skip(gbuf, STRUCT, ufs, true) // struct i32, list + require.NoError(t, err) + + require.Equal(t, b, ufs) + } + + // test split bytes + var nbuf [][]byte + for _, byt := range b { + nbuf = append(nbuf, []byte{byt}) + } + gbuf := gridbuf.NewReadBuffer(nbuf) + tf(gbuf) + + // test merge bytes + gbuf = gridbuf.NewReadBuffer([][]byte{b}) + tf(gbuf) + + // errDepthLimitExceeded + b = b[:0] + for i := 0; i < defaultRecursionDepth+1; i++ { + b = Binary.AppendFieldBegin(b, STRUCT, 1) + } + gbuf = gridbuf.NewReadBuffer([][]byte{b}) + _, err := GridBuffer.Skip(gbuf, STRUCT, nil, false) + require.Same(t, errDepthLimitExceeded, err) + + // unknown type + gbuf = gridbuf.NewReadBuffer([][]byte{b}) + _, err = GridBuffer.Skip(gbuf, TType(122), nil, false) + require.Error(t, err) +}