Diffstat (limited to 'src/encoding/binary')
-rw-r--r--  src/encoding/binary/binary.go                 811
-rw-r--r--  src/encoding/binary/binary_test.go            887
-rw-r--r--  src/encoding/binary/example_test.go           187
-rw-r--r--  src/encoding/binary/native_endian_big.go       14
-rw-r--r--  src/encoding/binary/native_endian_little.go    14
-rw-r--r--  src/encoding/binary/varint.go                 166
-rw-r--r--  src/encoding/binary/varint_test.go            247
7 files changed, 2326 insertions, 0 deletions
diff --git a/src/encoding/binary/binary.go b/src/encoding/binary/binary.go
new file mode 100644
index 0000000..3fb18a7
--- /dev/null
+++ b/src/encoding/binary/binary.go
@@ -0,0 +1,811 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package binary implements simple translation between numbers and byte
+// sequences and encoding and decoding of varints.
+//
+// Numbers are translated by reading and writing fixed-size values.
+// A fixed-size value is either a fixed-size arithmetic
+// type (bool, int8, uint8, int16, float32, complex64, ...)
+// or an array or struct containing only fixed-size values.
+//
+// The varint functions encode and decode single integer values using
+// a variable-length encoding; smaller values require fewer bytes.
+// For a specification, see
+// https://developers.google.com/protocol-buffers/docs/encoding.
+//
+// This package favors simplicity over efficiency. Clients that require
+// high-performance serialization, especially for large data structures,
+// should look at more advanced solutions such as the encoding/gob
+// package or protocol buffers.
+package binary
+
+import (
+ "errors"
+ "io"
+ "math"
+ "reflect"
+ "sync"
+)
+
+// A ByteOrder specifies how to convert byte slices into
+// 16-, 32-, or 64-bit unsigned integers.
+type ByteOrder interface {
+ Uint16([]byte) uint16
+ Uint32([]byte) uint32
+ Uint64([]byte) uint64
+ PutUint16([]byte, uint16)
+ PutUint32([]byte, uint32)
+ PutUint64([]byte, uint64)
+ String() string
+}
+
+// AppendByteOrder specifies how to append 16-, 32-, or 64-bit unsigned integers
+// into a byte slice.
+type AppendByteOrder interface {
+ AppendUint16([]byte, uint16) []byte
+ AppendUint32([]byte, uint32) []byte
+ AppendUint64([]byte, uint64) []byte
+ String() string
+}
+
+// LittleEndian is the little-endian implementation of ByteOrder and AppendByteOrder.
+var LittleEndian littleEndian
+
+// BigEndian is the big-endian implementation of ByteOrder and AppendByteOrder.
+var BigEndian bigEndian
+
+type littleEndian struct{}
+
+func (littleEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func (littleEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+}
+
+func (littleEndian) AppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ )
+}
+
+func (littleEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (littleEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}
+
+func (littleEndian) AppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ )
+}
+
+func (littleEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (littleEndian) PutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+}
+
+func (littleEndian) AppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56),
+ )
+}
+
+func (littleEndian) String() string { return "LittleEndian" }
+
+func (littleEndian) GoString() string { return "binary.LittleEndian" }
+
+type bigEndian struct{}
+
+func (bigEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[1]) | uint16(b[0])<<8
+}
+
+func (bigEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 8)
+ b[1] = byte(v)
+}
+
+func (bigEndian) AppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func (bigEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (bigEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 24)
+ b[1] = byte(v >> 16)
+ b[2] = byte(v >> 8)
+ b[3] = byte(v)
+}
+
+func (bigEndian) AppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func (bigEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+func (bigEndian) PutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 56)
+ b[1] = byte(v >> 48)
+ b[2] = byte(v >> 40)
+ b[3] = byte(v >> 32)
+ b[4] = byte(v >> 24)
+ b[5] = byte(v >> 16)
+ b[6] = byte(v >> 8)
+ b[7] = byte(v)
+}
+
+func (bigEndian) AppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>56),
+ byte(v>>48),
+ byte(v>>40),
+ byte(v>>32),
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func (bigEndian) String() string { return "BigEndian" }
+
+func (bigEndian) GoString() string { return "binary.BigEndian" }
+
+func (nativeEndian) String() string { return "NativeEndian" }
+
+func (nativeEndian) GoString() string { return "binary.NativeEndian" }
+
+// Read reads structured binary data from r into data.
+// Data must be a pointer to a fixed-size value or a slice
+// of fixed-size values.
+// Bytes read from r are decoded using the specified byte order
+// and written to successive fields of the data.
+// When decoding boolean values, a zero byte is decoded as false, and
+// any other non-zero byte is decoded as true.
+// When reading into structs, the field data for fields with
+// blank (_) field names is skipped; i.e., blank field names
+// may be used for padding.
+// When reading into a struct, all non-blank fields must be exported
+// or Read may panic.
+//
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading some but not all the bytes,
+// Read returns ErrUnexpectedEOF.
+func Read(r io.Reader, order ByteOrder, data any) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ bs := make([]byte, n)
+ if _, err := io.ReadFull(r, bs); err != nil {
+ return err
+ }
+ switch data := data.(type) {
+ case *bool:
+ *data = bs[0] != 0
+ case *int8:
+ *data = int8(bs[0])
+ case *uint8:
+ *data = bs[0]
+ case *int16:
+ *data = int16(order.Uint16(bs))
+ case *uint16:
+ *data = order.Uint16(bs)
+ case *int32:
+ *data = int32(order.Uint32(bs))
+ case *uint32:
+ *data = order.Uint32(bs)
+ case *int64:
+ *data = int64(order.Uint64(bs))
+ case *uint64:
+ *data = order.Uint64(bs)
+ case *float32:
+ *data = math.Float32frombits(order.Uint32(bs))
+ case *float64:
+ *data = math.Float64frombits(order.Uint64(bs))
+ case []bool:
+ for i, x := range bs { // Easier to loop over the input for 8-bit values.
+ data[i] = x != 0
+ }
+ case []int8:
+ for i, x := range bs {
+ data[i] = int8(x)
+ }
+ case []uint8:
+ copy(data, bs)
+ case []int16:
+ for i := range data {
+ data[i] = int16(order.Uint16(bs[2*i:]))
+ }
+ case []uint16:
+ for i := range data {
+ data[i] = order.Uint16(bs[2*i:])
+ }
+ case []int32:
+ for i := range data {
+ data[i] = int32(order.Uint32(bs[4*i:]))
+ }
+ case []uint32:
+ for i := range data {
+ data[i] = order.Uint32(bs[4*i:])
+ }
+ case []int64:
+ for i := range data {
+ data[i] = int64(order.Uint64(bs[8*i:]))
+ }
+ case []uint64:
+ for i := range data {
+ data[i] = order.Uint64(bs[8*i:])
+ }
+ case []float32:
+ for i := range data {
+ data[i] = math.Float32frombits(order.Uint32(bs[4*i:]))
+ }
+ case []float64:
+ for i := range data {
+ data[i] = math.Float64frombits(order.Uint64(bs[8*i:]))
+ }
+ default:
+ n = 0 // fast path doesn't apply
+ }
+ if n != 0 {
+ return nil
+ }
+ }
+
+ // Fallback to reflect-based decoding.
+ v := reflect.ValueOf(data)
+ size := -1
+ switch v.Kind() {
+ case reflect.Pointer:
+ v = v.Elem()
+ size = dataSize(v)
+ case reflect.Slice:
+ size = dataSize(v)
+ }
+ if size < 0 {
+ return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
+ }
+ d := &decoder{order: order, buf: make([]byte, size)}
+ if _, err := io.ReadFull(r, d.buf); err != nil {
+ return err
+ }
+ d.value(v)
+ return nil
+}
+
+// Write writes the binary representation of data into w.
+// Data must be a fixed-size value or a slice of fixed-size
+// values, or a pointer to such data.
+// Boolean values encode as one byte: 1 for true, and 0 for false.
+// Bytes written to w are encoded using the specified byte order
+// and read from successive fields of the data.
+// When writing structs, zero values are written for fields
+// with blank (_) field names.
+func Write(w io.Writer, order ByteOrder, data any) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ bs := make([]byte, n)
+ switch v := data.(type) {
+ case *bool:
+ if *v {
+ bs[0] = 1
+ } else {
+ bs[0] = 0
+ }
+ case bool:
+ if v {
+ bs[0] = 1
+ } else {
+ bs[0] = 0
+ }
+ case []bool:
+ for i, x := range v {
+ if x {
+ bs[i] = 1
+ } else {
+ bs[i] = 0
+ }
+ }
+ case *int8:
+ bs[0] = byte(*v)
+ case int8:
+ bs[0] = byte(v)
+ case []int8:
+ for i, x := range v {
+ bs[i] = byte(x)
+ }
+ case *uint8:
+ bs[0] = *v
+ case uint8:
+ bs[0] = v
+ case []uint8:
+ bs = v
+ case *int16:
+ order.PutUint16(bs, uint16(*v))
+ case int16:
+ order.PutUint16(bs, uint16(v))
+ case []int16:
+ for i, x := range v {
+ order.PutUint16(bs[2*i:], uint16(x))
+ }
+ case *uint16:
+ order.PutUint16(bs, *v)
+ case uint16:
+ order.PutUint16(bs, v)
+ case []uint16:
+ for i, x := range v {
+ order.PutUint16(bs[2*i:], x)
+ }
+ case *int32:
+ order.PutUint32(bs, uint32(*v))
+ case int32:
+ order.PutUint32(bs, uint32(v))
+ case []int32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], uint32(x))
+ }
+ case *uint32:
+ order.PutUint32(bs, *v)
+ case uint32:
+ order.PutUint32(bs, v)
+ case []uint32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], x)
+ }
+ case *int64:
+ order.PutUint64(bs, uint64(*v))
+ case int64:
+ order.PutUint64(bs, uint64(v))
+ case []int64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], uint64(x))
+ }
+ case *uint64:
+ order.PutUint64(bs, *v)
+ case uint64:
+ order.PutUint64(bs, v)
+ case []uint64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], x)
+ }
+ case *float32:
+ order.PutUint32(bs, math.Float32bits(*v))
+ case float32:
+ order.PutUint32(bs, math.Float32bits(v))
+ case []float32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], math.Float32bits(x))
+ }
+ case *float64:
+ order.PutUint64(bs, math.Float64bits(*v))
+ case float64:
+ order.PutUint64(bs, math.Float64bits(v))
+ case []float64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], math.Float64bits(x))
+ }
+ }
+ _, err := w.Write(bs)
+ return err
+ }
+
+ // Fallback to reflect-based encoding.
+ v := reflect.Indirect(reflect.ValueOf(data))
+ size := dataSize(v)
+ if size < 0 {
+ return errors.New("binary.Write: some values are not fixed-sized in type " + reflect.TypeOf(data).String())
+ }
+ buf := make([]byte, size)
+ e := &encoder{order: order, buf: buf}
+ e.value(v)
+ _, err := w.Write(buf)
+ return err
+}
+
+// Size returns how many bytes Write would generate to encode the value v, which
+// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
+// If v is neither of these, Size returns -1.
+func Size(v any) int {
+ return dataSize(reflect.Indirect(reflect.ValueOf(v)))
+}
+
+var structSize sync.Map // map[reflect.Type]int
+
+// dataSize returns the number of bytes the actual data represented by v occupies in memory.
+// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
+// it returns the length of the slice times the element size and does not count the memory
+// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
+func dataSize(v reflect.Value) int {
+ switch v.Kind() {
+ case reflect.Slice:
+ if s := sizeof(v.Type().Elem()); s >= 0 {
+ return s * v.Len()
+ }
+
+ case reflect.Struct:
+ t := v.Type()
+ if size, ok := structSize.Load(t); ok {
+ return size.(int)
+ }
+ size := sizeof(t)
+ structSize.Store(t, size)
+ return size
+
+ default:
+ if v.IsValid() {
+ return sizeof(v.Type())
+ }
+ }
+
+ return -1
+}
+
+// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
+func sizeof(t reflect.Type) int {
+ switch t.Kind() {
+ case reflect.Array:
+ if s := sizeof(t.Elem()); s >= 0 {
+ return s * t.Len()
+ }
+
+ case reflect.Struct:
+ sum := 0
+ for i, n := 0, t.NumField(); i < n; i++ {
+ s := sizeof(t.Field(i).Type)
+ if s < 0 {
+ return -1
+ }
+ sum += s
+ }
+ return sum
+
+ case reflect.Bool,
+ reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ return int(t.Size())
+ }
+
+ return -1
+}
+
+type coder struct {
+ order ByteOrder
+ buf []byte
+ offset int
+}
+
+type decoder coder
+type encoder coder
+
+func (d *decoder) bool() bool {
+ x := d.buf[d.offset]
+ d.offset++
+ return x != 0
+}
+
+func (e *encoder) bool(x bool) {
+ if x {
+ e.buf[e.offset] = 1
+ } else {
+ e.buf[e.offset] = 0
+ }
+ e.offset++
+}
+
+func (d *decoder) uint8() uint8 {
+ x := d.buf[d.offset]
+ d.offset++
+ return x
+}
+
+func (e *encoder) uint8(x uint8) {
+ e.buf[e.offset] = x
+ e.offset++
+}
+
+func (d *decoder) uint16() uint16 {
+ x := d.order.Uint16(d.buf[d.offset : d.offset+2])
+ d.offset += 2
+ return x
+}
+
+func (e *encoder) uint16(x uint16) {
+ e.order.PutUint16(e.buf[e.offset:e.offset+2], x)
+ e.offset += 2
+}
+
+func (d *decoder) uint32() uint32 {
+ x := d.order.Uint32(d.buf[d.offset : d.offset+4])
+ d.offset += 4
+ return x
+}
+
+func (e *encoder) uint32(x uint32) {
+ e.order.PutUint32(e.buf[e.offset:e.offset+4], x)
+ e.offset += 4
+}
+
+func (d *decoder) uint64() uint64 {
+ x := d.order.Uint64(d.buf[d.offset : d.offset+8])
+ d.offset += 8
+ return x
+}
+
+func (e *encoder) uint64(x uint64) {
+ e.order.PutUint64(e.buf[e.offset:e.offset+8], x)
+ e.offset += 8
+}
+
+func (d *decoder) int8() int8 { return int8(d.uint8()) }
+
+func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }
+
+func (d *decoder) int16() int16 { return int16(d.uint16()) }
+
+func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }
+
+func (d *decoder) int32() int32 { return int32(d.uint32()) }
+
+func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }
+
+func (d *decoder) int64() int64 { return int64(d.uint64()) }
+
+func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
+
+func (d *decoder) value(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Array:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ d.value(v.Index(i))
+ }
+
+ case reflect.Struct:
+ t := v.Type()
+ l := v.NumField()
+ for i := 0; i < l; i++ {
+ // Note: Calling v.CanSet() below is an optimization.
+ // It would be sufficient to check the field name,
+ // but creating the StructField info for each field is
+ // costly (run "go test -bench=ReadStruct" and compare
+ // results when making changes to this code).
+ if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+ d.value(v)
+ } else {
+ d.skip(v)
+ }
+ }
+
+ case reflect.Slice:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ d.value(v.Index(i))
+ }
+
+ case reflect.Bool:
+ v.SetBool(d.bool())
+
+ case reflect.Int8:
+ v.SetInt(int64(d.int8()))
+ case reflect.Int16:
+ v.SetInt(int64(d.int16()))
+ case reflect.Int32:
+ v.SetInt(int64(d.int32()))
+ case reflect.Int64:
+ v.SetInt(d.int64())
+
+ case reflect.Uint8:
+ v.SetUint(uint64(d.uint8()))
+ case reflect.Uint16:
+ v.SetUint(uint64(d.uint16()))
+ case reflect.Uint32:
+ v.SetUint(uint64(d.uint32()))
+ case reflect.Uint64:
+ v.SetUint(d.uint64())
+
+ case reflect.Float32:
+ v.SetFloat(float64(math.Float32frombits(d.uint32())))
+ case reflect.Float64:
+ v.SetFloat(math.Float64frombits(d.uint64()))
+
+ case reflect.Complex64:
+ v.SetComplex(complex(
+ float64(math.Float32frombits(d.uint32())),
+ float64(math.Float32frombits(d.uint32())),
+ ))
+ case reflect.Complex128:
+ v.SetComplex(complex(
+ math.Float64frombits(d.uint64()),
+ math.Float64frombits(d.uint64()),
+ ))
+ }
+}
+
+func (e *encoder) value(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Array:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ e.value(v.Index(i))
+ }
+
+ case reflect.Struct:
+ t := v.Type()
+ l := v.NumField()
+ for i := 0; i < l; i++ {
+ // see comment for corresponding code in decoder.value()
+ if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+ e.value(v)
+ } else {
+ e.skip(v)
+ }
+ }
+
+ case reflect.Slice:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ e.value(v.Index(i))
+ }
+
+ case reflect.Bool:
+ e.bool(v.Bool())
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch v.Type().Kind() {
+ case reflect.Int8:
+ e.int8(int8(v.Int()))
+ case reflect.Int16:
+ e.int16(int16(v.Int()))
+ case reflect.Int32:
+ e.int32(int32(v.Int()))
+ case reflect.Int64:
+ e.int64(v.Int())
+ }
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch v.Type().Kind() {
+ case reflect.Uint8:
+ e.uint8(uint8(v.Uint()))
+ case reflect.Uint16:
+ e.uint16(uint16(v.Uint()))
+ case reflect.Uint32:
+ e.uint32(uint32(v.Uint()))
+ case reflect.Uint64:
+ e.uint64(v.Uint())
+ }
+
+ case reflect.Float32, reflect.Float64:
+ switch v.Type().Kind() {
+ case reflect.Float32:
+ e.uint32(math.Float32bits(float32(v.Float())))
+ case reflect.Float64:
+ e.uint64(math.Float64bits(v.Float()))
+ }
+
+ case reflect.Complex64, reflect.Complex128:
+ switch v.Type().Kind() {
+ case reflect.Complex64:
+ x := v.Complex()
+ e.uint32(math.Float32bits(float32(real(x))))
+ e.uint32(math.Float32bits(float32(imag(x))))
+ case reflect.Complex128:
+ x := v.Complex()
+ e.uint64(math.Float64bits(real(x)))
+ e.uint64(math.Float64bits(imag(x)))
+ }
+ }
+}
+
+func (d *decoder) skip(v reflect.Value) {
+ d.offset += dataSize(v)
+}
+
+func (e *encoder) skip(v reflect.Value) {
+ n := dataSize(v)
+ zero := e.buf[e.offset : e.offset+n]
+ for i := range zero {
+ zero[i] = 0
+ }
+ e.offset += n
+}
+
+// intDataSize returns the size of the data required to represent the data when encoded.
+// It returns zero if the type cannot be implemented by the fast path in Read or Write.
+func intDataSize(data any) int {
+ switch data := data.(type) {
+ case bool, int8, uint8, *bool, *int8, *uint8:
+ return 1
+ case []bool:
+ return len(data)
+ case []int8:
+ return len(data)
+ case []uint8:
+ return len(data)
+ case int16, uint16, *int16, *uint16:
+ return 2
+ case []int16:
+ return 2 * len(data)
+ case []uint16:
+ return 2 * len(data)
+ case int32, uint32, *int32, *uint32:
+ return 4
+ case []int32:
+ return 4 * len(data)
+ case []uint32:
+ return 4 * len(data)
+ case int64, uint64, *int64, *uint64:
+ return 8
+ case []int64:
+ return 8 * len(data)
+ case []uint64:
+ return 8 * len(data)
+ case float32, *float32:
+ return 4
+ case float64, *float64:
+ return 8
+ case []float32:
+ return 4 * len(data)
+ case []float64:
+ return 8 * len(data)
+ }
+ return 0
+}
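
A minimal usage sketch of the AppendByteOrder methods defined above, which grow the destination slice instead of writing in place; the tag/length record layout here is invented for illustration and is not part of the package:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// AppendUint16/AppendUint32 extend the slice as needed,
	// so the caller does not have to pre-size the buffer.
	var rec []byte
	rec = binary.BigEndian.AppendUint16(rec, 0xCAFE) // hypothetical record tag
	rec = binary.BigEndian.AppendUint32(rec, 42)     // hypothetical payload length
	fmt.Printf("% x\n", rec)                         // ca fe 00 00 00 2a
}
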
diff --git a/src/encoding/binary/binary_test.go b/src/encoding/binary/binary_test.go
new file mode 100644
index 0000000..4b22b28
--- /dev/null
+++ b/src/encoding/binary/binary_test.go
@@ -0,0 +1,887 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binary
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strings"
+ "sync"
+ "testing"
+ "unsafe"
+)
+
+type Struct struct {
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Float32 float32
+ Float64 float64
+ Complex64 complex64
+ Complex128 complex128
+ Array [4]uint8
+ Bool bool
+ BoolArray [4]bool
+}
+
+type T struct {
+ Int int
+ Uint uint
+ Uintptr uintptr
+ Array [4]int
+}
+
+var s = Struct{
+ 0x01,
+ 0x0203,
+ 0x04050607,
+ 0x08090a0b0c0d0e0f,
+ 0x10,
+ 0x1112,
+ 0x13141516,
+ 0x1718191a1b1c1d1e,
+
+ math.Float32frombits(0x1f202122),
+ math.Float64frombits(0x232425262728292a),
+ complex(
+ math.Float32frombits(0x2b2c2d2e),
+ math.Float32frombits(0x2f303132),
+ ),
+ complex(
+ math.Float64frombits(0x333435363738393a),
+ math.Float64frombits(0x3b3c3d3e3f404142),
+ ),
+
+ [4]uint8{0x43, 0x44, 0x45, 0x46},
+
+ true,
+ [4]bool{true, false, true, false},
+}
+
+var big = []byte{
+ 1,
+ 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16,
+ 17, 18,
+ 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, 29, 30,
+
+ 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+
+ 67, 68, 69, 70,
+
+ 1,
+ 1, 0, 1, 0,
+}
+
+var little = []byte{
+ 1,
+ 3, 2,
+ 7, 6, 5, 4,
+ 15, 14, 13, 12, 11, 10, 9, 8,
+ 16,
+ 18, 17,
+ 22, 21, 20, 19,
+ 30, 29, 28, 27, 26, 25, 24, 23,
+
+ 34, 33, 32, 31,
+ 42, 41, 40, 39, 38, 37, 36, 35,
+ 46, 45, 44, 43, 50, 49, 48, 47,
+ 58, 57, 56, 55, 54, 53, 52, 51, 66, 65, 64, 63, 62, 61, 60, 59,
+
+ 67, 68, 69, 70,
+
+ 1,
+ 1, 0, 1, 0,
+}
+
+var src = []byte{1, 2, 3, 4, 5, 6, 7, 8}
+var res = []int32{0x01020304, 0x05060708}
+var putbuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+
+func checkResult(t *testing.T, dir string, order ByteOrder, err error, have, want any) {
+ if err != nil {
+ t.Errorf("%v %v: %v", dir, order, err)
+ return
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("%v %v:\n\thave %+v\n\twant %+v", dir, order, have, want)
+ }
+}
+
+func testRead(t *testing.T, order ByteOrder, b []byte, s1 any) {
+ var s2 Struct
+ err := Read(bytes.NewReader(b), order, &s2)
+ checkResult(t, "Read", order, err, s2, s1)
+}
+
+func testWrite(t *testing.T, order ByteOrder, b []byte, s1 any) {
+ buf := new(bytes.Buffer)
+ err := Write(buf, order, s1)
+ checkResult(t, "Write", order, err, buf.Bytes(), b)
+}
+
+func TestLittleEndianRead(t *testing.T) { testRead(t, LittleEndian, little, s) }
+func TestLittleEndianWrite(t *testing.T) { testWrite(t, LittleEndian, little, s) }
+func TestLittleEndianPtrWrite(t *testing.T) { testWrite(t, LittleEndian, little, &s) }
+
+func TestBigEndianRead(t *testing.T) { testRead(t, BigEndian, big, s) }
+func TestBigEndianWrite(t *testing.T) { testWrite(t, BigEndian, big, s) }
+func TestBigEndianPtrWrite(t *testing.T) { testWrite(t, BigEndian, big, &s) }
+
+func TestReadSlice(t *testing.T) {
+ slice := make([]int32, 2)
+ err := Read(bytes.NewReader(src), BigEndian, slice)
+ checkResult(t, "ReadSlice", BigEndian, err, slice, res)
+}
+
+func TestWriteSlice(t *testing.T) {
+ buf := new(bytes.Buffer)
+ err := Write(buf, BigEndian, res)
+ checkResult(t, "WriteSlice", BigEndian, err, buf.Bytes(), src)
+}
+
+func TestReadBool(t *testing.T) {
+ var res bool
+ var err error
+ err = Read(bytes.NewReader([]byte{0}), BigEndian, &res)
+ checkResult(t, "ReadBool", BigEndian, err, res, false)
+ res = false
+ err = Read(bytes.NewReader([]byte{1}), BigEndian, &res)
+ checkResult(t, "ReadBool", BigEndian, err, res, true)
+ res = false
+ err = Read(bytes.NewReader([]byte{2}), BigEndian, &res)
+ checkResult(t, "ReadBool", BigEndian, err, res, true)
+}
+
+func TestReadBoolSlice(t *testing.T) {
+ slice := make([]bool, 4)
+ err := Read(bytes.NewReader([]byte{0, 1, 2, 255}), BigEndian, slice)
+ checkResult(t, "ReadBoolSlice", BigEndian, err, slice, []bool{false, true, true, true})
+}
+
+// Addresses of arrays are easier to manipulate with reflection than are slices.
+var intArrays = []any{
+ &[100]int8{},
+ &[100]int16{},
+ &[100]int32{},
+ &[100]int64{},
+ &[100]uint8{},
+ &[100]uint16{},
+ &[100]uint32{},
+ &[100]uint64{},
+}
+
+func TestSliceRoundTrip(t *testing.T) {
+ buf := new(bytes.Buffer)
+ for _, array := range intArrays {
+ src := reflect.ValueOf(array).Elem()
+ unsigned := false
+ switch src.Index(0).Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ unsigned = true
+ }
+ for i := 0; i < src.Len(); i++ {
+ if unsigned {
+ src.Index(i).SetUint(uint64(i * 0x07654321))
+ } else {
+ src.Index(i).SetInt(int64(i * 0x07654321))
+ }
+ }
+ buf.Reset()
+ srcSlice := src.Slice(0, src.Len())
+ err := Write(buf, BigEndian, srcSlice.Interface())
+ if err != nil {
+ t.Fatal(err)
+ }
+ dst := reflect.New(src.Type()).Elem()
+ dstSlice := dst.Slice(0, dst.Len())
+ err = Read(buf, BigEndian, dstSlice.Interface())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(src.Interface(), dst.Interface()) {
+ t.Fatal(src)
+ }
+ }
+}
+
+func TestWriteT(t *testing.T) {
+ buf := new(bytes.Buffer)
+ ts := T{}
+ if err := Write(buf, BigEndian, ts); err == nil {
+ t.Errorf("WriteT: have err == nil, want non-nil")
+ }
+
+ tv := reflect.Indirect(reflect.ValueOf(ts))
+ for i, n := 0, tv.NumField(); i < n; i++ {
+ typ := tv.Field(i).Type().String()
+ if typ == "[4]int" {
+ typ = "int" // the problem is int, not the [4]
+ }
+ if err := Write(buf, BigEndian, tv.Field(i).Interface()); err == nil {
+ t.Errorf("WriteT.%v: have err == nil, want non-nil", tv.Field(i).Type())
+ } else if !strings.Contains(err.Error(), typ) {
+ t.Errorf("WriteT: have err == %q, want it to mention %s", err, typ)
+ }
+ }
+}
+
+type BlankFields struct {
+ A uint32
+ _ int32
+ B float64
+ _ [4]int16
+ C byte
+ _ [7]byte
+ _ struct {
+ f [8]float32
+ }
+}
+
+type BlankFieldsProbe struct {
+ A uint32
+ P0 int32
+ B float64
+ P1 [4]int16
+ C byte
+ P2 [7]byte
+ P3 struct {
+ F [8]float32
+ }
+}
+
+func TestBlankFields(t *testing.T) {
+ buf := new(bytes.Buffer)
+ b1 := BlankFields{A: 1234567890, B: 2.718281828, C: 42}
+ if err := Write(buf, LittleEndian, &b1); err != nil {
+ t.Error(err)
+ }
+
+ // zero values must have been written for blank fields
+ var p BlankFieldsProbe
+ if err := Read(buf, LittleEndian, &p); err != nil {
+ t.Error(err)
+ }
+
+ // quick test: only check first value of slices
+ if p.P0 != 0 || p.P1[0] != 0 || p.P2[0] != 0 || p.P3.F[0] != 0 {
+ t.Errorf("non-zero values for originally blank fields: %#v", p)
+ }
+
+ // write p and see if we can probe only some fields
+ if err := Write(buf, LittleEndian, &p); err != nil {
+ t.Error(err)
+ }
+
+ // read should ignore blank fields in b2
+ var b2 BlankFields
+ if err := Read(buf, LittleEndian, &b2); err != nil {
+ t.Error(err)
+ }
+ if b1.A != b2.A || b1.B != b2.B || b1.C != b2.C {
+ t.Errorf("%#v != %#v", b1, b2)
+ }
+}
+
+func TestSizeStructCache(t *testing.T) {
+ // Reset the cache, otherwise multiple test runs fail.
+ structSize = sync.Map{}
+
+ count := func() int {
+ var i int
+ structSize.Range(func(_, _ any) bool {
+ i++
+ return true
+ })
+ return i
+ }
+
+ var total int
+ added := func() int {
+ delta := count() - total
+ total += delta
+ return delta
+ }
+
+ type foo struct {
+ A uint32
+ }
+
+ type bar struct {
+ A Struct
+ B foo
+ C Struct
+ }
+
+ testcases := []struct {
+ val any
+ want int
+ }{
+ {new(foo), 1},
+ {new(bar), 1},
+ {new(bar), 0},
+ {new(struct{ A Struct }), 1},
+ {new(struct{ A Struct }), 0},
+ }
+
+ for _, tc := range testcases {
+ if Size(tc.val) == -1 {
+ t.Fatalf("Can't get the size of %T", tc.val)
+ }
+
+ if n := added(); n != tc.want {
+ t.Errorf("Sizing %T added %d entries to the cache, want %d", tc.val, n, tc.want)
+ }
+ }
+}
+
+func TestSizeInvalid(t *testing.T) {
+ testcases := []any{
+ int(0),
+ new(int),
+ (*int)(nil),
+ [1]uint{},
+ new([1]uint),
+ (*[1]uint)(nil),
+ []int{},
+ []int(nil),
+ new([]int),
+ (*[]int)(nil),
+ }
+ for _, tc := range testcases {
+ if got := Size(tc); got != -1 {
+ t.Errorf("Size(%T) = %d, want -1", tc, got)
+ }
+ }
+}
+
+// An attempt to read into a struct with an unexported field will
+// panic. This is probably not the best choice, but at this point
+// anything else would be an API change.
+
+type Unexported struct {
+ a int32
+}
+
+func TestUnexportedRead(t *testing.T) {
+ var buf bytes.Buffer
+ u1 := Unexported{a: 1}
+ if err := Write(&buf, LittleEndian, &u1); err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ if recover() == nil {
+ t.Fatal("did not panic")
+ }
+ }()
+ var u2 Unexported
+ Read(&buf, LittleEndian, &u2)
+}
+
+func TestReadErrorMsg(t *testing.T) {
+ var buf bytes.Buffer
+ read := func(data any) {
+ err := Read(&buf, LittleEndian, data)
+ want := "binary.Read: invalid type " + reflect.TypeOf(data).String()
+ if err == nil {
+ t.Errorf("%T: got no error; want %q", data, want)
+ return
+ }
+ if got := err.Error(); got != want {
+ t.Errorf("%T: got %q; want %q", data, got, want)
+ }
+ }
+ read(0)
+ s := new(struct{})
+ read(&s)
+ p := &s
+ read(&p)
+}
+
+func TestReadTruncated(t *testing.T) {
+ const data = "0123456789abcdef"
+
+ var b1 = make([]int32, 4)
+ var b2 struct {
+ A, B, C, D byte
+ E int32
+ F float64
+ }
+
+ for i := 0; i <= len(data); i++ {
+ var errWant error
+ switch i {
+ case 0:
+ errWant = io.EOF
+ case len(data):
+ errWant = nil
+ default:
+ errWant = io.ErrUnexpectedEOF
+ }
+
+ if err := Read(strings.NewReader(data[:i]), LittleEndian, &b1); err != errWant {
+ t.Errorf("Read(%d) with slice: got %v, want %v", i, err, errWant)
+ }
+ if err := Read(strings.NewReader(data[:i]), LittleEndian, &b2); err != errWant {
+ t.Errorf("Read(%d) with struct: got %v, want %v", i, err, errWant)
+ }
+ }
+}
+
+func testUint64SmallSliceLengthPanics() (panicked bool) {
+ defer func() {
+ panicked = recover() != nil
+ }()
+ b := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+ LittleEndian.Uint64(b[:4])
+ return false
+}
+
+func testPutUint64SmallSliceLengthPanics() (panicked bool) {
+ defer func() {
+ panicked = recover() != nil
+ }()
+ b := [8]byte{}
+ LittleEndian.PutUint64(b[:4], 0x0102030405060708)
+ return false
+}
+
+func TestByteOrder(t *testing.T) {
+ type byteOrder interface {
+ ByteOrder
+ AppendByteOrder
+ }
+ buf := make([]byte, 8)
+ for _, order := range []byteOrder{LittleEndian, BigEndian} {
+ const offset = 3
+ for _, value := range []uint64{
+ 0x0000000000000000,
+ 0x0123456789abcdef,
+ 0xfedcba9876543210,
+ 0xffffffffffffffff,
+ 0xaaaaaaaaaaaaaaaa,
+ math.Float64bits(math.Pi),
+ math.Float64bits(math.E),
+ } {
+ want16 := uint16(value)
+ order.PutUint16(buf[:2], want16)
+ if got := order.Uint16(buf[:2]); got != want16 {
+ t.Errorf("PutUint16: Uint16 = %v, want %v", got, want16)
+ }
+ buf = order.AppendUint16(buf[:offset], want16)
+ if got := order.Uint16(buf[offset:]); got != want16 {
+ t.Errorf("AppendUint16: Uint16 = %v, want %v", got, want16)
+ }
+ if len(buf) != offset+2 {
+ t.Errorf("AppendUint16: len(buf) = %d, want %d", len(buf), offset+2)
+ }
+
+ want32 := uint32(value)
+ order.PutUint32(buf[:4], want32)
+ if got := order.Uint32(buf[:4]); got != want32 {
+ t.Errorf("PutUint32: Uint32 = %v, want %v", got, want32)
+ }
+ buf = order.AppendUint32(buf[:offset], want32)
+ if got := order.Uint32(buf[offset:]); got != want32 {
+ t.Errorf("AppendUint32: Uint32 = %v, want %v", got, want32)
+ }
+ if len(buf) != offset+4 {
+ t.Errorf("AppendUint32: len(buf) = %d, want %d", len(buf), offset+4)
+ }
+
+ want64 := uint64(value)
+ order.PutUint64(buf[:8], want64)
+ if got := order.Uint64(buf[:8]); got != want64 {
+ t.Errorf("PutUint64: Uint64 = %v, want %v", got, want64)
+ }
+ buf = order.AppendUint64(buf[:offset], want64)
+ if got := order.Uint64(buf[offset:]); got != want64 {
+ t.Errorf("AppendUint64: Uint64 = %v, want %v", got, want64)
+ }
+ if len(buf) != offset+8 {
+ t.Errorf("AppendUint64: len(buf) = %d, want %d", len(buf), offset+8)
+ }
+ }
+ }
+}
+
+func TestEarlyBoundsChecks(t *testing.T) {
+ if testUint64SmallSliceLengthPanics() != true {
+ t.Errorf("binary.LittleEndian.Uint64 expected to panic for small slices, but didn't")
+ }
+ if testPutUint64SmallSliceLengthPanics() != true {
+ t.Errorf("binary.LittleEndian.PutUint64 expected to panic for small slices, but didn't")
+ }
+}
+
+func TestReadInvalidDestination(t *testing.T) {
+ testReadInvalidDestination(t, BigEndian)
+ testReadInvalidDestination(t, LittleEndian)
+}
+
+func testReadInvalidDestination(t *testing.T, order ByteOrder) {
+ destinations := []any{
+ int8(0),
+ int16(0),
+ int32(0),
+ int64(0),
+
+ uint8(0),
+ uint16(0),
+ uint32(0),
+ uint64(0),
+
+ bool(false),
+ }
+
+ for _, dst := range destinations {
+ err := Read(bytes.NewReader([]byte{1, 2, 3, 4, 5, 6, 7, 8}), order, dst)
+ want := fmt.Sprintf("binary.Read: invalid type %T", dst)
+ if err == nil || err.Error() != want {
+ t.Fatalf("for type %T: got %q; want %q", dst, err, want)
+ }
+ }
+}
+
+func TestNoFixedSize(t *testing.T) {
+ type Person struct {
+ Age int
+ Weight float64
+ Height float64
+ }
+
+ person := Person{
+ Age: 27,
+ Weight: 67.3,
+ Height: 177.8,
+ }
+
+ buf := new(bytes.Buffer)
+ err := Write(buf, LittleEndian, &person)
+ if err == nil {
+ t.Fatal("binary.Write: unexpected success as size of type *binary.Person is not fixed")
+ }
+ errs := "binary.Write: some values are not fixed-sized in type *binary.Person"
+ if err.Error() != errs {
+ t.Fatalf("got %q, want %q", err, errs)
+ }
+}
+
+type byteSliceReader struct {
+ remain []byte
+}
+
+func (br *byteSliceReader) Read(p []byte) (int, error) {
+ n := copy(p, br.remain)
+ br.remain = br.remain[n:]
+ return n, nil
+}
+
+func BenchmarkReadSlice1000Int32s(b *testing.B) {
+ bsr := &byteSliceReader{}
+ slice := make([]int32, 1000)
+ buf := make([]byte, len(slice)*4)
+ b.SetBytes(int64(len(buf)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bsr.remain = buf
+ Read(bsr, BigEndian, slice)
+ }
+}
+
+func BenchmarkReadStruct(b *testing.B) {
+ bsr := &byteSliceReader{}
+ var buf bytes.Buffer
+ Write(&buf, BigEndian, &s)
+ b.SetBytes(int64(dataSize(reflect.ValueOf(s))))
+ t := s
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bsr.remain = buf.Bytes()
+ Read(bsr, BigEndian, &t)
+ }
+ b.StopTimer()
+ if b.N > 0 && !reflect.DeepEqual(s, t) {
+ b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", t, s)
+ }
+}
+
+func BenchmarkWriteStruct(b *testing.B) {
+ b.SetBytes(int64(Size(&s)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Write(io.Discard, BigEndian, &s)
+ }
+}
+
+func BenchmarkReadInts(b *testing.B) {
+ var ls Struct
+ bsr := &byteSliceReader{}
+ var r io.Reader = bsr
+ b.SetBytes(2 * (1 + 2 + 4 + 8))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bsr.remain = big
+ Read(r, BigEndian, &ls.Int8)
+ Read(r, BigEndian, &ls.Int16)
+ Read(r, BigEndian, &ls.Int32)
+ Read(r, BigEndian, &ls.Int64)
+ Read(r, BigEndian, &ls.Uint8)
+ Read(r, BigEndian, &ls.Uint16)
+ Read(r, BigEndian, &ls.Uint32)
+ Read(r, BigEndian, &ls.Uint64)
+ }
+ b.StopTimer()
+ want := s
+ want.Float32 = 0
+ want.Float64 = 0
+ want.Complex64 = 0
+ want.Complex128 = 0
+ want.Array = [4]uint8{0, 0, 0, 0}
+ want.Bool = false
+ want.BoolArray = [4]bool{false, false, false, false}
+ if b.N > 0 && !reflect.DeepEqual(ls, want) {
+ b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", ls, want)
+ }
+}
+
+func BenchmarkWriteInts(b *testing.B) {
+ buf := new(bytes.Buffer)
+ var w io.Writer = buf
+ b.SetBytes(2 * (1 + 2 + 4 + 8))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ Write(w, BigEndian, s.Int8)
+ Write(w, BigEndian, s.Int16)
+ Write(w, BigEndian, s.Int32)
+ Write(w, BigEndian, s.Int64)
+ Write(w, BigEndian, s.Uint8)
+ Write(w, BigEndian, s.Uint16)
+ Write(w, BigEndian, s.Uint32)
+ Write(w, BigEndian, s.Uint64)
+ }
+ b.StopTimer()
+ if b.N > 0 && !bytes.Equal(buf.Bytes(), big[:30]) {
+ b.Fatalf("first half doesn't match: %x %x", buf.Bytes(), big[:30])
+ }
+}
+
+func BenchmarkWriteSlice1000Int32s(b *testing.B) {
+ slice := make([]int32, 1000)
+ buf := new(bytes.Buffer)
+ var w io.Writer = buf
+ b.SetBytes(4 * 1000)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ Write(w, BigEndian, slice)
+ }
+ b.StopTimer()
+}
+
+func BenchmarkPutUint16(b *testing.B) {
+ b.SetBytes(2)
+ for i := 0; i < b.N; i++ {
+ BigEndian.PutUint16(putbuf[:2], uint16(i))
+ }
+}
+
+func BenchmarkAppendUint16(b *testing.B) {
+ b.SetBytes(2)
+ for i := 0; i < b.N; i++ {
+ putbuf = BigEndian.AppendUint16(putbuf[:0], uint16(i))
+ }
+}
+
+func BenchmarkPutUint32(b *testing.B) {
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ BigEndian.PutUint32(putbuf[:4], uint32(i))
+ }
+}
+
+func BenchmarkAppendUint32(b *testing.B) {
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ putbuf = BigEndian.AppendUint32(putbuf[:0], uint32(i))
+ }
+}
+
+func BenchmarkPutUint64(b *testing.B) {
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ BigEndian.PutUint64(putbuf[:8], uint64(i))
+ }
+}
+
+func BenchmarkAppendUint64(b *testing.B) {
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ putbuf = BigEndian.AppendUint64(putbuf[:0], uint64(i))
+ }
+}
+
+func BenchmarkLittleEndianPutUint16(b *testing.B) {
+ b.SetBytes(2)
+ for i := 0; i < b.N; i++ {
+ LittleEndian.PutUint16(putbuf[:2], uint16(i))
+ }
+}
+
+func BenchmarkLittleEndianAppendUint16(b *testing.B) {
+ b.SetBytes(2)
+ for i := 0; i < b.N; i++ {
+ putbuf = LittleEndian.AppendUint16(putbuf[:0], uint16(i))
+ }
+}
+
+func BenchmarkLittleEndianPutUint32(b *testing.B) {
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ LittleEndian.PutUint32(putbuf[:4], uint32(i))
+ }
+}
+
+func BenchmarkLittleEndianAppendUint32(b *testing.B) {
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ putbuf = LittleEndian.AppendUint32(putbuf[:0], uint32(i))
+ }
+}
+
+func BenchmarkLittleEndianPutUint64(b *testing.B) {
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ LittleEndian.PutUint64(putbuf[:8], uint64(i))
+ }
+}
+
+func BenchmarkLittleEndianAppendUint64(b *testing.B) {
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ putbuf = LittleEndian.AppendUint64(putbuf[:0], uint64(i))
+ }
+}
+
+func BenchmarkReadFloats(b *testing.B) {
+ var ls Struct
+ bsr := &byteSliceReader{}
+ var r io.Reader = bsr
+ b.SetBytes(4 + 8)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bsr.remain = big[30:]
+ Read(r, BigEndian, &ls.Float32)
+ Read(r, BigEndian, &ls.Float64)
+ }
+ b.StopTimer()
+ want := s
+ want.Int8 = 0
+ want.Int16 = 0
+ want.Int32 = 0
+ want.Int64 = 0
+ want.Uint8 = 0
+ want.Uint16 = 0
+ want.Uint32 = 0
+ want.Uint64 = 0
+ want.Complex64 = 0
+ want.Complex128 = 0
+ want.Array = [4]uint8{0, 0, 0, 0}
+ want.Bool = false
+ want.BoolArray = [4]bool{false, false, false, false}
+ if b.N > 0 && !reflect.DeepEqual(ls, want) {
+ b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", ls, want)
+ }
+}
+
+func BenchmarkWriteFloats(b *testing.B) {
+ buf := new(bytes.Buffer)
+ var w io.Writer = buf
+ b.SetBytes(4 + 8)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ Write(w, BigEndian, s.Float32)
+ Write(w, BigEndian, s.Float64)
+ }
+ b.StopTimer()
+ if b.N > 0 && !bytes.Equal(buf.Bytes(), big[30:30+4+8]) {
+ b.Fatalf("first half doesn't match: %x %x", buf.Bytes(), big[30:30+4+8])
+ }
+}
+
+func BenchmarkReadSlice1000Float32s(b *testing.B) {
+ bsr := &byteSliceReader{}
+ slice := make([]float32, 1000)
+ buf := make([]byte, len(slice)*4)
+ b.SetBytes(int64(len(buf)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bsr.remain = buf
+ Read(bsr, BigEndian, slice)
+ }
+}
+
+func BenchmarkWriteSlice1000Float32s(b *testing.B) {
+ slice := make([]float32, 1000)
+ buf := new(bytes.Buffer)
+ var w io.Writer = buf
+ b.SetBytes(4 * 1000)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ Write(w, BigEndian, slice)
+ }
+ b.StopTimer()
+}
+
+func BenchmarkReadSlice1000Uint8s(b *testing.B) {
+ bsr := &byteSliceReader{}
+ slice := make([]uint8, 1000)
+ buf := make([]byte, len(slice))
+ b.SetBytes(int64(len(buf)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bsr.remain = buf
+ Read(bsr, BigEndian, slice)
+ }
+}
+
+func BenchmarkWriteSlice1000Uint8s(b *testing.B) {
+ slice := make([]uint8, 1000)
+ buf := new(bytes.Buffer)
+ var w io.Writer = buf
+ b.SetBytes(1000)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ Write(w, BigEndian, slice)
+ }
+}
+
+func TestNativeEndian(t *testing.T) {
+ const val = 0x12345678
+ i := uint32(val)
+ s := unsafe.Slice((*byte)(unsafe.Pointer(&i)), unsafe.Sizeof(i))
+ if v := NativeEndian.Uint32(s); v != val {
+ t.Errorf("NativeEndian.Uint32 returned %#x, expected %#x", v, val)
+ }
+}
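
TestBlankFields above checks the padding behavior described in the Read and Write doc comments: blank (_) fields are skipped on read and zero-filled on write. A minimal sketch of the same idea, using an invented header layout:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// header is a hypothetical fixed layout: one version byte, three reserved
// bytes covered by the blank field, then a 4-byte length.
type header struct {
	Version uint8
	_       [3]byte
	Length  uint32
}

func main() {
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.LittleEndian, header{Version: 2, Length: 512}); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 02 00 00 00 00 02 00 00

	var h header
	if err := binary.Read(bytes.NewReader(buf.Bytes()), binary.LittleEndian, &h); err != nil {
		panic(err)
	}
	fmt.Println(h.Version, h.Length) // 2 512
}
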
diff --git a/src/encoding/binary/example_test.go b/src/encoding/binary/example_test.go
new file mode 100644
index 0000000..4c10daa
--- /dev/null
+++ b/src/encoding/binary/example_test.go
@@ -0,0 +1,187 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binary_test
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math"
+)
+
+func ExampleWrite() {
+ buf := new(bytes.Buffer)
+ var pi float64 = math.Pi
+ err := binary.Write(buf, binary.LittleEndian, pi)
+ if err != nil {
+ fmt.Println("binary.Write failed:", err)
+ }
+ fmt.Printf("% x", buf.Bytes())
+ // Output: 18 2d 44 54 fb 21 09 40
+}
+
+func ExampleWrite_multi() {
+ buf := new(bytes.Buffer)
+ var data = []any{
+ uint16(61374),
+ int8(-54),
+ uint8(254),
+ }
+ for _, v := range data {
+ err := binary.Write(buf, binary.LittleEndian, v)
+ if err != nil {
+ fmt.Println("binary.Write failed:", err)
+ }
+ }
+ fmt.Printf("%x", buf.Bytes())
+ // Output: beefcafe
+}
+
+func ExampleRead() {
+ var pi float64
+ b := []byte{0x18, 0x2d, 0x44, 0x54, 0xfb, 0x21, 0x09, 0x40}
+ buf := bytes.NewReader(b)
+ err := binary.Read(buf, binary.LittleEndian, &pi)
+ if err != nil {
+ fmt.Println("binary.Read failed:", err)
+ }
+ fmt.Print(pi)
+ // Output: 3.141592653589793
+}
+
+func ExampleRead_multi() {
+ b := []byte{0x18, 0x2d, 0x44, 0x54, 0xfb, 0x21, 0x09, 0x40, 0xff, 0x01, 0x02, 0x03, 0xbe, 0xef}
+ r := bytes.NewReader(b)
+
+ var data struct {
+ PI float64
+ Uate uint8
+ Mine [3]byte
+ Too uint16
+ }
+
+ if err := binary.Read(r, binary.LittleEndian, &data); err != nil {
+ fmt.Println("binary.Read failed:", err)
+ }
+
+ fmt.Println(data.PI)
+ fmt.Println(data.Uate)
+ fmt.Printf("% x\n", data.Mine)
+ fmt.Println(data.Too)
+ // Output:
+ // 3.141592653589793
+ // 255
+ // 01 02 03
+ // 61374
+}
+
+func ExampleByteOrder_put() {
+ b := make([]byte, 4)
+ binary.LittleEndian.PutUint16(b[0:], 0x03e8)
+ binary.LittleEndian.PutUint16(b[2:], 0x07d0)
+ fmt.Printf("% x\n", b)
+ // Output:
+ // e8 03 d0 07
+}
+
+func ExampleByteOrder_get() {
+ b := []byte{0xe8, 0x03, 0xd0, 0x07}
+ x1 := binary.LittleEndian.Uint16(b[0:])
+ x2 := binary.LittleEndian.Uint16(b[2:])
+ fmt.Printf("%#04x %#04x\n", x1, x2)
+ // Output:
+ // 0x03e8 0x07d0
+}
+
+func ExamplePutUvarint() {
+ buf := make([]byte, binary.MaxVarintLen64)
+
+ for _, x := range []uint64{1, 2, 127, 128, 255, 256} {
+ n := binary.PutUvarint(buf, x)
+ fmt.Printf("%x\n", buf[:n])
+ }
+ // Output:
+ // 01
+ // 02
+ // 7f
+ // 8001
+ // ff01
+ // 8002
+}
+
+func ExamplePutVarint() {
+ buf := make([]byte, binary.MaxVarintLen64)
+
+ for _, x := range []int64{-65, -64, -2, -1, 0, 1, 2, 63, 64} {
+ n := binary.PutVarint(buf, x)
+ fmt.Printf("%x\n", buf[:n])
+ }
+ // Output:
+ // 8101
+ // 7f
+ // 03
+ // 01
+ // 00
+ // 02
+ // 04
+ // 7e
+ // 8001
+}
+
+func ExampleUvarint() {
+ inputs := [][]byte{
+ {0x01},
+ {0x02},
+ {0x7f},
+ {0x80, 0x01},
+ {0xff, 0x01},
+ {0x80, 0x02},
+ }
+ for _, b := range inputs {
+ x, n := binary.Uvarint(b)
+ if n != len(b) {
+ fmt.Println("Uvarint did not consume all of in")
+ }
+ fmt.Println(x)
+ }
+ // Output:
+ // 1
+ // 2
+ // 127
+ // 128
+ // 255
+ // 256
+}
+
+func ExampleVarint() {
+ inputs := [][]byte{
+ {0x81, 0x01},
+ {0x7f},
+ {0x03},
+ {0x01},
+ {0x00},
+ {0x02},
+ {0x04},
+ {0x7e},
+ {0x80, 0x01},
+ }
+ for _, b := range inputs {
+ x, n := binary.Varint(b)
+ if n != len(b) {
+ fmt.Println("Varint did not consume all of in")
+ }
+ fmt.Println(x)
+ }
+ // Output:
+ // -65
+ // -64
+ // -2
+ // -1
+ // 0
+ // 1
+ // 2
+ // 63
+ // 64
+}
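
The two build-tagged files below pick the machine's byte order at compile time. A minimal sketch of NativeEndian for data that stays on one machine; the printed byte layout depends on the build target:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// NativeEndian embeds littleEndian or bigEndian depending on GOARCH,
	// so the same code round-trips values correctly on every platform.
	b := make([]byte, 4)
	binary.NativeEndian.PutUint32(b, 0x12345678)
	fmt.Println(binary.NativeEndian.Uint32(b) == 0x12345678) // true everywhere
	fmt.Printf("% x\n", b)                                   // 78 56 34 12 on little-endian targets
}
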
diff --git a/src/encoding/binary/native_endian_big.go b/src/encoding/binary/native_endian_big.go
new file mode 100644
index 0000000..1a24354
--- /dev/null
+++ b/src/encoding/binary/native_endian_big.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
+
+package binary
+
+type nativeEndian struct {
+ bigEndian
+}
+
+// NativeEndian is the native-endian implementation of ByteOrder and AppendByteOrder.
+var NativeEndian nativeEndian
diff --git a/src/encoding/binary/native_endian_little.go b/src/encoding/binary/native_endian_little.go
new file mode 100644
index 0000000..67b41ae
--- /dev/null
+++ b/src/encoding/binary/native_endian_little.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm
+
+package binary
+
+type nativeEndian struct {
+ littleEndian
+}
+
+// NativeEndian is the native-endian implementation of ByteOrder and AppendByteOrder.
+var NativeEndian nativeEndian
diff --git a/src/encoding/binary/varint.go b/src/encoding/binary/varint.go
new file mode 100644
index 0000000..7b14fb2
--- /dev/null
+++ b/src/encoding/binary/varint.go
@@ -0,0 +1,166 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binary
+
+// This file implements "varint" encoding of 64-bit integers.
+// The encoding is:
+// - unsigned integers are serialized 7 bits at a time, starting with the
+// least significant bits
+// - the most significant bit (msb) in each output byte indicates if there
+// is a continuation byte (msb = 1)
+// - signed integers are mapped to unsigned integers using "zig-zag"
+// encoding: Positive values x are written as 2*x + 0, negative values
+// are written as 2*(^x) + 1; that is, negative numbers are complemented
+// and whether to complement is encoded in bit 0.
+//
+// Design note:
+// At most 10 bytes are needed for 64-bit values. The encoding could
+// be more dense: a full 64-bit value needs an extra byte just to hold bit 63.
+// Instead, the msb of the previous byte could be used to hold bit 63 since we
+// know there can't be more than 64 bits. This is a trivial improvement and
+// would reduce the maximum encoding length to 9 bytes. However, it breaks the
+// invariant that the msb is always the "continuation bit" and thus makes the
+// format incompatible with a varint encoding for larger numbers (say 128-bit).
+
+import (
+ "errors"
+ "io"
+)
+
+// MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
+const (
+ MaxVarintLen16 = 3
+ MaxVarintLen32 = 5
+ MaxVarintLen64 = 10
+)
+
+// AppendUvarint appends the varint-encoded form of x,
+// as generated by PutUvarint, to buf and returns the extended buffer.
+func AppendUvarint(buf []byte, x uint64) []byte {
+ for x >= 0x80 {
+ buf = append(buf, byte(x)|0x80)
+ x >>= 7
+ }
+ return append(buf, byte(x))
+}
+
+// PutUvarint encodes a uint64 into buf and returns the number of bytes written.
+// If the buffer is too small, PutUvarint will panic.
+func PutUvarint(buf []byte, x uint64) int {
+ i := 0
+ for x >= 0x80 {
+ buf[i] = byte(x) | 0x80
+ x >>= 7
+ i++
+ }
+ buf[i] = byte(x)
+ return i + 1
+}
+
+// Uvarint decodes a uint64 from buf and returns that value and the
+// number of bytes read (> 0). If an error occurred, the value is 0
+// and the number of bytes n is <= 0 meaning:
+//
+// n == 0: buf too small
+// n < 0: value larger than 64 bits (overflow)
+// and -n is the number of bytes read
+func Uvarint(buf []byte) (uint64, int) {
+ var x uint64
+ var s uint
+ for i, b := range buf {
+ if i == MaxVarintLen64 {
+ // Catch byte reads past MaxVarintLen64.
+ // See issue https://golang.org/issues/41185
+ return 0, -(i + 1) // overflow
+ }
+ if b < 0x80 {
+ if i == MaxVarintLen64-1 && b > 1 {
+ return 0, -(i + 1) // overflow
+ }
+ return x | uint64(b)<<s, i + 1
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return 0, 0
+}
+
+// AppendVarint appends the varint-encoded form of x,
+// as generated by PutVarint, to buf and returns the extended buffer.
+func AppendVarint(buf []byte, x int64) []byte {
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+ return AppendUvarint(buf, ux)
+}
+
+// PutVarint encodes an int64 into buf and returns the number of bytes written.
+// If the buffer is too small, PutVarint will panic.
+func PutVarint(buf []byte, x int64) int {
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+ return PutUvarint(buf, ux)
+}
+
+// Varint decodes an int64 from buf and returns that value and the
+// number of bytes read (> 0). If an error occurred, the value is 0
+// and the number of bytes n is <= 0 with the following meaning:
+//
+// n == 0: buf too small
+// n < 0: value larger than 64 bits (overflow)
+// and -n is the number of bytes read
+func Varint(buf []byte) (int64, int) {
+ ux, n := Uvarint(buf) // ok to continue in presence of error
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x, n
+}
+
+var errOverflow = errors.New("binary: varint overflows a 64-bit integer")
+
+// ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading some but not all the bytes,
+// ReadUvarint returns io.ErrUnexpectedEOF.
+func ReadUvarint(r io.ByteReader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := 0; i < MaxVarintLen64; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ if i > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return x, err
+ }
+ if b < 0x80 {
+ if i == MaxVarintLen64-1 && b > 1 {
+ return x, errOverflow
+ }
+ return x | uint64(b)<<s, nil
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return x, errOverflow
+}
+
+// ReadVarint reads an encoded signed integer from r and returns it as an int64.
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading some but not all the bytes,
+// ReadVarint returns io.ErrUnexpectedEOF.
+func ReadVarint(r io.ByteReader) (int64, error) {
+ ux, err := ReadUvarint(r) // ok to continue in presence of error
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x, err
+}
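
A minimal sketch of the zig-zag mapping described in the file comment above, plus the streaming ReadUvarint path; a bytes.Reader stands in for any io.ByteReader:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Zig-zag keeps small magnitudes short regardless of sign:
	// -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ..., so -64..63 fit in one byte.
	buf := make([]byte, binary.MaxVarintLen64)
	for _, x := range []int64{-64, -1, 0, 1, 63, 64} {
		n := binary.PutVarint(buf, x)
		fmt.Printf("%4d -> % x\n", x, buf[:n])
	}

	// The Read* variants consume one byte at a time from an io.ByteReader.
	r := bytes.NewReader(binary.AppendUvarint(nil, 300))
	v, err := binary.ReadUvarint(r)
	fmt.Println(v, err) // 300 <nil>
}
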
diff --git a/src/encoding/binary/varint_test.go b/src/encoding/binary/varint_test.go
new file mode 100644
index 0000000..5c3ea31
--- /dev/null
+++ b/src/encoding/binary/varint_test.go
@@ -0,0 +1,247 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binary
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "testing"
+)
+
+func testConstant(t *testing.T, w uint, max int) {
+ buf := make([]byte, MaxVarintLen64)
+ n := PutUvarint(buf, 1<<w-1)
+ if n != max {
+ t.Errorf("MaxVarintLen%d = %d; want %d", w, max, n)
+ }
+}
+
+func TestConstants(t *testing.T) {
+ testConstant(t, 16, MaxVarintLen16)
+ testConstant(t, 32, MaxVarintLen32)
+ testConstant(t, 64, MaxVarintLen64)
+}
+
+func testVarint(t *testing.T, x int64) {
+ buf := make([]byte, MaxVarintLen64)
+ n := PutVarint(buf, x)
+ y, m := Varint(buf[0:n])
+ if x != y {
+ t.Errorf("Varint(%d): got %d", x, y)
+ }
+ if n != m {
+ t.Errorf("Varint(%d): got n = %d; want %d", x, m, n)
+ }
+
+ buf2 := []byte("prefix")
+ buf2 = AppendVarint(buf2, x)
+ if string(buf2) != "prefix"+string(buf[:n]) {
+ t.Errorf("AppendVarint(%d): got %q, want %q", x, buf2, "prefix"+string(buf[:n]))
+ }
+
+ y, err := ReadVarint(bytes.NewReader(buf))
+ if err != nil {
+ t.Errorf("ReadVarint(%d): %s", x, err)
+ }
+ if x != y {
+ t.Errorf("ReadVarint(%d): got %d", x, y)
+ }
+}
+
+func testUvarint(t *testing.T, x uint64) {
+ buf := make([]byte, MaxVarintLen64)
+ n := PutUvarint(buf, x)
+ y, m := Uvarint(buf[0:n])
+ if x != y {
+ t.Errorf("Uvarint(%d): got %d", x, y)
+ }
+ if n != m {
+ t.Errorf("Uvarint(%d): got n = %d; want %d", x, m, n)
+ }
+
+ buf2 := []byte("prefix")
+ buf2 = AppendUvarint(buf2, x)
+ if string(buf2) != "prefix"+string(buf[:n]) {
+ t.Errorf("AppendUvarint(%d): got %q, want %q", x, buf2, "prefix"+string(buf[:n]))
+ }
+
+ y, err := ReadUvarint(bytes.NewReader(buf))
+ if err != nil {
+ t.Errorf("ReadUvarint(%d): %s", x, err)
+ }
+ if x != y {
+ t.Errorf("ReadUvarint(%d): got %d", x, y)
+ }
+}
+
+var tests = []int64{
+ -1 << 63,
+ -1<<63 + 1,
+ -1,
+ 0,
+ 1,
+ 2,
+ 10,
+ 20,
+ 63,
+ 64,
+ 65,
+ 127,
+ 128,
+ 129,
+ 255,
+ 256,
+ 257,
+ 1<<63 - 1,
+}
+
+func TestVarint(t *testing.T) {
+ for _, x := range tests {
+ testVarint(t, x)
+ testVarint(t, -x)
+ }
+ for x := int64(0x7); x != 0; x <<= 1 {
+ testVarint(t, x)
+ testVarint(t, -x)
+ }
+}
+
+func TestUvarint(t *testing.T) {
+ for _, x := range tests {
+ testUvarint(t, uint64(x))
+ }
+ for x := uint64(0x7); x != 0; x <<= 1 {
+ testUvarint(t, x)
+ }
+}
+
+func TestBufferTooSmall(t *testing.T) {
+ buf := []byte{0x80, 0x80, 0x80, 0x80}
+ for i := 0; i <= len(buf); i++ {
+ buf := buf[0:i]
+ x, n := Uvarint(buf)
+ if x != 0 || n != 0 {
+ t.Errorf("Uvarint(%v): got x = %d, n = %d", buf, x, n)
+ }
+
+ x, err := ReadUvarint(bytes.NewReader(buf))
+ wantErr := io.EOF
+ if i > 0 {
+ wantErr = io.ErrUnexpectedEOF
+ }
+ if x != 0 || err != wantErr {
+ t.Errorf("ReadUvarint(%v): got x = %d, err = %s", buf, x, err)
+ }
+ }
+}
+
+// Ensure that we catch overflows of bytes going past MaxVarintLen64.
+// See issue https://golang.org/issues/41185
+func TestBufferTooBigWithOverflow(t *testing.T) {
+ tests := []struct {
+ in []byte
+ name string
+ wantN int
+ wantValue uint64
+ }{
+ {
+ name: "invalid: 1000 bytes",
+ in: func() []byte {
+ b := make([]byte, 1000)
+ for i := range b {
+ b[i] = 0xff
+ }
+ b[999] = 0
+ return b
+ }(),
+ wantN: -11,
+ wantValue: 0,
+ },
+ {
+ name: "valid: math.MaxUint64-40",
+ in: []byte{0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01},
+ wantValue: math.MaxUint64 - 40,
+ wantN: 10,
+ },
+ {
+ name: "invalid: with more than MaxVarintLen64 bytes",
+ in: []byte{0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01},
+ wantN: -11,
+ wantValue: 0,
+ },
+ {
+ name: "invalid: 10th byte",
+ in: []byte{0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
+ wantN: -10,
+ wantValue: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ value, n := Uvarint(tt.in)
+ if g, w := n, tt.wantN; g != w {
+ t.Errorf("bytes returned=%d, want=%d", g, w)
+ }
+ if g, w := value, tt.wantValue; g != w {
+ t.Errorf("value=%d, want=%d", g, w)
+ }
+ })
+ }
+}
+
+func testOverflow(t *testing.T, buf []byte, x0 uint64, n0 int, err0 error) {
+ x, n := Uvarint(buf)
+ if x != 0 || n != n0 {
+ t.Errorf("Uvarint(% X): got x = %d, n = %d; want 0, %d", buf, x, n, n0)
+ }
+
+ r := bytes.NewReader(buf)
+ len := r.Len()
+ x, err := ReadUvarint(r)
+ if x != x0 || err != err0 {
+ t.Errorf("ReadUvarint(%v): got x = %d, err = %s; want %d, %s", buf, x, err, x0, err0)
+ }
+ if read := len - r.Len(); read > MaxVarintLen64 {
+ t.Errorf("ReadUvarint(%v): read more than MaxVarintLen64 bytes, got %d", buf, read)
+ }
+}
+
+func TestOverflow(t *testing.T) {
+ testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2}, 0, -10, errOverflow)
+ testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0}, 0, -11, errOverflow)
+ testOverflow(t, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 1<<64-1, -11, errOverflow) // 11 bytes, should overflow
+}
+
+func TestNonCanonicalZero(t *testing.T) {
+ buf := []byte{0x80, 0x80, 0x80, 0}
+ x, n := Uvarint(buf)
+ if x != 0 || n != 4 {
+ t.Errorf("Uvarint(%v): got x = %d, n = %d; want 0, 4", buf, x, n)
+
+ }
+}
+
+func BenchmarkPutUvarint32(b *testing.B) {
+ buf := make([]byte, MaxVarintLen32)
+ b.SetBytes(4)
+ for i := 0; i < b.N; i++ {
+ for j := uint(0); j < MaxVarintLen32; j++ {
+ PutUvarint(buf, 1<<(j*7))
+ }
+ }
+}
+
+func BenchmarkPutUvarint64(b *testing.B) {
+ buf := make([]byte, MaxVarintLen64)
+ b.SetBytes(8)
+ for i := 0; i < b.N; i++ {
+ for j := uint(0); j < MaxVarintLen64; j++ {
+ PutUvarint(buf, 1<<(j*7))
+ }
+ }
+}
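
TestOverflow and TestBufferTooBigWithOverflow above depend on the signed second return value of Uvarint; a minimal sketch of handling it at a call site:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// n > 0: decoded a value from n bytes; n == 0: buf too small;
	// n < 0: 64-bit overflow, and -n bytes were read before giving up.
	decode := func(buf []byte) {
		v, n := binary.Uvarint(buf)
		switch {
		case n > 0:
			fmt.Println("value", v, "from", n, "bytes")
		case n == 0:
			fmt.Println("need more data")
		default:
			fmt.Println("overflow after", -n, "bytes")
		}
	}
	decode([]byte{0x80, 0x01})             // value 128 from 2 bytes
	decode([]byte{0x80, 0x80})             // need more data
	decode(bytes.Repeat([]byte{0xff}, 11)) // overflow after 11 bytes
}
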