author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:19:13 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:19:13 +0000
commit    ccd992355df7192993c666236047820244914598
tree      f00fea65147227b7743083c6148396f74cd66935 /src/archive/zip
parent    Initial commit.
Adding upstream version 1.21.8. (tag: upstream/1.21.8)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/archive/zip')
-rw-r--r--  src/archive/zip/example_test.go                           93
-rw-r--r--  src/archive/zip/fuzz_test.go                              81
-rw-r--r--  src/archive/zip/reader.go                                979
-rw-r--r--  src/archive/zip/reader_test.go                          1828
-rw-r--r--  src/archive/zip/register.go                              147
-rw-r--r--  src/archive/zip/struct.go                                419
-rw-r--r--  src/archive/zip/testdata/crc32-not-streamed.zip          bin  0 -> 314 bytes
-rw-r--r--  src/archive/zip/testdata/dd.zip                          bin  0 -> 154 bytes
-rw-r--r--  src/archive/zip/testdata/dupdir.zip                      bin  0 -> 458 bytes
-rw-r--r--  src/archive/zip/testdata/go-no-datadesc-sig.zip.base64     1
-rw-r--r--  src/archive/zip/testdata/go-with-datadesc-sig.zip        bin  0 -> 242 bytes
-rw-r--r--  src/archive/zip/testdata/gophercolor16x16.png            bin  0 -> 785 bytes
-rw-r--r--  src/archive/zip/testdata/readme.notzip                   bin  0 -> 1906 bytes
-rw-r--r--  src/archive/zip/testdata/readme.zip                      bin  0 -> 1886 bytes
-rw-r--r--  src/archive/zip/testdata/subdir.zip                      bin  0 -> 428 bytes
-rw-r--r--  src/archive/zip/testdata/symlink.zip                     bin  0 -> 173 bytes
-rw-r--r--  src/archive/zip/testdata/test-badbase.zip                bin  0 -> 1170 bytes
-rw-r--r--  src/archive/zip/testdata/test-baddirsz.zip               bin  0 -> 1170 bytes
-rw-r--r--  src/archive/zip/testdata/test-prefix.zip                 bin  0 -> 1227 bytes
-rw-r--r--  src/archive/zip/testdata/test-trailing-junk.zip          bin  0 -> 1184 bytes
-rw-r--r--  src/archive/zip/testdata/test.zip                        bin  0 -> 1170 bytes
-rw-r--r--  src/archive/zip/testdata/time-22738.zip                  bin  0 -> 140 bytes
-rw-r--r--  src/archive/zip/testdata/time-7zip.zip                   bin  0 -> 150 bytes
-rw-r--r--  src/archive/zip/testdata/time-go.zip                     bin  0 -> 148 bytes
-rw-r--r--  src/archive/zip/testdata/time-infozip.zip                bin  0 -> 166 bytes
-rw-r--r--  src/archive/zip/testdata/time-osx.zip                    bin  0 -> 142 bytes
-rw-r--r--  src/archive/zip/testdata/time-win7.zip                   bin  0 -> 114 bytes
-rw-r--r--  src/archive/zip/testdata/time-winrar.zip                 bin  0 -> 150 bytes
-rw-r--r--  src/archive/zip/testdata/time-winzip.zip                 bin  0 -> 150 bytes
-rw-r--r--  src/archive/zip/testdata/unix.zip                        bin  0 -> 620 bytes
-rw-r--r--  src/archive/zip/testdata/utf8-7zip.zip                   bin  0 -> 146 bytes
-rw-r--r--  src/archive/zip/testdata/utf8-infozip.zip                bin  0 -> 162 bytes
-rw-r--r--  src/archive/zip/testdata/utf8-osx.zip                    bin  0 -> 138 bytes
-rw-r--r--  src/archive/zip/testdata/utf8-winrar.zip                 bin  0 -> 146 bytes
-rw-r--r--  src/archive/zip/testdata/utf8-winzip.zip                 bin  0 -> 146 bytes
-rw-r--r--  src/archive/zip/testdata/winxp.zip                       bin  0 -> 412 bytes
-rw-r--r--  src/archive/zip/testdata/zip64-2.zip                     bin  0 -> 266 bytes
-rw-r--r--  src/archive/zip/testdata/zip64.zip                       bin  0 -> 242 bytes
-rw-r--r--  src/archive/zip/writer.go                                634
-rw-r--r--  src/archive/zip/writer_test.go                           604
-rw-r--r--  src/archive/zip/zip_test.go                              828
41 files changed, 5614 insertions(+), 0 deletions(-)
diff --git a/src/archive/zip/example_test.go b/src/archive/zip/example_test.go
new file mode 100644
index 0000000..1eed304
--- /dev/null
+++ b/src/archive/zip/example_test.go
@@ -0,0 +1,93 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip_test
+
+import (
+ "archive/zip"
+ "bytes"
+ "compress/flate"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+func ExampleWriter() {
+ // Create a buffer to write our archive to.
+ buf := new(bytes.Buffer)
+
+ // Create a new zip archive.
+ w := zip.NewWriter(buf)
+
+ // Add some files to the archive.
+ var files = []struct {
+ Name, Body string
+ }{
+ {"readme.txt", "This archive contains some text files."},
+ {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
+ {"todo.txt", "Get animal handling licence.\nWrite more examples."},
+ }
+ for _, file := range files {
+ f, err := w.Create(file.Name)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = f.Write([]byte(file.Body))
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ // Make sure to check the error on Close.
+ err := w.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func ExampleReader() {
+ // Open a zip archive for reading.
+ r, err := zip.OpenReader("testdata/readme.zip")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer r.Close()
+
+ // Iterate through the files in the archive,
+ // printing some of their contents.
+ for _, f := range r.File {
+ fmt.Printf("Contents of %s:\n", f.Name)
+ rc, err := f.Open()
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = io.CopyN(os.Stdout, rc, 68)
+ if err != nil {
+ log.Fatal(err)
+ }
+ rc.Close()
+ fmt.Println()
+ }
+ // Output:
+ // Contents of README:
+ // This is the source code repository for the Go programming language.
+}
+
+func ExampleWriter_RegisterCompressor() {
+ // Override the default Deflate compressor with a higher compression level.
+
+ // Create a buffer to write our archive to.
+ buf := new(bytes.Buffer)
+
+ // Create a new zip archive.
+ w := zip.NewWriter(buf)
+
+ // Register a custom Deflate compressor.
+ w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
+ return flate.NewWriter(out, flate.BestCompression)
+ })
+
+ // Proceed to add files to w.
+}
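
To close the loop on this example, the buffer written above can be read straight back with zip.NewReader. A minimal sketch, not part of this commit, reusing buf from ExampleWriter:

	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range zr.File {
		fmt.Println(f.Name) // readme.txt, gopher.txt, todo.txt
	}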
diff --git a/src/archive/zip/fuzz_test.go b/src/archive/zip/fuzz_test.go
new file mode 100644
index 0000000..7dffde6
--- /dev/null
+++ b/src/archive/zip/fuzz_test.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func FuzzReader(f *testing.F) {
+ testdata, err := os.ReadDir("testdata")
+ if err != nil {
+ f.Fatalf("failed to read testdata directory: %s", err)
+ }
+ for _, de := range testdata {
+ if de.IsDir() {
+ continue
+ }
+ b, err := os.ReadFile(filepath.Join("testdata", de.Name()))
+ if err != nil {
+ f.Fatalf("failed to read testdata: %s", err)
+ }
+ f.Add(b)
+ }
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ r, err := NewReader(bytes.NewReader(b), int64(len(b)))
+ if err != nil {
+ return
+ }
+
+ type file struct {
+ header *FileHeader
+ content []byte
+ }
+ files := []file{}
+
+ for _, f := range r.File {
+ fr, err := f.Open()
+ if err != nil {
+ continue
+ }
+ content, err := io.ReadAll(fr)
+ if err != nil {
+ continue
+ }
+ files = append(files, file{header: &f.FileHeader, content: content})
+ if _, err := r.Open(f.Name); err != nil {
+ continue
+ }
+ }
+
+ // If we were unable to read anything out of the archive don't
+ // bother trying to roundtrip it.
+ if len(files) == 0 {
+ return
+ }
+
+ w := NewWriter(io.Discard)
+ for _, f := range files {
+ ww, err := w.CreateHeader(f.header)
+ if err != nil {
+ t.Fatalf("unable to write previously parsed header: %s", err)
+ }
+ if _, err := ww.Write(f.content); err != nil {
+ t.Fatalf("unable to write previously parsed content: %s", err)
+ }
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatalf("Unable to write archive: %s", err)
+ }
+
+ // TODO: We may want to check if the archive roundtrips.
+ })
+}
diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
new file mode 100644
index 0000000..1fde1de
--- /dev/null
+++ b/src/archive/zip/reader.go
@@ -0,0 +1,979 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "hash"
+ "hash/crc32"
+ "internal/godebug"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+var zipinsecurepath = godebug.New("zipinsecurepath")
+
+var (
+ ErrFormat = errors.New("zip: not a valid zip file")
+ ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
+ ErrChecksum = errors.New("zip: checksum error")
+ ErrInsecurePath = errors.New("zip: insecure file path")
+)
+
+// A Reader serves content from a ZIP archive.
+type Reader struct {
+ r io.ReaderAt
+ File []*File
+ Comment string
+ decompressors map[uint16]Decompressor
+
+ // Some JAR files are zip files with a prefix that is a bash script.
+ // The baseOffset field is the start of the zip file proper.
+ baseOffset int64
+
+ // fileList is a list of files sorted by ename,
+ // for use by the Open method.
+ fileListOnce sync.Once
+ fileList []fileListEntry
+}
+
+// A ReadCloser is a Reader that must be closed when no longer needed.
+type ReadCloser struct {
+ f *os.File
+ Reader
+}
+
+// A File is a single file in a ZIP archive.
+// The file information is in the embedded FileHeader.
+// The file content can be accessed by calling Open.
+type File struct {
+ FileHeader
+ zip *Reader
+ zipr io.ReaderAt
+ headerOffset int64 // includes overall ZIP archive baseOffset
+ zip64 bool // zip64 extended information extra field presence
+}
+
+// OpenReader will open the Zip file specified by name and return a ReadCloser.
+//
+// If any file inside the archive uses a non-local name
+// (as defined by [filepath.IsLocal]) or a name containing backslashes
+// and the GODEBUG environment variable contains `zipinsecurepath=0`,
+// OpenReader returns the reader with an ErrInsecurePath error.
+// A future version of Go may introduce this behavior by default.
+// Programs that want to accept non-local names can ignore
+// the ErrInsecurePath error and use the returned reader.
+func OpenReader(name string) (*ReadCloser, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ r := new(ReadCloser)
+ if err = r.init(f, fi.Size()); err != nil && err != ErrInsecurePath {
+ f.Close()
+ return nil, err
+ }
+ r.f = f
+ return r, err
+}
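
The opt-out that the doc comment describes looks like this on the caller side. A hedged sketch (openLenient is a hypothetical helper; assumes "archive/zip" and "errors" are imported):

	func openLenient(name string) (*zip.ReadCloser, error) {
		r, err := zip.OpenReader(name)
		if errors.Is(err, zip.ErrInsecurePath) {
			// Deliberately accept non-local names; the reader
			// is fully populated despite the error.
			return r, nil
		}
		return r, err
	}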
+
+// NewReader returns a new Reader reading from r, which is assumed to
+// have the given size in bytes.
+//
+// If any file inside the archive uses a non-local name
+// (as defined by [filepath.IsLocal]) or a name containing backslashes
+// and the GODEBUG environment variable contains `zipinsecurepath=0`,
+// NewReader returns the reader with an ErrInsecurePath error.
+// A future version of Go may introduce this behavior by default.
+// Programs that want to accept non-local names can ignore
+// the ErrInsecurePath error and use the returned reader.
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
+ if size < 0 {
+ return nil, errors.New("zip: size cannot be negative")
+ }
+ zr := new(Reader)
+ var err error
+ if err = zr.init(r, size); err != nil && err != ErrInsecurePath {
+ return nil, err
+ }
+ return zr, err
+}
+
+func (r *Reader) init(rdr io.ReaderAt, size int64) error {
+ end, baseOffset, err := readDirectoryEnd(rdr, size)
+ if err != nil {
+ return err
+ }
+ r.r = rdr
+ r.baseOffset = baseOffset
+ // Since the number of directory records is not validated, it is not
+ // safe to preallocate r.File without first checking that the specified
+ // number of files is reasonable, since a malformed archive may
+ // indicate it contains up to 1 << 128 - 1 files. Since each file has a
+ // header which will be _at least_ 30 bytes we can safely preallocate
+ // if (data size / 30) >= end.directoryRecords.
+ if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
+ r.File = make([]*File, 0, end.directoryRecords)
+ }
+ r.Comment = end.comment
+ rs := io.NewSectionReader(rdr, 0, size)
+ if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {
+ return err
+ }
+ buf := bufio.NewReader(rs)
+
+ // The count of files inside a zip is truncated to fit in a uint16.
+ // Gloss over this by reading headers until we encounter
+ // a bad one, and then only report an ErrFormat or UnexpectedEOF if
+ // the file count modulo 65536 is incorrect.
+ for {
+ f := &File{zip: r, zipr: rdr}
+ err = readDirectoryHeader(f, buf)
+ if err == ErrFormat || err == io.ErrUnexpectedEOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ f.headerOffset += r.baseOffset
+ r.File = append(r.File, f)
+ }
+ if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
+ // Return the readDirectoryHeader error if we read
+ // the wrong number of directory entries.
+ return err
+ }
+ if zipinsecurepath.Value() == "0" {
+ for _, f := range r.File {
+ if f.Name == "" {
+ // Zip permits an empty file name field.
+ continue
+ }
+ // The zip specification states that names must use forward slashes,
+ // so consider any backslashes in the name insecure.
+ if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) {
+ zipinsecurepath.IncNonDefault()
+ return ErrInsecurePath
+ }
+ }
+ }
+ return nil
+}
+
+// RegisterDecompressor registers or overrides a custom decompressor for a
+// specific method ID. If a decompressor for a given method is not found,
+// Reader will default to looking up the decompressor at the package level.
+func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
+ if r.decompressors == nil {
+ r.decompressors = make(map[uint16]Decompressor)
+ }
+ r.decompressors[method] = dcomp
+}
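
A caller-side sketch, not in this commit, mirroring ExampleWriter_RegisterCompressor on the read path; zr is assumed to be a *Reader from NewReader, and flate is compress/flate:

	zr.RegisterDecompressor(zip.Deflate, func(r io.Reader) io.ReadCloser {
		// Equivalent to the built-in Deflate decompressor,
		// shown here only to illustrate the hook.
		return flate.NewReader(r)
	})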
+
+func (r *Reader) decompressor(method uint16) Decompressor {
+ dcomp := r.decompressors[method]
+ if dcomp == nil {
+ dcomp = decompressor(method)
+ }
+ return dcomp
+}
+
+// Close closes the Zip file, rendering it unusable for I/O.
+func (rc *ReadCloser) Close() error {
+ return rc.f.Close()
+}
+
+// DataOffset returns the offset of the file's possibly-compressed
+// data, relative to the beginning of the zip file.
+//
+// Most callers should instead use Open, which transparently
+// decompresses data and verifies checksums.
+func (f *File) DataOffset() (offset int64, err error) {
+ bodyOffset, err := f.findBodyOffset()
+ if err != nil {
+ return
+ }
+ return f.headerOffset + bodyOffset, nil
+}
+
+// Open returns a ReadCloser that provides access to the File's contents.
+// Multiple files may be read concurrently.
+func (f *File) Open() (io.ReadCloser, error) {
+ bodyOffset, err := f.findBodyOffset()
+ if err != nil {
+ return nil, err
+ }
+ if strings.HasSuffix(f.Name, "/") {
+ // The ZIP specification (APPNOTE.TXT) specifies that directories, which
+ // are technically zero-byte files, must not have any associated file
+ // data. We previously tried failing here if f.CompressedSize64 != 0,
+ // but it turns out that a number of implementations (namely, the Java
+ // jar tool) don't properly set the storage method on directories
+ // resulting in a file with compressed size > 0 but uncompressed size ==
+ // 0. We still want to fail when a directory has associated uncompressed
+ // data, but we are tolerant of cases where the uncompressed size is
+ // zero but compressed size is not.
+ if f.UncompressedSize64 != 0 {
+ return &dirReader{ErrFormat}, nil
+ } else {
+ return &dirReader{io.EOF}, nil
+ }
+ }
+ size := int64(f.CompressedSize64)
+ r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
+ dcomp := f.zip.decompressor(f.Method)
+ if dcomp == nil {
+ return nil, ErrAlgorithm
+ }
+ var rc io.ReadCloser = dcomp(r)
+ var desr io.Reader
+ if f.hasDataDescriptor() {
+ desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
+ }
+ rc = &checksumReader{
+ rc: rc,
+ hash: crc32.NewIEEE(),
+ f: f,
+ desr: desr,
+ }
+ return rc, nil
+}
+
+// OpenRaw returns a Reader that provides access to the File's contents without
+// decompression.
+func (f *File) OpenRaw() (io.Reader, error) {
+ bodyOffset, err := f.findBodyOffset()
+ if err != nil {
+ return nil, err
+ }
+ r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64))
+ return r, nil
+}
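
OpenRaw is the read half of a raw transfer: paired with Writer.CreateRaw (in writer.go below), it moves an entry between archives without a decompress/recompress round trip, which is what Writer.Copy does internally. A hedged sketch; copyEntry is a hypothetical helper assuming "archive/zip" and "io" imports:

	func copyEntry(w *zip.Writer, f *zip.File) error {
		raw, err := f.OpenRaw()
		if err != nil {
			return err
		}
		dst, err := w.CreateRaw(&f.FileHeader)
		if err != nil {
			return err
		}
		_, err = io.Copy(dst, raw)
		return err
	}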
+
+type dirReader struct {
+ err error
+}
+
+func (r *dirReader) Read([]byte) (int, error) {
+ return 0, r.err
+}
+
+func (r *dirReader) Close() error {
+ return nil
+}
+
+type checksumReader struct {
+ rc io.ReadCloser
+ hash hash.Hash32
+ nread uint64 // number of bytes read so far
+ f *File
+ desr io.Reader // if non-nil, where to read the data descriptor
+ err error // sticky error
+}
+
+func (r *checksumReader) Stat() (fs.FileInfo, error) {
+ return headerFileInfo{&r.f.FileHeader}, nil
+}
+
+func (r *checksumReader) Read(b []byte) (n int, err error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ n, err = r.rc.Read(b)
+ r.hash.Write(b[:n])
+ r.nread += uint64(n)
+ if r.nread > r.f.UncompressedSize64 {
+ return 0, ErrFormat
+ }
+ if err == nil {
+ return
+ }
+ if err == io.EOF {
+ if r.nread != r.f.UncompressedSize64 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if r.desr != nil {
+ if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
+ if err1 == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else {
+ err = err1
+ }
+ } else if r.hash.Sum32() != r.f.CRC32 {
+ err = ErrChecksum
+ }
+ } else {
+ // If there's not a data descriptor, we still compare
+ // the CRC32 of what we've read against the file header
+ // or TOC's CRC32, if it seems like it was set.
+ if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
+ err = ErrChecksum
+ }
+ }
+ }
+ r.err = err
+ return
+}
+
+func (r *checksumReader) Close() error { return r.rc.Close() }
+
+// findBodyOffset does the minimum work to verify the file has a header
+// and returns the file body offset.
+func (f *File) findBodyOffset() (int64, error) {
+ var buf [fileHeaderLen]byte
+ if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
+ return 0, err
+ }
+ b := readBuf(buf[:])
+ if sig := b.uint32(); sig != fileHeaderSignature {
+ return 0, ErrFormat
+ }
+ b = b[22:] // skip over most of the header
+ filenameLen := int(b.uint16())
+ extraLen := int(b.uint16())
+ return int64(fileHeaderLen + filenameLen + extraLen), nil
+}
+
+// readDirectoryHeader attempts to read a directory header from r.
+// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
+// and ErrFormat if it doesn't find a valid header signature.
+func readDirectoryHeader(f *File, r io.Reader) error {
+ var buf [directoryHeaderLen]byte
+ if _, err := io.ReadFull(r, buf[:]); err != nil {
+ return err
+ }
+ b := readBuf(buf[:])
+ if sig := b.uint32(); sig != directoryHeaderSignature {
+ return ErrFormat
+ }
+ f.CreatorVersion = b.uint16()
+ f.ReaderVersion = b.uint16()
+ f.Flags = b.uint16()
+ f.Method = b.uint16()
+ f.ModifiedTime = b.uint16()
+ f.ModifiedDate = b.uint16()
+ f.CRC32 = b.uint32()
+ f.CompressedSize = b.uint32()
+ f.UncompressedSize = b.uint32()
+ f.CompressedSize64 = uint64(f.CompressedSize)
+ f.UncompressedSize64 = uint64(f.UncompressedSize)
+ filenameLen := int(b.uint16())
+ extraLen := int(b.uint16())
+ commentLen := int(b.uint16())
+ b = b[4:] // skipped start disk number and internal attributes (2x uint16)
+ f.ExternalAttrs = b.uint32()
+ f.headerOffset = int64(b.uint32())
+ d := make([]byte, filenameLen+extraLen+commentLen)
+ if _, err := io.ReadFull(r, d); err != nil {
+ return err
+ }
+ f.Name = string(d[:filenameLen])
+ f.Extra = d[filenameLen : filenameLen+extraLen]
+ f.Comment = string(d[filenameLen+extraLen:])
+
+ // Determine the character encoding.
+ utf8Valid1, utf8Require1 := detectUTF8(f.Name)
+ utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
+ switch {
+ case !utf8Valid1 || !utf8Valid2:
+ // Name and Comment definitely not UTF-8.
+ f.NonUTF8 = true
+ case !utf8Require1 && !utf8Require2:
+ // Name and Comment use only single-byte runes that overlap with UTF-8.
+ f.NonUTF8 = false
+ default:
+ // Might be UTF-8, might be some other encoding; preserve existing flag.
+ // Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
+ // Since it is impossible to always distinguish valid UTF-8 from some
+ // other encoding (e.g., GBK or Shift-JIS), we trust the flag.
+ f.NonUTF8 = f.Flags&0x800 == 0
+ }
+
+ needUSize := f.UncompressedSize == ^uint32(0)
+ needCSize := f.CompressedSize == ^uint32(0)
+ needHeaderOffset := f.headerOffset == int64(^uint32(0))
+
+ // Best effort to find what we need.
+ // Other zip authors might not even follow the basic format,
+ // and we'll just ignore the Extra content in that case.
+ var modified time.Time
+parseExtras:
+ for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
+ fieldTag := extra.uint16()
+ fieldSize := int(extra.uint16())
+ if len(extra) < fieldSize {
+ break
+ }
+ fieldBuf := extra.sub(fieldSize)
+
+ switch fieldTag {
+ case zip64ExtraID:
+ f.zip64 = true
+
+ // update directory values from the zip64 extra block.
+ // They should only be consulted if the sizes read earlier
+ // are maxed out.
+ // See golang.org/issue/13367.
+ if needUSize {
+ needUSize = false
+ if len(fieldBuf) < 8 {
+ return ErrFormat
+ }
+ f.UncompressedSize64 = fieldBuf.uint64()
+ }
+ if needCSize {
+ needCSize = false
+ if len(fieldBuf) < 8 {
+ return ErrFormat
+ }
+ f.CompressedSize64 = fieldBuf.uint64()
+ }
+ if needHeaderOffset {
+ needHeaderOffset = false
+ if len(fieldBuf) < 8 {
+ return ErrFormat
+ }
+ f.headerOffset = int64(fieldBuf.uint64())
+ }
+ case ntfsExtraID:
+ if len(fieldBuf) < 4 {
+ continue parseExtras
+ }
+ fieldBuf.uint32() // reserved (ignored)
+ for len(fieldBuf) >= 4 { // need at least tag and size
+ attrTag := fieldBuf.uint16()
+ attrSize := int(fieldBuf.uint16())
+ if len(fieldBuf) < attrSize {
+ continue parseExtras
+ }
+ attrBuf := fieldBuf.sub(attrSize)
+ if attrTag != 1 || attrSize != 24 {
+ continue // Ignore irrelevant attributes
+ }
+
+ const ticksPerSecond = 1e7 // Windows timestamp resolution
+ ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
+ secs := int64(ts / ticksPerSecond)
+ nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
+ epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
+ modified = time.Unix(epoch.Unix()+secs, nsecs)
+ }
+ case unixExtraID, infoZipUnixExtraID:
+ if len(fieldBuf) < 8 {
+ continue parseExtras
+ }
+ fieldBuf.uint32() // AcTime (ignored)
+ ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+ modified = time.Unix(ts, 0)
+ case extTimeExtraID:
+ if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
+ continue parseExtras
+ }
+ ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+ modified = time.Unix(ts, 0)
+ }
+ }
+
+ msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
+ f.Modified = msdosModified
+ if !modified.IsZero() {
+ f.Modified = modified.UTC()
+
+ // If legacy MS-DOS timestamps are set, we can use the delta between
+ // the legacy and extended versions to estimate timezone offset.
+ //
+ // A non-UTC timezone is always used (even if offset is zero).
+ // Thus, FileHeader.Modified.Location() == time.UTC is useful for
+ // determining whether extended timestamps are present.
+ // This is necessary for users that need to do additional time
+ // calculations when dealing with legacy ZIP formats.
+ if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
+ f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
+ }
+ }
+
+ // Assume that uncompressed size 2³²-1 could plausibly happen in
+ // an old zip32 file that was sharding inputs into the largest chunks
+ // possible (or is just malicious; search the web for 42.zip).
+ // If needUSize is true still, it means we didn't see a zip64 extension.
+ // As long as the compressed size is not also 2³²-1 (implausible)
+ // and the header is not also 2³²-1 (equally implausible),
+ // accept the uncompressed size 2³²-1 as valid.
+ // If nothing else, this keeps archive/zip working with 42.zip.
+ _ = needUSize
+
+ if needCSize || needHeaderOffset {
+ return ErrFormat
+ }
+
+ return nil
+}
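
The timezone handling above has a caller-visible consequence spelled out in its comment. A hedged sketch of the check it enables (hasExtendedTimestamp is a hypothetical helper; assumes "archive/zip" and "time" imports):

	// A non-UTC location implies an extended timestamp was combined
	// with legacy MS-DOS fields; a UTC location means either no
	// extended timestamp was found or there were no legacy fields
	// from which to infer a zone.
	func hasExtendedTimestamp(fh *zip.FileHeader) bool {
		return fh.Modified.Location() != time.UTC
	}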
+
+func readDataDescriptor(r io.Reader, f *File) error {
+ var buf [dataDescriptorLen]byte
+ // The spec says: "Although not originally assigned a
+ // signature, the value 0x08074b50 has commonly been adopted
+ // as a signature value for the data descriptor record.
+ // Implementers should be aware that ZIP files may be
+ // encountered with or without this signature marking data
+ // descriptors and should account for either case when reading
+ // ZIP files to ensure compatibility."
+ //
+ // dataDescriptorLen includes the size of the signature but
+ // first read just those 4 bytes to see if it exists.
+ if _, err := io.ReadFull(r, buf[:4]); err != nil {
+ return err
+ }
+ off := 0
+ maybeSig := readBuf(buf[:4])
+ if maybeSig.uint32() != dataDescriptorSignature {
+ // No data descriptor signature. Keep these four
+ // bytes.
+ off += 4
+ }
+ if _, err := io.ReadFull(r, buf[off:12]); err != nil {
+ return err
+ }
+ b := readBuf(buf[:12])
+ if b.uint32() != f.CRC32 {
+ return ErrChecksum
+ }
+
+ // The two sizes that follow here can be either 32 bits or 64 bits
+ // but the spec is not very clear on this, and different
+ // interpretations have been made, causing incompatibilities. We
+ // already have the sizes from the central directory so we can
+ // just ignore these.
+
+ return nil
+}
+
+func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {
+ // look for directoryEndSignature in the last 1k, then in the last 65k
+ var buf []byte
+ var directoryEndOffset int64
+ for i, bLen := range []int64{1024, 65 * 1024} {
+ if bLen > size {
+ bLen = size
+ }
+ buf = make([]byte, int(bLen))
+ if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
+ return nil, 0, err
+ }
+ if p := findSignatureInBlock(buf); p >= 0 {
+ buf = buf[p:]
+ directoryEndOffset = size - bLen + int64(p)
+ break
+ }
+ if i == 1 || bLen == size {
+ return nil, 0, ErrFormat
+ }
+ }
+
+ // read header into struct
+ b := readBuf(buf[4:]) // skip signature
+ d := &directoryEnd{
+ diskNbr: uint32(b.uint16()),
+ dirDiskNbr: uint32(b.uint16()),
+ dirRecordsThisDisk: uint64(b.uint16()),
+ directoryRecords: uint64(b.uint16()),
+ directorySize: uint64(b.uint32()),
+ directoryOffset: uint64(b.uint32()),
+ commentLen: b.uint16(),
+ }
+ l := int(d.commentLen)
+ if l > len(b) {
+ return nil, 0, errors.New("zip: invalid comment length")
+ }
+ d.comment = string(b[:l])
+
+ // These values mean that the file can be a zip64 file
+ if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
+ p, err := findDirectory64End(r, directoryEndOffset)
+ if err == nil && p >= 0 {
+ directoryEndOffset = p
+ err = readDirectory64End(r, p, d)
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ }
+
+ maxInt64 := uint64(1<<63 - 1)
+ if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {
+ return nil, 0, ErrFormat
+ }
+
+ baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)
+
+ // Make sure directoryOffset points to somewhere in our file.
+ if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {
+ return nil, 0, ErrFormat
+ }
+
+ // If the directory end data tells us to use a non-zero baseOffset,
+ // but we would find a valid directory entry if we assume that the
+ // baseOffset is 0, then just use a baseOffset of 0.
+ // We've seen files in which the directory end data gives us
+ // an incorrect baseOffset.
+ if baseOffset > 0 {
+ off := int64(d.directoryOffset)
+ rs := io.NewSectionReader(r, off, size-off)
+ if readDirectoryHeader(&File{}, rs) == nil {
+ baseOffset = 0
+ }
+ }
+
+ return d, baseOffset, nil
+}
+
+// findDirectory64End tries to read the zip64 locator just before the
+// directory end and returns the offset of the zip64 directory end if
+// found.
+func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
+ locOffset := directoryEndOffset - directory64LocLen
+ if locOffset < 0 {
+ return -1, nil // no need to look for a header outside the file
+ }
+ buf := make([]byte, directory64LocLen)
+ if _, err := r.ReadAt(buf, locOffset); err != nil {
+ return -1, err
+ }
+ b := readBuf(buf)
+ if sig := b.uint32(); sig != directory64LocSignature {
+ return -1, nil
+ }
+ if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
+ return -1, nil // the file is not a valid zip64-file
+ }
+ p := b.uint64() // relative offset of the zip64 end of central directory record
+ if b.uint32() != 1 { // total number of disks
+ return -1, nil // the file is not a valid zip64-file
+ }
+ return int64(p), nil
+}
+
+// readDirectory64End reads the zip64 directory end and updates the
+// directory end with the zip64 directory end values.
+func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
+ buf := make([]byte, directory64EndLen)
+ if _, err := r.ReadAt(buf, offset); err != nil {
+ return err
+ }
+
+ b := readBuf(buf)
+ if sig := b.uint32(); sig != directory64EndSignature {
+ return ErrFormat
+ }
+
+ b = b[12:] // skip dir size, version and version needed (uint64 + 2x uint16)
+ d.diskNbr = b.uint32() // number of this disk
+ d.dirDiskNbr = b.uint32() // number of the disk with the start of the central directory
+ d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
+ d.directoryRecords = b.uint64() // total number of entries in the central directory
+ d.directorySize = b.uint64() // size of the central directory
+ d.directoryOffset = b.uint64() // offset of start of central directory with respect to the starting disk number
+
+ return nil
+}
+
+func findSignatureInBlock(b []byte) int {
+ for i := len(b) - directoryEndLen; i >= 0; i-- {
+ // defined from directoryEndSignature in struct.go
+ if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
+ // n is length of comment
+ n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
+ if n+directoryEndLen+i <= len(b) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+type readBuf []byte
+
+func (b *readBuf) uint8() uint8 {
+ v := (*b)[0]
+ *b = (*b)[1:]
+ return v
+}
+
+func (b *readBuf) uint16() uint16 {
+ v := binary.LittleEndian.Uint16(*b)
+ *b = (*b)[2:]
+ return v
+}
+
+func (b *readBuf) uint32() uint32 {
+ v := binary.LittleEndian.Uint32(*b)
+ *b = (*b)[4:]
+ return v
+}
+
+func (b *readBuf) uint64() uint64 {
+ v := binary.LittleEndian.Uint64(*b)
+ *b = (*b)[8:]
+ return v
+}
+
+func (b *readBuf) sub(n int) readBuf {
+ b2 := (*b)[:n]
+ *b = (*b)[n:]
+ return b2
+}
+
+// A fileListEntry is a File and its ename.
+// If file == nil, the fileListEntry describes a directory without metadata.
+type fileListEntry struct {
+ name string
+ file *File
+ isDir bool
+ isDup bool
+}
+
+type fileInfoDirEntry interface {
+ fs.FileInfo
+ fs.DirEntry
+}
+
+func (f *fileListEntry) stat() (fileInfoDirEntry, error) {
+ if f.isDup {
+ return nil, errors.New(f.name + ": duplicate entries in zip file")
+ }
+ if !f.isDir {
+ return headerFileInfo{&f.file.FileHeader}, nil
+ }
+ return f, nil
+}
+
+// Only used for directories.
+func (f *fileListEntry) Name() string { _, elem, _ := split(f.name); return elem }
+func (f *fileListEntry) Size() int64 { return 0 }
+func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
+func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
+func (f *fileListEntry) IsDir() bool { return true }
+func (f *fileListEntry) Sys() any { return nil }
+
+func (f *fileListEntry) ModTime() time.Time {
+ if f.file == nil {
+ return time.Time{}
+ }
+ return f.file.FileHeader.Modified.UTC()
+}
+
+func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
+
+func (f *fileListEntry) String() string {
+ return fs.FormatDirEntry(f)
+}
+
+// toValidName coerces name to be a valid name for fs.FS.Open.
+func toValidName(name string) string {
+ name = strings.ReplaceAll(name, `\`, `/`)
+ p := path.Clean(name)
+
+ p = strings.TrimPrefix(p, "/")
+
+ for strings.HasPrefix(p, "../") {
+ p = p[len("../"):]
+ }
+
+ return p
+}
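
For illustration, and not part of the commit, the mappings this produces on a few hostile names, written as a hypothetical in-package check:

	for _, tc := range []struct{ in, want string }{
		{`a\b\c`, "a/b/c"},            // backslashes normalized
		{"/etc/passwd", "etc/passwd"}, // leading slash trimmed
		{"../../secret", "secret"},    // leading ../ elements dropped
	} {
		if got := toValidName(tc.in); got != tc.want {
			panic(got)
		}
	}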
+
+func (r *Reader) initFileList() {
+ r.fileListOnce.Do(func() {
+ // files and knownDirs map from a file/directory name
+ // to an index into the r.fileList entry that we are
+ // building. They are used to mark duplicate entries.
+ files := make(map[string]int)
+ knownDirs := make(map[string]int)
+
+ // dirs[name] is true if name is known to be a directory,
+ // because it appears as a prefix in a path.
+ dirs := make(map[string]bool)
+
+ for _, file := range r.File {
+ isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
+ name := toValidName(file.Name)
+ if name == "" {
+ continue
+ }
+
+ if idx, ok := files[name]; ok {
+ r.fileList[idx].isDup = true
+ continue
+ }
+ if idx, ok := knownDirs[name]; ok {
+ r.fileList[idx].isDup = true
+ continue
+ }
+
+ for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
+ dirs[dir] = true
+ }
+
+ idx := len(r.fileList)
+ entry := fileListEntry{
+ name: name,
+ file: file,
+ isDir: isDir,
+ }
+ r.fileList = append(r.fileList, entry)
+ if isDir {
+ knownDirs[name] = idx
+ } else {
+ files[name] = idx
+ }
+ }
+ for dir := range dirs {
+ if _, ok := knownDirs[dir]; !ok {
+ if idx, ok := files[dir]; ok {
+ r.fileList[idx].isDup = true
+ } else {
+ entry := fileListEntry{
+ name: dir,
+ file: nil,
+ isDir: true,
+ }
+ r.fileList = append(r.fileList, entry)
+ }
+ }
+ }
+
+ sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
+ })
+}
+
+func fileEntryLess(x, y string) bool {
+ xdir, xelem, _ := split(x)
+ ydir, yelem, _ := split(y)
+ return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+// Open opens the named file in the ZIP archive,
+// using the semantics of fs.FS.Open:
+// paths are always slash separated, with no
+// leading / or ../ elements.
+func (r *Reader) Open(name string) (fs.File, error) {
+ r.initFileList()
+
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
+ }
+ e := r.openLookup(name)
+ if e == nil {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+ }
+ if e.isDir {
+ return &openDir{e, r.openReadDir(name), 0}, nil
+ }
+ rc, err := e.file.Open()
+ if err != nil {
+ return nil, err
+ }
+ return rc.(fs.File), nil
+}
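
Since Open implements fs.FS, a *Reader plugs into the standard fs helpers. A minimal sketch, not part of this commit, walking one of the testdata archives added above:

	package main

	import (
		"archive/zip"
		"fmt"
		"io/fs"
		"log"
	)

	func main() {
		r, err := zip.OpenReader("testdata/unix.zip")
		if err != nil {
			log.Fatal(err)
		}
		defer r.Close()
		if err := fs.WalkDir(&r.Reader, ".", func(p string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			fmt.Println(p, d.IsDir())
			return nil
		}); err != nil {
			log.Fatal(err)
		}
	}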
+
+func split(name string) (dir, elem string, isDir bool) {
+ if len(name) > 0 && name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
+
+var dotFile = &fileListEntry{name: "./", isDir: true}
+
+func (r *Reader) openLookup(name string) *fileListEntry {
+ if name == "." {
+ return dotFile
+ }
+
+ dir, elem, _ := split(name)
+ files := r.fileList
+ i := sort.Search(len(files), func(i int) bool {
+ idir, ielem, _ := split(files[i].name)
+ return idir > dir || idir == dir && ielem >= elem
+ })
+ if i < len(files) {
+ fname := files[i].name
+ if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
+ return &files[i]
+ }
+ }
+ return nil
+}
+
+func (r *Reader) openReadDir(dir string) []fileListEntry {
+ files := r.fileList
+ i := sort.Search(len(files), func(i int) bool {
+ idir, _, _ := split(files[i].name)
+ return idir >= dir
+ })
+ j := sort.Search(len(files), func(j int) bool {
+ jdir, _, _ := split(files[j].name)
+ return jdir > dir
+ })
+ return files[i:j]
+}
+
+type openDir struct {
+ e *fileListEntry
+ files []fileListEntry
+ offset int
+}
+
+func (d *openDir) Close() error { return nil }
+func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat() }
+
+func (d *openDir) Read([]byte) (int, error) {
+ return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
+}
+
+func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
+ n := len(d.files) - d.offset
+ if count > 0 && n > count {
+ n = count
+ }
+ if n == 0 {
+ if count <= 0 {
+ return nil, nil
+ }
+ return nil, io.EOF
+ }
+ list := make([]fs.DirEntry, n)
+ for i := range list {
+ s, err := d.files[d.offset+i].stat()
+ if err != nil {
+ return nil, err
+ }
+ list[i] = s
+ }
+ d.offset += n
+ return list, nil
+}
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
new file mode 100644
index 0000000..a67c335
--- /dev/null
+++ b/src/archive/zip/reader_test.go
@@ -0,0 +1,1828 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "internal/obscuretestdata"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+ "testing/fstest"
+ "time"
+)
+
+type ZipTest struct {
+ Name string
+ Source func() (r io.ReaderAt, size int64) // if non-nil, used instead of testdata/<Name> file
+ Comment string
+ File []ZipTestFile
+ Obscured bool // needed for Apple notarization (golang.org/issue/34986)
+ Error error // the error that Opening this file should return
+}
+
+type ZipTestFile struct {
+ Name string
+ Mode fs.FileMode
+ NonUTF8 bool
+ ModTime time.Time
+ Modified time.Time
+
+ // Information describing expected zip file content.
+ // First, reading the entire content should produce the error ContentErr.
+ // Second, if ContentErr==nil, the content should match Content.
+ // If content is large, an alternative to setting Content is to set File,
+ // which names a file in the testdata/ directory containing the
+ // uncompressed expected content.
+ // If content is very large, an alternative to setting Content or File
+ // is to set Size, which will then be checked against the header-reported size
+ // but will bypass the decompressing of the actual data.
+ // This last option is used for testing very large (multi-GB) compressed files.
+ ContentErr error
+ Content []byte
+ File string
+ Size uint64
+}
+
+var tests = []ZipTest{
+ {
+ Name: "test.zip",
+ Comment: "This is a zipfile comment.",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "test-trailing-junk.zip",
+ Comment: "This is a zipfile comment.",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "test-prefix.zip",
+ Comment: "This is a zipfile comment.",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "test-baddirsz.zip",
+ Comment: "This is a zipfile comment.",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "test-badbase.zip",
+ Comment: "This is a zipfile comment.",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "r.zip",
+ Source: returnRecursiveZip,
+ File: []ZipTestFile{
+ {
+ Name: "r/r.zip",
+ Content: rZipBytes(),
+ Modified: time.Date(2010, 3, 4, 0, 24, 16, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "symlink.zip",
+ File: []ZipTestFile{
+ {
+ Name: "symlink",
+ Content: []byte("../target"),
+ Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)),
+ Mode: 0777 | fs.ModeSymlink,
+ },
+ },
+ },
+ {
+ Name: "readme.zip",
+ },
+ {
+ Name: "readme.notzip",
+ Error: ErrFormat,
+ },
+ {
+ Name: "dd.zip",
+ File: []ZipTestFile{
+ {
+ Name: "filename",
+ Content: []byte("This is a test textfile.\n"),
+ Modified: time.Date(2011, 2, 2, 13, 6, 20, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ // created in Windows XP file manager.
+ Name: "winxp.zip",
+ File: []ZipTestFile{
+ {
+ Name: "hello",
+ Content: []byte("world \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, time.UTC),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/bar",
+ Content: []byte("foo \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, time.UTC),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/empty/",
+ Content: []byte{},
+ Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC),
+ Mode: fs.ModeDir | 0777,
+ },
+ {
+ Name: "readonly",
+ Content: []byte("important \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, time.UTC),
+ Mode: 0444,
+ },
+ },
+ },
+ {
+ // created by Zip 3.0 under Linux
+ Name: "unix.zip",
+ File: []ZipTestFile{
+ {
+ Name: "hello",
+ Content: []byte("world \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, timeZone(0)),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/bar",
+ Content: []byte("foo \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, timeZone(0)),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/empty/",
+ Content: []byte{},
+ Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)),
+ Mode: fs.ModeDir | 0777,
+ },
+ {
+ Name: "readonly",
+ Content: []byte("important \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, timeZone(0)),
+ Mode: 0444,
+ },
+ },
+ },
+ {
+ // created by Go, before we wrote the "optional" data
+ // descriptor signatures (which are required by macOS).
+ // Use obscured file to avoid Apple’s notarization service
+ // rejecting the toolchain due to an inability to unzip this archive.
+ // See golang.org/issue/34986
+ Name: "go-no-datadesc-sig.zip.base64",
+ Obscured: true,
+ File: []ZipTestFile{
+ {
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ // created by Go, after we wrote the "optional" data
+ // descriptor signatures (which are required by macOS)
+ Name: "go-with-datadesc-sig.zip",
+ File: []ZipTestFile{
+ {
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
+ },
+ {
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "Bad-CRC32-in-data-descriptor",
+ Source: returnCorruptCRC32Zip,
+ File: []ZipTestFile{
+ {
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
+ ContentErr: ErrChecksum,
+ },
+ {
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ // Tests that we verify (and accept valid) crc32s on files
+ // with crc32s in their file header (not in data descriptors)
+ {
+ Name: "crc32-not-streamed.zip",
+ File: []ZipTestFile{
+ {
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
+ },
+ {
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ // Tests that we verify (and reject invalid) crc32s on files
+ // with crc32s in their file header (not in data descriptors)
+ {
+ Name: "crc32-not-streamed.zip",
+ Source: returnCorruptNotStreamedZip,
+ File: []ZipTestFile{
+ {
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
+ ContentErr: ErrChecksum,
+ },
+ {
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "zip64.zip",
+ File: []ZipTestFile{
+ {
+ Name: "README",
+ Content: []byte("This small file is in ZIP64 format.\n"),
+ Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, time.UTC),
+ Mode: 0644,
+ },
+ },
+ },
+ // Another zip64 file with different Extras fields. (golang.org/issue/7069)
+ {
+ Name: "zip64-2.zip",
+ File: []ZipTestFile{
+ {
+ Name: "README",
+ Content: []byte("This small file is in ZIP64 format.\n"),
+ Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, timeZone(-4*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ // Largest possible non-zip64 file, with no zip64 header.
+ {
+ Name: "big.zip",
+ Source: returnBigZipBytes,
+ File: []ZipTestFile{
+ {
+ Name: "big.file",
+ Content: nil,
+ Size: 1<<32 - 1,
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "utf8-7zip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-infozip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0644,
+ // Name is valid UTF-8, but format does not have UTF-8 flag set.
+ // We don't do UTF-8 detection for multi-byte runes due to
+ // false-positives with other encodings (e.g., Shift-JIS).
+ // Format says encoding is not UTF-8, so we trust it.
+ NonUTF8: true,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-osx.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0644,
+ // Name is valid UTF-8, but format does not have UTF-8 set.
+ NonUTF8: true,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-winrar.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-winzip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867000000, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "time-7zip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-infozip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "time-osx.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "time-win7.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 58, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-winrar.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-winzip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244000000, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-go.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-22738.zip",
+ File: []ZipTestFile{
+ {
+ Name: "file",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(1999, 12, 31, 19, 0, 0, 0, timeZone(-5*time.Hour)),
+ ModTime: time.Date(1999, 12, 31, 19, 0, 0, 0, time.UTC),
+ },
+ },
+ },
+ {
+ Name: "dupdir.zip",
+ File: []ZipTestFile{
+ {
+ Name: "a/",
+ Content: []byte{},
+ Mode: fs.ModeDir | 0666,
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
+ },
+ {
+ Name: "a/b",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
+ },
+ {
+ Name: "a/b/",
+ Content: []byte{},
+ Mode: fs.ModeDir | 0666,
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
+ },
+ {
+ Name: "a/b/c",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
+ },
+ },
+ },
+}
+
+func TestReader(t *testing.T) {
+ for _, zt := range tests {
+ t.Run(zt.Name, func(t *testing.T) {
+ readTestZip(t, zt)
+ })
+ }
+}
+
+func readTestZip(t *testing.T, zt ZipTest) {
+ var z *Reader
+ var err error
+ var raw []byte
+ if zt.Source != nil {
+ rat, size := zt.Source()
+ z, err = NewReader(rat, size)
+ raw = make([]byte, size)
+ if _, err := rat.ReadAt(raw, 0); err != nil {
+ t.Errorf("ReadAt error=%v", err)
+ return
+ }
+ } else {
+ path := filepath.Join("testdata", zt.Name)
+ if zt.Obscured {
+ tf, err := obscuretestdata.DecodeToTempFile(path)
+ if err != nil {
+ t.Errorf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
+ return
+ }
+ defer os.Remove(tf)
+ path = tf
+ }
+ var rc *ReadCloser
+ rc, err = OpenReader(path)
+ if err == nil {
+ defer rc.Close()
+ z = &rc.Reader
+ }
+ var err2 error
+ raw, err2 = os.ReadFile(path)
+ if err2 != nil {
+ t.Errorf("ReadFile(%s) error=%v", path, err2)
+ return
+ }
+ }
+ if err != zt.Error {
+ t.Errorf("error=%v, want %v", err, zt.Error)
+ return
+ }
+
+ // bail if file is not zip
+ if err == ErrFormat {
+ return
+ }
+
+ // bail here if no Files expected to be tested
+ // (there may actually be files in the zip, but we don't care)
+ if zt.File == nil {
+ return
+ }
+
+ if z.Comment != zt.Comment {
+ t.Errorf("comment=%q, want %q", z.Comment, zt.Comment)
+ }
+ if len(z.File) != len(zt.File) {
+ t.Fatalf("file count=%d, want %d", len(z.File), len(zt.File))
+ }
+
+ // test read of each file
+ for i, ft := range zt.File {
+ readTestFile(t, zt, ft, z.File[i], raw)
+ }
+ if t.Failed() {
+ return
+ }
+
+ // test simultaneous reads
+ n := 0
+ done := make(chan bool)
+ for i := 0; i < 5; i++ {
+ for j, ft := range zt.File {
+ go func(j int, ft ZipTestFile) {
+ readTestFile(t, zt, ft, z.File[j], raw)
+ done <- true
+ }(j, ft)
+ n++
+ }
+ }
+ for ; n > 0; n-- {
+ <-done
+ }
+}
+
+func equalTimeAndZone(t1, t2 time.Time) bool {
+ name1, offset1 := t1.Zone()
+ name2, offset2 := t2.Zone()
+ return t1.Equal(t2) && name1 == name2 && offset1 == offset2
+}
+
+func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File, raw []byte) {
+ if f.Name != ft.Name {
+ t.Errorf("name=%q, want %q", f.Name, ft.Name)
+ }
+ if !ft.Modified.IsZero() && !equalTimeAndZone(f.Modified, ft.Modified) {
+ t.Errorf("%s: Modified=%s, want %s", f.Name, f.Modified, ft.Modified)
+ }
+ if !ft.ModTime.IsZero() && !equalTimeAndZone(f.ModTime(), ft.ModTime) {
+ t.Errorf("%s: ModTime=%s, want %s", f.Name, f.ModTime(), ft.ModTime)
+ }
+
+ testFileMode(t, f, ft.Mode)
+
+ size := uint64(f.UncompressedSize)
+ if size == uint32max {
+ size = f.UncompressedSize64
+ } else if size != f.UncompressedSize64 {
+ t.Errorf("%v: UncompressedSize=%#x does not match UncompressedSize64=%#x", f.Name, size, f.UncompressedSize64)
+ }
+
+ // Check that OpenRaw returns the correct byte segment
+ rw, err := f.OpenRaw()
+ if err != nil {
+ t.Errorf("%v: OpenRaw error=%v", f.Name, err)
+ return
+ }
+ start, err := f.DataOffset()
+ if err != nil {
+ t.Errorf("%v: DataOffset error=%v", f.Name, err)
+ return
+ }
+ got, err := io.ReadAll(rw)
+ if err != nil {
+ t.Errorf("%v: OpenRaw ReadAll error=%v", f.Name, err)
+ return
+ }
+ end := uint64(start) + f.CompressedSize64
+ want := raw[start:end]
+ if !bytes.Equal(got, want) {
+ t.Logf("got %q", got)
+ t.Logf("want %q", want)
+ t.Errorf("%v: OpenRaw returned unexpected bytes", f.Name)
+ return
+ }
+
+ r, err := f.Open()
+ if err != nil {
+ t.Errorf("%v", err)
+ return
+ }
+
+ // For very large files, just check that the size is correct.
+ // The content is expected to be all zeros.
+ // Don't bother uncompressing: too big.
+ if ft.Content == nil && ft.File == "" && ft.Size > 0 {
+ if size != ft.Size {
+ t.Errorf("%v: uncompressed size %#x, want %#x", ft.Name, size, ft.Size)
+ }
+ r.Close()
+ return
+ }
+
+ var b bytes.Buffer
+ _, err = io.Copy(&b, r)
+ if err != ft.ContentErr {
+ t.Errorf("copying contents: %v (want %v)", err, ft.ContentErr)
+ }
+ if err != nil {
+ return
+ }
+ r.Close()
+
+ if g := uint64(b.Len()); g != size {
+ t.Errorf("%v: read %v bytes but f.UncompressedSize == %v", f.Name, g, size)
+ }
+
+ var c []byte
+ if ft.Content != nil {
+ c = ft.Content
+ } else if c, err = os.ReadFile("testdata/" + ft.File); err != nil {
+ t.Error(err)
+ return
+ }
+
+ if b.Len() != len(c) {
+ t.Errorf("%s: len=%d, want %d", f.Name, b.Len(), len(c))
+ return
+ }
+
+ for i, b := range b.Bytes() {
+ if b != c[i] {
+ t.Errorf("%s: content[%d]=%q want %q", f.Name, i, b, c[i])
+ return
+ }
+ }
+}
+
+func testFileMode(t *testing.T, f *File, want fs.FileMode) {
+ mode := f.Mode()
+ if want == 0 {
+ t.Errorf("%s mode: got %v, want none", f.Name, mode)
+ } else if mode != want {
+ t.Errorf("%s mode: want %v, got %v", f.Name, want, mode)
+ }
+}
+
+func TestInvalidFiles(t *testing.T) {
+ const size = 1024 * 70 // 70kb
+ b := make([]byte, size)
+
+ // zeroes
+ _, err := NewReader(bytes.NewReader(b), size)
+ if err != ErrFormat {
+ t.Errorf("zeroes: error=%v, want %v", err, ErrFormat)
+ }
+
+ // repeated directoryEndSignatures
+ sig := make([]byte, 4)
+ binary.LittleEndian.PutUint32(sig, directoryEndSignature)
+ for i := 0; i < size-4; i += 4 {
+ copy(b[i:i+4], sig)
+ }
+ _, err = NewReader(bytes.NewReader(b), size)
+ if err != ErrFormat {
+ t.Errorf("sigs: error=%v, want %v", err, ErrFormat)
+ }
+
+ // negative size
+ _, err = NewReader(bytes.NewReader([]byte("foobar")), -1)
+ if err == nil {
+ t.Errorf("archive/zip.NewReader: expected error when negative size is passed")
+ }
+}
+
+func messWith(fileName string, corrupter func(b []byte)) (r io.ReaderAt, size int64) {
+ data, err := os.ReadFile(filepath.Join("testdata", fileName))
+ if err != nil {
+ panic("Error reading " + fileName + ": " + err.Error())
+ }
+ corrupter(data)
+ return bytes.NewReader(data), int64(len(data))
+}
+
+func returnCorruptCRC32Zip() (r io.ReaderAt, size int64) {
+ return messWith("go-with-datadesc-sig.zip", func(b []byte) {
+ // Corrupt one of the CRC32s in the data descriptor:
+ b[0x2d]++
+ })
+}
+
+func returnCorruptNotStreamedZip() (r io.ReaderAt, size int64) {
+ return messWith("crc32-not-streamed.zip", func(b []byte) {
+ // Corrupt foo.txt's final crc32 byte, in both
+ // the file header and TOC. (0x7e -> 0x7f)
+ b[0x11]++
+ b[0x9d]++
+
+ // TODO(bradfitz): add a new test that only corrupts
+ // one of these values, and verify that that's also an
+ // error. Currently, the reader code doesn't verify the
+ // fileheader and TOC's crc32 match if they're both
+ // non-zero and only the second line above, the TOC,
+ // is what matters.
+ })
+}
+
+// rZipBytes returns the bytes of a recursive zip file, without
+// putting it on disk and triggering certain virus scanners.
+func rZipBytes() []byte {
+ s := `
+0000000 50 4b 03 04 14 00 00 00 08 00 08 03 64 3c f9 f4
+0000010 89 64 48 01 00 00 b8 01 00 00 07 00 00 00 72 2f
+0000020 72 2e 7a 69 70 00 25 00 da ff 50 4b 03 04 14 00
+0000030 00 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00
+0000040 b8 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00
+0000050 2f 00 d0 ff 00 25 00 da ff 50 4b 03 04 14 00 00
+0000060 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 b8
+0000070 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 2f
+0000080 00 d0 ff c2 54 8e 57 39 00 05 00 fa ff c2 54 8e
+0000090 57 39 00 05 00 fa ff 00 05 00 fa ff 00 14 00 eb
+00000a0 ff c2 54 8e 57 39 00 05 00 fa ff 00 05 00 fa ff
+00000b0 00 14 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42
+00000c0 88 21 c4 00 00 14 00 eb ff 42 88 21 c4 00 00 14
+00000d0 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 88 21
+00000e0 c4 00 00 00 00 ff ff 00 00 00 ff ff 00 34 00 cb
+00000f0 ff 42 88 21 c4 00 00 00 00 ff ff 00 00 00 ff ff
+0000100 00 34 00 cb ff 42 e8 21 5e 0f 00 00 00 ff ff 0a
+0000110 f0 66 64 12 61 c0 15 dc e8 a0 48 bf 48 af 2a b3
+0000120 20 c0 9b 95 0d c4 67 04 42 53 06 06 06 40 00 06
+0000130 00 f9 ff 6d 01 00 00 00 00 42 e8 21 5e 0f 00 00
+0000140 00 ff ff 0a f0 66 64 12 61 c0 15 dc e8 a0 48 bf
+0000150 48 af 2a b3 20 c0 9b 95 0d c4 67 04 42 53 06 06
+0000160 06 40 00 06 00 f9 ff 6d 01 00 00 00 00 50 4b 01
+0000170 02 14 00 14 00 00 00 08 00 08 03 64 3c f9 f4 89
+0000180 64 48 01 00 00 b8 01 00 00 07 00 00 00 00 00 00
+0000190 00 00 00 00 00 00 00 00 00 00 00 72 2f 72 2e 7a
+00001a0 69 70 50 4b 05 06 00 00 00 00 01 00 01 00 35 00
+00001b0 00 00 6d 01 00 00 00 00`
+ s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
+ s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func returnRecursiveZip() (r io.ReaderAt, size int64) {
+ b := rZipBytes()
+ return bytes.NewReader(b), int64(len(b))
+}
+
+// biggestZipBytes returns the bytes of a zip file biggest.zip
+// that contains a zip file bigger.zip that contains a zip file
+// big.zip that contains big.file, which contains 2³²-1 zeros.
+// The big.zip file is interesting because it has no zip64 header,
+// much like the innermost zip files in the well-known 42.zip.
+//
+// biggest.zip was generated by changing isZip64 to use > uint32max
+// instead of >= uint32max and then running this program:
+//
+// package main
+//
+// import (
+// "archive/zip"
+// "bytes"
+// "io"
+// "log"
+// "os"
+// )
+//
+// type zeros struct{}
+//
+// func (zeros) Read(b []byte) (int, error) {
+// for i := range b {
+// b[i] = 0
+// }
+// return len(b), nil
+// }
+//
+// func main() {
+// bigZip := makeZip("big.file", io.LimitReader(zeros{}, 1<<32-1))
+// if err := os.WriteFile("/tmp/big.zip", bigZip, 0666); err != nil {
+// log.Fatal(err)
+// }
+//
+// biggerZip := makeZip("big.zip", bytes.NewReader(bigZip))
+// if err := os.WriteFile("/tmp/bigger.zip", biggerZip, 0666); err != nil {
+// log.Fatal(err)
+// }
+//
+// biggestZip := makeZip("bigger.zip", bytes.NewReader(biggerZip))
+// if err := os.WriteFile("/tmp/biggest.zip", biggestZip, 0666); err != nil {
+// log.Fatal(err)
+// }
+// }
+//
+// func makeZip(name string, r io.Reader) []byte {
+// var buf bytes.Buffer
+// w := zip.NewWriter(&buf)
+// wf, err := w.Create(name)
+// if err != nil {
+// log.Fatal(err)
+// }
+// if _, err = io.Copy(wf, r); err != nil {
+// log.Fatal(err)
+// }
+// if err := w.Close(); err != nil {
+// log.Fatal(err)
+// }
+// return buf.Bytes()
+// }
+//
+// The 4 GB of zeros compresses to 4 MB, which compresses to 20 kB,
+// which compresses to 1252 bytes (in the hex dump below).
+//
+// It's here in hex for the same reason as rZipBytes above: to avoid
+// problems with on-disk virus scanners or other zip processors.
+func biggestZipBytes() []byte {
+ s := `
+0000000 50 4b 03 04 14 00 08 00 08 00 00 00 00 00 00 00
+0000010 00 00 00 00 00 00 00 00 00 00 0a 00 00 00 62 69
+0000020 67 67 65 72 2e 7a 69 70 ec dc 6b 4c 53 67 18 07
+0000030 f0 16 c5 ca 65 2e cb b8 94 20 61 1f 44 33 c7 cd
+0000040 c0 86 4a b5 c0 62 8a 61 05 c6 cd 91 b2 54 8c 1b
+0000050 63 8b 03 9c 1b 95 52 5a e3 a0 19 6c b2 05 59 44
+0000060 64 9d 73 83 71 11 46 61 14 b9 1d 14 09 4a c3 60
+0000070 2e 4c 6e a5 60 45 02 62 81 95 b6 94 9e 9e 77 e7
+0000080 d0 43 b6 f8 71 df 96 3c e7 a4 69 ce bf cf e9 79
+0000090 ce ef 79 3f bf f1 31 db b6 bb 31 76 92 e7 f3 07
+00000a0 8b fc 9c ca cc 08 cc cb cc 5e d2 1c 88 d9 7e bb
+00000b0 4f bb 3a 3f 75 f1 5d 7f 8f c2 68 67 77 8f 25 ff
+00000c0 84 e2 93 2d ef a4 95 3d 71 4e 2c b9 b0 87 c3 be
+00000d0 3d f8 a7 60 24 61 c5 ef ae 9e c8 6c 6d 4e 69 c8
+00000e0 67 65 34 f8 37 76 2d 76 5c 54 f3 95 65 49 c7 0f
+00000f0 18 71 4b 7e 5b 6a d1 79 47 61 41 b0 4e 2a 74 45
+0000100 43 58 12 b2 5a a5 c6 7d 68 55 88 d4 98 75 18 6d
+0000110 08 d1 1f 8f 5a 9e 96 ee 45 cf a4 84 4e 4b e8 50
+0000120 a7 13 d9 06 de 52 81 97 36 b2 d7 b8 fc 2b 5f 55
+0000130 23 1f 32 59 cf 30 27 fb e2 8a b9 de 45 dd 63 9c
+0000140 4b b5 8b 96 4c 7a 62 62 cc a1 a7 cf fa f1 fe dd
+0000150 54 62 11 bf 36 78 b3 c7 b1 b5 f2 61 4d 4e dd 66
+0000160 32 2e e6 70 34 5f f4 c9 e6 6c 43 6f da 6b c6 c3
+0000170 09 2c ce 09 57 7f d2 7e b4 23 ba 7c 1b 99 bc 22
+0000180 3e f1 de 91 2f e3 9c 1b 82 cc c2 84 39 aa e6 de
+0000190 b4 69 fc cc cb 72 a6 61 45 f0 d3 1d 26 19 7c 8d
+00001a0 29 c8 66 02 be 77 6a f9 3d 34 79 17 19 c8 96 24
+00001b0 a3 ac e4 dd 3b 1a 8e c6 fe 96 38 6b bf 67 5a 23
+00001c0 f4 16 f4 e6 8a b4 fc c2 cd bf 95 66 1d bb 35 aa
+00001d0 92 7d 66 d8 08 8d a5 1f 54 2a af 09 cf 61 ff d2
+00001e0 85 9d 8f b6 d7 88 07 4a 86 03 db 64 f3 d9 92 73
+00001f0 df ec a7 fc 23 4c 8d 83 79 63 2a d9 fd 8d b3 c8
+0000200 8f 7e d4 19 85 e6 8d 1c 76 f0 8b 58 32 fd 9a d6
+0000210 85 e2 48 ad c3 d5 60 6f 7e 22 dd ef 09 49 7c 7f
+0000220 3a 45 c3 71 b7 df f3 4c 63 fb b5 d9 31 5f 6e d6
+0000230 24 1d a4 4a fe 32 a7 5c 16 48 5c 3e 08 6b 8a d3
+0000240 25 1d a2 12 a5 59 24 ea 20 5f 52 6d ad 94 db 6b
+0000250 94 b9 5d eb 4b a7 5c 44 bb 1e f2 3c 6b cf 52 c9
+0000260 e9 e5 ba 06 b9 c4 e5 0a d0 00 0d d0 00 0d d0 00
+0000270 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d
+0000280 d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0
+0000290 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00
+00002a0 0d d0 00 cd ff 9e 46 86 fa a7 7d 3a 43 d7 8e 10
+00002b0 52 e9 be e6 6e cf eb 9e 85 4d 65 ce cc 30 c1 44
+00002c0 c0 4e af bc 9c 6c 4b a0 d7 54 ff 1d d5 5c 89 fb
+00002d0 b5 34 7e c4 c2 9e f5 a0 f6 5b 7e 6e ca 73 c7 ef
+00002e0 5d be de f9 e8 81 eb a5 0a a5 63 54 2c d7 1c d1
+00002f0 89 17 85 f8 16 94 f2 8a b2 a3 f5 b6 6d df 75 cd
+0000300 90 dd 64 bd 5d 55 4e f2 55 19 1b b7 cc ef 1b ea
+0000310 2e 05 9c f4 aa 1e a8 cd a6 82 c7 59 0f 5e 9d e0
+0000320 bb fc 6c d6 99 23 eb 36 ad c6 c5 e1 d8 e1 e2 3e
+0000330 d9 90 5a f7 91 5d 6f bc 33 6d 98 47 d2 7c 2e 2f
+0000340 99 a4 25 72 85 49 2c be 0b 5b af 8f e5 6e 81 a6
+0000350 a3 5a 6f 39 53 3a ab 7a 8b 1e 26 f7 46 6c 7d 26
+0000360 53 b3 22 31 94 d3 83 f2 18 4d f5 92 33 27 53 97
+0000370 0f d3 e6 55 9c a6 c5 31 87 6f d3 f3 ae 39 6f 56
+0000380 10 7b ab 7e d0 b4 ca f2 b8 05 be 3f 0e 6e 5a 75
+0000390 ab 0c f5 37 0e ba 8e 75 71 7a aa ed 7a dd 6a 63
+00003a0 be 9b a0 97 27 6a 6f e7 d3 8b c4 7c ec d3 91 56
+00003b0 d9 ac 5e bf 16 42 2f 00 1f 93 a2 23 87 bd e2 59
+00003c0 a0 de 1a 66 c8 62 eb 55 8f 91 17 b4 61 42 7a 50
+00003d0 40 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40
+00003e0 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40 03
+00003f0 34 40 03 34 40 03 34 ff 85 86 90 8b ea 67 90 0d
+0000400 e1 42 1b d2 61 d6 79 ec fd 3e 44 28 a4 51 6c 5c
+0000410 fc d2 72 ca ba 82 18 46 16 61 cd 93 a9 0f d1 24
+0000420 17 99 e2 2c 71 16 84 0c c8 7a 13 0f 9a 5e c5 f0
+0000430 79 64 e2 12 4d c8 82 a1 81 19 2d aa 44 6d 87 54
+0000440 84 71 c1 f6 d4 ca 25 8c 77 b9 08 c7 c8 5e 10 8a
+0000450 8f 61 ed 8c ba 30 1f 79 9a c7 60 34 2b b9 8c f8
+0000460 18 a6 83 1b e3 9f ad 79 fe fd 1b 8b f1 fc 41 6f
+0000470 d4 13 1f e3 b8 83 ba 64 92 e7 eb e4 77 05 8f ba
+0000480 fa 3b 00 00 ff ff 50 4b 07 08 a6 18 b1 91 5e 04
+0000490 00 00 e4 47 00 00 50 4b 01 02 14 00 14 00 08 00
+00004a0 08 00 00 00 00 00 a6 18 b1 91 5e 04 00 00 e4 47
+00004b0 00 00 0a 00 00 00 00 00 00 00 00 00 00 00 00 00
+00004c0 00 00 00 00 62 69 67 67 65 72 2e 7a 69 70 50 4b
+00004d0 05 06 00 00 00 00 01 00 01 00 38 00 00 00 96 04
+00004e0 00 00 00 00`
+ s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
+ s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func returnBigZipBytes() (r io.ReaderAt, size int64) {
+ b := biggestZipBytes()
+ for i := 0; i < 2; i++ {
+ r, err := NewReader(bytes.NewReader(b), int64(len(b)))
+ if err != nil {
+ panic(err)
+ }
+ f, err := r.File[0].Open()
+ if err != nil {
+ panic(err)
+ }
+ b, err = io.ReadAll(f)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return bytes.NewReader(b), int64(len(b))
+}
+
+func TestIssue8186(t *testing.T) {
+ // Directory headers & data found in the TOC of a JAR file.
+ dirEnts := []string{
+ "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00",
+ "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF",
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA",
+ }
+ for i, s := range dirEnts {
+ var f File
+ err := readDirectoryHeader(&f, strings.NewReader(s))
+ if err != nil {
+ t.Errorf("error reading #%d: %v", i, err)
+ }
+ }
+}
+
+// Verify we return ErrUnexpectedEOF when length is short.
+func TestIssue10957(t *testing.T) {
+ data := []byte("PK\x03\x040000000PK\x01\x0200000" +
+ "0000000000000000000\x00" +
+ "\x00\x00\x00\x00\x00000000000000PK\x01" +
+ "\x020000000000000000000" +
+ "00000\v\x00\x00\x00\x00\x00000000000" +
+ "00000000000000PK\x01\x0200" +
+ "00000000000000000000" +
+ "00\v\x00\x00\x00\x00\x00000000000000" +
+ "00000000000PK\x01\x020000<" +
+ "0\x00\x0000000000000000\v\x00\v" +
+ "\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" +
+ "00000000PK\x01\x0200000000" +
+ "0000000000000000\v\x00\x00\x00" +
+ "\x00\x0000PK\x05\x06000000\x05\x00\xfd\x00\x00\x00" +
+ "\v\x00\x00\x00\x00\x00")
+ z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, f := range z.File {
+ r, err := f.Open()
+ if err != nil {
+ continue
+ }
+ if f.UncompressedSize64 < 1e6 {
+ n, err := io.Copy(io.Discard, r)
+ if i == 3 && err != io.ErrUnexpectedEOF {
+ t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err)
+ }
+ if err == nil && uint64(n) != f.UncompressedSize64 {
+ t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64)
+ }
+ }
+ r.Close()
+ }
+}
+
+// Verify that this particular malformed zip file is rejected.
+func TestIssue10956(t *testing.T) {
+ data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" +
+ "0000PK\x05\x06000000000000" +
+ "0000\v\x00000\x00\x00\x00\x00\x00\x00\x000")
+ r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err == nil {
+ t.Errorf("got nil error, want ErrFormat")
+ }
+ if r != nil {
+ t.Errorf("got non-nil Reader, want nil")
+ }
+}
+
+// Verify we return ErrUnexpectedEOF when reading a truncated data descriptor.
+func TestIssue11146(t *testing.T) {
+ data := []byte("PK\x03\x040000000000000000" +
+ "000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" +
+ "0000000000000000PK\x01\x02" +
+ "0000\b0\b\x00000000000000" +
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" +
+ "\x00\x0000\x01\x00\x26\x00\x00\x008\x00\x00\x00\x00\x00")
+ z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ r, err := z.File[0].Open()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = io.ReadAll(r)
+ if err != io.ErrUnexpectedEOF {
+ t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err)
+ }
+ r.Close()
+}
+
+// Verify we do not treat non-zip64 archives as zip64.
+func TestIssue12449(t *testing.T) {
+ data := []byte{
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64,
+ 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
+ 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
+ 0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a,
+ 0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0,
+ 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46,
+ 0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64,
+ 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
+ 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
+ 0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b,
+ 0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06,
+ 0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61,
+ 0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6,
+ 0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00,
+ 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+ // Read in the archive.
+	_, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != nil {
+ t.Errorf("Error reading the archive: %v", err)
+ }
+}
+
+func TestFS(t *testing.T) {
+ for _, test := range []struct {
+ file string
+ want []string
+ }{
+ {
+ "testdata/unix.zip",
+ []string{"hello", "dir/bar", "readonly"},
+ },
+ {
+ "testdata/subdir.zip",
+ []string{"a/b/c"},
+ },
+ } {
+ test := test
+ t.Run(test.file, func(t *testing.T) {
+ t.Parallel()
+ z, err := OpenReader(test.file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer z.Close()
+ if err := fstest.TestFS(z, test.want...); err != nil {
+ t.Error(err)
+ }
+ })
+ }
+}
+
+func TestFSWalk(t *testing.T) {
+ for _, test := range []struct {
+ file string
+ want []string
+ wantErr bool
+ }{
+ {
+ file: "testdata/unix.zip",
+ want: []string{".", "dir", "dir/bar", "dir/empty", "hello", "readonly"},
+ },
+ {
+ file: "testdata/subdir.zip",
+ want: []string{".", "a", "a/b", "a/b/c"},
+ },
+ {
+ file: "testdata/dupdir.zip",
+ wantErr: true,
+ },
+ } {
+ test := test
+ t.Run(test.file, func(t *testing.T) {
+ t.Parallel()
+ z, err := OpenReader(test.file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var files []string
+ sawErr := false
+ err = fs.WalkDir(z, ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ if !test.wantErr {
+ t.Errorf("%s: %v", path, err)
+ }
+ sawErr = true
+ return nil
+ }
+ files = append(files, path)
+ return nil
+ })
+ if err != nil {
+ t.Errorf("fs.WalkDir error: %v", err)
+ }
+ if test.wantErr && !sawErr {
+ t.Error("succeeded but want error")
+ } else if !test.wantErr && sawErr {
+ t.Error("unexpected error")
+ }
+ if test.want != nil && !reflect.DeepEqual(files, test.want) {
+ t.Errorf("got %v want %v", files, test.want)
+ }
+ })
+ }
+}
+
+func TestFSModTime(t *testing.T) {
+ t.Parallel()
+ z, err := OpenReader("testdata/subdir.zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer z.Close()
+
+ for _, test := range []struct {
+ name string
+ want time.Time
+ }{
+ {
+ "a",
+ time.Date(2021, 4, 19, 12, 29, 56, 0, timeZone(-7*time.Hour)).UTC(),
+ },
+ {
+ "a/b/c",
+ time.Date(2021, 4, 19, 12, 29, 59, 0, timeZone(-7*time.Hour)).UTC(),
+ },
+ } {
+ fi, err := fs.Stat(z, test.name)
+ if err != nil {
+ t.Errorf("%s: %v", test.name, err)
+ continue
+ }
+ if got := fi.ModTime(); !got.Equal(test.want) {
+ t.Errorf("%s: got modtime %v, want %v", test.name, got, test.want)
+ }
+ }
+}
+
+func TestCVE202127919(t *testing.T) {
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
+ // Archive containing only the file "../test.txt"
+ data := []byte{
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
+ 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
+ 0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
+ 0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
+ 0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
+ 0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
+ 0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
+ 0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
+ 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
+ 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
+ 0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
+ 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
+ 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != ErrInsecurePath {
+		t.Fatalf("NewReader: got err %v, want ErrInsecurePath", err)
+ }
+ _, err = r.Open("test.txt")
+ if err != nil {
+ t.Errorf("Error reading file: %v", err)
+ }
+ if len(r.File) != 1 {
+ t.Fatalf("No entries in the file list")
+ }
+ if r.File[0].Name != "../test.txt" {
+ t.Errorf("Unexpected entry name: %s", r.File[0].Name)
+ }
+ if _, err := r.File[0].Open(); err != nil {
+ t.Errorf("Error opening file: %v", err)
+ }
+}
+
+func TestOpenReaderInsecurePath(t *testing.T) {
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
+ // Archive containing only the file "../test.txt"
+ data := []byte{
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
+ 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
+ 0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
+ 0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
+ 0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
+ 0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
+ 0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
+ 0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
+ 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
+ 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
+ 0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
+ 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
+ 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+
+ // Read in the archive with the OpenReader interface
+ name := filepath.Join(t.TempDir(), "test.zip")
+ err := os.WriteFile(name, data, 0644)
+ if err != nil {
+ t.Fatalf("Unable to write out the bugos zip entry")
+ }
+ r, err := OpenReader(name)
+ if r != nil {
+ defer r.Close()
+ }
+
+ if err != ErrInsecurePath {
+ t.Fatalf("Error reading the archive, we expected ErrInsecurePath but got: %v", err)
+ }
+ _, err = r.Open("test.txt")
+ if err != nil {
+ t.Errorf("Error reading file: %v", err)
+ }
+ if len(r.File) != 1 {
+ t.Fatalf("No entries in the file list")
+ }
+ if r.File[0].Name != "../test.txt" {
+ t.Errorf("Unexpected entry name: %s", r.File[0].Name)
+ }
+ if _, err := r.File[0].Open(); err != nil {
+ t.Errorf("Error opening file: %v", err)
+ }
+}
+
+func TestCVE202133196(t *testing.T) {
+	// Archive that indicates it has 1<<128 - 1 files. This would
+	// previously cause a panic due to attempting to allocate a slice
+	// with 1<<128 - 1 elements.
+ data := []byte{
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x02,
+ 0x03, 0x62, 0x61, 0x65, 0x03, 0x04, 0x00, 0x00,
+ 0xff, 0xff, 0x50, 0x4b, 0x07, 0x08, 0xbe, 0x20,
+ 0x5c, 0x6c, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00,
+ 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
+ 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xbe, 0x20, 0x5c, 0x6c, 0x09, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x02, 0x03, 0x50, 0x4b, 0x06, 0x06, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d,
+ 0x00, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x31, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
+ 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x00,
+ }
+ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != ErrFormat {
+ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
+ }
+
+	// Also check that an archive containing a handful of empty
+	// files doesn't cause an issue.
+ b := bytes.NewBuffer(nil)
+ w := NewWriter(b)
+ for i := 0; i < 5; i++ {
+ _, err := w.Create("")
+ if err != nil {
+ t.Fatalf("Writer.Create failed: %s", err)
+ }
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close failed: %s", err)
+ }
+ r, err := NewReader(bytes.NewReader(b.Bytes()), int64(b.Len()))
+ if err != nil {
+ t.Fatalf("NewReader failed: %s", err)
+ }
+ if len(r.File) != 5 {
+ t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
+ }
+}
+
+func TestCVE202139293(t *testing.T) {
+	// The directory size is so large that the check in Reader.init
+	// overflows when subtracting it from the archive size, causing
+	// the pre-allocation check to be bypassed.
+ data := []byte{
+ 0x50, 0x4b, 0x06, 0x06, 0x05, 0x06, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
+ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
+ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0x50, 0xfe, 0x00, 0xff, 0x00, 0x3a, 0x00, 0x00, 0x00, 0xff,
+ }
+ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != ErrFormat {
+ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
+ }
+}
+
+func TestCVE202141772(t *testing.T) {
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
+ // Archive contains a file whose name is exclusively made up of '/', '\'
+ // characters, or "../", "..\" paths, which would previously cause a panic.
+ //
+ // Length Method Size Cmpr Date Time CRC-32 Name
+ // -------- ------ ------- ---- ---------- ----- -------- ----
+ // 0 Stored 0 0% 08-05-2021 18:32 00000000 /
+ // 0 Stored 0 0% 09-14-2021 12:59 00000000 //
+ // 0 Stored 0 0% 09-14-2021 12:59 00000000 \
+ // 11 Stored 11 0% 09-14-2021 13:04 0d4a1185 /test.txt
+ // -------- ------- --- -------
+ // 11 11 0% 4 files
+ data := []byte{
+ 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x06, 0x94, 0x05, 0x53, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x50,
+ 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x50,
+ 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x5c, 0x50, 0x4b,
+ 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x91, 0x68, 0x2e, 0x53, 0x85, 0x11, 0x4a, 0x0d,
+ 0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
+ 0x74, 0x2e, 0x74, 0x78, 0x74, 0x68, 0x65, 0x6c,
+ 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64,
+ 0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x0a, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x06, 0x94, 0x05, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0xed, 0x41, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x50,
+ 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x00, 0x1f, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x0a,
+ 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x18, 0x00, 0x93, 0x98, 0x25, 0x57, 0x25,
+ 0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
+ 0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
+ 0xa9, 0xd7, 0x01, 0x50, 0x4b, 0x01, 0x02, 0x3f,
+ 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78,
+ 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x20, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00,
+ 0x00, 0x5c, 0x0a, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x93, 0x98,
+ 0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
+ 0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
+ 0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x50, 0x4b,
+ 0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x91, 0x68, 0x2e, 0x53, 0x85, 0x11,
+ 0x4a, 0x0d, 0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00,
+ 0x00, 0x00, 0x09, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x5e, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
+ 0x74, 0x2e, 0x74, 0x78, 0x74, 0x0a, 0x00, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x18,
+ 0x00, 0xa9, 0x80, 0x51, 0x01, 0x26, 0xa9, 0xd7,
+ 0x01, 0x31, 0xd1, 0x57, 0x01, 0x26, 0xa9, 0xd7,
+ 0x01, 0xdf, 0x48, 0x85, 0xf9, 0x25, 0xa9, 0xd7,
+ 0x01, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
+ 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+	r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != ErrInsecurePath {
+		t.Fatalf("NewReader: got err %v, want ErrInsecurePath", err)
+ }
+ entryNames := []string{`/`, `//`, `\`, `/test.txt`}
+ var names []string
+ for _, f := range r.File {
+ names = append(names, f.Name)
+ if _, err := f.Open(); err != nil {
+ t.Errorf("Error opening %q: %v", f.Name, err)
+ }
+ if _, err := r.Open(f.Name); err == nil {
+ t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
+ }
+ }
+ if !reflect.DeepEqual(names, entryNames) {
+ t.Errorf("Unexpected file entries: %q", names)
+ }
+ if _, err := r.Open(""); err == nil {
+ t.Errorf("Opening %q with fs.FS API succeeded", "")
+ }
+ if _, err := r.Open("test.txt"); err != nil {
+ t.Errorf("Error opening %q with fs.FS API: %v", "test.txt", err)
+ }
+ dirEntries, err := fs.ReadDir(r, ".")
+ if err != nil {
+ t.Fatalf("Error reading the root directory: %v", err)
+ }
+ if len(dirEntries) != 1 || dirEntries[0].Name() != "test.txt" {
+ t.Errorf("Unexpected directory entries")
+ for _, dirEntry := range dirEntries {
+ _, err := r.Open(dirEntry.Name())
+ t.Logf("%q (Open error: %v)", dirEntry.Name(), err)
+ }
+ t.FailNow()
+ }
+ info, err := dirEntries[0].Info()
+ if err != nil {
+ t.Fatalf("Error reading info entry: %v", err)
+ }
+ if name := info.Name(); name != "test.txt" {
+ t.Errorf("Inconsistent name in info entry: %v", name)
+ }
+}
+
+func TestUnderSize(t *testing.T) {
+ z, err := OpenReader("testdata/readme.zip")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer z.Close()
+
+ for _, f := range z.File {
+ f.UncompressedSize64 = 1
+ }
+
+ for _, f := range z.File {
+ t.Run(f.Name, func(t *testing.T) {
+ rd, err := f.Open()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rd.Close()
+
+ _, err = io.Copy(io.Discard, rd)
+ if err != ErrFormat {
+ t.Fatalf("Error mismatch\n\tGot: %v\n\tWant: %v", err, ErrFormat)
+ }
+ })
+ }
+}
+
+func TestIssue54801(t *testing.T) {
+ for _, input := range []string{"testdata/readme.zip", "testdata/dd.zip"} {
+ z, err := OpenReader(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer z.Close()
+
+ for _, f := range z.File {
+ // Make file a directory
+ f.Name += "/"
+
+ t.Run(f.Name, func(t *testing.T) {
+ t.Logf("CompressedSize64: %d, Flags: %#x", f.CompressedSize64, f.Flags)
+
+ rd, err := f.Open()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rd.Close()
+
+ n, got := io.Copy(io.Discard, rd)
+ if n != 0 || got != ErrFormat {
+ t.Fatalf("Error mismatch, got: %d, %v, want: %v", n, got, ErrFormat)
+ }
+ })
+ }
+ }
+}
+
+func TestInsecurePaths(t *testing.T) {
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
+ for _, path := range []string{
+ "../foo",
+ "/foo",
+ "a/b/../../../c",
+ `a\b`,
+ } {
+ var buf bytes.Buffer
+ zw := NewWriter(&buf)
+ _, err := zw.Create(path)
+ if err != nil {
+ t.Errorf("zw.Create(%q) = %v", path, err)
+ continue
+ }
+ zw.Close()
+
+ zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != ErrInsecurePath {
+ t.Errorf("NewReader for archive with file %q: got err %v, want ErrInsecurePath", path, err)
+ continue
+ }
+ var gotPaths []string
+ for _, f := range zr.File {
+ gotPaths = append(gotPaths, f.Name)
+ }
+ if !reflect.DeepEqual(gotPaths, []string{path}) {
+ t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
+ continue
+ }
+ }
+}
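+
+// A hedged sketch (illustrative only, not one of the original tests):
+// the caller-side pattern the insecure-path tests above exercise. A
+// program that does its own path sanitization may treat ErrInsecurePath
+// as non-fatal, since NewReader still returns a usable Reader alongside
+// that error. The function name is hypothetical.
+func openPossiblyInsecure(r io.ReaderAt, size int64) (*Reader, error) {
+	zr, err := NewReader(r, size)
+	if err != nil && err != ErrInsecurePath {
+		return nil, err // a real failure; insecure names alone are tolerated
+	}
+	return zr, nil
+}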
+
+func TestDisableInsecurePathCheck(t *testing.T) {
+ t.Setenv("GODEBUG", "zipinsecurepath=1")
+ var buf bytes.Buffer
+ zw := NewWriter(&buf)
+ const name = "/foo"
+ _, err := zw.Create(name)
+ if err != nil {
+ t.Fatalf("zw.Create(%q) = %v", name, err)
+ }
+ zw.Close()
+ zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatalf("NewReader with zipinsecurepath=1: got err %v, want nil", err)
+ }
+ var gotPaths []string
+ for _, f := range zr.File {
+ gotPaths = append(gotPaths, f.Name)
+ }
+ if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
+ t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
+ }
+}
+
+func TestCompressedDirectory(t *testing.T) {
+	// Empty Java JAR with a compressed directory whose uncompressed
+	// size is 0, which should not fail.
+ //
+ // Length Method Size Cmpr Date Time CRC-32 Name
+ // -------- ------ ------- ---- ---------- ----- -------- ----
+ // 0 Defl:N 2 0% 12-01-2022 16:50 00000000 META-INF/
+ // 60 Defl:N 59 2% 12-01-2022 16:50 af937e93 META-INF/MANIFEST.MF
+ // -------- ------- --- -------
+ // 60 61 -2% 2 files
+ data := []byte{
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
+ 0x08, 0x00, 0x49, 0x86, 0x81, 0x55, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x09, 0x00, 0x04, 0x00, 0x4d, 0x45,
+ 0x54, 0x41, 0x2d, 0x49, 0x4e, 0x46, 0x2f, 0xfe,
+ 0xca, 0x00, 0x00, 0x03, 0x00, 0x50, 0x4b, 0x07,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03,
+ 0x04, 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x49,
+ 0x86, 0x81, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14,
+ 0x00, 0x00, 0x00, 0x4d, 0x45, 0x54, 0x41, 0x2d,
+ 0x49, 0x4e, 0x46, 0x2f, 0x4d, 0x41, 0x4e, 0x49,
+ 0x46, 0x45, 0x53, 0x54, 0x2e, 0x4d, 0x46, 0xf3,
+ 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1,
+ 0x0d, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xb3,
+ 0x52, 0x30, 0xd4, 0x33, 0xe0, 0xe5, 0x72, 0x2e,
+ 0x4a, 0x4d, 0x2c, 0x49, 0x4d, 0xd1, 0x75, 0xaa,
+ 0x04, 0x0a, 0x00, 0x45, 0xf4, 0x0c, 0x8d, 0x15,
+ 0x34, 0xdc, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x15,
+ 0x3c, 0xf3, 0x92, 0xf5, 0x34, 0x79, 0xb9, 0x78,
+ 0xb9, 0x00, 0x50, 0x4b, 0x07, 0x08, 0x93, 0x7e,
+ 0x93, 0xaf, 0x3b, 0x00, 0x00, 0x00, 0x3c, 0x00,
+ 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
+ 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x49, 0x86,
+ 0x81, 0x55, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x4d, 0x45, 0x54, 0x41, 0x2d, 0x49, 0x4e, 0x46,
+ 0x2f, 0xfe, 0xca, 0x00, 0x00, 0x50, 0x4b, 0x01,
+ 0x02, 0x14, 0x00, 0x14, 0x00, 0x08, 0x08, 0x08,
+ 0x00, 0x49, 0x86, 0x81, 0x55, 0x93, 0x7e, 0x93,
+ 0xaf, 0x3b, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d,
+ 0x00, 0x00, 0x00, 0x4d, 0x45, 0x54, 0x41, 0x2d,
+ 0x49, 0x4e, 0x46, 0x2f, 0x4d, 0x41, 0x4e, 0x49,
+ 0x46, 0x45, 0x53, 0x54, 0x2e, 0x4d, 0x46, 0x50,
+ 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x02, 0x00, 0x7d, 0x00, 0x00, 0x00, 0xba,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+ r, err := NewReader(bytes.NewReader(data), int64(len(data)))
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ for _, f := range r.File {
+ r, err := f.Open()
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if _, err := io.Copy(io.Discard, r); err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ }
+}
+
+func TestBaseOffsetPlusOverflow(t *testing.T) {
+ // directoryOffset > maxInt64 && size-directoryOffset < 0
+ data := []byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0xff, 0xff, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x50, 0x4b, 0x06, 0x06, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0xff, 0xff, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x50, 0x4b, 0x06, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
+ 0x4b, 0x05, 0x06, 0x20, 0x20, 0x20, 0x20, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x20, 0x00,
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatalf("NewReader panicked: %s", r)
+ }
+ }()
+	// Previously this would trigger a panic, as we attempted to read
+	// from an io.SectionReader whose offset and size were negative,
+	// accessing a slice at a negative offset.
+ NewReader(bytes.NewReader(data), int64(len(data))+1875)
+}
diff --git a/src/archive/zip/register.go b/src/archive/zip/register.go
new file mode 100644
index 0000000..4389246
--- /dev/null
+++ b/src/archive/zip/register.go
@@ -0,0 +1,147 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "sync"
+)
+
+// A Compressor returns a new compressing writer, writing to w.
+// The WriteCloser's Close method must be used to flush pending data to w.
+// The Compressor itself must be safe to invoke from multiple goroutines
+// simultaneously, but each returned writer will be used only by
+// one goroutine at a time.
+type Compressor func(w io.Writer) (io.WriteCloser, error)
+
+// A Decompressor returns a new decompressing reader, reading from r.
+// The ReadCloser's Close method must be used to release associated resources.
+// The Decompressor itself must be safe to invoke from multiple goroutines
+// simultaneously, but each returned reader will be used only by
+// one goroutine at a time.
+type Decompressor func(r io.Reader) io.ReadCloser
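+
+// A hedged illustration (not part of the original source): the simplest
+// possible Decompressor is a pass-through, equivalent in spirit to the
+// built-in Store handler registered in init below.
+var _ Decompressor = func(r io.Reader) io.ReadCloser {
+	return io.NopCloser(r) // stored data needs no transformation
+}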
+
+var flateWriterPool sync.Pool
+
+func newFlateWriter(w io.Writer) io.WriteCloser {
+ fw, ok := flateWriterPool.Get().(*flate.Writer)
+ if ok {
+ fw.Reset(w)
+ } else {
+ fw, _ = flate.NewWriter(w, 5)
+ }
+ return &pooledFlateWriter{fw: fw}
+}
+
+type pooledFlateWriter struct {
+ mu sync.Mutex // guards Close and Write
+ fw *flate.Writer
+}
+
+func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.fw == nil {
+ return 0, errors.New("Write after Close")
+ }
+ return w.fw.Write(p)
+}
+
+func (w *pooledFlateWriter) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ var err error
+ if w.fw != nil {
+ err = w.fw.Close()
+ flateWriterPool.Put(w.fw)
+ w.fw = nil
+ }
+ return err
+}
+
+var flateReaderPool sync.Pool
+
+func newFlateReader(r io.Reader) io.ReadCloser {
+ fr, ok := flateReaderPool.Get().(io.ReadCloser)
+ if ok {
+ fr.(flate.Resetter).Reset(r, nil)
+ } else {
+ fr = flate.NewReader(r)
+ }
+ return &pooledFlateReader{fr: fr}
+}
+
+type pooledFlateReader struct {
+ mu sync.Mutex // guards Close and Read
+ fr io.ReadCloser
+}
+
+func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.fr == nil {
+ return 0, errors.New("Read after Close")
+ }
+ return r.fr.Read(p)
+}
+
+func (r *pooledFlateReader) Close() error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ var err error
+ if r.fr != nil {
+ err = r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ }
+ return err
+}
+
+var (
+ compressors sync.Map // map[uint16]Compressor
+ decompressors sync.Map // map[uint16]Decompressor
+)
+
+func init() {
+ compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
+ compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))
+
+ decompressors.Store(Store, Decompressor(io.NopCloser))
+ decompressors.Store(Deflate, Decompressor(newFlateReader))
+}
+
+// RegisterDecompressor allows custom decompressors for a specified method ID.
+// The common methods Store and Deflate are built in.
+func RegisterDecompressor(method uint16, dcomp Decompressor) {
+ if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
+ panic("decompressor already registered")
+ }
+}
+
+// RegisterCompressor registers custom compressors for a specified method ID.
+// The common methods Store and Deflate are built in.
+func RegisterCompressor(method uint16, comp Compressor) {
+ if _, dup := compressors.LoadOrStore(method, comp); dup {
+ panic("compressor already registered")
+ }
+}
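+
+// A hedged usage sketch (illustrative only; the method ID is an
+// assumption, not an officially assigned one, and the function is never
+// invoked): a vendor-private method is registered once for both
+// directions, typically from an init function.
+func registerVendorMethodExample() {
+	const vendorMethod uint16 = 0xff00 // hypothetical private method ID
+	RegisterCompressor(vendorMethod, func(w io.Writer) (io.WriteCloser, error) {
+		return newFlateWriter(w), nil // stand-in; a real vendor codec goes here
+	})
+	RegisterDecompressor(vendorMethod, newFlateReader)
+}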
+
+func compressor(method uint16) Compressor {
+ ci, ok := compressors.Load(method)
+ if !ok {
+ return nil
+ }
+ return ci.(Compressor)
+}
+
+func decompressor(method uint16) Decompressor {
+ di, ok := decompressors.Load(method)
+ if !ok {
+ return nil
+ }
+ return di.(Decompressor)
+}
diff --git a/src/archive/zip/struct.go b/src/archive/zip/struct.go
new file mode 100644
index 0000000..9a8e67c
--- /dev/null
+++ b/src/archive/zip/struct.go
@@ -0,0 +1,419 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package zip provides support for reading and writing ZIP archives.
+
+See the [ZIP specification] for details.
+
+This package does not support disk spanning.
+
+A note about ZIP64:
+
+To be backwards compatible, the FileHeader has both 32 and 64 bit Size
+fields. The 64 bit fields will always contain the correct value, and
+for normal archives both fields will be the same. For files requiring
+the ZIP64 format, the 32 bit fields will be 0xffffffff and the 64 bit
+fields must be used instead.
+
+[ZIP specification]: https://www.pkware.com/appnote
+*/
+package zip
+
+import (
+ "io/fs"
+ "path"
+ "time"
+)
+
+// Compression methods.
+const (
+ Store uint16 = 0 // no compression
+ Deflate uint16 = 8 // DEFLATE compressed
+)
+
+const (
+ fileHeaderSignature = 0x04034b50
+ directoryHeaderSignature = 0x02014b50
+ directoryEndSignature = 0x06054b50
+ directory64LocSignature = 0x07064b50
+ directory64EndSignature = 0x06064b50
+ dataDescriptorSignature = 0x08074b50 // de-facto standard; required by OS X Finder
+ fileHeaderLen = 30 // + filename + extra
+ directoryHeaderLen = 46 // + filename + extra + comment
+ directoryEndLen = 22 // + comment
+ dataDescriptorLen = 16 // four uint32: descriptor signature, crc32, compressed size, size
+ dataDescriptor64Len = 24 // two uint32: signature, crc32 | two uint64: compressed size, size
+ directory64LocLen = 20 //
+ directory64EndLen = 56 // + extra
+
+ // Constants for the first byte in CreatorVersion.
+ creatorFAT = 0
+ creatorUnix = 3
+ creatorNTFS = 11
+ creatorVFAT = 14
+ creatorMacOSX = 19
+
+ // Version numbers.
+ zipVersion20 = 20 // 2.0
+ zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)
+
+ // Limits for non zip64 files.
+ uint16max = (1 << 16) - 1
+ uint32max = (1 << 32) - 1
+
+ // Extra header IDs.
+ //
+ // IDs 0..31 are reserved for official use by PKWARE.
+ // IDs above that range are defined by third-party vendors.
+	// Since ZIP lacked high-precision timestamps (and any official
+	// specification of the timezone used for the date fields), many
+	// competing extra fields have been invented. Pervasive use
+	// effectively makes them "official".
+ //
+ // See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
+ zip64ExtraID = 0x0001 // Zip64 extended information
+ ntfsExtraID = 0x000a // NTFS
+ unixExtraID = 0x000d // UNIX
+ extTimeExtraID = 0x5455 // Extended timestamp
+ infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
+)
+
+// FileHeader describes a file within a ZIP file.
+// See the [ZIP specification] for details.
+//
+// [ZIP specification]: https://www.pkware.com/appnote
+type FileHeader struct {
+ // Name is the name of the file.
+ //
+ // It must be a relative path, not start with a drive letter (such as "C:"),
+ // and must use forward slashes instead of back slashes. A trailing slash
+ // indicates that this file is a directory and should have no data.
+ Name string
+
+ // Comment is any arbitrary user-defined string shorter than 64KiB.
+ Comment string
+
+ // NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
+ //
+ // By specification, the only other encoding permitted should be CP-437,
+ // but historically many ZIP readers interpret Name and Comment as whatever
+ // the system's local character encoding happens to be.
+ //
+ // This flag should only be set if the user intends to encode a non-portable
+ // ZIP file for a specific localized region. Otherwise, the Writer
+ // automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
+ NonUTF8 bool
+
+ CreatorVersion uint16
+ ReaderVersion uint16
+ Flags uint16
+
+ // Method is the compression method. If zero, Store is used.
+ Method uint16
+
+ // Modified is the modified time of the file.
+ //
+ // When reading, an extended timestamp is preferred over the legacy MS-DOS
+ // date field, and the offset between the times is used as the timezone.
+ // If only the MS-DOS date is present, the timezone is assumed to be UTC.
+ //
+ // When writing, an extended timestamp (which is timezone-agnostic) is
+ // always emitted. The legacy MS-DOS date field is encoded according to the
+ // location of the Modified time.
+ Modified time.Time
+
+ // ModifiedTime is an MS-DOS-encoded time.
+ //
+ // Deprecated: Use Modified instead.
+ ModifiedTime uint16
+
+ // ModifiedDate is an MS-DOS-encoded date.
+ //
+ // Deprecated: Use Modified instead.
+ ModifiedDate uint16
+
+ // CRC32 is the CRC32 checksum of the file content.
+ CRC32 uint32
+
+ // CompressedSize is the compressed size of the file in bytes.
+ // If either the uncompressed or compressed size of the file
+ // does not fit in 32 bits, CompressedSize is set to ^uint32(0).
+ //
+ // Deprecated: Use CompressedSize64 instead.
+ CompressedSize uint32
+
+	// UncompressedSize is the uncompressed size of the file in bytes.
+	// If either the uncompressed or compressed size of the file
+	// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
+ //
+ // Deprecated: Use UncompressedSize64 instead.
+ UncompressedSize uint32
+
+ // CompressedSize64 is the compressed size of the file in bytes.
+ CompressedSize64 uint64
+
+ // UncompressedSize64 is the uncompressed size of the file in bytes.
+ UncompressedSize64 uint64
+
+ Extra []byte
+ ExternalAttrs uint32 // Meaning depends on CreatorVersion
+}
+
+// FileInfo returns an fs.FileInfo for the FileHeader.
+func (h *FileHeader) FileInfo() fs.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements fs.FileInfo.
+type headerFileInfo struct {
+ fh *FileHeader
+}
+
+func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }
+func (fi headerFileInfo) Size() int64 {
+ if fi.fh.UncompressedSize64 > 0 {
+ return int64(fi.fh.UncompressedSize64)
+ }
+ return int64(fi.fh.UncompressedSize)
+}
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time {
+ if fi.fh.Modified.IsZero() {
+ return fi.fh.ModTime()
+ }
+ return fi.fh.Modified.UTC()
+}
+func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
+func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }
+func (fi headerFileInfo) Sys() any { return fi.fh }
+
+func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
+
+func (fi headerFileInfo) String() string {
+ return fs.FormatFileInfo(fi)
+}
+
+// FileInfoHeader creates a partially-populated FileHeader from an
+// fs.FileInfo.
+// Because fs.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+// If compression is desired, callers should set the FileHeader.Method
+// field; it is unset by default.
+func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
+ size := fi.Size()
+ fh := &FileHeader{
+ Name: fi.Name(),
+ UncompressedSize64: uint64(size),
+ }
+ fh.SetModTime(fi.ModTime())
+ fh.SetMode(fi.Mode())
+ if fh.UncompressedSize64 > uint32max {
+ fh.UncompressedSize = uint32max
+ } else {
+ fh.UncompressedSize = uint32(fh.UncompressedSize64)
+ }
+ return fh, nil
+}
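+
+// A hedged usage sketch (illustrative only; the function name and the
+// fullPath parameter are hypothetical): restoring the full path and
+// opting in to compression, as the FileInfoHeader comment above suggests.
+func fileInfoHeaderExample(fi fs.FileInfo, fullPath string) (*FileHeader, error) {
+	fh, err := FileInfoHeader(fi)
+	if err != nil {
+		return nil, err
+	}
+	fh.Name = fullPath  // fi.Name() is only the base name
+	fh.Method = Deflate // Method is unset (Store) by default
+	return fh, nil
+}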
+
+type directoryEnd struct {
+ diskNbr uint32 // unused
+ dirDiskNbr uint32 // unused
+ dirRecordsThisDisk uint64 // unused
+ directoryRecords uint64
+ directorySize uint64
+ directoryOffset uint64 // relative to file
+ commentLen uint16
+ comment string
+}
+
+// timeZone returns a *time.Location based on the provided offset.
+// If the offset is not within sensible bounds, an offset of zero (UTC) is used.
+func timeZone(offset time.Duration) *time.Location {
+ const (
+		minOffset   = -12 * time.Hour // E.g., Baker Island at -12:00
+		maxOffset   = +14 * time.Hour // E.g., the Line Islands at +14:00
+ offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
+ )
+ offset = offset.Round(offsetAlias)
+ if offset < minOffset || maxOffset < offset {
+ offset = 0
+ }
+ return time.FixedZone("", int(offset/time.Second))
+}
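+
+// For example (a hedged worked case, not from the original source):
+// timeZone(-7*time.Hour) yields a fixed zone of -25200 seconds (UTC-7),
+// while an out-of-range value such as 48*time.Hour falls back to UTC.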
+
+// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
+// The resolution is 2s.
+// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
+func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
+ return time.Date(
+ // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
+ int(dosDate>>9+1980),
+ time.Month(dosDate>>5&0xf),
+ int(dosDate&0x1f),
+
+ // time bits 0-4: second/2; 5-10: minute; 11-15: hour
+ int(dosTime>>11),
+ int(dosTime>>5&0x3f),
+ int(dosTime&0x1f*2),
+ 0, // nanoseconds
+
+ time.UTC,
+ )
+}
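+
+// A hedged worked decoding (an illustration, not from the original
+// source): dosDate 0x5293 is year 1980+41 = 2021, month 4, day 19, and
+// dosTime 0x63bd is 12:29:58 (the low 5 bits store seconds/2).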
+
+// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
+// The resolution is 2s.
+// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
+func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
+ fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
+ fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
+ return
+}
+
+// ModTime returns the modification time in UTC using the legacy
+// ModifiedDate and ModifiedTime fields.
+//
+// Deprecated: Use Modified instead.
+func (h *FileHeader) ModTime() time.Time {
+ return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
+}
+
+// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
+// to the given time in UTC.
+//
+// Deprecated: Use Modified instead.
+func (h *FileHeader) SetModTime(t time.Time) {
+ t = t.UTC() // Convert to UTC for compatibility
+ h.Modified = t
+ h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
+}
+
+const (
+ // Unix constants. The specification doesn't mention them,
+ // but these seem to be the values agreed on by tools.
+ s_IFMT = 0xf000
+ s_IFSOCK = 0xc000
+ s_IFLNK = 0xa000
+ s_IFREG = 0x8000
+ s_IFBLK = 0x6000
+ s_IFDIR = 0x4000
+ s_IFCHR = 0x2000
+ s_IFIFO = 0x1000
+ s_ISUID = 0x800
+ s_ISGID = 0x400
+ s_ISVTX = 0x200
+
+ msdosDir = 0x10
+ msdosReadOnly = 0x01
+)
+
+// Mode returns the permission and mode bits for the FileHeader.
+func (h *FileHeader) Mode() (mode fs.FileMode) {
+ switch h.CreatorVersion >> 8 {
+ case creatorUnix, creatorMacOSX:
+ mode = unixModeToFileMode(h.ExternalAttrs >> 16)
+ case creatorNTFS, creatorVFAT, creatorFAT:
+ mode = msdosModeToFileMode(h.ExternalAttrs)
+ }
+ if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
+ mode |= fs.ModeDir
+ }
+ return mode
+}
+
+// SetMode changes the permission and mode bits for the FileHeader.
+func (h *FileHeader) SetMode(mode fs.FileMode) {
+ h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
+ h.ExternalAttrs = fileModeToUnixMode(mode) << 16
+
+ // set MSDOS attributes too, as the original zip does.
+ if mode&fs.ModeDir != 0 {
+ h.ExternalAttrs |= msdosDir
+ }
+ if mode&0200 == 0 {
+ h.ExternalAttrs |= msdosReadOnly
+ }
+}
+
+// isZip64 reports whether the file size exceeds the 32 bit limit.
+// A size of exactly uint32max also requires ZIP64, since 0xffffffff in
+// the 32 bit fields is reserved to signal that the 64 bit fields hold
+// the real values.
+func (h *FileHeader) isZip64() bool {
+ return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
+}
+
+func (h *FileHeader) hasDataDescriptor() bool {
+ return h.Flags&0x8 != 0
+}
+
+func msdosModeToFileMode(m uint32) (mode fs.FileMode) {
+ if m&msdosDir != 0 {
+ mode = fs.ModeDir | 0777
+ } else {
+ mode = 0666
+ }
+ if m&msdosReadOnly != 0 {
+ mode &^= 0222
+ }
+ return mode
+}
+
+func fileModeToUnixMode(mode fs.FileMode) uint32 {
+ var m uint32
+ switch mode & fs.ModeType {
+ default:
+ m = s_IFREG
+ case fs.ModeDir:
+ m = s_IFDIR
+ case fs.ModeSymlink:
+ m = s_IFLNK
+ case fs.ModeNamedPipe:
+ m = s_IFIFO
+ case fs.ModeSocket:
+ m = s_IFSOCK
+ case fs.ModeDevice:
+ m = s_IFBLK
+ case fs.ModeDevice | fs.ModeCharDevice:
+ m = s_IFCHR
+ }
+ if mode&fs.ModeSetuid != 0 {
+ m |= s_ISUID
+ }
+ if mode&fs.ModeSetgid != 0 {
+ m |= s_ISGID
+ }
+ if mode&fs.ModeSticky != 0 {
+ m |= s_ISVTX
+ }
+ return m | uint32(mode&0777)
+}
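+
+// A hedged worked case (not from the original source): a directory with
+// permissions 0755 maps to s_IFDIR|0755 = 0x41ed, which SetMode stores
+// shifted into the high 16 bits of ExternalAttrs (0x41ed0000).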
+
+func unixModeToFileMode(m uint32) fs.FileMode {
+ mode := fs.FileMode(m & 0777)
+ switch m & s_IFMT {
+ case s_IFBLK:
+ mode |= fs.ModeDevice
+ case s_IFCHR:
+ mode |= fs.ModeDevice | fs.ModeCharDevice
+ case s_IFDIR:
+ mode |= fs.ModeDir
+ case s_IFIFO:
+ mode |= fs.ModeNamedPipe
+ case s_IFLNK:
+ mode |= fs.ModeSymlink
+ case s_IFREG:
+ // nothing to do
+ case s_IFSOCK:
+ mode |= fs.ModeSocket
+ }
+ if m&s_ISGID != 0 {
+ mode |= fs.ModeSetgid
+ }
+ if m&s_ISUID != 0 {
+ mode |= fs.ModeSetuid
+ }
+ if m&s_ISVTX != 0 {
+ mode |= fs.ModeSticky
+ }
+ return mode
+}
diff --git a/src/archive/zip/testdata/crc32-not-streamed.zip b/src/archive/zip/testdata/crc32-not-streamed.zip
new file mode 100644
index 0000000..f268d88
--- /dev/null
+++ b/src/archive/zip/testdata/crc32-not-streamed.zip
Binary files differ
diff --git a/src/archive/zip/testdata/dd.zip b/src/archive/zip/testdata/dd.zip
new file mode 100644
index 0000000..e53378b
--- /dev/null
+++ b/src/archive/zip/testdata/dd.zip
Binary files differ
diff --git a/src/archive/zip/testdata/dupdir.zip b/src/archive/zip/testdata/dupdir.zip
new file mode 100644
index 0000000..292720b
--- /dev/null
+++ b/src/archive/zip/testdata/dupdir.zip
Binary files differ
diff --git a/src/archive/zip/testdata/go-no-datadesc-sig.zip.base64 b/src/archive/zip/testdata/go-no-datadesc-sig.zip.base64
new file mode 100644
index 0000000..1c2c071
--- /dev/null
+++ b/src/archive/zip/testdata/go-no-datadesc-sig.zip.base64
@@ -0,0 +1 @@
+UEsDBBQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAZm9vLnR4dFVUBQAD3lVZT3V4CwABBPUBAAAEFAAAAGZvbwqoZTJ+BAAAAAQAAABQSwMEFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGABiYXIudHh0VVQFAAPgVVlPdXgLAAEE9QEAAAQUAAAAYmFyCumzogQEAAAABAAAAFBLAQIUAxQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAAAAAAAAAAACkgQAAAABmb28udHh0VVQFAAPeVVlPdXgLAAEE9QEAAAQUAAAAUEsBAhQDFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGAAAAAAAAAAAAKSBTQAAAGJhci50eHRVVAUAA+BVWU91eAsAAQT1AQAABBQAAABQSwUGAAAAAAIAAgCaAAAAmgAAAAAA
diff --git a/src/archive/zip/testdata/go-with-datadesc-sig.zip b/src/archive/zip/testdata/go-with-datadesc-sig.zip
new file mode 100644
index 0000000..bcfe121
--- /dev/null
+++ b/src/archive/zip/testdata/go-with-datadesc-sig.zip
Binary files differ
diff --git a/src/archive/zip/testdata/gophercolor16x16.png b/src/archive/zip/testdata/gophercolor16x16.png
new file mode 100644
index 0000000..48854ff
--- /dev/null
+++ b/src/archive/zip/testdata/gophercolor16x16.png
Binary files differ
diff --git a/src/archive/zip/testdata/readme.notzip b/src/archive/zip/testdata/readme.notzip
new file mode 100644
index 0000000..79b1cb6
--- /dev/null
+++ b/src/archive/zip/testdata/readme.notzip
Binary files differ
diff --git a/src/archive/zip/testdata/readme.zip b/src/archive/zip/testdata/readme.zip
new file mode 100644
index 0000000..5642a67
--- /dev/null
+++ b/src/archive/zip/testdata/readme.zip
Binary files differ
diff --git a/src/archive/zip/testdata/subdir.zip b/src/archive/zip/testdata/subdir.zip
new file mode 100644
index 0000000..324d06b
--- /dev/null
+++ b/src/archive/zip/testdata/subdir.zip
Binary files differ
diff --git a/src/archive/zip/testdata/symlink.zip b/src/archive/zip/testdata/symlink.zip
new file mode 100644
index 0000000..af84693
--- /dev/null
+++ b/src/archive/zip/testdata/symlink.zip
Binary files differ
diff --git a/src/archive/zip/testdata/test-badbase.zip b/src/archive/zip/testdata/test-badbase.zip
new file mode 100644
index 0000000..245a62c
--- /dev/null
+++ b/src/archive/zip/testdata/test-badbase.zip
Binary files differ
diff --git a/src/archive/zip/testdata/test-baddirsz.zip b/src/archive/zip/testdata/test-baddirsz.zip
new file mode 100644
index 0000000..45b3314
--- /dev/null
+++ b/src/archive/zip/testdata/test-baddirsz.zip
Binary files differ
diff --git a/src/archive/zip/testdata/test-prefix.zip b/src/archive/zip/testdata/test-prefix.zip
new file mode 100644
index 0000000..1eabb48
--- /dev/null
+++ b/src/archive/zip/testdata/test-prefix.zip
Binary files differ
diff --git a/src/archive/zip/testdata/test-trailing-junk.zip b/src/archive/zip/testdata/test-trailing-junk.zip
new file mode 100644
index 0000000..42281b4
--- /dev/null
+++ b/src/archive/zip/testdata/test-trailing-junk.zip
Binary files differ
diff --git a/src/archive/zip/testdata/test.zip b/src/archive/zip/testdata/test.zip
new file mode 100644
index 0000000..03890c0
--- /dev/null
+++ b/src/archive/zip/testdata/test.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-22738.zip b/src/archive/zip/testdata/time-22738.zip
new file mode 100644
index 0000000..eb85b57
--- /dev/null
+++ b/src/archive/zip/testdata/time-22738.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-7zip.zip b/src/archive/zip/testdata/time-7zip.zip
new file mode 100644
index 0000000..4f74819
--- /dev/null
+++ b/src/archive/zip/testdata/time-7zip.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-go.zip b/src/archive/zip/testdata/time-go.zip
new file mode 100644
index 0000000..f008805
--- /dev/null
+++ b/src/archive/zip/testdata/time-go.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-infozip.zip b/src/archive/zip/testdata/time-infozip.zip
new file mode 100644
index 0000000..8e63948
--- /dev/null
+++ b/src/archive/zip/testdata/time-infozip.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-osx.zip b/src/archive/zip/testdata/time-osx.zip
new file mode 100644
index 0000000..e82c5c2
--- /dev/null
+++ b/src/archive/zip/testdata/time-osx.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-win7.zip b/src/archive/zip/testdata/time-win7.zip
new file mode 100644
index 0000000..8ba222b
--- /dev/null
+++ b/src/archive/zip/testdata/time-win7.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-winrar.zip b/src/archive/zip/testdata/time-winrar.zip
new file mode 100644
index 0000000..a8a19b0
--- /dev/null
+++ b/src/archive/zip/testdata/time-winrar.zip
Binary files differ
diff --git a/src/archive/zip/testdata/time-winzip.zip b/src/archive/zip/testdata/time-winzip.zip
new file mode 100644
index 0000000..f6e8f8b
--- /dev/null
+++ b/src/archive/zip/testdata/time-winzip.zip
Binary files differ
diff --git a/src/archive/zip/testdata/unix.zip b/src/archive/zip/testdata/unix.zip
new file mode 100644
index 0000000..ce1a981
--- /dev/null
+++ b/src/archive/zip/testdata/unix.zip
Binary files differ
diff --git a/src/archive/zip/testdata/utf8-7zip.zip b/src/archive/zip/testdata/utf8-7zip.zip
new file mode 100644
index 0000000..0e97884
--- /dev/null
+++ b/src/archive/zip/testdata/utf8-7zip.zip
Binary files differ
diff --git a/src/archive/zip/testdata/utf8-infozip.zip b/src/archive/zip/testdata/utf8-infozip.zip
new file mode 100644
index 0000000..25a8926
--- /dev/null
+++ b/src/archive/zip/testdata/utf8-infozip.zip
Binary files differ
diff --git a/src/archive/zip/testdata/utf8-osx.zip b/src/archive/zip/testdata/utf8-osx.zip
new file mode 100644
index 0000000..9b0c058
--- /dev/null
+++ b/src/archive/zip/testdata/utf8-osx.zip
Binary files differ
diff --git a/src/archive/zip/testdata/utf8-winrar.zip b/src/archive/zip/testdata/utf8-winrar.zip
new file mode 100644
index 0000000..4bad6c3
--- /dev/null
+++ b/src/archive/zip/testdata/utf8-winrar.zip
Binary files differ
diff --git a/src/archive/zip/testdata/utf8-winzip.zip b/src/archive/zip/testdata/utf8-winzip.zip
new file mode 100644
index 0000000..909d52e
--- /dev/null
+++ b/src/archive/zip/testdata/utf8-winzip.zip
Binary files differ
diff --git a/src/archive/zip/testdata/winxp.zip b/src/archive/zip/testdata/winxp.zip
new file mode 100644
index 0000000..3919322
--- /dev/null
+++ b/src/archive/zip/testdata/winxp.zip
Binary files differ
diff --git a/src/archive/zip/testdata/zip64-2.zip b/src/archive/zip/testdata/zip64-2.zip
new file mode 100644
index 0000000..f844e35
--- /dev/null
+++ b/src/archive/zip/testdata/zip64-2.zip
Binary files differ
diff --git a/src/archive/zip/testdata/zip64.zip b/src/archive/zip/testdata/zip64.zip
new file mode 100644
index 0000000..a2ee1fa
--- /dev/null
+++ b/src/archive/zip/testdata/zip64.zip
Binary files differ
diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go
new file mode 100644
index 0000000..3b23cc3
--- /dev/null
+++ b/src/archive/zip/writer.go
@@ -0,0 +1,634 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "hash"
+ "hash/crc32"
+ "io"
+ "strings"
+ "unicode/utf8"
+)
+
+var (
+ errLongName = errors.New("zip: FileHeader.Name too long")
+ errLongExtra = errors.New("zip: FileHeader.Extra too long")
+)
+
+// Writer implements a zip file writer.
+type Writer struct {
+ cw *countWriter
+ dir []*header
+ last *fileWriter
+ closed bool
+ compressors map[uint16]Compressor
+ comment string
+
+	// testHookCloseSizeOffset, if non-nil, is called with the size
+	// and offset of the central directory at Close.
+ testHookCloseSizeOffset func(size, offset uint64)
+}
+
+type header struct {
+ *FileHeader
+ offset uint64
+ raw bool
+}
+
+// NewWriter returns a new Writer writing a zip file to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
+}
+
+// SetOffset sets the offset of the beginning of the zip data within the
+// underlying writer. It should be used when the zip data is appended to an
+// existing file, such as a binary executable.
+// It must be called before any data is written.
+func (w *Writer) SetOffset(n int64) {
+ if w.cw.count != 0 {
+ panic("zip: SetOffset called after data was written")
+ }
+ w.cw.count = n
+}
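+
+// A minimal sketch of the intended use, assuming import "bytes": when the
+// archive is appended to existing data (for example a self-extracting
+// stub), SetOffset records how many bytes precede the zip data so the
+// offsets written into the headers come out right.
+//
+//	var buf bytes.Buffer
+//	buf.WriteString("#!/bin/sh\n# extraction stub\n") // pre-existing prefix
+//	zw := NewWriter(&buf)
+//	zw.SetOffset(int64(buf.Len())) // must be called before any writes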
+
+// Flush flushes any buffered data to the underlying writer.
+// Calling Flush is not normally necessary; calling Close is sufficient.
+func (w *Writer) Flush() error {
+ return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// SetComment sets the end-of-central-directory comment field.
+// It can only be called before Close.
+func (w *Writer) SetComment(comment string) error {
+ if len(comment) > uint16max {
+ return errors.New("zip: Writer.Comment too long")
+ }
+ w.comment = comment
+ return nil
+}
+
+// Close finishes writing the zip file by writing the central directory.
+// It does not close the underlying writer.
+func (w *Writer) Close() error {
+ if w.last != nil && !w.last.closed {
+ if err := w.last.close(); err != nil {
+ return err
+ }
+ w.last = nil
+ }
+ if w.closed {
+ return errors.New("zip: writer closed twice")
+ }
+ w.closed = true
+
+ // write central directory
+ start := w.cw.count
+ for _, h := range w.dir {
+ var buf [directoryHeaderLen]byte
+ b := writeBuf(buf[:])
+ b.uint32(uint32(directoryHeaderSignature))
+ b.uint16(h.CreatorVersion)
+ b.uint16(h.ReaderVersion)
+ b.uint16(h.Flags)
+ b.uint16(h.Method)
+ b.uint16(h.ModifiedTime)
+ b.uint16(h.ModifiedDate)
+ b.uint32(h.CRC32)
+ if h.isZip64() || h.offset >= uint32max {
+			// the file needs a zip64 header. store uint32max in both
+			// 32-bit size fields (and in the offset field later) to signal
+			// that the zip64 extra header should be used.
+ b.uint32(uint32max) // compressed size
+ b.uint32(uint32max) // uncompressed size
+
+ // append a zip64 extra block to Extra
+ var buf [28]byte // 2x uint16 + 3x uint64
+ eb := writeBuf(buf[:])
+ eb.uint16(zip64ExtraID)
+ eb.uint16(24) // size = 3x uint64
+ eb.uint64(h.UncompressedSize64)
+ eb.uint64(h.CompressedSize64)
+ eb.uint64(h.offset)
+ h.Extra = append(h.Extra, buf[:]...)
+ } else {
+ b.uint32(h.CompressedSize)
+ b.uint32(h.UncompressedSize)
+ }
+
+ b.uint16(uint16(len(h.Name)))
+ b.uint16(uint16(len(h.Extra)))
+ b.uint16(uint16(len(h.Comment)))
+ b = b[4:] // skip disk number start and internal file attr (2x uint16)
+ b.uint32(h.ExternalAttrs)
+ if h.offset > uint32max {
+ b.uint32(uint32max)
+ } else {
+ b.uint32(uint32(h.offset))
+ }
+ if _, err := w.cw.Write(buf[:]); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w.cw, h.Name); err != nil {
+ return err
+ }
+ if _, err := w.cw.Write(h.Extra); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w.cw, h.Comment); err != nil {
+ return err
+ }
+ }
+ end := w.cw.count
+
+ records := uint64(len(w.dir))
+ size := uint64(end - start)
+ offset := uint64(start)
+
+ if f := w.testHookCloseSizeOffset; f != nil {
+ f(size, offset)
+ }
+
+ if records >= uint16max || size >= uint32max || offset >= uint32max {
+ var buf [directory64EndLen + directory64LocLen]byte
+ b := writeBuf(buf[:])
+
+ // zip64 end of central directory record
+ b.uint32(directory64EndSignature)
+ b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
+ b.uint16(zipVersion45) // version made by
+ b.uint16(zipVersion45) // version needed to extract
+ b.uint32(0) // number of this disk
+ b.uint32(0) // number of the disk with the start of the central directory
+ b.uint64(records) // total number of entries in the central directory on this disk
+ b.uint64(records) // total number of entries in the central directory
+ b.uint64(size) // size of the central directory
+ b.uint64(offset) // offset of start of central directory with respect to the starting disk number
+
+ // zip64 end of central directory locator
+ b.uint32(directory64LocSignature)
+ b.uint32(0) // number of the disk with the start of the zip64 end of central directory
+ b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
+ b.uint32(1) // total number of disks
+
+ if _, err := w.cw.Write(buf[:]); err != nil {
+ return err
+ }
+
+ // store max values in the regular end record to signal
+ // that the zip64 values should be used instead
+ records = uint16max
+ size = uint32max
+ offset = uint32max
+ }
+
+ // write end record
+ var buf [directoryEndLen]byte
+ b := writeBuf(buf[:])
+ b.uint32(uint32(directoryEndSignature))
+ b = b[4:] // skip over disk number and first disk number (2x uint16)
+ b.uint16(uint16(records)) // number of entries this disk
+ b.uint16(uint16(records)) // number of entries total
+ b.uint32(uint32(size)) // size of directory
+ b.uint32(uint32(offset)) // start of directory
+ b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
+ if _, err := w.cw.Write(buf[:]); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w.cw, w.comment); err != nil {
+ return err
+ }
+
+ return w.cw.w.(*bufio.Writer).Flush()
+}
+
+// Create adds a file to the zip file using the provided name.
+// It returns a Writer to which the file contents should be written.
+// The file contents will be compressed using the Deflate method.
+// The name must be a relative path: it must not start with a drive
+// letter (e.g. C:) or leading slash, and only forward slashes are
+// allowed. To create a directory instead of a file, add a trailing
+// slash to the name.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, or Close.
+func (w *Writer) Create(name string) (io.Writer, error) {
+ header := &FileHeader{
+ Name: name,
+ Method: Deflate,
+ }
+ return w.CreateHeader(header)
+}
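+
+// A minimal usage sketch, assuming imports "bytes" and "log": add one file
+// and one directory entry, then finish the archive with Close.
+//
+//	buf := new(bytes.Buffer)
+//	zw := NewWriter(buf)
+//	f, err := zw.Create("hello.txt")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := f.Write([]byte("hello, zip\n")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := zw.Create("docs/"); err != nil { // trailing slash: directory
+//		log.Fatal(err)
+//	}
+//	if err := zw.Close(); err != nil { // writes the central directory
+//		log.Fatal(err)
+//	}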
+
+// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
+// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
+// or any other common encoding).
+func detectUTF8(s string) (valid, require bool) {
+ for i := 0; i < len(s); {
+ r, size := utf8.DecodeRuneInString(s[i:])
+ i += size
+ // Officially, ZIP uses CP-437, but many readers use the system's
+		// local character encoding. Most encodings are compatible with a large
+ // subset of CP-437, which itself is ASCII-like.
+ //
+ // Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
+ // characters with localized currency and overline characters.
+ if r < 0x20 || r > 0x7d || r == 0x5c {
+ if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
+ return false, false
+ }
+ require = true
+ }
+ }
+ return true, require
+}
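+
+// A package-internal sketch of the classification this produces
+// (illustrative inputs, following the rules above):
+//
+//	valid, require := detectUTF8("hello.txt")    // true, false: CP-437/ASCII safe
+//	valid, require = detectUTF8("日本語.txt")    // true, true: needs the UTF-8 flag
+//	valid, require = detectUTF8("\x93\xfa\x96{") // false, false: not valid UTF-8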
+
+// prepare performs the bookkeeping operations required at the start of
+// CreateHeader and CreateRaw.
+func (w *Writer) prepare(fh *FileHeader) error {
+ if w.last != nil && !w.last.closed {
+ if err := w.last.close(); err != nil {
+ return err
+ }
+ }
+ if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
+		// See https://golang.org/issue/11144 for background on this confusion.
+ return errors.New("archive/zip: invalid duplicate FileHeader")
+ }
+ return nil
+}
+
+// CreateHeader adds a file to the zip archive using the provided FileHeader
+// for the file metadata. Writer takes ownership of fh and may mutate
+// its fields. The caller must not modify fh after calling CreateHeader.
+//
+// This returns a Writer to which the file contents should be written.
+// The file's contents must be written to the io.Writer before the next
+// call to Create, CreateHeader, CreateRaw, or Close.
+func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
+ if err := w.prepare(fh); err != nil {
+ return nil, err
+ }
+
+ // The ZIP format has a sad state of affairs regarding character encoding.
+ // Officially, the name and comment fields are supposed to be encoded
+ // in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
+ // flag bit is set. However, there are several problems:
+ //
+ // * Many ZIP readers still do not support UTF-8.
+ // * If the UTF-8 flag is cleared, several readers simply interpret the
+ // name and comment fields as whatever the local system encoding is.
+ //
+ // In order to avoid breaking readers without UTF-8 support,
+ // we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
+	// However, if the strings require multibyte UTF-8 encoding and are
+	// valid UTF-8 strings, then we set the UTF-8 bit.
+	//
+	// For the case where the user explicitly wants to specify the encoding
+	// as UTF-8, they need to set the flag bit themselves.
+ utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
+ utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
+ switch {
+ case fh.NonUTF8:
+ fh.Flags &^= 0x800
+ case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
+ fh.Flags |= 0x800
+ }
+
+ fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
+ fh.ReaderVersion = zipVersion20
+
+ // If Modified is set, this takes precedence over MS-DOS timestamp fields.
+ if !fh.Modified.IsZero() {
+ // Contrary to the FileHeader.SetModTime method, we intentionally
+ // do not convert to UTC, because we assume the user intends to encode
+ // the date using the specified timezone. A user may want this control
+ // because many legacy ZIP readers interpret the timestamp according
+ // to the local timezone.
+ //
+		// The timezone is only non-UTC if the user sets the Modified
+		// field directly themselves. All other approaches set UTC.
+ fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)
+
+ // Use "extended timestamp" format since this is what Info-ZIP uses.
+ // Nearly every major ZIP implementation uses a different format,
+ // but at least most seem to be able to understand the other formats.
+ //
+ // This format happens to be identical for both local and central header
+ // if modification time is the only timestamp being encoded.
+ var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
+ mt := uint32(fh.Modified.Unix())
+ eb := writeBuf(mbuf[:])
+ eb.uint16(extTimeExtraID)
+ eb.uint16(5) // Size: SizeOf(uint8) + SizeOf(uint32)
+ eb.uint8(1) // Flags: ModTime
+ eb.uint32(mt) // ModTime
+ fh.Extra = append(fh.Extra, mbuf[:]...)
+ }
+
+ var (
+ ow io.Writer
+ fw *fileWriter
+ )
+ h := &header{
+ FileHeader: fh,
+ offset: uint64(w.cw.count),
+ }
+
+ if strings.HasSuffix(fh.Name, "/") {
+ // Set the compression method to Store to ensure data length is truly zero,
+ // which the writeHeader method always encodes for the size fields.
+ // This is necessary as most compression formats have non-zero lengths
+ // even when compressing an empty string.
+ fh.Method = Store
+ fh.Flags &^= 0x8 // we will not write a data descriptor
+
+ // Explicitly clear sizes as they have no meaning for directories.
+ fh.CompressedSize = 0
+ fh.CompressedSize64 = 0
+ fh.UncompressedSize = 0
+ fh.UncompressedSize64 = 0
+
+ ow = dirWriter{}
+ } else {
+ fh.Flags |= 0x8 // we will write a data descriptor
+
+ fw = &fileWriter{
+ zipw: w.cw,
+ compCount: &countWriter{w: w.cw},
+ crc32: crc32.NewIEEE(),
+ }
+ comp := w.compressor(fh.Method)
+ if comp == nil {
+ return nil, ErrAlgorithm
+ }
+ var err error
+ fw.comp, err = comp(fw.compCount)
+ if err != nil {
+ return nil, err
+ }
+ fw.rawCount = &countWriter{w: fw.comp}
+ fw.header = h
+ ow = fw
+ }
+ w.dir = append(w.dir, h)
+ if err := writeHeader(w.cw, h); err != nil {
+ return nil, err
+ }
+ // If we're creating a directory, fw is nil.
+ w.last = fw
+ return ow, nil
+}
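+
+// A minimal sketch, assuming import "time" and a *Writer zw as in the sketch
+// above: callers who need control over the metadata build a FileHeader and
+// hand ownership of it to CreateHeader.
+//
+//	hdr := &FileHeader{
+//		Name:     "logs/app.txt",
+//		Method:   Deflate,
+//		Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, time.UTC),
+//	}
+//	f, err := zw.CreateHeader(hdr) // write the contents to f before the next Create*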
+
+func writeHeader(w io.Writer, h *header) error {
+ const maxUint16 = 1<<16 - 1
+ if len(h.Name) > maxUint16 {
+ return errLongName
+ }
+ if len(h.Extra) > maxUint16 {
+ return errLongExtra
+ }
+
+ var buf [fileHeaderLen]byte
+ b := writeBuf(buf[:])
+ b.uint32(uint32(fileHeaderSignature))
+ b.uint16(h.ReaderVersion)
+ b.uint16(h.Flags)
+ b.uint16(h.Method)
+ b.uint16(h.ModifiedTime)
+ b.uint16(h.ModifiedDate)
+ // In raw mode (caller does the compression), the values are either
+ // written here or in the trailing data descriptor based on the header
+ // flags.
+ if h.raw && !h.hasDataDescriptor() {
+ b.uint32(h.CRC32)
+ b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
+ b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
+ } else {
+		// When this package handles the compression, these values are
+		// always written to the trailing data descriptor.
+ b.uint32(0) // crc32
+ b.uint32(0) // compressed size
+ b.uint32(0) // uncompressed size
+ }
+ b.uint16(uint16(len(h.Name)))
+ b.uint16(uint16(len(h.Extra)))
+ if _, err := w.Write(buf[:]); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, h.Name); err != nil {
+ return err
+ }
+ _, err := w.Write(h.Extra)
+ return err
+}
+
+func min64(x, y uint64) uint64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+// CreateRaw adds a file to the zip archive using the provided FileHeader and
+// returns a Writer to which the file contents should be written. The file's
+// contents must be written to the io.Writer before the next call to Create,
+// CreateHeader, CreateRaw, or Close.
+//
+// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
+func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
+ if err := w.prepare(fh); err != nil {
+ return nil, err
+ }
+
+ fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
+ fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
+
+ h := &header{
+ FileHeader: fh,
+ offset: uint64(w.cw.count),
+ raw: true,
+ }
+ w.dir = append(w.dir, h)
+ if err := writeHeader(w.cw, h); err != nil {
+ return nil, err
+ }
+
+ if strings.HasSuffix(fh.Name, "/") {
+ w.last = nil
+ return dirWriter{}, nil
+ }
+
+ fw := &fileWriter{
+ header: h,
+ zipw: w.cw,
+ }
+ w.last = fw
+ return fw, nil
+}
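+
+// A minimal sketch, assuming imports "bytes", "compress/flate", and
+// "hash/crc32", plus a payload []byte and a *Writer zw (both assumed): the
+// caller compresses the data itself and must fill in CRC32 and both sizes,
+// since CreateRaw performs no compression or validation of its own.
+//
+//	var cbuf bytes.Buffer
+//	fw, _ := flate.NewWriter(&cbuf, flate.BestSpeed)
+//	fw.Write(payload)
+//	fw.Close()
+//	h := &FileHeader{
+//		Name:               "data.bin",
+//		Method:             Deflate,
+//		CRC32:              crc32.ChecksumIEEE(payload),
+//		CompressedSize64:   uint64(cbuf.Len()),
+//		UncompressedSize64: uint64(len(payload)),
+//	}
+//	if w, err := zw.CreateRaw(h); err == nil {
+//		w.Write(cbuf.Bytes()) // already-compressed bytes, written verbatim
+//	}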
+
+// Copy copies the file f (obtained from a Reader) into w. It copies the raw
+// form directly bypassing decompression, compression, and validation.
+func (w *Writer) Copy(f *File) error {
+ r, err := f.OpenRaw()
+ if err != nil {
+ return err
+ }
+ fw, err := w.CreateRaw(&f.FileHeader)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(fw, r)
+ return err
+}
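+
+// A minimal sketch, with a Reader src and Writer dst assumed: Copy makes it
+// cheap to filter or repack an archive without recompressing anything.
+//
+//	for _, f := range src.File {
+//		if strings.HasSuffix(f.Name, ".tmp") {
+//			continue // drop unwanted entries
+//		}
+//		if err := dst.Copy(f); err != nil {
+//			return err
+//		}
+//	}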
+
+// RegisterCompressor registers or overrides a custom compressor for a specific
+// method ID. If a compressor for a given method is not found, Writer will
+// default to looking up the compressor at the package level.
+func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
+ if w.compressors == nil {
+ w.compressors = make(map[uint16]Compressor)
+ }
+ w.compressors[method] = comp
+}
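+
+// A minimal sketch, assuming import "compress/flate" and a *Writer zw:
+// override the Deflate method on a single Writer to trade speed for
+// compression ratio.
+//
+//	zw.RegisterCompressor(Deflate, func(out io.Writer) (io.WriteCloser, error) {
+//		return flate.NewWriter(out, flate.BestCompression)
+//	})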
+
+func (w *Writer) compressor(method uint16) Compressor {
+ comp := w.compressors[method]
+ if comp == nil {
+ comp = compressor(method)
+ }
+ return comp
+}
+
+type dirWriter struct{}
+
+func (dirWriter) Write(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ return 0, errors.New("zip: write to directory")
+}
+
+type fileWriter struct {
+ *header
+ zipw io.Writer
+ rawCount *countWriter
+ comp io.WriteCloser
+ compCount *countWriter
+ crc32 hash.Hash32
+ closed bool
+}
+
+func (w *fileWriter) Write(p []byte) (int, error) {
+ if w.closed {
+ return 0, errors.New("zip: write to closed file")
+ }
+ if w.raw {
+ return w.zipw.Write(p)
+ }
+ w.crc32.Write(p)
+ return w.rawCount.Write(p)
+}
+
+func (w *fileWriter) close() error {
+ if w.closed {
+ return errors.New("zip: file closed twice")
+ }
+ w.closed = true
+ if w.raw {
+ return w.writeDataDescriptor()
+ }
+ if err := w.comp.Close(); err != nil {
+ return err
+ }
+
+ // update FileHeader
+ fh := w.header.FileHeader
+ fh.CRC32 = w.crc32.Sum32()
+ fh.CompressedSize64 = uint64(w.compCount.count)
+ fh.UncompressedSize64 = uint64(w.rawCount.count)
+
+ if fh.isZip64() {
+ fh.CompressedSize = uint32max
+ fh.UncompressedSize = uint32max
+ fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions
+ } else {
+ fh.CompressedSize = uint32(fh.CompressedSize64)
+ fh.UncompressedSize = uint32(fh.UncompressedSize64)
+ }
+
+ return w.writeDataDescriptor()
+}
+
+func (w *fileWriter) writeDataDescriptor() error {
+ if !w.hasDataDescriptor() {
+ return nil
+ }
+ // Write data descriptor. This is more complicated than one would
+ // think, see e.g. comments in zipfile.c:putextended() and
+ // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
+ // The approach here is to write 8 byte sizes if needed without
+ // adding a zip64 extra in the local header (too late anyway).
+ var buf []byte
+ if w.isZip64() {
+ buf = make([]byte, dataDescriptor64Len)
+ } else {
+ buf = make([]byte, dataDescriptorLen)
+ }
+ b := writeBuf(buf)
+ b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X
+ b.uint32(w.CRC32)
+ if w.isZip64() {
+ b.uint64(w.CompressedSize64)
+ b.uint64(w.UncompressedSize64)
+ } else {
+ b.uint32(w.CompressedSize)
+ b.uint32(w.UncompressedSize)
+ }
+ _, err := w.zipw.Write(buf)
+ return err
+}
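+
+// For reference, a sketch of the descriptor written above in the common
+// (non-zip64) case: 16 bytes total, all fields little-endian.
+//
+//	offset  0: uint32 signature (0x08074b50)
+//	offset  4: uint32 CRC-32 of the uncompressed data
+//	offset  8: uint32 compressed size
+//	offset 12: uint32 uncompressed size
+//
+// In the zip64 case the two size fields are uint64, for 24 bytes total.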
+
+type countWriter struct {
+ w io.Writer
+ count int64
+}
+
+func (w *countWriter) Write(p []byte) (int, error) {
+ n, err := w.w.Write(p)
+ w.count += int64(n)
+ return n, err
+}
+
+type nopCloser struct {
+ io.Writer
+}
+
+func (w nopCloser) Close() error {
+ return nil
+}
+
+type writeBuf []byte
+
+func (b *writeBuf) uint8(v uint8) {
+ (*b)[0] = v
+ *b = (*b)[1:]
+}
+
+func (b *writeBuf) uint16(v uint16) {
+ binary.LittleEndian.PutUint16(*b, v)
+ *b = (*b)[2:]
+}
+
+func (b *writeBuf) uint32(v uint32) {
+ binary.LittleEndian.PutUint32(*b, v)
+ *b = (*b)[4:]
+}
+
+func (b *writeBuf) uint64(v uint64) {
+ binary.LittleEndian.PutUint64(*b, v)
+ *b = (*b)[8:]
+}
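+
+// A package-internal sketch: a writeBuf advances through a fixed backing
+// array as fields are appended, so callers size the array up front and the
+// binary.LittleEndian calls panic rather than silently overflow if the
+// layout is miscounted.
+//
+//	var arr [6]byte
+//	b := writeBuf(arr[:])
+//	b.uint16(0x0102)     // arr[0:2] = 02 01
+//	b.uint32(0x03040506) // arr[2:6] = 06 05 04 03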
diff --git a/src/archive/zip/writer_test.go b/src/archive/zip/writer_test.go
new file mode 100644
index 0000000..2b73eca
--- /dev/null
+++ b/src/archive/zip/writer_test.go
@@ -0,0 +1,604 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "io/fs"
+ "math/rand"
+ "os"
+ "strings"
+ "testing"
+ "time"
+)
+
+// TODO(adg): a more sophisticated test suite
+
+type WriteTest struct {
+ Name string
+ Data []byte
+ Method uint16
+ Mode fs.FileMode
+}
+
+var writeTests = []WriteTest{
+ {
+ Name: "foo",
+ Data: []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."),
+ Method: Store,
+ Mode: 0666,
+ },
+ {
+ Name: "bar",
+ Data: nil, // large data set in the test
+ Method: Deflate,
+ Mode: 0644,
+ },
+ {
+ Name: "setuid",
+ Data: []byte("setuid file"),
+ Method: Deflate,
+ Mode: 0755 | fs.ModeSetuid,
+ },
+ {
+ Name: "setgid",
+ Data: []byte("setgid file"),
+ Method: Deflate,
+ Mode: 0755 | fs.ModeSetgid,
+ },
+ {
+ Name: "symlink",
+ Data: []byte("../link/target"),
+ Method: Deflate,
+ Mode: 0755 | fs.ModeSymlink,
+ },
+ {
+ Name: "device",
+ Data: []byte("device file"),
+ Method: Deflate,
+ Mode: 0755 | fs.ModeDevice,
+ },
+ {
+ Name: "chardevice",
+ Data: []byte("char device file"),
+ Method: Deflate,
+ Mode: 0755 | fs.ModeDevice | fs.ModeCharDevice,
+ },
+}
+
+func TestWriter(t *testing.T) {
+ largeData := make([]byte, 1<<17)
+ if _, err := rand.Read(largeData); err != nil {
+ t.Fatal("rand.Read failed:", err)
+ }
+ writeTests[1].Data = largeData
+ defer func() {
+ writeTests[1].Data = nil
+ }()
+
+ // write a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+
+ for _, wt := range writeTests {
+ testCreate(t, w, &wt)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read it back
+ r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, wt := range writeTests {
+ testReadFile(t, r.File[i], &wt)
+ }
+}
+
+// TestWriterComment is a test for EOCD comment read/write.
+func TestWriterComment(t *testing.T) {
+ var tests = []struct {
+ comment string
+ ok bool
+ }{
+ {"hi, hello", true},
+ {"hi, こんにちわ", true},
+ {strings.Repeat("a", uint16max), true},
+ {strings.Repeat("a", uint16max+1), false},
+ }
+
+ for _, test := range tests {
+ // write a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+ if err := w.SetComment(test.comment); err != nil {
+ if test.ok {
+ t.Fatalf("SetComment: unexpected error %v", err)
+ }
+ continue
+ } else {
+ if !test.ok {
+ t.Fatalf("SetComment: unexpected success, want error")
+ }
+ }
+
+ if err := w.Close(); test.ok == (err != nil) {
+ t.Fatal(err)
+ }
+
+ if w.closed != test.ok {
+ t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok)
+ }
+
+ // skip read test in failure cases
+ if !test.ok {
+ continue
+ }
+
+ // read it back
+ r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.Comment != test.comment {
+ t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment)
+ }
+ }
+}
+
+func TestWriterUTF8(t *testing.T) {
+ var utf8Tests = []struct {
+ name string
+ comment string
+ nonUTF8 bool
+ flags uint16
+ }{
+ {
+ name: "hi, hello",
+ comment: "in the world",
+ flags: 0x8,
+ },
+ {
+ name: "hi, こんにちわ",
+ comment: "in the world",
+ flags: 0x808,
+ },
+ {
+ name: "hi, こんにちわ",
+ comment: "in the world",
+ nonUTF8: true,
+ flags: 0x8,
+ },
+ {
+ name: "hi, hello",
+ comment: "in the 世界",
+ flags: 0x808,
+ },
+ {
+ name: "hi, こんにちわ",
+ comment: "in the 世界",
+ flags: 0x808,
+ },
+ {
+ name: "the replacement rune is �",
+ comment: "the replacement rune is �",
+ flags: 0x808,
+ },
+ {
+ // Name is Japanese encoded in Shift JIS.
+ name: "\x93\xfa\x96{\x8c\xea.txt",
+ comment: "in the 世界",
+ flags: 0x008, // UTF-8 must not be set
+ },
+ }
+
+ // write a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+
+ for _, test := range utf8Tests {
+ h := &FileHeader{
+ Name: test.name,
+ Comment: test.comment,
+ NonUTF8: test.nonUTF8,
+ Method: Deflate,
+ }
+ w, err := w.CreateHeader(h)
+ if err != nil {
+ t.Fatal(err)
+ }
+ w.Write([]byte{})
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read it back
+ r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, test := range utf8Tests {
+ flags := r.File[i].Flags
+ if flags != test.flags {
+ t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags)
+ }
+ }
+}
+
+func TestWriterTime(t *testing.T) {
+ var buf bytes.Buffer
+ h := &FileHeader{
+ Name: "test.txt",
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ }
+ w := NewWriter(&buf)
+ if _, err := w.CreateHeader(h); err != nil {
+ t.Fatalf("unexpected CreateHeader error: %v", err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("unexpected Close error: %v", err)
+ }
+
+ want, err := os.ReadFile("testdata/time-go.zip")
+ if err != nil {
+ t.Fatalf("unexpected ReadFile error: %v", err)
+ }
+ if got := buf.Bytes(); !bytes.Equal(got, want) {
+ fmt.Printf("%x\n%x\n", got, want)
+ t.Error("contents of time-go.zip differ")
+ }
+}
+
+func TestWriterOffset(t *testing.T) {
+ largeData := make([]byte, 1<<17)
+ if _, err := rand.Read(largeData); err != nil {
+ t.Fatal("rand.Read failed:", err)
+ }
+ writeTests[1].Data = largeData
+ defer func() {
+ writeTests[1].Data = nil
+ }()
+
+ // write a zip file
+ buf := new(bytes.Buffer)
+ existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3}
+ n, _ := buf.Write(existingData)
+ w := NewWriter(buf)
+ w.SetOffset(int64(n))
+
+ for _, wt := range writeTests {
+ testCreate(t, w, &wt)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read it back
+ r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, wt := range writeTests {
+ testReadFile(t, r.File[i], &wt)
+ }
+}
+
+func TestWriterFlush(t *testing.T) {
+ var buf bytes.Buffer
+ w := NewWriter(struct{ io.Writer }{&buf})
+ _, err := w.Create("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if buf.Len() > 0 {
+ t.Fatalf("Unexpected %d bytes already in buffer", buf.Len())
+ }
+ if err := w.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ if buf.Len() == 0 {
+ t.Fatal("No bytes written after Flush")
+ }
+}
+
+func TestWriterDir(t *testing.T) {
+ w := NewWriter(io.Discard)
+ dw, err := w.Create("dir/")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := dw.Write(nil); err != nil {
+ t.Errorf("Write(nil) to directory: got %v, want nil", err)
+ }
+ if _, err := dw.Write([]byte("hello")); err == nil {
+ t.Error(`Write("hello") to directory: got nil error, want non-nil`)
+ }
+}
+
+func TestWriterDirAttributes(t *testing.T) {
+ var buf bytes.Buffer
+ w := NewWriter(&buf)
+ if _, err := w.CreateHeader(&FileHeader{
+ Name: "dir/",
+ Method: Deflate,
+ CompressedSize64: 1234,
+ UncompressedSize64: 5678,
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+ b := buf.Bytes()
+
+ var sig [4]byte
+ binary.LittleEndian.PutUint32(sig[:], uint32(fileHeaderSignature))
+
+ idx := bytes.Index(b, sig[:])
+ if idx == -1 {
+ t.Fatal("file header not found")
+ }
+ b = b[idx:]
+
+ if !bytes.Equal(b[6:10], []byte{0, 0, 0, 0}) { // FileHeader.Flags: 0, FileHeader.Method: 0
+ t.Errorf("unexpected method and flags: %v", b[6:10])
+ }
+
+ if !bytes.Equal(b[14:26], make([]byte, 12)) { // FileHeader.{CRC32,CompressSize,UncompressedSize} all zero.
+ t.Errorf("unexpected crc, compress and uncompressed size to be 0 was: %v", b[14:26])
+ }
+
+ binary.LittleEndian.PutUint32(sig[:], uint32(dataDescriptorSignature))
+ if bytes.Contains(b, sig[:]) {
+ t.Error("there should be no data descriptor")
+ }
+}
+
+func TestWriterCopy(t *testing.T) {
+ // make a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+ for _, wt := range writeTests {
+ testCreate(t, w, &wt)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read it back
+ src, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, wt := range writeTests {
+ testReadFile(t, src.File[i], &wt)
+ }
+
+ // make a new zip file copying the old compressed data.
+ buf2 := new(bytes.Buffer)
+ dst := NewWriter(buf2)
+ for _, f := range src.File {
+ if err := dst.Copy(f); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := dst.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read the new one back
+ r, err := NewReader(bytes.NewReader(buf2.Bytes()), int64(buf2.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, wt := range writeTests {
+ testReadFile(t, r.File[i], &wt)
+ }
+}
+
+func TestWriterCreateRaw(t *testing.T) {
+ files := []struct {
+ name string
+ content []byte
+ method uint16
+ flags uint16
+ crc32 uint32
+ uncompressedSize uint64
+ compressedSize uint64
+ }{
+ {
+ name: "small store w desc",
+ content: []byte("gophers"),
+ method: Store,
+ flags: 0x8,
+ },
+ {
+ name: "small deflate wo desc",
+ content: bytes.Repeat([]byte("abcdefg"), 2048),
+ method: Deflate,
+ },
+ }
+
+ // write a zip file
+ archive := new(bytes.Buffer)
+ w := NewWriter(archive)
+
+ for i := range files {
+ f := &files[i]
+ f.crc32 = crc32.ChecksumIEEE(f.content)
+ size := uint64(len(f.content))
+ f.uncompressedSize = size
+ f.compressedSize = size
+
+ var compressedContent []byte
+ if f.method == Deflate {
+ var buf bytes.Buffer
+ w, err := flate.NewWriter(&buf, flate.BestSpeed)
+ if err != nil {
+ t.Fatalf("flate.NewWriter err = %v", err)
+ }
+ _, err = w.Write(f.content)
+ if err != nil {
+ t.Fatalf("flate Write err = %v", err)
+ }
+ err = w.Close()
+ if err != nil {
+ t.Fatalf("flate Writer.Close err = %v", err)
+ }
+ compressedContent = buf.Bytes()
+ f.compressedSize = uint64(len(compressedContent))
+ }
+
+ h := &FileHeader{
+ Name: f.name,
+ Method: f.method,
+ Flags: f.flags,
+ CRC32: f.crc32,
+ CompressedSize64: f.compressedSize,
+ UncompressedSize64: f.uncompressedSize,
+ }
+ w, err := w.CreateRaw(h)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if compressedContent != nil {
+ _, err = w.Write(compressedContent)
+ } else {
+ _, err = w.Write(f.content)
+ }
+ if err != nil {
+ t.Fatalf("%s Write got %v; want nil", f.name, err)
+ }
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read it back
+ r, err := NewReader(bytes.NewReader(archive.Bytes()), int64(archive.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, want := range files {
+ got := r.File[i]
+ if got.Name != want.name {
+ t.Errorf("got Name %s; want %s", got.Name, want.name)
+ }
+ if got.Method != want.method {
+ t.Errorf("%s: got Method %#x; want %#x", want.name, got.Method, want.method)
+ }
+ if got.Flags != want.flags {
+ t.Errorf("%s: got Flags %#x; want %#x", want.name, got.Flags, want.flags)
+ }
+ if got.CRC32 != want.crc32 {
+ t.Errorf("%s: got CRC32 %#x; want %#x", want.name, got.CRC32, want.crc32)
+ }
+ if got.CompressedSize64 != want.compressedSize {
+ t.Errorf("%s: got CompressedSize64 %d; want %d", want.name, got.CompressedSize64, want.compressedSize)
+ }
+ if got.UncompressedSize64 != want.uncompressedSize {
+ t.Errorf("%s: got UncompressedSize64 %d; want %d", want.name, got.UncompressedSize64, want.uncompressedSize)
+ }
+
+ r, err := got.Open()
+ if err != nil {
+ t.Errorf("%s: Open err = %v", got.Name, err)
+ continue
+ }
+
+ buf, err := io.ReadAll(r)
+ if err != nil {
+ t.Errorf("%s: ReadAll err = %v", got.Name, err)
+ continue
+ }
+
+ if !bytes.Equal(buf, want.content) {
+ t.Errorf("%v: ReadAll returned unexpected bytes", got.Name)
+ }
+ }
+}
+
+func testCreate(t *testing.T, w *Writer, wt *WriteTest) {
+ header := &FileHeader{
+ Name: wt.Name,
+ Method: wt.Method,
+ }
+ if wt.Mode != 0 {
+ header.SetMode(wt.Mode)
+ }
+ f, err := w.CreateHeader(header)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = f.Write(wt.Data)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func testReadFile(t *testing.T, f *File, wt *WriteTest) {
+ if f.Name != wt.Name {
+ t.Fatalf("File name: got %q, want %q", f.Name, wt.Name)
+ }
+ testFileMode(t, f, wt.Mode)
+ rc, err := f.Open()
+ if err != nil {
+ t.Fatalf("opening %s: %v", f.Name, err)
+ }
+ b, err := io.ReadAll(rc)
+ if err != nil {
+ t.Fatalf("reading %s: %v", f.Name, err)
+ }
+ err = rc.Close()
+ if err != nil {
+ t.Fatalf("closing %s: %v", f.Name, err)
+ }
+ if !bytes.Equal(b, wt.Data) {
+ t.Errorf("File contents %q, want %q", b, wt.Data)
+ }
+}
+
+func BenchmarkCompressedZipGarbage(b *testing.B) {
+ bigBuf := bytes.Repeat([]byte("a"), 1<<20)
+
+ runOnce := func(buf *bytes.Buffer) {
+ buf.Reset()
+ zw := NewWriter(buf)
+ for j := 0; j < 3; j++ {
+ w, _ := zw.CreateHeader(&FileHeader{
+ Name: "foo",
+ Method: Deflate,
+ })
+ w.Write(bigBuf)
+ }
+ zw.Close()
+ }
+
+ b.ReportAllocs()
+ // Run once and then reset the timer.
+ // This effectively discards the very large initial flate setup cost,
+ // as well as the initialization of bigBuf.
+ runOnce(&bytes.Buffer{})
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ var buf bytes.Buffer
+ for pb.Next() {
+ runOnce(&buf)
+ }
+ })
+}
diff --git a/src/archive/zip/zip_test.go b/src/archive/zip/zip_test.go
new file mode 100644
index 0000000..7d1de07
--- /dev/null
+++ b/src/archive/zip/zip_test.go
@@ -0,0 +1,828 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests that involve both reading and writing.
+
+package zip
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash"
+ "internal/testenv"
+ "io"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestOver65kFiles(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+ buf := new(strings.Builder)
+ w := NewWriter(buf)
+ const nFiles = (1 << 16) + 42
+ for i := 0; i < nFiles; i++ {
+ _, err := w.CreateHeader(&FileHeader{
+ Name: fmt.Sprintf("%d.dat", i),
+			Method: Store, // Deflate is too slow when compiled with the -race flag
+ })
+ if err != nil {
+ t.Fatalf("creating file %d: %v", i, err)
+ }
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ s := buf.String()
+ zr, err := NewReader(strings.NewReader(s), int64(len(s)))
+ if err != nil {
+ t.Fatalf("NewReader: %v", err)
+ }
+ if got := len(zr.File); got != nFiles {
+ t.Fatalf("File contains %d files, want %d", got, nFiles)
+ }
+ for i := 0; i < nFiles; i++ {
+ want := fmt.Sprintf("%d.dat", i)
+ if zr.File[i].Name != want {
+ t.Fatalf("File(%d) = %q, want %q", i, zr.File[i].Name, want)
+ }
+ }
+}
+
+func TestModTime(t *testing.T) {
+ var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC)
+ fh := new(FileHeader)
+ fh.SetModTime(testTime)
+ outTime := fh.ModTime()
+ if !outTime.Equal(testTime) {
+ t.Errorf("times don't match: got %s, want %s", outTime, testTime)
+ }
+}
+
+func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) {
+ fi := fh.FileInfo()
+ fh2, err := FileInfoHeader(fi)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := fh2.Name, fh.Name; got != want {
+ t.Errorf("Name: got %s, want %s\n", got, want)
+ }
+ if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want {
+ t.Errorf("UncompressedSize: got %d, want %d\n", got, want)
+ }
+ if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want {
+ t.Errorf("UncompressedSize64: got %d, want %d\n", got, want)
+ }
+ if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want {
+ t.Errorf("ModifiedTime: got %d, want %d\n", got, want)
+ }
+ if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want {
+ t.Errorf("ModifiedDate: got %d, want %d\n", got, want)
+ }
+
+	if sysfh, ok := fi.Sys().(*FileHeader); !ok || sysfh != fh {
+ t.Errorf("Sys didn't return original *FileHeader")
+ }
+}
+
+func TestFileHeaderRoundTrip(t *testing.T) {
+ fh := &FileHeader{
+ Name: "foo.txt",
+ UncompressedSize: 987654321,
+ ModifiedTime: 1234,
+ ModifiedDate: 5678,
+ }
+ testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t)
+}
+
+func TestFileHeaderRoundTrip64(t *testing.T) {
+ fh := &FileHeader{
+ Name: "foo.txt",
+ UncompressedSize64: 9876543210,
+ ModifiedTime: 1234,
+ ModifiedDate: 5678,
+ }
+ testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t)
+}
+
+func TestFileHeaderRoundTripModified(t *testing.T) {
+ fh := &FileHeader{
+ Name: "foo.txt",
+ UncompressedSize: 987654321,
+ Modified: time.Now().Local(),
+ ModifiedTime: 1234,
+ ModifiedDate: 5678,
+ }
+ fi := fh.FileInfo()
+ fh2, err := FileInfoHeader(fi)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := fh2.Modified, fh.Modified.UTC(); got != want {
+ t.Errorf("Modified: got %s, want %s\n", got, want)
+ }
+ if got, want := fi.ModTime(), fh.Modified.UTC(); got != want {
+ t.Errorf("Modified: got %s, want %s\n", got, want)
+ }
+}
+
+func TestFileHeaderRoundTripWithoutModified(t *testing.T) {
+ fh := &FileHeader{
+ Name: "foo.txt",
+ UncompressedSize: 987654321,
+ ModifiedTime: 1234,
+ ModifiedDate: 5678,
+ }
+ fi := fh.FileInfo()
+ fh2, err := FileInfoHeader(fi)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := fh2.ModTime(), fh.ModTime(); got != want {
+ t.Errorf("Modified: got %s, want %s\n", got, want)
+ }
+ if got, want := fi.ModTime(), fh.ModTime(); got != want {
+ t.Errorf("Modified: got %s, want %s\n", got, want)
+ }
+}
+
+type repeatedByte struct {
+ off int64
+ b byte
+ n int64
+}
+
+// rleBuffer is a run-length-encoded byte buffer.
+// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt,
+// allowing random-access reads.
+type rleBuffer struct {
+ buf []repeatedByte
+}
+
+func (r *rleBuffer) Size() int64 {
+ if len(r.buf) == 0 {
+ return 0
+ }
+ last := &r.buf[len(r.buf)-1]
+ return last.off + last.n
+}
+
+func (r *rleBuffer) Write(p []byte) (n int, err error) {
+ var rp *repeatedByte
+ if len(r.buf) > 0 {
+ rp = &r.buf[len(r.buf)-1]
+ // Fast path, if p is entirely the same byte repeated.
+ if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
+ if bytes.Count(p, []byte{lastByte}) == len(p) {
+ rp.n += int64(len(p))
+ return len(p), nil
+ }
+ }
+ }
+
+ for _, b := range p {
+ if rp == nil || rp.b != b {
+ r.buf = append(r.buf, repeatedByte{r.Size(), b, 1})
+ rp = &r.buf[len(r.buf)-1]
+ } else {
+ rp.n++
+ }
+ }
+ return len(p), nil
+}
+
+func min(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func memset(a []byte, b byte) {
+ if len(a) == 0 {
+ return
+ }
+	// Double the initialized prefix until we reach a power of 2 >= len(a),
+	// the same approach as bytes.Repeat, but without allocation.
+ a[0] = b
+ for i, l := 1, len(a); i < l; i *= 2 {
+ copy(a[i:], a[:i])
+ }
+}
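+
+// A short trace of the doubling fill above for len(a) == 5 (illustrative):
+//
+//	a[0] = b           // 1 byte set
+//	copy(a[1:], a[:1]) // 2 bytes set
+//	copy(a[2:], a[:2]) // 4 bytes set
+//	copy(a[4:], a[:4]) // copy stops at len(a); all 5 set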
+
+func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
+ if len(p) == 0 {
+ return
+ }
+ skipParts := sort.Search(len(r.buf), func(i int) bool {
+ part := &r.buf[i]
+ return part.off+part.n > off
+ })
+ parts := r.buf[skipParts:]
+ if len(parts) > 0 {
+ skipBytes := off - parts[0].off
+ for _, part := range parts {
+ repeat := int(min(part.n-skipBytes, int64(len(p)-n)))
+ memset(p[n:n+repeat], part.b)
+ n += repeat
+ if n == len(p) {
+ return
+ }
+ skipBytes = 0
+ }
+ }
+ if n != len(p) {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// Just testing the rleBuffer used in the Zip64 tests below. Not used by the zip code.
+func TestRLEBuffer(t *testing.T) {
+ b := new(rleBuffer)
+ var all []byte
+ writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"}
+ for _, w := range writes {
+ b.Write([]byte(w))
+ all = append(all, w...)
+ }
+ if len(b.buf) != 10 {
+ t.Fatalf("len(b.buf) = %d; want 10", len(b.buf))
+ }
+
+ for i := 0; i < len(all); i++ {
+ for j := 0; j < len(all)-i; j++ {
+ buf := make([]byte, j)
+ n, err := b.ReadAt(buf, int64(i))
+ if err != nil || n != len(buf) {
+ t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf))
+ }
+ if !bytes.Equal(buf, all[i:i+j]) {
+ t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j])
+ }
+ }
+ }
+}
+
+// fakeHash32 is a dummy Hash32 that always returns 0.
+type fakeHash32 struct {
+ hash.Hash32
+}
+
+func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }
+func (fakeHash32) Sum32() uint32 { return 0 }
+
+func TestZip64(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ t.Parallel()
+ const size = 1 << 32 // before the "END\n" part
+ buf := testZip64(t, size)
+ testZip64DirectoryRecordLength(buf, t)
+}
+
+func TestZip64EdgeCase(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ t.Parallel()
+ // Test a zip file with uncompressed size 0xFFFFFFFF.
+ // That's the magic marker for a 64-bit file, so even though
+ // it fits in a 32-bit field we must use the 64-bit field.
+ // Go 1.5 and earlier got this wrong,
+ // writing an invalid zip file.
+ const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part
+ buf := testZip64(t, size)
+ testZip64DirectoryRecordLength(buf, t)
+}
+
+// Tests that we generate a zip64 file if the directory starts at offset
+// 0xFFFFFFFF, but not before.
+func TestZip64DirectoryOffset(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ t.Parallel()
+ const filename = "huge.txt"
+ gen := func(wantOff uint64) func(*Writer) {
+ return func(w *Writer) {
+ w.testHookCloseSizeOffset = func(size, off uint64) {
+ if off != wantOff {
+ t.Errorf("central directory offset = %d (%x); want %d", off, off, wantOff)
+ }
+ }
+ f, err := w.CreateHeader(&FileHeader{
+ Name: filename,
+ Method: Store,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.(*fileWriter).crc32 = fakeHash32{}
+ size := wantOff - fileHeaderLen - uint64(len(filename)) - dataDescriptorLen
+ if _, err := io.CopyN(f, zeros{}, int64(size)); err != nil {
+ t.Fatal(err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ t.Run("uint32max-2_NoZip64", func(t *testing.T) {
+ t.Parallel()
+ if generatesZip64(t, gen(0xfffffffe)) {
+ t.Error("unexpected zip64")
+ }
+ })
+ t.Run("uint32max-1_Zip64", func(t *testing.T) {
+ t.Parallel()
+ if !generatesZip64(t, gen(0xffffffff)) {
+ t.Error("expected zip64")
+ }
+ })
+}
+
+// At 65535 (0xffff) records, we need to generate a zip64 file.
+func TestZip64ManyRecords(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ t.Parallel()
+ gen := func(numRec int) func(*Writer) {
+ return func(w *Writer) {
+ for i := 0; i < numRec; i++ {
+ _, err := w.CreateHeader(&FileHeader{
+ Name: "a.txt",
+ Method: Store,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+	// 65534 records shouldn't make a zip64:
+ t.Run("uint16max-1_NoZip64", func(t *testing.T) {
+ t.Parallel()
+ if generatesZip64(t, gen(0xfffe)) {
+ t.Error("unexpected zip64")
+ }
+ })
+	// 65535 records should make a zip64:
+ t.Run("uint16max_Zip64", func(t *testing.T) {
+ t.Parallel()
+ if !generatesZip64(t, gen(0xffff)) {
+ t.Error("expected zip64")
+ }
+ })
+}
+
+// suffixSaver is an io.Writer & io.ReaderAt that remembers the last 0
+// to 'keep' bytes of data written to it. Call Suffix to get the
+// suffix bytes.
+type suffixSaver struct {
+ keep int
+ buf []byte
+ start int
+ size int64
+}
+
+func (ss *suffixSaver) Size() int64 { return ss.size }
+
+var errDiscardedBytes = errors.New("ReadAt of discarded bytes")
+
+func (ss *suffixSaver) ReadAt(p []byte, off int64) (n int, err error) {
+ back := ss.size - off
+ if back > int64(ss.keep) {
+ return 0, errDiscardedBytes
+ }
+ suf := ss.Suffix()
+ n = copy(p, suf[len(suf)-int(back):])
+ if n != len(p) {
+ err = io.EOF
+ }
+ return
+}
+
+func (ss *suffixSaver) Suffix() []byte {
+ if len(ss.buf) < ss.keep {
+ return ss.buf
+ }
+ buf := make([]byte, ss.keep)
+ n := copy(buf, ss.buf[ss.start:])
+ copy(buf[n:], ss.buf[:])
+ return buf
+}
+
+func (ss *suffixSaver) Write(p []byte) (n int, err error) {
+ n = len(p)
+ ss.size += int64(len(p))
+ if len(ss.buf) < ss.keep {
+ space := ss.keep - len(ss.buf)
+ add := len(p)
+ if add > space {
+ add = space
+ }
+ ss.buf = append(ss.buf, p[:add]...)
+ p = p[add:]
+ }
+ for len(p) > 0 {
+ n := copy(ss.buf[ss.start:], p)
+ p = p[n:]
+ ss.start += n
+ if ss.start == ss.keep {
+ ss.start = 0
+ }
+ }
+ return
+}
+
+// generatesZip64 reports whether f wrote a zip64 file.
+// f is also responsible for closing w.
+func generatesZip64(t *testing.T, f func(w *Writer)) bool {
+ ss := &suffixSaver{keep: 10 << 20}
+ w := NewWriter(ss)
+ f(w)
+ return suffixIsZip64(t, ss)
+}
+
+type sizedReaderAt interface {
+ io.ReaderAt
+ Size() int64
+}
+
+func suffixIsZip64(t *testing.T, zip sizedReaderAt) bool {
+ d := make([]byte, 1024)
+ if _, err := zip.ReadAt(d, zip.Size()-int64(len(d))); err != nil {
+ t.Fatalf("ReadAt: %v", err)
+ }
+
+ sigOff := findSignatureInBlock(d)
+ if sigOff == -1 {
+ t.Errorf("failed to find signature in block")
+ return false
+ }
+
+ dirOff, err := findDirectory64End(zip, zip.Size()-int64(len(d))+int64(sigOff))
+ if err != nil {
+ t.Fatalf("findDirectory64End: %v", err)
+ }
+ if dirOff == -1 {
+ return false
+ }
+
+ d = make([]byte, directory64EndLen)
+ if _, err := zip.ReadAt(d, dirOff); err != nil {
+ t.Fatalf("ReadAt(off=%d): %v", dirOff, err)
+ }
+
+ b := readBuf(d)
+ if sig := b.uint32(); sig != directory64EndSignature {
+ return false
+ }
+
+ size := b.uint64()
+ if size != directory64EndLen-12 {
+ t.Errorf("expected length of %d, got %d", directory64EndLen-12, size)
+ }
+ return true
+}
+
+// Zip64 is required if the total size of the central directory records is uint32max or more.
+func TestZip64LargeDirectory(t *testing.T) {
+ if runtime.GOARCH == "wasm" {
+ t.Skip("too slow on wasm")
+ }
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ t.Parallel()
+	// gen returns a func that writes a zip with wantLen bytes
+	// of central directory.
+ gen := func(wantLen int64) func(*Writer) {
+ return func(w *Writer) {
+ w.testHookCloseSizeOffset = func(size, off uint64) {
+ if size != uint64(wantLen) {
+ t.Errorf("Close central directory size = %d; want %d", size, wantLen)
+ }
+ }
+
+ uint16string := strings.Repeat(".", uint16max)
+ remain := wantLen
+ for remain > 0 {
+ commentLen := int(uint16max) - directoryHeaderLen - 1
+ thisRecLen := directoryHeaderLen + int(uint16max) + commentLen
+ if int64(thisRecLen) > remain {
+ remove := thisRecLen - int(remain)
+ commentLen -= remove
+ thisRecLen -= remove
+ }
+ remain -= int64(thisRecLen)
+ f, err := w.CreateHeader(&FileHeader{
+ Name: uint16string,
+ Comment: uint16string[:commentLen],
+ })
+ if err != nil {
+ t.Fatalf("CreateHeader: %v", err)
+ }
+ f.(*fileWriter).crc32 = fakeHash32{}
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ }
+ }
+ t.Run("uint32max-1_NoZip64", func(t *testing.T) {
+ t.Parallel()
+ if generatesZip64(t, gen(uint32max-1)) {
+ t.Error("unexpected zip64")
+ }
+ })
+ t.Run("uint32max_HasZip64", func(t *testing.T) {
+ t.Parallel()
+ if !generatesZip64(t, gen(uint32max)) {
+ t.Error("expected zip64")
+ }
+ })
+}
+
+func testZip64(t testing.TB, size int64) *rleBuffer {
+ const chunkSize = 1024
+ chunks := int(size / chunkSize)
+ // write size bytes plus "END\n" to a zip file
+ buf := new(rleBuffer)
+ w := NewWriter(buf)
+ f, err := w.CreateHeader(&FileHeader{
+ Name: "huge.txt",
+ Method: Store,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.(*fileWriter).crc32 = fakeHash32{}
+ chunk := make([]byte, chunkSize)
+ for i := range chunk {
+ chunk[i] = '.'
+ }
+ for i := 0; i < chunks; i++ {
+ _, err := f.Write(chunk)
+ if err != nil {
+ t.Fatal("write chunk:", err)
+ }
+ }
+ if frag := int(size % chunkSize); frag > 0 {
+ _, err := f.Write(chunk[:frag])
+ if err != nil {
+ t.Fatal("write chunk:", err)
+ }
+ }
+ end := []byte("END\n")
+ _, err = f.Write(end)
+ if err != nil {
+ t.Fatal("write end:", err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // read back zip file and check that we get to the end of it
+ r, err := NewReader(buf, int64(buf.Size()))
+ if err != nil {
+ t.Fatal("reader:", err)
+ }
+ f0 := r.File[0]
+ rc, err := f0.Open()
+ if err != nil {
+ t.Fatal("opening:", err)
+ }
+ rc.(*checksumReader).hash = fakeHash32{}
+ for i := 0; i < chunks; i++ {
+ _, err := io.ReadFull(rc, chunk)
+ if err != nil {
+ t.Fatal("read:", err)
+ }
+ }
+ if frag := int(size % chunkSize); frag > 0 {
+ _, err := io.ReadFull(rc, chunk[:frag])
+ if err != nil {
+ t.Fatal("read:", err)
+ }
+ }
+ gotEnd, err := io.ReadAll(rc)
+ if err != nil {
+ t.Fatal("read end:", err)
+ }
+ if !bytes.Equal(gotEnd, end) {
+ t.Errorf("End of zip64 archive %q, want %q", gotEnd, end)
+ }
+ err = rc.Close()
+ if err != nil {
+ t.Fatal("closing:", err)
+ }
+ if size+int64(len("END\n")) >= 1<<32-1 {
+ if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
+ t.Errorf("UncompressedSize %#x, want %#x", got, want)
+ }
+ }
+
+ if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
+ t.Errorf("UncompressedSize64 %#x, want %#x", got, want)
+ }
+
+ return buf
+}
+
+// Issue 9857
+func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
+ if !suffixIsZip64(t, buf) {
+ t.Fatal("not a zip64")
+ }
+}
+
+func testValidHeader(h *FileHeader, t *testing.T) {
+ var buf bytes.Buffer
+ z := NewWriter(&buf)
+
+ f, err := z.CreateHeader(h)
+ if err != nil {
+ t.Fatalf("error creating header: %v", err)
+ }
+ if _, err := f.Write([]byte("hi")); err != nil {
+ t.Fatalf("error writing content: %v", err)
+ }
+ if err := z.Close(); err != nil {
+ t.Fatalf("error closing zip writer: %v", err)
+ }
+
+ b := buf.Bytes()
+ zf, err := NewReader(bytes.NewReader(b), int64(len(b)))
+ if err != nil {
+ t.Fatalf("got %v, expected nil", err)
+ }
+ zh := zf.File[0].FileHeader
+ if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != uint64(len("hi")) {
+ t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi"))
+ }
+}
+
+// Issue 4302.
+func TestHeaderInvalidTagAndSize(t *testing.T) {
+ const timeFormat = "20060102T150405.000.txt"
+
+ ts := time.Now()
+ filename := ts.Format(timeFormat)
+
+ h := FileHeader{
+ Name: filename,
+ Method: Deflate,
+ Extra: []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing
+ }
+ h.SetModTime(ts)
+
+ testValidHeader(&h, t)
+}
+
+func TestHeaderTooShort(t *testing.T) {
+ h := FileHeader{
+ Name: "foo.txt",
+ Method: Deflate,
+ Extra: []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing
+ }
+ testValidHeader(&h, t)
+}
+
+func TestHeaderTooLongErr(t *testing.T) {
+ var headerTests = []struct {
+ name string
+ extra []byte
+ wanterr error
+ }{
+ {
+ name: strings.Repeat("x", 1<<16),
+ extra: []byte{},
+ wanterr: errLongName,
+ },
+ {
+ name: "long_extra",
+ extra: bytes.Repeat([]byte{0xff}, 1<<16),
+ wanterr: errLongExtra,
+ },
+ }
+
+ // write a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+
+ for _, test := range headerTests {
+ h := &FileHeader{
+ Name: test.name,
+ Extra: test.extra,
+ }
+ _, err := w.CreateHeader(h)
+ if err != test.wanterr {
+ t.Errorf("error=%v, want %v", err, test.wanterr)
+ }
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestHeaderIgnoredSize(t *testing.T) {
+ h := FileHeader{
+ Name: "foo.txt",
+ Method: Deflate,
+ Extra: []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
+ }
+ testValidHeader(&h, t)
+}
+
+// Issue 4393. It is valid to have an extra data header
+// which contains no body.
+func TestZeroLengthHeader(t *testing.T) {
+ h := FileHeader{
+ Name: "extadata.txt",
+ Method: Deflate,
+ Extra: []byte{
+ 85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5
+ 85, 120, 0, 0, // tag 30805 size 0
+ },
+ }
+ testValidHeader(&h, t)
+}
+
+// Just benchmarking how fast the Zip64 test above is. Not related to
+// our zip performance, since the test above disables CRC32 and flate.
+func BenchmarkZip64Test(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testZip64(b, 1<<26)
+ }
+}
+
+func BenchmarkZip64TestSizes(b *testing.B) {
+ for _, size := range []int64{1 << 12, 1 << 20, 1 << 26} {
+ b.Run(fmt.Sprint(size), func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ testZip64(b, size)
+ }
+ })
+ })
+ }
+}
+
+func TestSuffixSaver(t *testing.T) {
+ const keep = 10
+ ss := &suffixSaver{keep: keep}
+ ss.Write([]byte("abc"))
+ if got := string(ss.Suffix()); got != "abc" {
+ t.Errorf("got = %q; want abc", got)
+ }
+ ss.Write([]byte("defghijklmno"))
+ if got := string(ss.Suffix()); got != "fghijklmno" {
+ t.Errorf("got = %q; want fghijklmno", got)
+ }
+ if got, want := ss.Size(), int64(len("abc")+len("defghijklmno")); got != want {
+ t.Errorf("Size = %d; want %d", got, want)
+ }
+ buf := make([]byte, ss.Size())
+ for off := int64(0); off < ss.Size(); off++ {
+ for size := 1; size <= int(ss.Size()-off); size++ {
+ readBuf := buf[:size]
+ n, err := ss.ReadAt(readBuf, off)
+ if off < ss.Size()-keep {
+ if err != errDiscardedBytes {
+ t.Errorf("off %d, size %d = %v, %v (%q); want errDiscardedBytes", off, size, n, err, readBuf[:n])
+ }
+ continue
+ }
+ want := "abcdefghijklmno"[off : off+int64(size)]
+ got := string(readBuf[:n])
+ if err != nil || got != want {
+ t.Errorf("off %d, size %d = %v, %v (%q); want %q", off, size, n, err, got, want)
+ }
+ }
+ }
+
+}
+
+type zeros struct{}
+
+func (zeros) Read(p []byte) (int, error) {
+ for i := range p {
+ p[i] = 0
+ }
+ return len(p), nil
+}