author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312
tree      620249daf56c0258faa40cbdcf9cfba06de2a846  /third_party/rust/zip/src
parent    Initial commit.
Adding upstream version 110.0.1. (refs: upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/zip/src')
-rw-r--r--  third_party/rust/zip/src/aes.rs          185
-rw-r--r--  third_party/rust/zip/src/aes_ctr.rs      281
-rw-r--r--  third_party/rust/zip/src/compression.rs  207
-rw-r--r--  third_party/rust/zip/src/cp437.rs        204
-rw-r--r--  third_party/rust/zip/src/crc32.rs        100
-rw-r--r--  third_party/rust/zip/src/lib.rs           44
-rw-r--r--  third_party/rust/zip/src/read.rs        1310
-rw-r--r--  third_party/rust/zip/src/result.rs        83
-rw-r--r--  third_party/rust/zip/src/spec.rs         207
-rw-r--r--  third_party/rust/zip/src/types.rs        573
-rw-r--r--  third_party/rust/zip/src/write.rs       1471
-rw-r--r--  third_party/rust/zip/src/zipcrypto.rs    184
12 files changed, 4849 insertions, 0 deletions
diff --git a/third_party/rust/zip/src/aes.rs b/third_party/rust/zip/src/aes.rs
new file mode 100644
index 0000000000..8997705c69
--- /dev/null
+++ b/third_party/rust/zip/src/aes.rs
@@ -0,0 +1,185 @@
+//! Implementation of AES decryption for ZIP files.
+//!
+//! This was implemented according to the [WinZip specification](https://www.winzip.com/win/en/aes_info.html).
+//! Note that whether the CRC is used alongside AES depends on the encryption specification in use, AE-1 or AE-2.
+//! If the file is marked as encrypted with AE-2, the CRC field is ignored, even if it isn't set to 0.
+
+use crate::aes_ctr;
+use crate::types::AesMode;
+use constant_time_eq::constant_time_eq;
+use hmac::{Hmac, Mac};
+use sha1::Sha1;
+use std::io::{self, Read};
+
+/// The length of the password verification value in bytes
+const PWD_VERIFY_LENGTH: usize = 2;
+/// The length of the authentication code in bytes
+const AUTH_CODE_LENGTH: usize = 10;
+/// The number of iterations used with PBKDF2
+const ITERATION_COUNT: u32 = 1000;
+
+/// Create an `AesCipher` for the given `AesMode` and `key`.
+///
+/// # Panics
+///
+/// This panics if `key` doesn't have the correct size for the chosen AES mode.
+fn cipher_from_mode(aes_mode: AesMode, key: &[u8]) -> Box<dyn aes_ctr::AesCipher> {
+ match aes_mode {
+ AesMode::Aes128 => Box::new(aes_ctr::AesCtrZipKeyStream::<aes_ctr::Aes128>::new(key))
+ as Box<dyn aes_ctr::AesCipher>,
+ AesMode::Aes192 => Box::new(aes_ctr::AesCtrZipKeyStream::<aes_ctr::Aes192>::new(key))
+ as Box<dyn aes_ctr::AesCipher>,
+ AesMode::Aes256 => Box::new(aes_ctr::AesCtrZipKeyStream::<aes_ctr::Aes256>::new(key))
+ as Box<dyn aes_ctr::AesCipher>,
+ }
+}
+
+// An AES-encrypted file starts with a salt, whose length depends on the AES mode in use,
+// followed by a 2-byte password verification value,
+// then the variable-length encrypted data,
+// and lastly a 10-byte authentication code.
+pub struct AesReader<R> {
+ reader: R,
+ aes_mode: AesMode,
+ data_length: u64,
+}
+
+impl<R: Read> AesReader<R> {
+ pub fn new(reader: R, aes_mode: AesMode, compressed_size: u64) -> AesReader<R> {
+ let data_length = compressed_size
+ - (PWD_VERIFY_LENGTH + AUTH_CODE_LENGTH + aes_mode.salt_length()) as u64;
+
+ Self {
+ reader,
+ aes_mode,
+ data_length,
+ }
+ }
+
+ /// Read the AES header bytes and validate the password.
+ ///
+ /// Even if the validation succeeds, there is still a 1 in 65536 chance that an incorrect
+ /// password was provided.
+ /// It isn't possible to check the authentication code in this step. This will be done after
+ /// reading and decrypting the file.
+ ///
+ /// # Returns
+ ///
+ /// If the password verification fails, `Ok(None)` is returned to match the `validate`
+ /// method of `ZipCryptoReader`.
+ pub fn validate(mut self, password: &[u8]) -> io::Result<Option<AesReaderValid<R>>> {
+ let salt_length = self.aes_mode.salt_length();
+ let key_length = self.aes_mode.key_length();
+
+ let mut salt = vec![0; salt_length];
+ self.reader.read_exact(&mut salt)?;
+
+ // next are 2 bytes used for password verification
+ let mut pwd_verification_value = vec![0; PWD_VERIFY_LENGTH];
+ self.reader.read_exact(&mut pwd_verification_value)?;
+
+ // derive a key from the password and salt
+ // the length depends on the aes key length
+ let derived_key_len = 2 * key_length + PWD_VERIFY_LENGTH;
+ let mut derived_key: Vec<u8> = vec![0; derived_key_len];
+
+ // use PBKDF2 with HMAC-Sha1 to derive the key
+ pbkdf2::pbkdf2::<Hmac<Sha1>>(password, &salt, ITERATION_COUNT, &mut derived_key);
+ let decrypt_key = &derived_key[0..key_length];
+ let hmac_key = &derived_key[key_length..key_length * 2];
+ let pwd_verify = &derived_key[derived_key_len - 2..];
+
+ // the last 2 bytes should equal the password verification value
+ if pwd_verification_value != pwd_verify {
+ // wrong password
+ return Ok(None);
+ }
+
+ let cipher = cipher_from_mode(self.aes_mode, decrypt_key);
+ let hmac = Hmac::<Sha1>::new_from_slice(hmac_key).unwrap();
+
+ Ok(Some(AesReaderValid {
+ reader: self.reader,
+ data_remaining: self.data_length,
+ cipher,
+ hmac,
+ finalized: false,
+ }))
+ }
+}
+
+/// A reader for AES-encrypted files, which has already passed the first password check.
+///
+/// There is a 1 in 65536 chance that an invalid password passes that check.
+/// After the data has been read and decrypted, an HMAC is verified as a final means of
+/// detecting either an invalid password or tampered data.
+pub struct AesReaderValid<R: Read> {
+ reader: R,
+ data_remaining: u64,
+ cipher: Box<dyn aes_ctr::AesCipher>,
+ hmac: Hmac<Sha1>,
+ finalized: bool,
+}
+
+impl<R: Read> Read for AesReaderValid<R> {
+ /// This implementation does not fulfill all requirements set in the trait documentation.
+ ///
+ /// ```txt
+ /// "If an error is returned then it must be guaranteed that no bytes were read."
+ /// ```
+ ///
+ /// Whether this applies to errors that occur while reading the encrypted data depends on the
+ /// underlying reader. If the error occurs while verifying the HMAC, the reader might become
+ /// practically unusable, since its position after the error is not known.
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ if self.data_remaining == 0 {
+ return Ok(0);
+ }
+
+ // get the number of bytes to read, compare as u64 to make sure we can read more than
+ // 2^32 bytes even on 32 bit systems.
+ let bytes_to_read = self.data_remaining.min(buf.len() as u64) as usize;
+ let read = self.reader.read(&mut buf[0..bytes_to_read])?;
+ self.data_remaining -= read as u64;
+
+ // Update the hmac with the encrypted data
+ self.hmac.update(&buf[0..read]);
+
+ // decrypt the data
+ self.cipher.crypt_in_place(&mut buf[0..read]);
+
+ // if there is no data left to read, check the integrity of the data
+ if self.data_remaining == 0 {
+ assert!(
+ !self.finalized,
+ "Tried to use an already finalized HMAC. This is a bug!"
+ );
+ self.finalized = true;
+
+ // Zip uses HMAC-Sha1-80, which only uses the first half of the hash
+ // see https://www.winzip.com/win/en/aes_info.html#auth-faq
+ let mut read_auth_code = [0; AUTH_CODE_LENGTH];
+ self.reader.read_exact(&mut read_auth_code)?;
+ let computed_auth_code = &self.hmac.finalize_reset().into_bytes()[0..AUTH_CODE_LENGTH];
+
+ // use constant time comparison to mitigate timing attacks
+ if !constant_time_eq(computed_auth_code, &read_auth_code) {
+ return Err(
+ io::Error::new(
+ io::ErrorKind::InvalidData,
+ "Invalid authentication code, this could be due to an invalid password or errors in the data"
+ )
+ );
+ }
+ }
+
+ Ok(read)
+ }
+}
+
+impl<R: Read> AesReaderValid<R> {
+ /// Consumes this decoder, returning the underlying reader.
+ pub fn into_inner(self) -> R {
+ self.reader
+ }
+}
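Editor's note, not part of the vendored diff: a minimal sketch of the key-derivation step that `AesReader::validate` performs above, assuming the same `pbkdf2`, `hmac` and `sha1` crate versions this module builds against. The helper name and the choice of AES-256 sizes are illustrative only.

```rust
// Hypothetical helper: derive the cipher key, HMAC key and 2-byte password
// verifier for an AES-256 entry, mirroring the split done in AesReader::validate.
use hmac::Hmac;
use sha1::Sha1;

fn derive_aes256_keys(password: &[u8], salt: &[u8]) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
    const KEY_LENGTH: usize = 32; // AES-256 key size (the matching salt is 16 bytes)
    const PWD_VERIFY_LENGTH: usize = 2;
    const ITERATION_COUNT: u32 = 1000;

    // PBKDF2-HMAC-SHA1 yields 2 * key length (cipher key + HMAC key) plus 2 verifier bytes.
    let mut derived = vec![0u8; 2 * KEY_LENGTH + PWD_VERIFY_LENGTH];
    pbkdf2::pbkdf2::<Hmac<Sha1>>(password, salt, ITERATION_COUNT, &mut derived);

    let decrypt_key = derived[..KEY_LENGTH].to_vec();
    let hmac_key = derived[KEY_LENGTH..2 * KEY_LENGTH].to_vec();
    let verifier = derived[2 * KEY_LENGTH..].to_vec();
    (decrypt_key, hmac_key, verifier)
}
```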
diff --git a/third_party/rust/zip/src/aes_ctr.rs b/third_party/rust/zip/src/aes_ctr.rs
new file mode 100644
index 0000000000..0f34335cb4
--- /dev/null
+++ b/third_party/rust/zip/src/aes_ctr.rs
@@ -0,0 +1,281 @@
+//! A counter mode (CTR) for AES to work with the encryption used in zip files.
+//!
+//! This was implemented because the ZIP specification requires the mode to use no nonce and a
+//! different byte order (little endian) than NIST (big endian).
+//! See [AesCtrZipKeyStream](./struct.AesCtrZipKeyStream.html) for more information.
+
+use aes::cipher::generic_array::GenericArray;
+use aes::{BlockEncrypt, NewBlockCipher};
+use byteorder::WriteBytesExt;
+use std::{any, fmt};
+
+/// Internal block size of an AES cipher.
+const AES_BLOCK_SIZE: usize = 16;
+
+/// AES-128.
+#[derive(Debug)]
+pub struct Aes128;
+/// AES-192.
+#[derive(Debug)]
+pub struct Aes192;
+/// AES-256.
+#[derive(Debug)]
+pub struct Aes256;
+
+/// An AES cipher kind.
+pub trait AesKind {
+ /// Key type.
+ type Key: AsRef<[u8]>;
+ /// Cipher used to decrypt.
+ type Cipher;
+}
+
+impl AesKind for Aes128 {
+ type Key = [u8; 16];
+ type Cipher = aes::Aes128;
+}
+
+impl AesKind for Aes192 {
+ type Key = [u8; 24];
+ type Cipher = aes::Aes192;
+}
+
+impl AesKind for Aes256 {
+ type Key = [u8; 32];
+ type Cipher = aes::Aes256;
+}
+
+/// An AES-CTR key stream generator.
+///
+/// Implements the slightly non-standard AES-CTR variant used by WinZip AES encryption.
+///
+/// Typical AES-CTR implementations combine a nonce with a 64-bit counter. WinZip AES instead uses
+/// no nonce and a different byte order (little endian) than NIST (big endian).
+///
+/// Key stream blocks are generated on demand; encryption or decryption is performed by XOR-ing the
+/// bytes from the key stream with the ciphertext/plaintext via `crypt_in_place`.
+pub struct AesCtrZipKeyStream<C: AesKind> {
+ /// Current AES counter.
+ counter: u128,
+ /// AES cipher instance.
+ cipher: C::Cipher,
+ /// Stores the currently available keystream bytes.
+ buffer: [u8; AES_BLOCK_SIZE],
+ /// Number of bytes already used up from `buffer`.
+ pos: usize,
+}
+
+impl<C> fmt::Debug for AesCtrZipKeyStream<C>
+where
+ C: AesKind,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "AesCtrZipKeyStream<{}>(counter: {})",
+ any::type_name::<C>(),
+ self.counter
+ )
+ }
+}
+
+impl<C> AesCtrZipKeyStream<C>
+where
+ C: AesKind,
+ C::Cipher: NewBlockCipher,
+{
+ /// Creates a new zip variant AES-CTR key stream.
+ ///
+ /// # Panics
+ ///
+ /// This panics if `key` doesn't have the correct size for cipher `C`.
+ pub fn new(key: &[u8]) -> AesCtrZipKeyStream<C> {
+ AesCtrZipKeyStream {
+ counter: 1,
+ cipher: C::Cipher::new(GenericArray::from_slice(key)),
+ buffer: [0u8; AES_BLOCK_SIZE],
+ pos: AES_BLOCK_SIZE,
+ }
+ }
+}
+
+impl<C> AesCipher for AesCtrZipKeyStream<C>
+where
+ C: AesKind,
+ C::Cipher: BlockEncrypt,
+{
+ /// Decrypt or encrypt `target`.
+ #[inline]
+ fn crypt_in_place(&mut self, mut target: &mut [u8]) {
+ while !target.is_empty() {
+ if self.pos == AES_BLOCK_SIZE {
+ // Note: AES block size is always 16 bytes, same as u128.
+ self.buffer
+ .as_mut()
+ .write_u128::<byteorder::LittleEndian>(self.counter)
+ .expect("did not expect u128 le conversion to fail");
+ self.cipher
+ .encrypt_block(GenericArray::from_mut_slice(&mut self.buffer));
+ self.counter += 1;
+ self.pos = 0;
+ }
+
+ let target_len = target.len().min(AES_BLOCK_SIZE - self.pos);
+
+ xor(
+ &mut target[0..target_len],
+ &self.buffer[self.pos..(self.pos + target_len)],
+ );
+ target = &mut target[target_len..];
+ self.pos += target_len;
+ }
+ }
+}
+
+/// This trait allows using generic AES ciphers with different key sizes.
+pub trait AesCipher {
+ fn crypt_in_place(&mut self, target: &mut [u8]);
+}
+
+/// XORs a slice in place with another slice.
+#[inline]
+fn xor(dest: &mut [u8], src: &[u8]) {
+ assert_eq!(dest.len(), src.len());
+
+ for (lhs, rhs) in dest.iter_mut().zip(src.iter()) {
+ *lhs ^= *rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Aes128, Aes192, Aes256, AesCipher, AesCtrZipKeyStream, AesKind};
+ use aes::{BlockEncrypt, NewBlockCipher};
+
+ /// Checks whether `crypt_in_place` produces the correct plaintext on the first pass and
+ /// yields the original ciphertext again when applied a second time.
+ fn roundtrip<Aes>(key: &[u8], ciphertext: &mut [u8], expected_plaintext: &[u8])
+ where
+ Aes: AesKind,
+ Aes::Cipher: NewBlockCipher + BlockEncrypt,
+ {
+ let mut key_stream = AesCtrZipKeyStream::<Aes>::new(key);
+
+ let mut plaintext: Vec<u8> = ciphertext.to_vec();
+ key_stream.crypt_in_place(plaintext.as_mut_slice());
+ assert_eq!(plaintext, expected_plaintext.to_vec());
+
+ // Round-tripping should yield the ciphertext again.
+ let mut key_stream = AesCtrZipKeyStream::<Aes>::new(key);
+ key_stream.crypt_in_place(&mut plaintext);
+ assert_eq!(plaintext, ciphertext.to_vec());
+ }
+
+ #[test]
+ #[should_panic]
+ fn new_with_wrong_key_size() {
+ AesCtrZipKeyStream::<Aes128>::new(&[1, 2, 3, 4, 5]);
+ }
+
+ // The data used in these tests was generated with p7zip without any compression.
+ // It's not possible to recreate the exact same data, since a random salt is used for encryption.
+ // `7z a -phelloworld -mem=AES256 -mx=0 aes256_40byte.zip 40byte_data.txt`
+ #[test]
+ fn crypt_aes_256_0_byte() {
+ let mut ciphertext = [];
+ let expected_plaintext = &[];
+ let key = [
+ 0x0b, 0xec, 0x2e, 0xf2, 0x46, 0xf0, 0x7e, 0x35, 0x16, 0x54, 0xe0, 0x98, 0x10, 0xb3,
+ 0x18, 0x55, 0x24, 0xa3, 0x9e, 0x0e, 0x40, 0xe7, 0x92, 0xad, 0xb2, 0x8a, 0x48, 0xf4,
+ 0x5c, 0xd0, 0xc0, 0x54,
+ ];
+
+ roundtrip::<Aes256>(&key, &mut ciphertext, expected_plaintext);
+ }
+
+ #[test]
+ fn crypt_aes_128_5_byte() {
+ let mut ciphertext = [0x98, 0xa9, 0x8c, 0x26, 0x0e];
+ let expected_plaintext = b"asdf\n";
+ let key = [
+ 0xe0, 0x25, 0x7b, 0x57, 0x97, 0x6a, 0xa4, 0x23, 0xab, 0x94, 0xaa, 0x44, 0xfd, 0x47,
+ 0x4f, 0xa5,
+ ];
+
+ roundtrip::<Aes128>(&key, &mut ciphertext, expected_plaintext);
+ }
+
+ #[test]
+ fn crypt_aes_192_5_byte() {
+ let mut ciphertext = [0x36, 0x55, 0x5c, 0x61, 0x3c];
+ let expected_plaintext = b"asdf\n";
+ let key = [
+ 0xe4, 0x4a, 0x88, 0x52, 0x8f, 0xf7, 0x0b, 0x81, 0x7b, 0x75, 0xf1, 0x74, 0x21, 0x37,
+ 0x8c, 0x90, 0xad, 0xbe, 0x4a, 0x65, 0xa8, 0x96, 0x0e, 0xcc,
+ ];
+
+ roundtrip::<Aes192>(&key, &mut ciphertext, expected_plaintext);
+ }
+
+ #[test]
+ fn crypt_aes_256_5_byte() {
+ let mut ciphertext = [0xc2, 0x47, 0xc0, 0xdc, 0x56];
+ let expected_plaintext = b"asdf\n";
+ let key = [
+ 0x79, 0x5e, 0x17, 0xf2, 0xc6, 0x3d, 0x28, 0x9b, 0x4b, 0x4b, 0xbb, 0xa9, 0xba, 0xc9,
+ 0xa5, 0xee, 0x3a, 0x4f, 0x0f, 0x4b, 0x29, 0xbd, 0xe9, 0xb8, 0x41, 0x9c, 0x41, 0xa5,
+ 0x15, 0xb2, 0x86, 0xab,
+ ];
+
+ roundtrip::<Aes256>(&key, &mut ciphertext, expected_plaintext);
+ }
+
+ #[test]
+ fn crypt_aes_128_40_byte() {
+ let mut ciphertext = [
+ 0xcf, 0x72, 0x6b, 0xa1, 0xb2, 0x0f, 0xdf, 0xaa, 0x10, 0xad, 0x9c, 0x7f, 0x6d, 0x1c,
+ 0x8d, 0xb5, 0x16, 0x7e, 0xbb, 0x11, 0x69, 0x52, 0x8c, 0x89, 0x80, 0x32, 0xaa, 0x76,
+ 0xa6, 0x18, 0x31, 0x98, 0xee, 0xdd, 0x22, 0x68, 0xb7, 0xe6, 0x77, 0xd2,
+ ];
+ let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
+ let key = [
+ 0x43, 0x2b, 0x6d, 0xbe, 0x05, 0x76, 0x6c, 0x9e, 0xde, 0xca, 0x3b, 0xf8, 0xaf, 0x5d,
+ 0x81, 0xb6,
+ ];
+
+ roundtrip::<Aes128>(&key, &mut ciphertext, expected_plaintext);
+ }
+
+ #[test]
+ fn crypt_aes_192_40_byte() {
+ let mut ciphertext = [
+ 0xa6, 0xfc, 0x52, 0x79, 0x2c, 0x6c, 0xfe, 0x68, 0xb1, 0xa8, 0xb3, 0x07, 0x52, 0x8b,
+ 0x82, 0xa6, 0x87, 0x9c, 0x72, 0x42, 0x3a, 0xf8, 0xc6, 0xa9, 0xc9, 0xfb, 0x61, 0x19,
+ 0x37, 0xb9, 0x56, 0x62, 0xf4, 0xfc, 0x5e, 0x7a, 0xdd, 0x55, 0x0a, 0x48,
+ ];
+ let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
+ let key = [
+ 0xac, 0x92, 0x41, 0xba, 0xde, 0xd9, 0x02, 0xfe, 0x40, 0x92, 0x20, 0xf6, 0x56, 0x03,
+ 0xfe, 0xae, 0x1b, 0xba, 0x01, 0x97, 0x97, 0x79, 0xbb, 0xa6,
+ ];
+
+ roundtrip::<Aes192>(&key, &mut ciphertext, expected_plaintext);
+ }
+
+ #[test]
+ fn crypt_aes_256_40_byte() {
+ let mut ciphertext = [
+ 0xa9, 0x99, 0xbd, 0xea, 0x82, 0x9b, 0x8f, 0x2f, 0xb7, 0x52, 0x2f, 0x6b, 0xd8, 0xf6,
+ 0xab, 0x0e, 0x24, 0x51, 0x9e, 0x18, 0x0f, 0xc0, 0x8f, 0x54, 0x15, 0x80, 0xae, 0xbc,
+ 0xa0, 0x5c, 0x8a, 0x11, 0x8d, 0x14, 0x7e, 0xc5, 0xb4, 0xae, 0xd3, 0x37,
+ ];
+ let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
+ let key = [
+ 0x64, 0x7c, 0x7a, 0xde, 0xf0, 0xf2, 0x61, 0x49, 0x1c, 0xf1, 0xf1, 0xe3, 0x37, 0xfc,
+ 0xe1, 0x4d, 0x4a, 0x77, 0xd4, 0xeb, 0x9e, 0x3d, 0x75, 0xce, 0x9a, 0x3e, 0x10, 0x50,
+ 0xc2, 0x07, 0x36, 0xb6,
+ ];
+
+ roundtrip::<Aes256>(&key, &mut ciphertext, expected_plaintext);
+ }
+}
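Editor's note, not part of the vendored diff: a hypothetical extra test mirroring the round-trip tests above, showing that the CTR key stream is a plain XOR, so applying a fresh key stream twice restores the original bytes. The all-zero key and the sample bytes are placeholders, not real test vectors; since `aes_ctr` is a private module, this would have to live in the crate's own test module.

```rust
// Hypothetical addition to the `tests` module of aes_ctr.rs.
#[test]
fn double_crypt_restores_input() {
    use super::{Aes128, AesCipher, AesCtrZipKeyStream};

    let key = [0u8; 16]; // placeholder AES-128 key
    let mut data = *b"hello world!";

    let mut stream = AesCtrZipKeyStream::<Aes128>::new(&key);
    stream.crypt_in_place(&mut data); // "decrypt"

    let mut stream = AesCtrZipKeyStream::<Aes128>::new(&key);
    stream.crypt_in_place(&mut data); // the same key stream again restores the input

    assert_eq!(&data, b"hello world!");
}
```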
diff --git a/third_party/rust/zip/src/compression.rs b/third_party/rust/zip/src/compression.rs
new file mode 100644
index 0000000000..abd8b5300f
--- /dev/null
+++ b/third_party/rust/zip/src/compression.rs
@@ -0,0 +1,207 @@
+//! Possible ZIP compression methods.
+
+use std::fmt;
+
+#[allow(deprecated)]
+/// Identifies the storage format used to compress a file within a ZIP archive.
+///
+/// Each file's compression method is stored alongside it, allowing the
+/// contents to be read without context.
+///
+/// When creating ZIP files, you may choose the method to use with
+/// [`crate::write::FileOptions::compression_method`].
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[non_exhaustive]
+pub enum CompressionMethod {
+ /// Store the file as is
+ Stored,
+ /// Compress the file using Deflate
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ Deflated,
+ /// Compress the file using BZIP2
+ #[cfg(feature = "bzip2")]
+ Bzip2,
+ /// Encrypted using AES.
+ ///
+ /// The actual compression method has to be taken from the AES extra data field
+ /// or from `ZipFileData`.
+ #[cfg(feature = "aes-crypto")]
+ Aes,
+ /// Compress the file using ZStandard
+ #[cfg(feature = "zstd")]
+ Zstd,
+ /// Unsupported compression method
+ #[deprecated(since = "0.5.7", note = "use the constants instead")]
+ Unsupported(u16),
+}
+#[allow(deprecated, missing_docs)]
+/// All compression methods defined for the ZIP format
+impl CompressionMethod {
+ pub const STORE: Self = CompressionMethod::Stored;
+ pub const SHRINK: Self = CompressionMethod::Unsupported(1);
+ pub const REDUCE_1: Self = CompressionMethod::Unsupported(2);
+ pub const REDUCE_2: Self = CompressionMethod::Unsupported(3);
+ pub const REDUCE_3: Self = CompressionMethod::Unsupported(4);
+ pub const REDUCE_4: Self = CompressionMethod::Unsupported(5);
+ pub const IMPLODE: Self = CompressionMethod::Unsupported(6);
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ pub const DEFLATE: Self = CompressionMethod::Deflated;
+ #[cfg(not(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ )))]
+ pub const DEFLATE: Self = CompressionMethod::Unsupported(8);
+ pub const DEFLATE64: Self = CompressionMethod::Unsupported(9);
+ pub const PKWARE_IMPLODE: Self = CompressionMethod::Unsupported(10);
+ #[cfg(feature = "bzip2")]
+ pub const BZIP2: Self = CompressionMethod::Bzip2;
+ #[cfg(not(feature = "bzip2"))]
+ pub const BZIP2: Self = CompressionMethod::Unsupported(12);
+ pub const LZMA: Self = CompressionMethod::Unsupported(14);
+ pub const IBM_ZOS_CMPSC: Self = CompressionMethod::Unsupported(16);
+ pub const IBM_TERSE: Self = CompressionMethod::Unsupported(18);
+ pub const ZSTD_DEPRECATED: Self = CompressionMethod::Unsupported(20);
+ #[cfg(feature = "zstd")]
+ pub const ZSTD: Self = CompressionMethod::Zstd;
+ #[cfg(not(feature = "zstd"))]
+ pub const ZSTD: Self = CompressionMethod::Unsupported(93);
+ pub const MP3: Self = CompressionMethod::Unsupported(94);
+ pub const XZ: Self = CompressionMethod::Unsupported(95);
+ pub const JPEG: Self = CompressionMethod::Unsupported(96);
+ pub const WAVPACK: Self = CompressionMethod::Unsupported(97);
+ pub const PPMD: Self = CompressionMethod::Unsupported(98);
+ #[cfg(feature = "aes-crypto")]
+ pub const AES: Self = CompressionMethod::Aes;
+ #[cfg(not(feature = "aes-crypto"))]
+ pub const AES: Self = CompressionMethod::Unsupported(99);
+}
+impl CompressionMethod {
+ /// Converts a u16 to its corresponding CompressionMethod
+ #[deprecated(
+ since = "0.5.7",
+ note = "use a constant to construct a compression method"
+ )]
+ pub fn from_u16(val: u16) -> CompressionMethod {
+ #[allow(deprecated)]
+ match val {
+ 0 => CompressionMethod::Stored,
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ 8 => CompressionMethod::Deflated,
+ #[cfg(feature = "bzip2")]
+ 12 => CompressionMethod::Bzip2,
+ #[cfg(feature = "zstd")]
+ 93 => CompressionMethod::Zstd,
+ #[cfg(feature = "aes-crypto")]
+ 99 => CompressionMethod::Aes,
+
+ v => CompressionMethod::Unsupported(v),
+ }
+ }
+
+ /// Converts a CompressionMethod to a u16
+ #[deprecated(
+ since = "0.5.7",
+ note = "to match on other compression methods, use a constant"
+ )]
+ pub fn to_u16(self) -> u16 {
+ #[allow(deprecated)]
+ match self {
+ CompressionMethod::Stored => 0,
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ CompressionMethod::Deflated => 8,
+ #[cfg(feature = "bzip2")]
+ CompressionMethod::Bzip2 => 12,
+ #[cfg(feature = "aes-crypto")]
+ CompressionMethod::Aes => 99,
+ #[cfg(feature = "zstd")]
+ CompressionMethod::Zstd => 93,
+
+ CompressionMethod::Unsupported(v) => v,
+ }
+ }
+}
+
+impl fmt::Display for CompressionMethod {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Just duplicate what the Debug format looks like, i.e., the enum variant name:
+ write!(f, "{:?}", self)
+ }
+}
+
+/// The compression methods which have been implemented.
+pub const SUPPORTED_COMPRESSION_METHODS: &[CompressionMethod] = &[
+ CompressionMethod::Stored,
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ CompressionMethod::Deflated,
+ #[cfg(feature = "bzip2")]
+ CompressionMethod::Bzip2,
+ #[cfg(feature = "zstd")]
+ CompressionMethod::Zstd,
+];
+
+#[cfg(test)]
+mod test {
+ use super::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS};
+
+ #[test]
+ fn from_eq_to() {
+ for v in 0..(u16::MAX as u32 + 1) {
+ #[allow(deprecated)]
+ let from = CompressionMethod::from_u16(v as u16);
+ #[allow(deprecated)]
+ let to = from.to_u16() as u32;
+ assert_eq!(v, to);
+ }
+ }
+
+ #[test]
+ fn to_eq_from() {
+ fn check_match(method: CompressionMethod) {
+ #[allow(deprecated)]
+ let to = method.to_u16();
+ #[allow(deprecated)]
+ let from = CompressionMethod::from_u16(to);
+ #[allow(deprecated)]
+ let back = from.to_u16();
+ assert_eq!(to, back);
+ }
+
+ for &method in SUPPORTED_COMPRESSION_METHODS {
+ check_match(method);
+ }
+ }
+
+ #[test]
+ fn to_display_fmt() {
+ fn check_match(method: CompressionMethod) {
+ let debug_str = format!("{:?}", method);
+ let display_str = format!("{}", method);
+ assert_eq!(debug_str, display_str);
+ }
+
+ for &method in SUPPORTED_COMPRESSION_METHODS {
+ check_match(method);
+ }
+ }
+}
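Editor's note, not part of the vendored diff: a usage sketch of the associated constants defined above. Because several variants are feature-gated, matching against the constants (which exist under every feature set) keeps calling code compiling; unknown method ids come back as `Unsupported`. The `describe` helper and its labels are hypothetical, and this assumes the crate-root re-export of `CompressionMethod` shown in lib.rs below.

```rust
// Hypothetical caller: map a raw method id from a ZIP header onto a label.
#[allow(deprecated)] // from_u16 is deprecated in favour of the constants
fn describe(raw: u16) -> &'static str {
    match zip::CompressionMethod::from_u16(raw) {
        zip::CompressionMethod::STORE => "stored",
        zip::CompressionMethod::DEFLATE => "deflate",
        zip::CompressionMethod::BZIP2 => "bzip2",
        zip::CompressionMethod::ZSTD => "zstd",
        zip::CompressionMethod::AES => "aes-encrypted entry",
        _ => "other/unsupported",
    }
}
```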
diff --git a/third_party/rust/zip/src/cp437.rs b/third_party/rust/zip/src/cp437.rs
new file mode 100644
index 0000000000..4dba9af12f
--- /dev/null
+++ b/third_party/rust/zip/src/cp437.rs
@@ -0,0 +1,204 @@
+//! Convert a string in IBM codepage 437 to UTF-8
+
+/// Trait to convert IBM codepage 437 to the target type
+pub trait FromCp437 {
+ /// Target type
+ type Target;
+
+ /// Function that does the conversion from cp437.
+ /// Allocations are generally avoided if all data falls within the ASCII range.
+ #[allow(clippy::wrong_self_convention)]
+ fn from_cp437(self) -> Self::Target;
+}
+
+impl<'a> FromCp437 for &'a [u8] {
+ type Target = ::std::borrow::Cow<'a, str>;
+
+ fn from_cp437(self) -> Self::Target {
+ if self.iter().all(|c| *c < 0x80) {
+ ::std::str::from_utf8(self).unwrap().into()
+ } else {
+ self.iter().map(|c| to_char(*c)).collect::<String>().into()
+ }
+ }
+}
+
+impl FromCp437 for Vec<u8> {
+ type Target = String;
+
+ fn from_cp437(self) -> Self::Target {
+ if self.iter().all(|c| *c < 0x80) {
+ String::from_utf8(self).unwrap()
+ } else {
+ self.into_iter().map(to_char).collect()
+ }
+ }
+}
+
+fn to_char(input: u8) -> char {
+ let output = match input {
+ 0x00..=0x7f => input as u32,
+ 0x80 => 0x00c7,
+ 0x81 => 0x00fc,
+ 0x82 => 0x00e9,
+ 0x83 => 0x00e2,
+ 0x84 => 0x00e4,
+ 0x85 => 0x00e0,
+ 0x86 => 0x00e5,
+ 0x87 => 0x00e7,
+ 0x88 => 0x00ea,
+ 0x89 => 0x00eb,
+ 0x8a => 0x00e8,
+ 0x8b => 0x00ef,
+ 0x8c => 0x00ee,
+ 0x8d => 0x00ec,
+ 0x8e => 0x00c4,
+ 0x8f => 0x00c5,
+ 0x90 => 0x00c9,
+ 0x91 => 0x00e6,
+ 0x92 => 0x00c6,
+ 0x93 => 0x00f4,
+ 0x94 => 0x00f6,
+ 0x95 => 0x00f2,
+ 0x96 => 0x00fb,
+ 0x97 => 0x00f9,
+ 0x98 => 0x00ff,
+ 0x99 => 0x00d6,
+ 0x9a => 0x00dc,
+ 0x9b => 0x00a2,
+ 0x9c => 0x00a3,
+ 0x9d => 0x00a5,
+ 0x9e => 0x20a7,
+ 0x9f => 0x0192,
+ 0xa0 => 0x00e1,
+ 0xa1 => 0x00ed,
+ 0xa2 => 0x00f3,
+ 0xa3 => 0x00fa,
+ 0xa4 => 0x00f1,
+ 0xa5 => 0x00d1,
+ 0xa6 => 0x00aa,
+ 0xa7 => 0x00ba,
+ 0xa8 => 0x00bf,
+ 0xa9 => 0x2310,
+ 0xaa => 0x00ac,
+ 0xab => 0x00bd,
+ 0xac => 0x00bc,
+ 0xad => 0x00a1,
+ 0xae => 0x00ab,
+ 0xaf => 0x00bb,
+ 0xb0 => 0x2591,
+ 0xb1 => 0x2592,
+ 0xb2 => 0x2593,
+ 0xb3 => 0x2502,
+ 0xb4 => 0x2524,
+ 0xb5 => 0x2561,
+ 0xb6 => 0x2562,
+ 0xb7 => 0x2556,
+ 0xb8 => 0x2555,
+ 0xb9 => 0x2563,
+ 0xba => 0x2551,
+ 0xbb => 0x2557,
+ 0xbc => 0x255d,
+ 0xbd => 0x255c,
+ 0xbe => 0x255b,
+ 0xbf => 0x2510,
+ 0xc0 => 0x2514,
+ 0xc1 => 0x2534,
+ 0xc2 => 0x252c,
+ 0xc3 => 0x251c,
+ 0xc4 => 0x2500,
+ 0xc5 => 0x253c,
+ 0xc6 => 0x255e,
+ 0xc7 => 0x255f,
+ 0xc8 => 0x255a,
+ 0xc9 => 0x2554,
+ 0xca => 0x2569,
+ 0xcb => 0x2566,
+ 0xcc => 0x2560,
+ 0xcd => 0x2550,
+ 0xce => 0x256c,
+ 0xcf => 0x2567,
+ 0xd0 => 0x2568,
+ 0xd1 => 0x2564,
+ 0xd2 => 0x2565,
+ 0xd3 => 0x2559,
+ 0xd4 => 0x2558,
+ 0xd5 => 0x2552,
+ 0xd6 => 0x2553,
+ 0xd7 => 0x256b,
+ 0xd8 => 0x256a,
+ 0xd9 => 0x2518,
+ 0xda => 0x250c,
+ 0xdb => 0x2588,
+ 0xdc => 0x2584,
+ 0xdd => 0x258c,
+ 0xde => 0x2590,
+ 0xdf => 0x2580,
+ 0xe0 => 0x03b1,
+ 0xe1 => 0x00df,
+ 0xe2 => 0x0393,
+ 0xe3 => 0x03c0,
+ 0xe4 => 0x03a3,
+ 0xe5 => 0x03c3,
+ 0xe6 => 0x00b5,
+ 0xe7 => 0x03c4,
+ 0xe8 => 0x03a6,
+ 0xe9 => 0x0398,
+ 0xea => 0x03a9,
+ 0xeb => 0x03b4,
+ 0xec => 0x221e,
+ 0xed => 0x03c6,
+ 0xee => 0x03b5,
+ 0xef => 0x2229,
+ 0xf0 => 0x2261,
+ 0xf1 => 0x00b1,
+ 0xf2 => 0x2265,
+ 0xf3 => 0x2264,
+ 0xf4 => 0x2320,
+ 0xf5 => 0x2321,
+ 0xf6 => 0x00f7,
+ 0xf7 => 0x2248,
+ 0xf8 => 0x00b0,
+ 0xf9 => 0x2219,
+ 0xfa => 0x00b7,
+ 0xfb => 0x221a,
+ 0xfc => 0x207f,
+ 0xfd => 0x00b2,
+ 0xfe => 0x25a0,
+ 0xff => 0x00a0,
+ };
+ ::std::char::from_u32(output).unwrap()
+}
+
+#[cfg(test)]
+mod test {
+ #[test]
+ fn to_char_valid() {
+ for i in 0x00_u32..0x100 {
+ super::to_char(i as u8);
+ }
+ }
+
+ #[test]
+ fn ascii() {
+ for i in 0x00..0x80 {
+ assert_eq!(super::to_char(i), i as char);
+ }
+ }
+
+ #[test]
+ fn example_slice() {
+ use super::FromCp437;
+ let data = b"Cura\x87ao";
+ assert!(::std::str::from_utf8(data).is_err());
+ assert_eq!(data.from_cp437(), "Curaçao");
+ }
+
+ #[test]
+ fn example_vec() {
+ use super::FromCp437;
+ let data = vec![0xCC, 0xCD, 0xCD, 0xB9];
+ assert!(String::from_utf8(data.clone()).is_err());
+ assert_eq!(&data.from_cp437(), "╠══╣");
+ }
+}
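Editor's note, not part of the vendored diff: a hypothetical extra test illustrating the allocation behaviour the trait docs mention: pure-ASCII slices are borrowed, while any byte at or above 0x80 produces an owned `String`. Since `cp437` is a private module, such a test would sit in the `test` module shown above.

```rust
// Hypothetical addition to the `test` module of cp437.rs.
#[test]
fn cow_behaviour() {
    use super::FromCp437;
    use std::borrow::Cow;

    let ascii: &[u8] = b"plain.txt";
    assert!(matches!(ascii.from_cp437(), Cow::Borrowed(_)));

    let accented: &[u8] = b"Cura\x87ao"; // 0x87 maps to 'ç' in cp437
    assert!(matches!(accented.from_cp437(), Cow::Owned(_)));
}
```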
diff --git a/third_party/rust/zip/src/crc32.rs b/third_party/rust/zip/src/crc32.rs
new file mode 100644
index 0000000000..ebace898dc
--- /dev/null
+++ b/third_party/rust/zip/src/crc32.rs
@@ -0,0 +1,100 @@
+//! Helper module to compute a CRC32 checksum
+
+use std::io;
+use std::io::prelude::*;
+
+use crc32fast::Hasher;
+
+/// Reader that validates the CRC32 when it reaches the EOF.
+pub struct Crc32Reader<R> {
+ inner: R,
+ hasher: Hasher,
+ check: u32,
+ /// Signals whether `inner` stores AES-encrypted data.
+ /// AE-2 encrypted data doesn't use the CRC and sets the value to 0.
+ ae2_encrypted: bool,
+}
+
+impl<R> Crc32Reader<R> {
+ /// Get a new Crc32Reader which checks the inner reader against the given checksum.
+ /// The check is disabled if `ae2_encrypted == true`.
+ pub(crate) fn new(inner: R, checksum: u32, ae2_encrypted: bool) -> Crc32Reader<R> {
+ Crc32Reader {
+ inner,
+ hasher: Hasher::new(),
+ check: checksum,
+ ae2_encrypted,
+ }
+ }
+
+ fn check_matches(&self) -> bool {
+ self.check == self.hasher.clone().finalize()
+ }
+
+ pub fn into_inner(self) -> R {
+ self.inner
+ }
+}
+
+impl<R: Read> Read for Crc32Reader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let invalid_check = !buf.is_empty() && !self.check_matches() && !self.ae2_encrypted;
+
+ let count = match self.inner.read(buf) {
+ Ok(0) if invalid_check => {
+ return Err(io::Error::new(io::ErrorKind::Other, "Invalid checksum"))
+ }
+ Ok(n) => n,
+ Err(e) => return Err(e),
+ };
+ self.hasher.update(&buf[0..count]);
+ Ok(count)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use std::io::Read;
+
+ #[test]
+ fn test_empty_reader() {
+ let data: &[u8] = b"";
+ let mut buf = [0; 1];
+
+ let mut reader = Crc32Reader::new(data, 0, false);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+
+ let mut reader = Crc32Reader::new(data, 1, false);
+ assert!(reader
+ .read(&mut buf)
+ .unwrap_err()
+ .to_string()
+ .contains("Invalid checksum"));
+ }
+
+ #[test]
+ fn test_byte_by_byte() {
+ let data: &[u8] = b"1234";
+ let mut buf = [0; 1];
+
+ let mut reader = Crc32Reader::new(data, 0x9be3e0a3, false);
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ // Can keep reading 0 bytes after the end
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ }
+
+ #[test]
+ fn test_zero_read() {
+ let data: &[u8] = b"1234";
+ let mut buf = [0; 5];
+
+ let mut reader = Crc32Reader::new(data, 0x9be3e0a3, false);
+ assert_eq!(reader.read(&mut buf[..0]).unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).unwrap(), 4);
+ }
+}
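Editor's note, not part of the vendored diff: a hypothetical extra test for the behaviour of the `Read` implementation above. The checksum is only compared once the inner reader reports EOF, so a wrong checksum surfaces on the final read rather than the first one. The bogus checksum value is made up for illustration.

```rust
// Hypothetical addition to the `test` module of crc32.rs.
#[test]
fn checksum_checked_at_eof() {
    use super::Crc32Reader;
    use std::io::Read;

    let data: &[u8] = b"1234";
    // Deliberately wrong checksum; the real CRC32 of "1234" is 0x9be3e0a3.
    let mut reader = Crc32Reader::new(data, 0xdead_beef, false);

    let mut buf = [0u8; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4); // data still comes through
    assert!(reader.read(&mut buf).is_err()); // the EOF read reports "Invalid checksum"
}
```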
diff --git a/third_party/rust/zip/src/lib.rs b/third_party/rust/zip/src/lib.rs
new file mode 100644
index 0000000000..0fee99cc8d
--- /dev/null
+++ b/third_party/rust/zip/src/lib.rs
@@ -0,0 +1,44 @@
+//! A library for reading and writing ZIP archives.
+//! ZIP is a format designed for cross-platform file "archiving".
+//! That is, storing a collection of files in a single data stream
+//! to make them easier to share between computers.
+//! Additionally, ZIP is able to compress and encrypt files in its
+//! archives.
+//!
+//! The current implementation is based on [PKWARE's APPNOTE.TXT v6.3.9](https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT)
+//!
+//! ---
+//!
+//! [`zip`](`crate`) supports the most commonly used ZIP archive variants.
+//! However, some special-case ZIP archives remain
+//! difficult to read or write.
+//!
+//! This is a list of supported features:
+//!
+//! | | Reading | Writing |
+//! | ------- | ------ | ------- |
+//! | Deflate | ✅ [->](`crate::ZipArchive::by_name`) | ✅ [->](`crate::write::FileOptions::compression_method`) |
+//!
+//!
+//!
+
+#![warn(missing_docs)]
+
+pub use crate::compression::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS};
+pub use crate::read::ZipArchive;
+pub use crate::types::DateTime;
+pub use crate::write::ZipWriter;
+
+#[cfg(feature = "aes-crypto")]
+mod aes;
+#[cfg(feature = "aes-crypto")]
+mod aes_ctr;
+mod compression;
+mod cp437;
+mod crc32;
+pub mod read;
+pub mod result;
+mod spec;
+mod types;
+pub mod write;
+mod zipcrypto;
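Editor's note, not part of the vendored diff: an end-to-end sketch of the public API re-exported above, writing one stored entry into an in-memory buffer and reading it back. The file name and contents are arbitrary, and this assumes the zip 0.6-era API as vendored in this commit.

```rust
// Hypothetical round-trip through the crate's public API.
use std::io::{Cursor, Read, Write};

fn roundtrip() -> zip::result::ZipResult<()> {
    let mut writer = zip::ZipWriter::new(Cursor::new(Vec::new()));
    let options = zip::write::FileOptions::default()
        .compression_method(zip::CompressionMethod::Stored);
    writer.start_file("hello.txt", options)?;
    writer.write_all(b"Hello, world!")?;
    let buffer = writer.finish()?; // returns the inner Cursor

    let mut archive = zip::ZipArchive::new(buffer)?;
    let mut entry = archive.by_name("hello.txt")?;
    let mut contents = String::new();
    entry.read_to_string(&mut contents)?;
    assert_eq!(contents, "Hello, world!");
    Ok(())
}
```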
diff --git a/third_party/rust/zip/src/read.rs b/third_party/rust/zip/src/read.rs
new file mode 100644
index 0000000000..728ddf579e
--- /dev/null
+++ b/third_party/rust/zip/src/read.rs
@@ -0,0 +1,1310 @@
+//! Types for reading ZIP archives
+
+#[cfg(feature = "aes-crypto")]
+use crate::aes::{AesReader, AesReaderValid};
+use crate::compression::CompressionMethod;
+use crate::cp437::FromCp437;
+use crate::crc32::Crc32Reader;
+use crate::result::{InvalidPassword, ZipError, ZipResult};
+use crate::spec;
+use crate::types::{AesMode, AesVendorVersion, AtomicU64, DateTime, System, ZipFileData};
+use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator};
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::borrow::Cow;
+use std::collections::HashMap;
+use std::io::{self, prelude::*};
+use std::path::{Component, Path};
+use std::sync::Arc;
+
+#[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+))]
+use flate2::read::DeflateDecoder;
+
+#[cfg(feature = "bzip2")]
+use bzip2::read::BzDecoder;
+
+#[cfg(feature = "zstd")]
+use zstd::stream::read::Decoder as ZstdDecoder;
+
+mod ffi {
+ pub const S_IFDIR: u32 = 0o0040000;
+ pub const S_IFREG: u32 = 0o0100000;
+}
+
+// Put the struct declaration in a private module to convince rustdoc to display ZipArchive nicely
+pub(crate) mod zip_archive {
+ /// Extract immutable data from `ZipArchive` to make it cheap to clone
+ #[derive(Debug)]
+ pub(crate) struct Shared {
+ pub(super) files: Vec<super::ZipFileData>,
+ pub(super) names_map: super::HashMap<String, usize>,
+ pub(super) offset: u64,
+ pub(super) comment: Vec<u8>,
+ }
+
+ /// ZIP archive reader
+ ///
+ /// At the moment, this type is cheap to clone if the reader it uses is
+ /// cheap to clone. However, this is not guaranteed by this crate and it may
+ /// change in the future.
+ ///
+ /// ```no_run
+ /// use std::io::prelude::*;
+ /// fn list_zip_contents(reader: impl Read + Seek) -> zip::result::ZipResult<()> {
+ /// let mut zip = zip::ZipArchive::new(reader)?;
+ ///
+ /// for i in 0..zip.len() {
+ /// let mut file = zip.by_index(i)?;
+ /// println!("Filename: {}", file.name());
+ /// std::io::copy(&mut file, &mut std::io::stdout())?;
+ /// }
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ #[derive(Clone, Debug)]
+ pub struct ZipArchive<R> {
+ pub(super) reader: R,
+ pub(super) shared: super::Arc<Shared>,
+ }
+}
+
+pub use zip_archive::ZipArchive;
+#[allow(clippy::large_enum_variant)]
+enum CryptoReader<'a> {
+ Plaintext(io::Take<&'a mut dyn Read>),
+ ZipCrypto(ZipCryptoReaderValid<io::Take<&'a mut dyn Read>>),
+ #[cfg(feature = "aes-crypto")]
+ Aes {
+ reader: AesReaderValid<io::Take<&'a mut dyn Read>>,
+ vendor_version: AesVendorVersion,
+ },
+}
+
+impl<'a> Read for CryptoReader<'a> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ match self {
+ CryptoReader::Plaintext(r) => r.read(buf),
+ CryptoReader::ZipCrypto(r) => r.read(buf),
+ #[cfg(feature = "aes-crypto")]
+ CryptoReader::Aes { reader: r, .. } => r.read(buf),
+ }
+ }
+}
+
+impl<'a> CryptoReader<'a> {
+ /// Consumes this decoder, returning the underlying reader.
+ pub fn into_inner(self) -> io::Take<&'a mut dyn Read> {
+ match self {
+ CryptoReader::Plaintext(r) => r,
+ CryptoReader::ZipCrypto(r) => r.into_inner(),
+ #[cfg(feature = "aes-crypto")]
+ CryptoReader::Aes { reader: r, .. } => r.into_inner(),
+ }
+ }
+
+ /// Returns `true` if the data is encrypted using AE2.
+ pub fn is_ae2_encrypted(&self) -> bool {
+ #[cfg(feature = "aes-crypto")]
+ return matches!(
+ self,
+ CryptoReader::Aes {
+ vendor_version: AesVendorVersion::Ae2,
+ ..
+ }
+ );
+ #[cfg(not(feature = "aes-crypto"))]
+ false
+ }
+}
+
+enum ZipFileReader<'a> {
+ NoReader,
+ Raw(io::Take<&'a mut dyn io::Read>),
+ Stored(Crc32Reader<CryptoReader<'a>>),
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ Deflated(Crc32Reader<flate2::read::DeflateDecoder<CryptoReader<'a>>>),
+ #[cfg(feature = "bzip2")]
+ Bzip2(Crc32Reader<BzDecoder<CryptoReader<'a>>>),
+ #[cfg(feature = "zstd")]
+ Zstd(Crc32Reader<ZstdDecoder<'a, io::BufReader<CryptoReader<'a>>>>),
+}
+
+impl<'a> Read for ZipFileReader<'a> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ match self {
+ ZipFileReader::NoReader => panic!("ZipFileReader was in an invalid state"),
+ ZipFileReader::Raw(r) => r.read(buf),
+ ZipFileReader::Stored(r) => r.read(buf),
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ ZipFileReader::Deflated(r) => r.read(buf),
+ #[cfg(feature = "bzip2")]
+ ZipFileReader::Bzip2(r) => r.read(buf),
+ #[cfg(feature = "zstd")]
+ ZipFileReader::Zstd(r) => r.read(buf),
+ }
+ }
+}
+
+impl<'a> ZipFileReader<'a> {
+ /// Consumes this decoder, returning the underlying reader.
+ pub fn into_inner(self) -> io::Take<&'a mut dyn Read> {
+ match self {
+ ZipFileReader::NoReader => panic!("ZipFileReader was in an invalid state"),
+ ZipFileReader::Raw(r) => r,
+ ZipFileReader::Stored(r) => r.into_inner().into_inner(),
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ ZipFileReader::Deflated(r) => r.into_inner().into_inner().into_inner(),
+ #[cfg(feature = "bzip2")]
+ ZipFileReader::Bzip2(r) => r.into_inner().into_inner().into_inner(),
+ #[cfg(feature = "zstd")]
+ ZipFileReader::Zstd(r) => r.into_inner().finish().into_inner().into_inner(),
+ }
+ }
+}
+
+/// A struct for reading a zip file
+pub struct ZipFile<'a> {
+ data: Cow<'a, ZipFileData>,
+ crypto_reader: Option<CryptoReader<'a>>,
+ reader: ZipFileReader<'a>,
+}
+
+fn find_content<'a>(
+ data: &ZipFileData,
+ reader: &'a mut (impl Read + Seek),
+) -> ZipResult<io::Take<&'a mut dyn Read>> {
+ // Parse local header
+ reader.seek(io::SeekFrom::Start(data.header_start))?;
+ let signature = reader.read_u32::<LittleEndian>()?;
+ if signature != spec::LOCAL_FILE_HEADER_SIGNATURE {
+ return Err(ZipError::InvalidArchive("Invalid local file header"));
+ }
+
+ reader.seek(io::SeekFrom::Current(22))?;
+ let file_name_length = reader.read_u16::<LittleEndian>()? as u64;
+ let extra_field_length = reader.read_u16::<LittleEndian>()? as u64;
+ let magic_and_header = 4 + 22 + 2 + 2;
+ let data_start = data.header_start + magic_and_header + file_name_length + extra_field_length;
+ data.data_start.store(data_start);
+
+ reader.seek(io::SeekFrom::Start(data_start))?;
+ Ok((reader as &mut dyn Read).take(data.compressed_size))
+}
+
+#[allow(clippy::too_many_arguments)]
+fn make_crypto_reader<'a>(
+ compression_method: crate::compression::CompressionMethod,
+ crc32: u32,
+ last_modified_time: DateTime,
+ using_data_descriptor: bool,
+ reader: io::Take<&'a mut dyn io::Read>,
+ password: Option<&[u8]>,
+ aes_info: Option<(AesMode, AesVendorVersion)>,
+ #[cfg(feature = "aes-crypto")] compressed_size: u64,
+) -> ZipResult<Result<CryptoReader<'a>, InvalidPassword>> {
+ #[allow(deprecated)]
+ {
+ if let CompressionMethod::Unsupported(_) = compression_method {
+ return unsupported_zip_error("Compression method not supported");
+ }
+ }
+
+ let reader = match (password, aes_info) {
+ #[cfg(not(feature = "aes-crypto"))]
+ (Some(_), Some(_)) => {
+ return Err(ZipError::UnsupportedArchive(
+ "AES encrypted files cannot be decrypted without the aes-crypto feature.",
+ ))
+ }
+ #[cfg(feature = "aes-crypto")]
+ (Some(password), Some((aes_mode, vendor_version))) => {
+ match AesReader::new(reader, aes_mode, compressed_size).validate(password)? {
+ None => return Ok(Err(InvalidPassword)),
+ Some(r) => CryptoReader::Aes {
+ reader: r,
+ vendor_version,
+ },
+ }
+ }
+ (Some(password), None) => {
+ let validator = if using_data_descriptor {
+ ZipCryptoValidator::InfoZipMsdosTime(last_modified_time.timepart())
+ } else {
+ ZipCryptoValidator::PkzipCrc32(crc32)
+ };
+ match ZipCryptoReader::new(reader, password).validate(validator)? {
+ None => return Ok(Err(InvalidPassword)),
+ Some(r) => CryptoReader::ZipCrypto(r),
+ }
+ }
+ (None, Some(_)) => return Ok(Err(InvalidPassword)),
+ (None, None) => CryptoReader::Plaintext(reader),
+ };
+ Ok(Ok(reader))
+}
+
+fn make_reader(
+ compression_method: CompressionMethod,
+ crc32: u32,
+ reader: CryptoReader,
+) -> ZipFileReader {
+ let ae2_encrypted = reader.is_ae2_encrypted();
+
+ match compression_method {
+ CompressionMethod::Stored => {
+ ZipFileReader::Stored(Crc32Reader::new(reader, crc32, ae2_encrypted))
+ }
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ CompressionMethod::Deflated => {
+ let deflate_reader = DeflateDecoder::new(reader);
+ ZipFileReader::Deflated(Crc32Reader::new(deflate_reader, crc32, ae2_encrypted))
+ }
+ #[cfg(feature = "bzip2")]
+ CompressionMethod::Bzip2 => {
+ let bzip2_reader = BzDecoder::new(reader);
+ ZipFileReader::Bzip2(Crc32Reader::new(bzip2_reader, crc32, ae2_encrypted))
+ }
+ #[cfg(feature = "zstd")]
+ CompressionMethod::Zstd => {
+ let zstd_reader = ZstdDecoder::new(reader).unwrap();
+ ZipFileReader::Zstd(Crc32Reader::new(zstd_reader, crc32, ae2_encrypted))
+ }
+ _ => panic!("Compression method not supported"),
+ }
+}
+
+impl<R: Read + io::Seek> ZipArchive<R> {
+ /// Get the directory start offset and number of files. This is done in a
+ /// separate function to ease the control flow design.
+ pub(crate) fn get_directory_counts(
+ reader: &mut R,
+ footer: &spec::CentralDirectoryEnd,
+ cde_start_pos: u64,
+ ) -> ZipResult<(u64, u64, usize)> {
+ // See if there's a ZIP64 footer. The ZIP64 locator if present will
+ // have its signature 20 bytes in front of the standard footer. The
+ // standard footer, in turn, is 22+N bytes large, where N is the
+ // comment length. Therefore:
+ let zip64locator = if reader
+ .seek(io::SeekFrom::End(
+ -(20 + 22 + footer.zip_file_comment.len() as i64),
+ ))
+ .is_ok()
+ {
+ match spec::Zip64CentralDirectoryEndLocator::parse(reader) {
+ Ok(loc) => Some(loc),
+ Err(ZipError::InvalidArchive(_)) => {
+ // No ZIP64 header; that's actually fine. We're done here.
+ None
+ }
+ Err(e) => {
+ // Yikes, a real problem
+ return Err(e);
+ }
+ }
+ } else {
+ // Empty Zip files will have nothing else so this error might be fine. If
+ // not, we'll find out soon.
+ None
+ };
+
+ match zip64locator {
+ None => {
+ // Some zip files have data prepended to them, resulting in the
+ // offsets all being too small. Get the amount of error by comparing
+ // the actual file position we found the CDE at with the offset
+ // recorded in the CDE.
+ let archive_offset = cde_start_pos
+ .checked_sub(footer.central_directory_size as u64)
+ .and_then(|x| x.checked_sub(footer.central_directory_offset as u64))
+ .ok_or(ZipError::InvalidArchive(
+ "Invalid central directory size or offset",
+ ))?;
+
+ let directory_start = footer.central_directory_offset as u64 + archive_offset;
+ let number_of_files = footer.number_of_files_on_this_disk as usize;
+ Ok((archive_offset, directory_start, number_of_files))
+ }
+ Some(locator64) => {
+ // If we got here, this is indeed a ZIP64 file.
+
+ if footer.disk_number as u32 != locator64.disk_with_central_directory {
+ return unsupported_zip_error(
+ "Support for multi-disk files is not implemented",
+ );
+ }
+
+ // We need to reassess `archive_offset`. We know where the ZIP64
+ // central-directory-end structure *should* be, but unfortunately we
+ // don't know how to precisely relate that location to our current
+ // actual offset in the file, since there may be junk at its
+ // beginning. Therefore we need to perform another search, as in
+ // read::CentralDirectoryEnd::find_and_parse, except now we search
+ // forward.
+
+ let search_upper_bound = cde_start_pos
+ .checked_sub(60) // minimum size of Zip64CentralDirectoryEnd + Zip64CentralDirectoryEndLocator
+ .ok_or(ZipError::InvalidArchive(
+ "File cannot contain ZIP64 central directory end",
+ ))?;
+ let (footer, archive_offset) = spec::Zip64CentralDirectoryEnd::find_and_parse(
+ reader,
+ locator64.end_of_central_directory_offset,
+ search_upper_bound,
+ )?;
+
+ if footer.disk_number != footer.disk_with_central_directory {
+ return unsupported_zip_error(
+ "Support for multi-disk files is not implemented",
+ );
+ }
+
+ let directory_start = footer
+ .central_directory_offset
+ .checked_add(archive_offset)
+ .ok_or({
+ ZipError::InvalidArchive("Invalid central directory size or offset")
+ })?;
+
+ Ok((
+ archive_offset,
+ directory_start,
+ footer.number_of_files as usize,
+ ))
+ }
+ }
+ }
+
+ /// Read a ZIP archive, collecting the files it contains
+ ///
+ /// This uses the central directory record of the ZIP file, and ignores local file headers
+ pub fn new(mut reader: R) -> ZipResult<ZipArchive<R>> {
+ let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut reader)?;
+
+ if footer.disk_number != footer.disk_with_central_directory {
+ return unsupported_zip_error("Support for multi-disk files is not implemented");
+ }
+
+ let (archive_offset, directory_start, number_of_files) =
+ Self::get_directory_counts(&mut reader, &footer, cde_start_pos)?;
+
+ // If the parsed number of files is greater than the offset then
+ // something fishy is going on and we shouldn't trust number_of_files.
+ let file_capacity = if number_of_files > cde_start_pos as usize {
+ 0
+ } else {
+ number_of_files
+ };
+
+ let mut files = Vec::with_capacity(file_capacity);
+ let mut names_map = HashMap::with_capacity(file_capacity);
+
+ if reader.seek(io::SeekFrom::Start(directory_start)).is_err() {
+ return Err(ZipError::InvalidArchive(
+ "Could not seek to start of central directory",
+ ));
+ }
+
+ for _ in 0..number_of_files {
+ let file = central_header_to_zip_file(&mut reader, archive_offset)?;
+ names_map.insert(file.file_name.clone(), files.len());
+ files.push(file);
+ }
+
+ let shared = Arc::new(zip_archive::Shared {
+ files,
+ names_map,
+ offset: archive_offset,
+ comment: footer.zip_file_comment,
+ });
+
+ Ok(ZipArchive { reader, shared })
+ }
+ /// Extract a Zip archive into a directory, overwriting files if they
+ /// already exist. Paths are sanitized with [`ZipFile::enclosed_name`].
+ ///
+ /// Extraction is not atomic; if an error is encountered, some of the files
+ /// may be left on disk.
+ pub fn extract<P: AsRef<Path>>(&mut self, directory: P) -> ZipResult<()> {
+ use std::fs;
+
+ for i in 0..self.len() {
+ let mut file = self.by_index(i)?;
+ let filepath = file
+ .enclosed_name()
+ .ok_or(ZipError::InvalidArchive("Invalid file path"))?;
+
+ let outpath = directory.as_ref().join(filepath);
+
+ if file.name().ends_with('/') {
+ fs::create_dir_all(&outpath)?;
+ } else {
+ if let Some(p) = outpath.parent() {
+ if !p.exists() {
+ fs::create_dir_all(&p)?;
+ }
+ }
+ let mut outfile = fs::File::create(&outpath)?;
+ io::copy(&mut file, &mut outfile)?;
+ }
+ // Get and Set permissions
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+ if let Some(mode) = file.unix_mode() {
+ fs::set_permissions(&outpath, fs::Permissions::from_mode(mode))?;
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Number of files contained in this zip.
+ pub fn len(&self) -> usize {
+ self.shared.files.len()
+ }
+
+ /// Whether this zip archive contains no files
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Get the offset from the beginning of the underlying reader that this zip begins at, in bytes.
+ ///
+ /// Normally this value is zero, but if the zip has arbitrary data prepended to it, then this value will be the size
+ /// of that prepended data.
+ pub fn offset(&self) -> u64 {
+ self.shared.offset
+ }
+
+ /// Get the comment of the zip archive.
+ pub fn comment(&self) -> &[u8] {
+ &self.shared.comment
+ }
+
+ /// Returns an iterator over all the file and directory names in this archive.
+ pub fn file_names(&self) -> impl Iterator<Item = &str> {
+ self.shared.names_map.keys().map(|s| s.as_str())
+ }
+
+ /// Search for a file entry by name, decrypt with given password
+ ///
+ /// # Warning
+ ///
+ /// The implementation of the cryptographic algorithms has not
+ /// gone through a correctness review, and you should assume it is insecure:
+ /// passwords used with this API may be compromised.
+ ///
+ /// This function sometimes accepts a wrong password. This is because the ZIP spec only allows us
+ /// to check for a 1/256 chance that the password is correct.
+ /// There are many passwords out there that will also pass the validity checks
+ /// we are able to perform. This is a weakness of the ZipCrypto algorithm,
+ /// due to its fairly primitive approach to cryptography.
+ pub fn by_name_decrypt<'a>(
+ &'a mut self,
+ name: &str,
+ password: &[u8],
+ ) -> ZipResult<Result<ZipFile<'a>, InvalidPassword>> {
+ self.by_name_with_optional_password(name, Some(password))
+ }
+
+ /// Search for a file entry by name
+ pub fn by_name<'a>(&'a mut self, name: &str) -> ZipResult<ZipFile<'a>> {
+ Ok(self.by_name_with_optional_password(name, None)?.unwrap())
+ }
+
+ fn by_name_with_optional_password<'a>(
+ &'a mut self,
+ name: &str,
+ password: Option<&[u8]>,
+ ) -> ZipResult<Result<ZipFile<'a>, InvalidPassword>> {
+ let index = match self.shared.names_map.get(name) {
+ Some(index) => *index,
+ None => {
+ return Err(ZipError::FileNotFound);
+ }
+ };
+ self.by_index_with_optional_password(index, password)
+ }
+
+ /// Get a contained file by index, decrypt with given password
+ ///
+ /// # Warning
+ ///
+ /// The implementation of the cryptographic algorithms has not
+ /// gone through a correctness review, and you should assume it is insecure:
+ /// passwords used with this API may be compromised.
+ ///
+ /// This function sometimes accepts a wrong password. This is because the ZIP spec only allows us
+ /// to check for a 1/256 chance that the password is correct.
+ /// There are many passwords out there that will also pass the validity checks
+ /// we are able to perform. This is a weakness of the ZipCrypto algorithm,
+ /// due to its fairly primitive approach to cryptography.
+ pub fn by_index_decrypt<'a>(
+ &'a mut self,
+ file_number: usize,
+ password: &[u8],
+ ) -> ZipResult<Result<ZipFile<'a>, InvalidPassword>> {
+ self.by_index_with_optional_password(file_number, Some(password))
+ }
+
+ /// Get a contained file by index
+ pub fn by_index(&mut self, file_number: usize) -> ZipResult<ZipFile<'_>> {
+ Ok(self
+ .by_index_with_optional_password(file_number, None)?
+ .unwrap())
+ }
+
+ /// Get a contained file by index without decompressing it
+ pub fn by_index_raw(&mut self, file_number: usize) -> ZipResult<ZipFile<'_>> {
+ let reader = &mut self.reader;
+ self.shared
+ .files
+ .get(file_number)
+ .ok_or(ZipError::FileNotFound)
+ .and_then(move |data| {
+ Ok(ZipFile {
+ crypto_reader: None,
+ reader: ZipFileReader::Raw(find_content(data, reader)?),
+ data: Cow::Borrowed(data),
+ })
+ })
+ }
+
+ fn by_index_with_optional_password<'a>(
+ &'a mut self,
+ file_number: usize,
+ mut password: Option<&[u8]>,
+ ) -> ZipResult<Result<ZipFile<'a>, InvalidPassword>> {
+ let data = self
+ .shared
+ .files
+ .get(file_number)
+ .ok_or(ZipError::FileNotFound)?;
+
+ match (password, data.encrypted) {
+ (None, true) => return Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)),
+ (Some(_), false) => password = None, //Password supplied, but none needed! Discard.
+ _ => {}
+ }
+ let limit_reader = find_content(data, &mut self.reader)?;
+
+ match make_crypto_reader(
+ data.compression_method,
+ data.crc32,
+ data.last_modified_time,
+ data.using_data_descriptor,
+ limit_reader,
+ password,
+ data.aes_mode,
+ #[cfg(feature = "aes-crypto")]
+ data.compressed_size,
+ ) {
+ Ok(Ok(crypto_reader)) => Ok(Ok(ZipFile {
+ crypto_reader: Some(crypto_reader),
+ reader: ZipFileReader::NoReader,
+ data: Cow::Borrowed(data),
+ })),
+ Err(e) => Err(e),
+ Ok(Err(e)) => Ok(Err(e)),
+ }
+ }
+
+ /// Unwrap and return the inner reader object
+ ///
+ /// The position of the reader is undefined.
+ pub fn into_inner(self) -> R {
+ self.reader
+ }
+}
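Editor's note, not part of the vendored diff: a usage sketch for the nested result shape of `by_name_decrypt` above. The outer `ZipResult` carries I/O and archive-format errors, while the inner `Result` reports a rejected password. The entry name, password, and the error chosen for the wrong-password branch are placeholders.

```rust
// Hypothetical caller: open an archive, try to decrypt one entry by name, and
// distinguish a rejected password from other failures.
use std::io::Read;
use std::path::Path;

fn read_encrypted_entry(path: &Path) -> zip::result::ZipResult<Vec<u8>> {
    let file = std::fs::File::open(path)?;
    let mut archive = zip::ZipArchive::new(file)?;

    // Outer `?`: I/O or archive-format error. Inner match: password rejected.
    let mut entry = match archive.by_name_decrypt("secret.txt", b"password")? {
        Ok(entry) => entry,
        Err(zip::result::InvalidPassword) => {
            // Error choice is arbitrary for this sketch.
            return Err(zip::result::ZipError::UnsupportedArchive("wrong password"));
        }
    };

    let mut contents = Vec::new();
    entry.read_to_end(&mut contents)?;
    Ok(contents)
}
```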
+
+fn unsupported_zip_error<T>(detail: &'static str) -> ZipResult<T> {
+ Err(ZipError::UnsupportedArchive(detail))
+}
+
+/// Parse a central directory entry to collect the information for the file.
+pub(crate) fn central_header_to_zip_file<R: Read + io::Seek>(
+ reader: &mut R,
+ archive_offset: u64,
+) -> ZipResult<ZipFileData> {
+ let central_header_start = reader.stream_position()?;
+ // Parse central header
+ let signature = reader.read_u32::<LittleEndian>()?;
+ if signature != spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE {
+ return Err(ZipError::InvalidArchive("Invalid Central Directory header"));
+ }
+
+ let version_made_by = reader.read_u16::<LittleEndian>()?;
+ let _version_to_extract = reader.read_u16::<LittleEndian>()?;
+ let flags = reader.read_u16::<LittleEndian>()?;
+ let encrypted = flags & 1 == 1;
+ let is_utf8 = flags & (1 << 11) != 0;
+ let using_data_descriptor = flags & (1 << 3) != 0;
+ let compression_method = reader.read_u16::<LittleEndian>()?;
+ let last_mod_time = reader.read_u16::<LittleEndian>()?;
+ let last_mod_date = reader.read_u16::<LittleEndian>()?;
+ let crc32 = reader.read_u32::<LittleEndian>()?;
+ let compressed_size = reader.read_u32::<LittleEndian>()?;
+ let uncompressed_size = reader.read_u32::<LittleEndian>()?;
+ let file_name_length = reader.read_u16::<LittleEndian>()? as usize;
+ let extra_field_length = reader.read_u16::<LittleEndian>()? as usize;
+ let file_comment_length = reader.read_u16::<LittleEndian>()? as usize;
+ let _disk_number = reader.read_u16::<LittleEndian>()?;
+ let _internal_file_attributes = reader.read_u16::<LittleEndian>()?;
+ let external_file_attributes = reader.read_u32::<LittleEndian>()?;
+ let offset = reader.read_u32::<LittleEndian>()? as u64;
+ let mut file_name_raw = vec![0; file_name_length];
+ reader.read_exact(&mut file_name_raw)?;
+ let mut extra_field = vec![0; extra_field_length];
+ reader.read_exact(&mut extra_field)?;
+ let mut file_comment_raw = vec![0; file_comment_length];
+ reader.read_exact(&mut file_comment_raw)?;
+
+ let file_name = match is_utf8 {
+ true => String::from_utf8_lossy(&*file_name_raw).into_owned(),
+ false => file_name_raw.clone().from_cp437(),
+ };
+ let file_comment = match is_utf8 {
+ true => String::from_utf8_lossy(&*file_comment_raw).into_owned(),
+ false => file_comment_raw.from_cp437(),
+ };
+
+ // Construct the result
+ let mut result = ZipFileData {
+ system: System::from_u8((version_made_by >> 8) as u8),
+ version_made_by: version_made_by as u8,
+ encrypted,
+ using_data_descriptor,
+ compression_method: {
+ #[allow(deprecated)]
+ CompressionMethod::from_u16(compression_method)
+ },
+ compression_level: None,
+ last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time),
+ crc32,
+ compressed_size: compressed_size as u64,
+ uncompressed_size: uncompressed_size as u64,
+ file_name,
+ file_name_raw,
+ extra_field,
+ file_comment,
+ header_start: offset,
+ central_header_start,
+ data_start: AtomicU64::new(0),
+ external_attributes: external_file_attributes,
+ large_file: false,
+ aes_mode: None,
+ };
+
+ match parse_extra_field(&mut result) {
+ Ok(..) | Err(ZipError::Io(..)) => {}
+ Err(e) => return Err(e),
+ }
+
+ let aes_enabled = result.compression_method == CompressionMethod::AES;
+ if aes_enabled && result.aes_mode.is_none() {
+ return Err(ZipError::InvalidArchive(
+ "AES encryption without AES extra data field",
+ ));
+ }
+
+ // Account for shifted zip offsets.
+ result.header_start = result
+ .header_start
+ .checked_add(archive_offset)
+ .ok_or(ZipError::InvalidArchive("Archive header is too large"))?;
+
+ Ok(result)
+}
+
+fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> {
+ let mut reader = io::Cursor::new(&file.extra_field);
+
+ while (reader.position() as usize) < file.extra_field.len() {
+ let kind = reader.read_u16::<LittleEndian>()?;
+ let len = reader.read_u16::<LittleEndian>()?;
+ let mut len_left = len as i64;
+ match kind {
+ // Zip64 extended information extra field
+ 0x0001 => {
+ if file.uncompressed_size == spec::ZIP64_BYTES_THR {
+ file.large_file = true;
+ file.uncompressed_size = reader.read_u64::<LittleEndian>()?;
+ len_left -= 8;
+ }
+ if file.compressed_size == spec::ZIP64_BYTES_THR {
+ file.large_file = true;
+ file.compressed_size = reader.read_u64::<LittleEndian>()?;
+ len_left -= 8;
+ }
+ if file.header_start == spec::ZIP64_BYTES_THR {
+ file.header_start = reader.read_u64::<LittleEndian>()?;
+ len_left -= 8;
+ }
+ }
+ 0x9901 => {
+ // AES
+ if len != 7 {
+ return Err(ZipError::UnsupportedArchive(
+ "AES extra data field has an unsupported length",
+ ));
+ }
+ let vendor_version = reader.read_u16::<LittleEndian>()?;
+ let vendor_id = reader.read_u16::<LittleEndian>()?;
+ let aes_mode = reader.read_u8()?;
+ let compression_method = reader.read_u16::<LittleEndian>()?;
+
+ if vendor_id != 0x4541 {
+ return Err(ZipError::InvalidArchive("Invalid AES vendor"));
+ }
+ let vendor_version = match vendor_version {
+ 0x0001 => AesVendorVersion::Ae1,
+ 0x0002 => AesVendorVersion::Ae2,
+ _ => return Err(ZipError::InvalidArchive("Invalid AES vendor version")),
+ };
+ match aes_mode {
+ 0x01 => file.aes_mode = Some((AesMode::Aes128, vendor_version)),
+ 0x02 => file.aes_mode = Some((AesMode::Aes192, vendor_version)),
+ 0x03 => file.aes_mode = Some((AesMode::Aes256, vendor_version)),
+ _ => return Err(ZipError::InvalidArchive("Invalid AES encryption strength")),
+ };
+ file.compression_method = {
+ #[allow(deprecated)]
+ CompressionMethod::from_u16(compression_method)
+ };
+ }
+ _ => {
+ // Other fields are ignored
+ }
+ }
+
+ // We could also check for len_left < 0 to detect errors
+ if len_left > 0 {
+ reader.seek(io::SeekFrom::Current(len_left))?;
+ }
+ }
+ Ok(())
+}
+
+/// Methods for retrieving information on zip files
+impl<'a> ZipFile<'a> {
+ fn get_reader(&mut self) -> &mut ZipFileReader<'a> {
+ if let ZipFileReader::NoReader = self.reader {
+ let data = &self.data;
+ let crypto_reader = self.crypto_reader.take().expect("Invalid reader state");
+ self.reader = make_reader(data.compression_method, data.crc32, crypto_reader)
+ }
+ &mut self.reader
+ }
+
+ pub(crate) fn get_raw_reader(&mut self) -> &mut dyn Read {
+ if let ZipFileReader::NoReader = self.reader {
+ let crypto_reader = self.crypto_reader.take().expect("Invalid reader state");
+ self.reader = ZipFileReader::Raw(crypto_reader.into_inner())
+ }
+ &mut self.reader
+ }
+
+ /// Get the ZIP specification version used to create the file, as a `(major, minor)` pair
+ pub fn version_made_by(&self) -> (u8, u8) {
+ (
+ self.data.version_made_by / 10,
+ self.data.version_made_by % 10,
+ )
+ }
+
+ /// Get the name of the file
+ ///
+ /// # Warnings
+ ///
+ /// It is dangerous to use this name directly when extracting an archive.
+ /// It may contain an absolute path (`/etc/shadow`), or break out of the
+ /// current directory (`../runtime`). Carelessly writing to these paths
+ /// allows an attacker to craft a ZIP archive that will overwrite critical
+ /// files.
+ ///
+ /// You can use the [`ZipFile::enclosed_name`] method to validate the name
+ /// as a safe path.
+ pub fn name(&self) -> &str {
+ &self.data.file_name
+ }
+
+ /// Get the name of the file, in the raw (internal) byte representation.
+ ///
+ /// The encoding of this data is currently undefined.
+ pub fn name_raw(&self) -> &[u8] {
+ &self.data.file_name_raw
+ }
+
+ /// Get the name of the file in a sanitized form. It truncates the name to the first NULL byte,
+ /// removes a leading '/' and removes '..' parts.
+ #[deprecated(
+ since = "0.5.7",
+ note = "by stripping `..`s from the path, the meaning of paths can change.
+ `mangled_name` can be used if this behaviour is desirable"
+ )]
+ pub fn sanitized_name(&self) -> ::std::path::PathBuf {
+ self.mangled_name()
+ }
+
+ /// Rewrite the path, ignoring any path components with special meaning.
+ ///
+ /// - Absolute paths are made relative
+ /// - [`ParentDir`]s are ignored
+ /// - Truncates the filename at a NULL byte
+ ///
+ /// This is appropriate if you need to be able to extract *something* from
+ /// any archive, but will easily misrepresent trivial paths like
+ /// `foo/../bar` as `foo/bar` (instead of `bar`). Because of this,
+ /// [`ZipFile::enclosed_name`] is the better option in most scenarios.
+ ///
+ /// [`ParentDir`]: `Component::ParentDir`
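+ ///
+ /// A small illustrative sketch (the archive contents here are assumptions):
+ ///
+ /// ```no_run
+ /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap();
+ /// let file = archive.by_index(0).unwrap();
+ /// // An entry stored as "foo/../bar" is rewritten to "foo/bar", not "bar".
+ /// println!("{}", file.mangled_name().display());
+ /// ```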
+ pub fn mangled_name(&self) -> ::std::path::PathBuf {
+ self.data.file_name_sanitized()
+ }
+
+ /// Ensure the file path is safe to use as a [`Path`].
+ ///
+ /// - It can't contain NULL bytes
+ /// - It can't resolve to a path outside the current directory
+ /// > `foo/../bar` is fine, `foo/../../bar` is not.
+ /// - It can't be an absolute path
+ ///
+ /// This will read well-formed ZIP files correctly, and is resistant
+ /// to path-based exploits. It is recommended over
+ /// [`ZipFile::mangled_name`].
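+ ///
+ /// A minimal sketch of gating extraction on the validated name (the destination
+ /// directory and archive contents are assumptions):
+ ///
+ /// ```no_run
+ /// # use std::path::Path;
+ /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap();
+ /// let out_dir = Path::new("extracted");
+ /// let file = archive.by_index(0).unwrap();
+ /// match file.enclosed_name() {
+ ///     Some(safe) => println!("would extract to {}", out_dir.join(safe).display()),
+ ///     None => eprintln!("skipping {:?}: unsafe path", file.name()),
+ /// }
+ /// ```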
+ pub fn enclosed_name(&self) -> Option<&Path> {
+ if self.data.file_name.contains('\0') {
+ return None;
+ }
+ let path = Path::new(&self.data.file_name);
+ let mut depth = 0usize;
+ for component in path.components() {
+ match component {
+ Component::Prefix(_) | Component::RootDir => return None,
+ Component::ParentDir => depth = depth.checked_sub(1)?,
+ Component::Normal(_) => depth += 1,
+ Component::CurDir => (),
+ }
+ }
+ Some(path)
+ }
+
+ /// Get the comment of the file
+ pub fn comment(&self) -> &str {
+ &self.data.file_comment
+ }
+
+ /// Get the compression method used to store the file
+ pub fn compression(&self) -> CompressionMethod {
+ self.data.compression_method
+ }
+
+ /// Get the size of the file in the archive
+ pub fn compressed_size(&self) -> u64 {
+ self.data.compressed_size
+ }
+
+ /// Get the size of the file when uncompressed
+ pub fn size(&self) -> u64 {
+ self.data.uncompressed_size
+ }
+
+ /// Get the time the file was last modified
+ pub fn last_modified(&self) -> DateTime {
+ self.data.last_modified_time
+ }
+ /// Returns whether the file is actually a directory
+ pub fn is_dir(&self) -> bool {
+ self.name()
+ .chars()
+ .rev()
+ .next()
+ .map_or(false, |c| c == '/' || c == '\\')
+ }
+
+ /// Returns whether the file is a regular file
+ pub fn is_file(&self) -> bool {
+ !self.is_dir()
+ }
+
+ /// Get unix mode for the file
+ pub fn unix_mode(&self) -> Option<u32> {
+ if self.data.external_attributes == 0 {
+ return None;
+ }
+
+ match self.data.system {
+ System::Unix => Some(self.data.external_attributes >> 16),
+ System::Dos => {
+ // Interpret MS-DOS directory bit
+ let mut mode = if 0x10 == (self.data.external_attributes & 0x10) {
+ ffi::S_IFDIR | 0o0775
+ } else {
+ ffi::S_IFREG | 0o0664
+ };
+ if 0x01 == (self.data.external_attributes & 0x01) {
+ // Read-only bit; strip write permissions
+ mode &= 0o0555;
+ }
+ Some(mode)
+ }
+ _ => None,
+ }
+ }
+
+ /// Get the CRC32 hash of the original file
+ pub fn crc32(&self) -> u32 {
+ self.data.crc32
+ }
+
+ /// Get the extra data of the zip header for this file
+ pub fn extra_data(&self) -> &[u8] {
+ &self.data.extra_field
+ }
+
+ /// Get the starting offset of the data of the compressed file
+ pub fn data_start(&self) -> u64 {
+ self.data.data_start.load()
+ }
+
+ /// Get the starting offset of the zip header for this file
+ pub fn header_start(&self) -> u64 {
+ self.data.header_start
+ }
+ /// Get the starting offset of the zip header in the central directory for this file
+ pub fn central_header_start(&self) -> u64 {
+ self.data.central_header_start
+ }
+}
+
+impl<'a> Read for ZipFile<'a> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.get_reader().read(buf)
+ }
+}
+
+impl<'a> Drop for ZipFile<'a> {
+ fn drop(&mut self) {
+ // If self.data is Cow::Owned, this ZipFile was constructed by a streaming reader.
+ // In this case, we want to exhaust the reader so that the next file is accessible.
+ if let Cow::Owned(_) = self.data {
+ let mut buffer = [0; 1 << 16];
+
+ // Get the inner `Take` reader so all decryption, decompression and CRC calculation is skipped.
+ let mut reader: std::io::Take<&mut dyn std::io::Read> = match &mut self.reader {
+ ZipFileReader::NoReader => {
+ let innerreader = ::std::mem::replace(&mut self.crypto_reader, None);
+ innerreader.expect("Invalid reader state").into_inner()
+ }
+ reader => {
+ let innerreader = ::std::mem::replace(reader, ZipFileReader::NoReader);
+ innerreader.into_inner()
+ }
+ };
+
+ loop {
+ match reader.read(&mut buffer) {
+ Ok(0) => break,
+ Ok(_) => (),
+ Err(e) => panic!(
+ "Could not consume all of the output of the current ZipFile: {:?}",
+ e
+ ),
+ }
+ }
+ }
+ }
+}
+
+/// Read ZipFile structures from a non-seekable reader.
+///
+/// This is an alternative method to read a zip file. If possible, use the ZipArchive functions
+/// instead, as some information will be missing when reading in this manner.
+///
+/// Reads a file header from the start of the stream. Will return `Ok(Some(..))` if a file is
+/// present at the start of the stream. Returns `Ok(None)` if the start of the central directory
+/// is encountered. No more files should be read after this.
+///
+/// The Drop implementation of ZipFile ensures that the reader will be correctly positioned after
+/// the structure is done.
+///
+/// Missing fields are:
+/// * `comment`: set to an empty string
+/// * `data_start`: set to 0
+/// * `external_attributes`: `unix_mode()`: will return None
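+///
+/// A minimal usage sketch, assuming `source` is any `io::Read` (a `std::fs::File` here;
+/// the file name is illustrative):
+///
+/// ```no_run
+/// # fn run() -> zip::result::ZipResult<()> {
+/// let mut source = std::fs::File::open("archive.zip")?;
+/// while let Some(file) = zip::read::read_zipfile_from_stream(&mut source)? {
+///     println!("{}: {} bytes", file.name(), file.size());
+///     // Dropping `file` advances `source` past this entry.
+/// }
+/// # Ok(())
+/// # }
+/// ```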
+pub fn read_zipfile_from_stream<'a, R: io::Read>(
+ reader: &'a mut R,
+) -> ZipResult<Option<ZipFile<'_>>> {
+ let signature = reader.read_u32::<LittleEndian>()?;
+
+ match signature {
+ spec::LOCAL_FILE_HEADER_SIGNATURE => (),
+ spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE => return Ok(None),
+ _ => return Err(ZipError::InvalidArchive("Invalid local file header")),
+ }
+
+ let version_made_by = reader.read_u16::<LittleEndian>()?;
+ let flags = reader.read_u16::<LittleEndian>()?;
+ let encrypted = flags & 1 == 1;
+ let is_utf8 = flags & (1 << 11) != 0;
+ let using_data_descriptor = flags & (1 << 3) != 0;
+ #[allow(deprecated)]
+ let compression_method = CompressionMethod::from_u16(reader.read_u16::<LittleEndian>()?);
+ let last_mod_time = reader.read_u16::<LittleEndian>()?;
+ let last_mod_date = reader.read_u16::<LittleEndian>()?;
+ let crc32 = reader.read_u32::<LittleEndian>()?;
+ let compressed_size = reader.read_u32::<LittleEndian>()?;
+ let uncompressed_size = reader.read_u32::<LittleEndian>()?;
+ let file_name_length = reader.read_u16::<LittleEndian>()? as usize;
+ let extra_field_length = reader.read_u16::<LittleEndian>()? as usize;
+
+ let mut file_name_raw = vec![0; file_name_length];
+ reader.read_exact(&mut file_name_raw)?;
+ let mut extra_field = vec![0; extra_field_length];
+ reader.read_exact(&mut extra_field)?;
+
+ let file_name = match is_utf8 {
+ true => String::from_utf8_lossy(&*file_name_raw).into_owned(),
+ false => file_name_raw.clone().from_cp437(),
+ };
+
+ let mut result = ZipFileData {
+ system: System::from_u8((version_made_by >> 8) as u8),
+ version_made_by: version_made_by as u8,
+ encrypted,
+ using_data_descriptor,
+ compression_method,
+ compression_level: None,
+ last_modified_time: DateTime::from_msdos(last_mod_date, last_mod_time),
+ crc32,
+ compressed_size: compressed_size as u64,
+ uncompressed_size: uncompressed_size as u64,
+ file_name,
+ file_name_raw,
+ extra_field,
+ file_comment: String::new(), // file comment is only available in the central directory
+ // header_start and data start are not available, but also don't matter, since seeking is
+ // not available.
+ header_start: 0,
+ data_start: AtomicU64::new(0),
+ central_header_start: 0,
+ // The external_attributes field is only available in the central directory.
+ // We set this to zero, which should be valid as the docs state 'If input came
+ // from standard input, this field is set to zero.'
+ external_attributes: 0,
+ large_file: false,
+ aes_mode: None,
+ };
+
+ match parse_extra_field(&mut result) {
+ Ok(..) | Err(ZipError::Io(..)) => {}
+ Err(e) => return Err(e),
+ }
+
+ if encrypted {
+ return unsupported_zip_error("Encrypted files are not supported");
+ }
+ if using_data_descriptor {
+ return unsupported_zip_error("The file length is not available in the local header");
+ }
+
+ let limit_reader = (reader as &'a mut dyn io::Read).take(result.compressed_size as u64);
+
+ let result_crc32 = result.crc32;
+ let result_compression_method = result.compression_method;
+ let crypto_reader = make_crypto_reader(
+ result_compression_method,
+ result_crc32,
+ result.last_modified_time,
+ result.using_data_descriptor,
+ limit_reader,
+ None,
+ None,
+ #[cfg(feature = "aes-crypto")]
+ result.compressed_size,
+ )?
+ .unwrap();
+
+ Ok(Some(ZipFile {
+ data: Cow::Owned(result),
+ crypto_reader: None,
+ reader: make_reader(result_compression_method, result_crc32, crypto_reader),
+ }))
+}
+
+#[cfg(test)]
+mod test {
+ #[test]
+ fn invalid_offset() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/invalid_offset.zip"));
+ let reader = ZipArchive::new(io::Cursor::new(v));
+ assert!(reader.is_err());
+ }
+
+ #[test]
+ fn invalid_offset2() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/invalid_offset2.zip"));
+ let reader = ZipArchive::new(io::Cursor::new(v));
+ assert!(reader.is_err());
+ }
+
+ #[test]
+ fn zip64_with_leading_junk() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/zip64_demo.zip"));
+ let reader = ZipArchive::new(io::Cursor::new(v)).unwrap();
+ assert_eq!(reader.len(), 1);
+ }
+
+ #[test]
+ fn zip_contents() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
+ let mut reader = ZipArchive::new(io::Cursor::new(v)).unwrap();
+ assert_eq!(reader.comment(), b"");
+ assert_eq!(reader.by_index(0).unwrap().central_header_start(), 77);
+ }
+
+ #[test]
+ fn zip_read_streaming() {
+ use super::read_zipfile_from_stream;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
+ let mut reader = io::Cursor::new(v);
+ loop {
+ if read_zipfile_from_stream(&mut reader).unwrap().is_none() {
+ break;
+ }
+ }
+ }
+
+ #[test]
+ fn zip_clone() {
+ use super::ZipArchive;
+ use std::io::{self, Read};
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
+ let mut reader1 = ZipArchive::new(io::Cursor::new(v)).unwrap();
+ let mut reader2 = reader1.clone();
+
+ let mut file1 = reader1.by_index(0).unwrap();
+ let mut file2 = reader2.by_index(0).unwrap();
+
+ let t = file1.last_modified();
+ assert_eq!(
+ (
+ t.year(),
+ t.month(),
+ t.day(),
+ t.hour(),
+ t.minute(),
+ t.second()
+ ),
+ (1980, 1, 1, 0, 0, 0)
+ );
+
+ let mut buf1 = [0; 5];
+ let mut buf2 = [0; 5];
+ let mut buf3 = [0; 5];
+ let mut buf4 = [0; 5];
+
+ file1.read_exact(&mut buf1).unwrap();
+ file2.read_exact(&mut buf2).unwrap();
+ file1.read_exact(&mut buf3).unwrap();
+ file2.read_exact(&mut buf4).unwrap();
+
+ assert_eq!(buf1, buf2);
+ assert_eq!(buf3, buf4);
+ assert_ne!(buf1, buf3);
+ }
+
+ #[test]
+ fn file_and_dir_predicates() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/files_and_dirs.zip"));
+ let mut zip = ZipArchive::new(io::Cursor::new(v)).unwrap();
+
+ for i in 0..zip.len() {
+ let zip_file = zip.by_index(i).unwrap();
+ let full_name = zip_file.enclosed_name().unwrap();
+ let file_name = full_name.file_name().unwrap().to_str().unwrap();
+ assert!(
+ (file_name.starts_with("dir") && zip_file.is_dir())
+ || (file_name.starts_with("file") && zip_file.is_file())
+ );
+ }
+ }
+
+ /// test case to ensure we don't preemptively over allocate based on the
+ /// declared number of files in the CDE of an invalid zip when the number of
+ /// files declared is more than the alleged offset in the CDE
+ #[test]
+ fn invalid_cde_number_of_files_allocation_smaller_offset() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!(
+ "../tests/data/invalid_cde_number_of_files_allocation_smaller_offset.zip"
+ ));
+ let reader = ZipArchive::new(io::Cursor::new(v));
+ assert!(reader.is_err());
+ }
+
+ /// test case to ensure we don't preemptively over allocate based on the
+ /// declared number of files in the CDE of an invalid zip when the number of
+ /// files declared is less than the alleged offset in the CDE
+ #[test]
+ fn invalid_cde_number_of_files_allocation_greater_offset() {
+ use super::ZipArchive;
+ use std::io;
+
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!(
+ "../tests/data/invalid_cde_number_of_files_allocation_greater_offset.zip"
+ ));
+ let reader = ZipArchive::new(io::Cursor::new(v));
+ assert!(reader.is_err());
+ }
+}
diff --git a/third_party/rust/zip/src/result.rs b/third_party/rust/zip/src/result.rs
new file mode 100644
index 0000000000..72a30e4881
--- /dev/null
+++ b/third_party/rust/zip/src/result.rs
@@ -0,0 +1,83 @@
+//! Error types that can be emitted from this library
+
+use std::error::Error;
+use std::fmt;
+use std::io;
+
+/// Generic result type with ZipError as its error variant
+pub type ZipResult<T> = Result<T, ZipError>;
+
+/// The given password is wrong
+#[derive(Debug)]
+pub struct InvalidPassword;
+
+impl fmt::Display for InvalidPassword {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "invalid password for file in archive")
+ }
+}
+
+impl Error for InvalidPassword {}
+
+/// Error type for Zip
+#[derive(Debug)]
+pub enum ZipError {
+ /// An Error caused by I/O
+ Io(io::Error),
+
+ /// This file is probably not a zip archive
+ InvalidArchive(&'static str),
+
+ /// This archive is not supported
+ UnsupportedArchive(&'static str),
+
+ /// The requested file could not be found in the archive
+ FileNotFound,
+}
+
+impl From<io::Error> for ZipError {
+ fn from(err: io::Error) -> ZipError {
+ ZipError::Io(err)
+ }
+}
+
+impl fmt::Display for ZipError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ ZipError::Io(err) => write!(fmt, "{}", err),
+ ZipError::InvalidArchive(err) => write!(fmt, "invalid Zip archive: {}", err),
+ ZipError::UnsupportedArchive(err) => write!(fmt, "unsupported Zip archive: {}", err),
+ ZipError::FileNotFound => write!(fmt, "specified file not found in archive"),
+ }
+ }
+}
+
+impl Error for ZipError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ ZipError::Io(err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+impl ZipError {
+ /// The text used as an error when a password is required and not supplied
+ ///
+ /// ```rust,no_run
+ /// # use zip::result::ZipError;
+ /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap();
+ /// match archive.by_index(1) {
+ /// Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)) => eprintln!("a password is needed to unzip this file"),
+ /// _ => (),
+ /// }
+ /// # ()
+ /// ```
+ pub const PASSWORD_REQUIRED: &'static str = "Password required to decrypt file";
+}
+
+impl From<ZipError> for io::Error {
+ fn from(err: ZipError) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, err)
+ }
+}
diff --git a/third_party/rust/zip/src/spec.rs b/third_party/rust/zip/src/spec.rs
new file mode 100644
index 0000000000..3ffcf7323c
--- /dev/null
+++ b/third_party/rust/zip/src/spec.rs
@@ -0,0 +1,207 @@
+use crate::result::{ZipError, ZipResult};
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use std::io;
+use std::io::prelude::*;
+
+pub const LOCAL_FILE_HEADER_SIGNATURE: u32 = 0x04034b50;
+pub const CENTRAL_DIRECTORY_HEADER_SIGNATURE: u32 = 0x02014b50;
+const CENTRAL_DIRECTORY_END_SIGNATURE: u32 = 0x06054b50;
+pub const ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE: u32 = 0x06064b50;
+const ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE: u32 = 0x07064b50;
+
+pub const ZIP64_BYTES_THR: u64 = u32::MAX as u64;
+pub const ZIP64_ENTRY_THR: usize = u16::MAX as usize;
+
+pub struct CentralDirectoryEnd {
+ pub disk_number: u16,
+ pub disk_with_central_directory: u16,
+ pub number_of_files_on_this_disk: u16,
+ pub number_of_files: u16,
+ pub central_directory_size: u32,
+ pub central_directory_offset: u32,
+ pub zip_file_comment: Vec<u8>,
+}
+
+impl CentralDirectoryEnd {
+ pub fn parse<T: Read>(reader: &mut T) -> ZipResult<CentralDirectoryEnd> {
+ let magic = reader.read_u32::<LittleEndian>()?;
+ if magic != CENTRAL_DIRECTORY_END_SIGNATURE {
+ return Err(ZipError::InvalidArchive("Invalid digital signature header"));
+ }
+ let disk_number = reader.read_u16::<LittleEndian>()?;
+ let disk_with_central_directory = reader.read_u16::<LittleEndian>()?;
+ let number_of_files_on_this_disk = reader.read_u16::<LittleEndian>()?;
+ let number_of_files = reader.read_u16::<LittleEndian>()?;
+ let central_directory_size = reader.read_u32::<LittleEndian>()?;
+ let central_directory_offset = reader.read_u32::<LittleEndian>()?;
+ let zip_file_comment_length = reader.read_u16::<LittleEndian>()? as usize;
+ let mut zip_file_comment = vec![0; zip_file_comment_length];
+ reader.read_exact(&mut zip_file_comment)?;
+
+ Ok(CentralDirectoryEnd {
+ disk_number,
+ disk_with_central_directory,
+ number_of_files_on_this_disk,
+ number_of_files,
+ central_directory_size,
+ central_directory_offset,
+ zip_file_comment,
+ })
+ }
+
+ pub fn find_and_parse<T: Read + io::Seek>(
+ reader: &mut T,
+ ) -> ZipResult<(CentralDirectoryEnd, u64)> {
+ const HEADER_SIZE: u64 = 22;
+ const BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE: u64 = HEADER_SIZE - 6;
+ let file_length = reader.seek(io::SeekFrom::End(0))?;
+
+ let search_upper_bound = file_length.saturating_sub(HEADER_SIZE + ::std::u16::MAX as u64);
+
+ if file_length < HEADER_SIZE {
+ return Err(ZipError::InvalidArchive("Invalid zip header"));
+ }
+
+ let mut pos = file_length - HEADER_SIZE;
+ while pos >= search_upper_bound {
+ reader.seek(io::SeekFrom::Start(pos as u64))?;
+ if reader.read_u32::<LittleEndian>()? == CENTRAL_DIRECTORY_END_SIGNATURE {
+ reader.seek(io::SeekFrom::Current(
+ BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE as i64,
+ ))?;
+ let cde_start_pos = reader.seek(io::SeekFrom::Start(pos as u64))?;
+ return CentralDirectoryEnd::parse(reader).map(|cde| (cde, cde_start_pos));
+ }
+ pos = match pos.checked_sub(1) {
+ Some(p) => p,
+ None => break,
+ };
+ }
+ Err(ZipError::InvalidArchive(
+ "Could not find central directory end",
+ ))
+ }
+
+ pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
+ writer.write_u32::<LittleEndian>(CENTRAL_DIRECTORY_END_SIGNATURE)?;
+ writer.write_u16::<LittleEndian>(self.disk_number)?;
+ writer.write_u16::<LittleEndian>(self.disk_with_central_directory)?;
+ writer.write_u16::<LittleEndian>(self.number_of_files_on_this_disk)?;
+ writer.write_u16::<LittleEndian>(self.number_of_files)?;
+ writer.write_u32::<LittleEndian>(self.central_directory_size)?;
+ writer.write_u32::<LittleEndian>(self.central_directory_offset)?;
+ writer.write_u16::<LittleEndian>(self.zip_file_comment.len() as u16)?;
+ writer.write_all(&self.zip_file_comment)?;
+ Ok(())
+ }
+}
+
+pub struct Zip64CentralDirectoryEndLocator {
+ pub disk_with_central_directory: u32,
+ pub end_of_central_directory_offset: u64,
+ pub number_of_disks: u32,
+}
+
+impl Zip64CentralDirectoryEndLocator {
+ pub fn parse<T: Read>(reader: &mut T) -> ZipResult<Zip64CentralDirectoryEndLocator> {
+ let magic = reader.read_u32::<LittleEndian>()?;
+ if magic != ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE {
+ return Err(ZipError::InvalidArchive(
+ "Invalid zip64 locator digital signature header",
+ ));
+ }
+ let disk_with_central_directory = reader.read_u32::<LittleEndian>()?;
+ let end_of_central_directory_offset = reader.read_u64::<LittleEndian>()?;
+ let number_of_disks = reader.read_u32::<LittleEndian>()?;
+
+ Ok(Zip64CentralDirectoryEndLocator {
+ disk_with_central_directory,
+ end_of_central_directory_offset,
+ number_of_disks,
+ })
+ }
+
+ pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
+ writer.write_u32::<LittleEndian>(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?;
+ writer.write_u32::<LittleEndian>(self.disk_with_central_directory)?;
+ writer.write_u64::<LittleEndian>(self.end_of_central_directory_offset)?;
+ writer.write_u32::<LittleEndian>(self.number_of_disks)?;
+ Ok(())
+ }
+}
+
+pub struct Zip64CentralDirectoryEnd {
+ pub version_made_by: u16,
+ pub version_needed_to_extract: u16,
+ pub disk_number: u32,
+ pub disk_with_central_directory: u32,
+ pub number_of_files_on_this_disk: u64,
+ pub number_of_files: u64,
+ pub central_directory_size: u64,
+ pub central_directory_offset: u64,
+ //pub extensible_data_sector: Vec<u8>, <-- We don't do anything with this at the moment.
+}
+
+impl Zip64CentralDirectoryEnd {
+ pub fn find_and_parse<T: Read + io::Seek>(
+ reader: &mut T,
+ nominal_offset: u64,
+ search_upper_bound: u64,
+ ) -> ZipResult<(Zip64CentralDirectoryEnd, u64)> {
+ let mut pos = nominal_offset;
+
+ while pos <= search_upper_bound {
+ reader.seek(io::SeekFrom::Start(pos))?;
+
+ if reader.read_u32::<LittleEndian>()? == ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE {
+ let archive_offset = pos - nominal_offset;
+
+ let _record_size = reader.read_u64::<LittleEndian>()?;
+ // We would use this value if we did anything with the "zip64 extensible data sector".
+
+ let version_made_by = reader.read_u16::<LittleEndian>()?;
+ let version_needed_to_extract = reader.read_u16::<LittleEndian>()?;
+ let disk_number = reader.read_u32::<LittleEndian>()?;
+ let disk_with_central_directory = reader.read_u32::<LittleEndian>()?;
+ let number_of_files_on_this_disk = reader.read_u64::<LittleEndian>()?;
+ let number_of_files = reader.read_u64::<LittleEndian>()?;
+ let central_directory_size = reader.read_u64::<LittleEndian>()?;
+ let central_directory_offset = reader.read_u64::<LittleEndian>()?;
+
+ return Ok((
+ Zip64CentralDirectoryEnd {
+ version_made_by,
+ version_needed_to_extract,
+ disk_number,
+ disk_with_central_directory,
+ number_of_files_on_this_disk,
+ number_of_files,
+ central_directory_size,
+ central_directory_offset,
+ },
+ archive_offset,
+ ));
+ }
+
+ pos += 1;
+ }
+
+ Err(ZipError::InvalidArchive(
+ "Could not find ZIP64 central directory end",
+ ))
+ }
+
+ pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
+ writer.write_u32::<LittleEndian>(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?;
+ writer.write_u64::<LittleEndian>(44)?; // record size
+ writer.write_u16::<LittleEndian>(self.version_made_by)?;
+ writer.write_u16::<LittleEndian>(self.version_needed_to_extract)?;
+ writer.write_u32::<LittleEndian>(self.disk_number)?;
+ writer.write_u32::<LittleEndian>(self.disk_with_central_directory)?;
+ writer.write_u64::<LittleEndian>(self.number_of_files_on_this_disk)?;
+ writer.write_u64::<LittleEndian>(self.number_of_files)?;
+ writer.write_u64::<LittleEndian>(self.central_directory_size)?;
+ writer.write_u64::<LittleEndian>(self.central_directory_offset)?;
+ Ok(())
+ }
+}
diff --git a/third_party/rust/zip/src/types.rs b/third_party/rust/zip/src/types.rs
new file mode 100644
index 0000000000..b65fad4017
--- /dev/null
+++ b/third_party/rust/zip/src/types.rs
@@ -0,0 +1,573 @@
+//! Types that specify what is contained in a ZIP.
+#[cfg(doc)]
+use {crate::read::ZipFile, crate::write::FileOptions};
+
+#[cfg(not(any(
+ all(target_arch = "arm", target_pointer_width = "32"),
+ target_arch = "mips",
+ target_arch = "powerpc"
+)))]
+use std::sync::atomic;
+
+#[cfg(any(
+ all(target_arch = "arm", target_pointer_width = "32"),
+ target_arch = "mips",
+ target_arch = "powerpc"
+))]
+mod atomic {
+ use crossbeam_utils::sync::ShardedLock;
+ pub use std::sync::atomic::Ordering;
+
+ #[derive(Debug, Default)]
+ pub struct AtomicU64 {
+ value: ShardedLock<u64>,
+ }
+
+ impl AtomicU64 {
+ pub fn new(v: u64) -> Self {
+ Self {
+ value: ShardedLock::new(v),
+ }
+ }
+ pub fn get_mut(&mut self) -> &mut u64 {
+ self.value.get_mut().unwrap()
+ }
+ pub fn load(&self, _: Ordering) -> u64 {
+ *self.value.read().unwrap()
+ }
+ pub fn store(&self, value: u64, _: Ordering) {
+ *self.value.write().unwrap() = value;
+ }
+ }
+}
+
+#[cfg(feature = "time")]
+use time::{error::ComponentRange, Date, Month, OffsetDateTime, PrimitiveDateTime, Time};
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum System {
+ Dos = 0,
+ Unix = 3,
+ Unknown,
+}
+
+impl System {
+ pub fn from_u8(system: u8) -> System {
+ use self::System::*;
+
+ match system {
+ 0 => Dos,
+ 3 => Unix,
+ _ => Unknown,
+ }
+ }
+}
+
+/// Representation of a moment in time.
+///
+/// Zip files use an old format from DOS to store timestamps,
+/// with its own set of peculiarities.
+/// For example, it has a resolution of 2 seconds!
+///
+/// A [`DateTime`] can be stored directly in a zipfile with [`FileOptions::last_modified_time`],
+/// or read from one with [`ZipFile::last_modified`]
+///
+/// # Warning
+///
+/// Because there is no timezone associated with the [`DateTime`], they should ideally only
+/// be used for user-facing descriptions. This also means [`DateTime::to_time`] returns an
+/// [`OffsetDateTime`] that simply assumes UTC, even though no real timezone information is stored.
+///
+/// Modern zip files store more precise timestamps, which are ignored by [`crate::read::ZipArchive`],
+/// so keep in mind that these timestamps are unreliable. [We're working on this](https://github.com/zip-rs/zip/issues/156#issuecomment-652981904).
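+///
+/// A small round-trip sketch of the MS-DOS encoding (the date chosen is arbitrary):
+///
+/// ```
+/// use zip::DateTime;
+/// let dt = DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap();
+/// // `datepart` and `timepart` are the two u16 words stored in the ZIP headers.
+/// let round_trip = DateTime::from_msdos(dt.datepart(), dt.timepart());
+/// assert_eq!(round_trip.second(), 30); // 2-second resolution: odd seconds are rounded down
+/// ```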
+#[derive(Debug, Clone, Copy)]
+pub struct DateTime {
+ year: u16,
+ month: u8,
+ day: u8,
+ hour: u8,
+ minute: u8,
+ second: u8,
+}
+
+impl ::std::default::Default for DateTime {
+ /// Constructs a default datetime of 1980-01-01 00:00:00
+ fn default() -> DateTime {
+ DateTime {
+ year: 1980,
+ month: 1,
+ day: 1,
+ hour: 0,
+ minute: 0,
+ second: 0,
+ }
+ }
+}
+
+impl DateTime {
+ /// Converts an msdos (u16, u16) pair to a DateTime object
+ pub fn from_msdos(datepart: u16, timepart: u16) -> DateTime {
+ let seconds = (timepart & 0b0000000000011111) << 1;
+ let minutes = (timepart & 0b0000011111100000) >> 5;
+ let hours = (timepart & 0b1111100000000000) >> 11;
+ let days = datepart & 0b0000000000011111;
+ let months = (datepart & 0b0000000111100000) >> 5;
+ let years = (datepart & 0b1111111000000000) >> 9;
+
+ DateTime {
+ year: (years + 1980) as u16,
+ month: months as u8,
+ day: days as u8,
+ hour: hours as u8,
+ minute: minutes as u8,
+ second: seconds as u8,
+ }
+ }
+
+ /// Constructs a DateTime from a specific date and time
+ ///
+ /// The bounds are:
+ /// * year: [1980, 2107]
+ /// * month: [1, 12]
+ /// * day: [1, 31]
+ /// * hour: [0, 23]
+ /// * minute: [0, 59]
+ /// * second: [0, 60]
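+ ///
+ /// For example (values chosen only to illustrate the bounds):
+ ///
+ /// ```
+ /// use zip::DateTime;
+ /// assert!(DateTime::from_date_and_time(2021, 6, 15, 12, 30, 0).is_ok());
+ /// assert!(DateTime::from_date_and_time(1970, 1, 1, 0, 0, 0).is_err()); // year before 1980
+ /// ```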
+ #[allow(clippy::result_unit_err)]
+ pub fn from_date_and_time(
+ year: u16,
+ month: u8,
+ day: u8,
+ hour: u8,
+ minute: u8,
+ second: u8,
+ ) -> Result<DateTime, ()> {
+ if (1980..=2107).contains(&year)
+ && month >= 1
+ && month <= 12
+ && day >= 1
+ && day <= 31
+ && hour <= 23
+ && minute <= 59
+ && second <= 60
+ {
+ Ok(DateTime {
+ year,
+ month,
+ day,
+ hour,
+ minute,
+ second,
+ })
+ } else {
+ Err(())
+ }
+ }
+
+ #[cfg(feature = "time")]
+ /// Converts an OffsetDateTime object to a DateTime
+ ///
+ /// Returns `Err` when this object is out of bounds
+ #[allow(clippy::result_unit_err)]
+ pub fn from_time(dt: OffsetDateTime) -> Result<DateTime, ()> {
+ if dt.year() >= 1980 && dt.year() <= 2107 {
+ Ok(DateTime {
+ year: (dt.year()) as u16,
+ month: (dt.month()) as u8,
+ day: dt.day() as u8,
+ hour: dt.hour() as u8,
+ minute: dt.minute() as u8,
+ second: dt.second() as u8,
+ })
+ } else {
+ Err(())
+ }
+ }
+
+ /// Gets the time portion of this datetime in the msdos representation
+ pub fn timepart(&self) -> u16 {
+ ((self.second as u16) >> 1) | ((self.minute as u16) << 5) | ((self.hour as u16) << 11)
+ }
+
+ /// Gets the date portion of this datetime in the msdos representation
+ pub fn datepart(&self) -> u16 {
+ (self.day as u16) | ((self.month as u16) << 5) | ((self.year - 1980) << 9)
+ }
+
+ #[cfg(feature = "time")]
+ /// Converts the DateTime to an OffsetDateTime structure
+ pub fn to_time(&self) -> Result<OffsetDateTime, ComponentRange> {
+ use std::convert::TryFrom;
+
+ let date =
+ Date::from_calendar_date(self.year as i32, Month::try_from(self.month)?, self.day)?;
+ let time = Time::from_hms(self.hour, self.minute, self.second)?;
+ Ok(PrimitiveDateTime::new(date, time).assume_utc())
+ }
+
+ /// Get the year. There is no epoch, i.e. 2018 will be returned as 2018.
+ pub fn year(&self) -> u16 {
+ self.year
+ }
+
+ /// Get the month, where 1 = January and 12 = December
+ ///
+ /// # Warning
+ ///
+ /// When read from a zip file, this may not be a reasonable value
+ pub fn month(&self) -> u8 {
+ self.month
+ }
+
+ /// Get the day
+ ///
+ /// # Warning
+ ///
+ /// When read from a zip file, this may not be a reasonable value
+ pub fn day(&self) -> u8 {
+ self.day
+ }
+
+ /// Get the hour
+ ///
+ /// # Warning
+ ///
+ /// When read from a zip file, this may not be a reasonable value
+ pub fn hour(&self) -> u8 {
+ self.hour
+ }
+
+ /// Get the minute
+ ///
+ /// # Warning
+ ///
+ /// When read from a zip file, this may not be a reasonable value
+ pub fn minute(&self) -> u8 {
+ self.minute
+ }
+
+ /// Get the second
+ ///
+ /// # Warning
+ ///
+ /// When read from a zip file, this may not be a reasonable value
+ pub fn second(&self) -> u8 {
+ self.second
+ }
+}
+
+pub const DEFAULT_VERSION: u8 = 46;
+
+/// A type like `AtomicU64` except it implements `Clone` and has predefined
+/// ordering.
+///
+/// It uses `Relaxed` ordering because it is not used for synchronisation.
+#[derive(Debug)]
+pub struct AtomicU64(atomic::AtomicU64);
+
+impl AtomicU64 {
+ pub fn new(v: u64) -> Self {
+ Self(atomic::AtomicU64::new(v))
+ }
+
+ pub fn load(&self) -> u64 {
+ self.0.load(atomic::Ordering::Relaxed)
+ }
+
+ pub fn store(&self, val: u64) {
+ self.0.store(val, atomic::Ordering::Relaxed)
+ }
+
+ pub fn get_mut(&mut self) -> &mut u64 {
+ self.0.get_mut()
+ }
+}
+
+impl Clone for AtomicU64 {
+ fn clone(&self) -> Self {
+ Self(atomic::AtomicU64::new(self.load()))
+ }
+}
+
+/// Structure representing a ZIP file.
+#[derive(Debug, Clone)]
+pub struct ZipFileData {
+ /// Compatibility of the file attribute information
+ pub system: System,
+ /// Specification version
+ pub version_made_by: u8,
+ /// True if the file is encrypted.
+ pub encrypted: bool,
+ /// True if the file uses a data-descriptor section
+ pub using_data_descriptor: bool,
+ /// Compression method used to store the file
+ pub compression_method: crate::compression::CompressionMethod,
+ /// Compression level to store the file
+ pub compression_level: Option<i32>,
+ /// Last modified time. This will only have a 2 second precision.
+ pub last_modified_time: DateTime,
+ /// CRC32 checksum
+ pub crc32: u32,
+ /// Size of the file in the ZIP
+ pub compressed_size: u64,
+ /// Size of the file when extracted
+ pub uncompressed_size: u64,
+ /// Name of the file
+ pub file_name: String,
+ /// Raw file name. To be used when file_name was incorrectly decoded.
+ pub file_name_raw: Vec<u8>,
+ /// Extra field usually used for storage expansion
+ pub extra_field: Vec<u8>,
+ /// File comment
+ pub file_comment: String,
+ /// Specifies where the local header of the file starts
+ pub header_start: u64,
+ /// Specifies where the central header of the file starts
+ ///
+ /// Note that when this is not known, it is set to 0
+ pub central_header_start: u64,
+ /// Specifies where the compressed data of the file starts
+ pub data_start: AtomicU64,
+ /// External file attributes
+ pub external_attributes: u32,
+ /// Reserve local ZIP64 extra field
+ pub large_file: bool,
+ /// AES mode if applicable
+ pub aes_mode: Option<(AesMode, AesVendorVersion)>,
+}
+
+impl ZipFileData {
+ pub fn file_name_sanitized(&self) -> ::std::path::PathBuf {
+ let no_null_filename = match self.file_name.find('\0') {
+ Some(index) => &self.file_name[0..index],
+ None => &self.file_name,
+ }
+ .to_string();
+
+ // zip files can contain both / and \ as separators regardless of the OS
+ // and as we want to return a sanitized PathBuf that only supports the
+ // OS separator let's convert incompatible separators to compatible ones
+ let separator = ::std::path::MAIN_SEPARATOR;
+ let opposite_separator = match separator {
+ '/' => '\\',
+ _ => '/',
+ };
+ let filename =
+ no_null_filename.replace(&opposite_separator.to_string(), &separator.to_string());
+
+ ::std::path::Path::new(&filename)
+ .components()
+ .filter(|component| matches!(*component, ::std::path::Component::Normal(..)))
+ .fold(::std::path::PathBuf::new(), |mut path, ref cur| {
+ path.push(cur.as_os_str());
+ path
+ })
+ }
+
+ pub fn zip64_extension(&self) -> bool {
+ self.uncompressed_size > 0xFFFFFFFF
+ || self.compressed_size > 0xFFFFFFFF
+ || self.header_start > 0xFFFFFFFF
+ }
+
+ pub fn version_needed(&self) -> u16 {
+ // higher versions matched first
+ match (self.zip64_extension(), self.compression_method) {
+ #[cfg(feature = "bzip2")]
+ (_, crate::compression::CompressionMethod::Bzip2) => 46,
+ (true, _) => 45,
+ _ => 20,
+ }
+ }
+}
+
+/// The encryption specification used to encrypt a file with AES.
+///
+/// According to the [specification](https://www.winzip.com/win/en/aes_info.html#winzip11) AE-2
+/// does not make use of the CRC check.
+#[derive(Copy, Clone, Debug)]
+pub enum AesVendorVersion {
+ Ae1,
+ Ae2,
+}
+
+/// AES variant used.
+#[derive(Copy, Clone, Debug)]
+pub enum AesMode {
+ Aes128,
+ Aes192,
+ Aes256,
+}
+
+#[cfg(feature = "aes-crypto")]
+impl AesMode {
+ pub fn salt_length(&self) -> usize {
+ self.key_length() / 2
+ }
+
+ pub fn key_length(&self) -> usize {
+ match self {
+ Self::Aes128 => 16,
+ Self::Aes192 => 24,
+ Self::Aes256 => 32,
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ #[test]
+ fn system() {
+ use super::System;
+ assert_eq!(System::Dos as u16, 0u16);
+ assert_eq!(System::Unix as u16, 3u16);
+ assert_eq!(System::from_u8(0), System::Dos);
+ assert_eq!(System::from_u8(3), System::Unix);
+ }
+
+ #[test]
+ fn sanitize() {
+ use super::*;
+ let file_name = "/path/../../../../etc/./passwd\0/etc/shadow".to_string();
+ let data = ZipFileData {
+ system: System::Dos,
+ version_made_by: 0,
+ encrypted: false,
+ using_data_descriptor: false,
+ compression_method: crate::compression::CompressionMethod::Stored,
+ compression_level: None,
+ last_modified_time: DateTime::default(),
+ crc32: 0,
+ compressed_size: 0,
+ uncompressed_size: 0,
+ file_name: file_name.clone(),
+ file_name_raw: file_name.into_bytes(),
+ extra_field: Vec::new(),
+ file_comment: String::new(),
+ header_start: 0,
+ data_start: AtomicU64::new(0),
+ central_header_start: 0,
+ external_attributes: 0,
+ large_file: false,
+ aes_mode: None,
+ };
+ assert_eq!(
+ data.file_name_sanitized(),
+ ::std::path::PathBuf::from("path/etc/passwd")
+ );
+ }
+
+ #[test]
+ #[allow(clippy::unusual_byte_groupings)]
+ fn datetime_default() {
+ use super::DateTime;
+ let dt = DateTime::default();
+ assert_eq!(dt.timepart(), 0);
+ assert_eq!(dt.datepart(), 0b0000000_0001_00001);
+ }
+
+ #[test]
+ #[allow(clippy::unusual_byte_groupings)]
+ fn datetime_max() {
+ use super::DateTime;
+ let dt = DateTime::from_date_and_time(2107, 12, 31, 23, 59, 60).unwrap();
+ assert_eq!(dt.timepart(), 0b10111_111011_11110);
+ assert_eq!(dt.datepart(), 0b1111111_1100_11111);
+ }
+
+ #[test]
+ fn datetime_bounds() {
+ use super::DateTime;
+
+ assert!(DateTime::from_date_and_time(2000, 1, 1, 23, 59, 60).is_ok());
+ assert!(DateTime::from_date_and_time(2000, 1, 1, 24, 0, 0).is_err());
+ assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 60, 0).is_err());
+ assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 0, 61).is_err());
+
+ assert!(DateTime::from_date_and_time(2107, 12, 31, 0, 0, 0).is_ok());
+ assert!(DateTime::from_date_and_time(1980, 1, 1, 0, 0, 0).is_ok());
+ assert!(DateTime::from_date_and_time(1979, 1, 1, 0, 0, 0).is_err());
+ assert!(DateTime::from_date_and_time(1980, 0, 1, 0, 0, 0).is_err());
+ assert!(DateTime::from_date_and_time(1980, 1, 0, 0, 0, 0).is_err());
+ assert!(DateTime::from_date_and_time(2108, 12, 31, 0, 0, 0).is_err());
+ assert!(DateTime::from_date_and_time(2107, 13, 31, 0, 0, 0).is_err());
+ assert!(DateTime::from_date_and_time(2107, 12, 32, 0, 0, 0).is_err());
+ }
+
+ #[cfg(feature = "time")]
+ use time::{format_description::well_known::Rfc3339, OffsetDateTime};
+
+ #[cfg(feature = "time")]
+ #[test]
+ fn datetime_from_time_bounds() {
+ use super::DateTime;
+ use time::macros::datetime;
+
+ // 1979-12-31 23:59:59
+ assert!(DateTime::from_time(datetime!(1979-12-31 23:59:59 UTC)).is_err());
+
+ // 1980-01-01 00:00:00
+ assert!(DateTime::from_time(datetime!(1980-01-01 00:00:00 UTC)).is_ok());
+
+ // 2107-12-31 23:59:59
+ assert!(DateTime::from_time(datetime!(2107-12-31 23:59:59 UTC)).is_ok());
+
+ // 2108-01-01 00:00:00
+ assert!(DateTime::from_time(datetime!(2108-01-01 00:00:00 UTC)).is_err());
+ }
+
+ #[test]
+ fn time_conversion() {
+ use super::DateTime;
+ let dt = DateTime::from_msdos(0x4D71, 0x54CF);
+ assert_eq!(dt.year(), 2018);
+ assert_eq!(dt.month(), 11);
+ assert_eq!(dt.day(), 17);
+ assert_eq!(dt.hour(), 10);
+ assert_eq!(dt.minute(), 38);
+ assert_eq!(dt.second(), 30);
+
+ #[cfg(feature = "time")]
+ assert_eq!(
+ dt.to_time().unwrap().format(&Rfc3339).unwrap(),
+ "2018-11-17T10:38:30Z"
+ );
+ }
+
+ #[test]
+ fn time_out_of_bounds() {
+ use super::DateTime;
+ let dt = DateTime::from_msdos(0xFFFF, 0xFFFF);
+ assert_eq!(dt.year(), 2107);
+ assert_eq!(dt.month(), 15);
+ assert_eq!(dt.day(), 31);
+ assert_eq!(dt.hour(), 31);
+ assert_eq!(dt.minute(), 63);
+ assert_eq!(dt.second(), 62);
+
+ #[cfg(feature = "time")]
+ assert!(dt.to_time().is_err());
+
+ let dt = DateTime::from_msdos(0x0000, 0x0000);
+ assert_eq!(dt.year(), 1980);
+ assert_eq!(dt.month(), 0);
+ assert_eq!(dt.day(), 0);
+ assert_eq!(dt.hour(), 0);
+ assert_eq!(dt.minute(), 0);
+ assert_eq!(dt.second(), 0);
+
+ #[cfg(feature = "time")]
+ assert!(dt.to_time().is_err());
+ }
+
+ #[cfg(feature = "time")]
+ #[test]
+ fn time_at_january() {
+ use super::DateTime;
+
+ // 2020-01-01 00:00:00
+ let clock = OffsetDateTime::from_unix_timestamp(1_577_836_800).unwrap();
+
+ assert!(DateTime::from_time(clock).is_ok());
+ }
+}
diff --git a/third_party/rust/zip/src/write.rs b/third_party/rust/zip/src/write.rs
new file mode 100644
index 0000000000..61ce378c0c
--- /dev/null
+++ b/third_party/rust/zip/src/write.rs
@@ -0,0 +1,1471 @@
+//! Types for creating ZIP archives
+
+use crate::compression::CompressionMethod;
+use crate::read::{central_header_to_zip_file, ZipArchive, ZipFile};
+use crate::result::{ZipError, ZipResult};
+use crate::spec;
+use crate::types::{AtomicU64, DateTime, System, ZipFileData, DEFAULT_VERSION};
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use crc32fast::Hasher;
+use std::default::Default;
+use std::io;
+use std::io::prelude::*;
+use std::mem;
+
+#[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+))]
+use flate2::write::DeflateEncoder;
+
+#[cfg(feature = "bzip2")]
+use bzip2::write::BzEncoder;
+
+#[cfg(feature = "time")]
+use time::OffsetDateTime;
+
+#[cfg(feature = "zstd")]
+use zstd::stream::write::Encoder as ZstdEncoder;
+
+enum GenericZipWriter<W: Write + io::Seek> {
+ Closed,
+ Storer(W),
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ Deflater(DeflateEncoder<W>),
+ #[cfg(feature = "bzip2")]
+ Bzip2(BzEncoder<W>),
+ #[cfg(feature = "zstd")]
+ Zstd(ZstdEncoder<'static, W>),
+}
+// Put the struct declaration in a private module to convince rustdoc to display ZipWriter nicely
+pub(crate) mod zip_writer {
+ use super::*;
+ /// ZIP archive generator
+ ///
+ /// Handles the bookkeeping involved in building an archive, and provides an
+ /// API to edit its contents.
+ ///
+ /// ```
+ /// # fn doit() -> zip::result::ZipResult<()>
+ /// # {
+ /// # use zip::ZipWriter;
+ /// use std::io::Write;
+ /// use zip::write::FileOptions;
+ ///
+ /// // We use a buffer here, though you'd normally use a `File`
+ /// let mut buf = [0; 65536];
+ /// let mut zip = zip::ZipWriter::new(std::io::Cursor::new(&mut buf[..]));
+ ///
+ /// let options = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored);
+ /// zip.start_file("hello_world.txt", options)?;
+ /// zip.write(b"Hello, World!")?;
+ ///
+ /// // Apply the changes you've made.
+ /// // Dropping the `ZipWriter` will have the same effect, but may silently fail
+ /// zip.finish()?;
+ ///
+ /// # Ok(())
+ /// # }
+ /// # doit().unwrap();
+ /// ```
+ pub struct ZipWriter<W: Write + io::Seek> {
+ pub(super) inner: GenericZipWriter<W>,
+ pub(super) files: Vec<ZipFileData>,
+ pub(super) stats: ZipWriterStats,
+ pub(super) writing_to_file: bool,
+ pub(super) writing_to_extra_field: bool,
+ pub(super) writing_to_central_extra_field_only: bool,
+ pub(super) writing_raw: bool,
+ pub(super) comment: Vec<u8>,
+ }
+}
+pub use zip_writer::ZipWriter;
+
+#[derive(Default)]
+struct ZipWriterStats {
+ hasher: Hasher,
+ start: u64,
+ bytes_written: u64,
+}
+
+struct ZipRawValues {
+ crc32: u32,
+ compressed_size: u64,
+ uncompressed_size: u64,
+}
+
+/// Metadata for a file to be written
+#[derive(Copy, Clone)]
+pub struct FileOptions {
+ compression_method: CompressionMethod,
+ compression_level: Option<i32>,
+ last_modified_time: DateTime,
+ permissions: Option<u32>,
+ large_file: bool,
+}
+
+impl FileOptions {
+ /// Construct a new FileOptions object
+ pub fn default() -> FileOptions {
+ FileOptions {
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ compression_method: CompressionMethod::Deflated,
+ #[cfg(not(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ )))]
+ compression_method: CompressionMethod::Stored,
+ compression_level: None,
+ #[cfg(feature = "time")]
+ last_modified_time: DateTime::from_time(OffsetDateTime::now_utc()).unwrap_or_default(),
+ #[cfg(not(feature = "time"))]
+ last_modified_time: DateTime::default(),
+ permissions: None,
+ large_file: false,
+ }
+ }
+
+ /// Set the compression method for the new file
+ ///
+ /// The default is `CompressionMethod::Deflated`. If the deflate compression feature is
+ /// disabled, `CompressionMethod::Stored` becomes the default.
+ #[must_use]
+ pub fn compression_method(mut self, method: CompressionMethod) -> FileOptions {
+ self.compression_method = method;
+ self
+ }
+
+ /// Set the compression level for the new file
+ ///
+ /// `None` value specifies default compression level.
+ ///
+ /// Range of values depends on compression method:
+ /// * `Deflated`: 0 - 9. Default is 6
+ /// * `Bzip2`: 0 - 9. Default is 6
+ /// * `Zstd`: -7 - 22, with zero being mapped to default level. Default is 3
+ /// * others: only `None` is allowed
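+ ///
+ /// A small sketch combining method and level (values are illustrative):
+ ///
+ /// ```
+ /// use zip::{write::FileOptions, CompressionMethod};
+ /// let options = FileOptions::default()
+ ///     .compression_method(CompressionMethod::Stored)
+ ///     .compression_level(None); // `Stored` only accepts `None`
+ /// # let _ = options;
+ /// ```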
+ #[must_use]
+ pub fn compression_level(mut self, level: Option<i32>) -> FileOptions {
+ self.compression_level = level;
+ self
+ }
+
+ /// Set the last modified time
+ ///
+ /// The default is the current timestamp if the 'time' feature is enabled, and 1980-01-01
+ /// otherwise
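+ ///
+ /// For example (an arbitrary fixed timestamp):
+ ///
+ /// ```
+ /// use zip::{write::FileOptions, DateTime};
+ /// let mtime = DateTime::from_date_and_time(2020, 7, 1, 12, 0, 0).unwrap();
+ /// let options = FileOptions::default().last_modified_time(mtime);
+ /// # let _ = options;
+ /// ```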
+ #[must_use]
+ pub fn last_modified_time(mut self, mod_time: DateTime) -> FileOptions {
+ self.last_modified_time = mod_time;
+ self
+ }
+
+ /// Set the permissions for the new file.
+ ///
+ /// The format is represented with unix-style permissions.
+ /// The default is `0o644`, which represents `rw-r--r--` for files,
+ /// and `0o755`, which represents `rwxr-xr-x` for directories.
+ ///
+ /// This method only preserves the file permissions bits (via a `& 0o777`) and discards
+ /// higher file mode bits. So it cannot be used to denote an entry as a directory,
+ /// symlink, or other special file type.
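+ ///
+ /// For example (the mode value is illustrative):
+ ///
+ /// ```
+ /// use zip::write::FileOptions;
+ /// // Only the lower 9 permission bits are kept: 0o100755 becomes 0o755.
+ /// let options = FileOptions::default().unix_permissions(0o100755);
+ /// # let _ = options;
+ /// ```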
+ #[must_use]
+ pub fn unix_permissions(mut self, mode: u32) -> FileOptions {
+ self.permissions = Some(mode & 0o777);
+ self
+ }
+
+ /// Set whether the new file may exceed 4 GiB in compressed or uncompressed size, i.e. whether
+ /// to reserve a ZIP64 extra field for it.
+ ///
+ /// If set to `false` and the file exceeds the limit, an I/O error is returned. If set to `true`,
+ /// readers will require ZIP64 support, and 20 bytes are wasted if the file stays below the
+ /// limit. The default is `false`.
+ #[must_use]
+ pub fn large_file(mut self, large: bool) -> FileOptions {
+ self.large_file = large;
+ self
+ }
+}
+
+impl Default for FileOptions {
+ fn default() -> Self {
+ Self::default()
+ }
+}
+
+impl<W: Write + io::Seek> Write for ZipWriter<W> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ if !self.writing_to_file {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "No file has been started",
+ ));
+ }
+ match self.inner.ref_mut() {
+ Some(ref mut w) => {
+ if self.writing_to_extra_field {
+ self.files.last_mut().unwrap().extra_field.write(buf)
+ } else {
+ let write_result = w.write(buf);
+ if let Ok(count) = write_result {
+ self.stats.update(&buf[0..count]);
+ if self.stats.bytes_written > spec::ZIP64_BYTES_THR
+ && !self.files.last_mut().unwrap().large_file
+ {
+ let _inner = mem::replace(&mut self.inner, GenericZipWriter::Closed);
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "Large file option has not been set",
+ ));
+ }
+ }
+ write_result
+ }
+ }
+ None => Err(io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "ZipWriter was already closed",
+ )),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ match self.inner.ref_mut() {
+ Some(ref mut w) => w.flush(),
+ None => Err(io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "ZipWriter was already closed",
+ )),
+ }
+ }
+}
+
+impl ZipWriterStats {
+ fn update(&mut self, buf: &[u8]) {
+ self.hasher.update(buf);
+ self.bytes_written += buf.len() as u64;
+ }
+}
+
+impl<A: Read + Write + io::Seek> ZipWriter<A> {
+ /// Initializes the archive from an existing ZIP archive, making it ready for append.
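+ ///
+ /// A minimal sketch, assuming an existing archive at an illustrative path:
+ ///
+ /// ```no_run
+ /// # fn run() -> zip::result::ZipResult<()> {
+ /// use std::fs::OpenOptions;
+ /// use zip::{write::FileOptions, ZipWriter};
+ /// // The archive must be opened for both reading and writing.
+ /// let file = OpenOptions::new().read(true).write(true).open("existing.zip")?;
+ /// let mut zip = ZipWriter::new_append(file)?;
+ /// zip.start_file("appended.txt", FileOptions::default())?;
+ /// zip.finish()?;
+ /// # Ok(())
+ /// # }
+ /// ```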
+ pub fn new_append(mut readwriter: A) -> ZipResult<ZipWriter<A>> {
+ let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut readwriter)?;
+
+ if footer.disk_number != footer.disk_with_central_directory {
+ return Err(ZipError::UnsupportedArchive(
+ "Support for multi-disk files is not implemented",
+ ));
+ }
+
+ let (archive_offset, directory_start, number_of_files) =
+ ZipArchive::get_directory_counts(&mut readwriter, &footer, cde_start_pos)?;
+
+ if readwriter
+ .seek(io::SeekFrom::Start(directory_start))
+ .is_err()
+ {
+ return Err(ZipError::InvalidArchive(
+ "Could not seek to start of central directory",
+ ));
+ }
+
+ let files = (0..number_of_files)
+ .map(|_| central_header_to_zip_file(&mut readwriter, archive_offset))
+ .collect::<Result<Vec<_>, _>>()?;
+
+ let _ = readwriter.seek(io::SeekFrom::Start(directory_start)); // seek to directory_start so the central directory can be overwritten
+
+ Ok(ZipWriter {
+ inner: GenericZipWriter::Storer(readwriter),
+ files,
+ stats: Default::default(),
+ writing_to_file: false,
+ writing_to_extra_field: false,
+ writing_to_central_extra_field_only: false,
+ comment: footer.zip_file_comment,
+ writing_raw: true, // avoid recomputing the last file's header
+ })
+ }
+}
+
+impl<W: Write + io::Seek> ZipWriter<W> {
+ /// Initializes the archive.
+ ///
+ /// Before writing to this object, the [`ZipWriter::start_file`] function should be called.
+ pub fn new(inner: W) -> ZipWriter<W> {
+ ZipWriter {
+ inner: GenericZipWriter::Storer(inner),
+ files: Vec::new(),
+ stats: Default::default(),
+ writing_to_file: false,
+ writing_to_extra_field: false,
+ writing_to_central_extra_field_only: false,
+ writing_raw: false,
+ comment: Vec::new(),
+ }
+ }
+
+ /// Set ZIP archive comment.
+ pub fn set_comment<S>(&mut self, comment: S)
+ where
+ S: Into<String>,
+ {
+ self.set_raw_comment(comment.into().into())
+ }
+
+ /// Set ZIP archive comment.
+ ///
+ /// This sets the raw bytes of the comment. The comment
+ /// is typically expected to be encoded in UTF-8
+ pub fn set_raw_comment(&mut self, comment: Vec<u8>) {
+ self.comment = comment;
+ }
+
+ /// Start a new file with the requested options.
+ fn start_entry<S>(
+ &mut self,
+ name: S,
+ options: FileOptions,
+ raw_values: Option<ZipRawValues>,
+ ) -> ZipResult<()>
+ where
+ S: Into<String>,
+ {
+ self.finish_file()?;
+
+ let raw_values = raw_values.unwrap_or(ZipRawValues {
+ crc32: 0,
+ compressed_size: 0,
+ uncompressed_size: 0,
+ });
+
+ {
+ let writer = self.inner.get_plain();
+ let header_start = writer.stream_position()?;
+
+ let permissions = options.permissions.unwrap_or(0o100644);
+ let mut file = ZipFileData {
+ system: System::Unix,
+ version_made_by: DEFAULT_VERSION,
+ encrypted: false,
+ using_data_descriptor: false,
+ compression_method: options.compression_method,
+ compression_level: options.compression_level,
+ last_modified_time: options.last_modified_time,
+ crc32: raw_values.crc32,
+ compressed_size: raw_values.compressed_size,
+ uncompressed_size: raw_values.uncompressed_size,
+ file_name: name.into(),
+ file_name_raw: Vec::new(), // Never used for saving
+ extra_field: Vec::new(),
+ file_comment: String::new(),
+ header_start,
+ data_start: AtomicU64::new(0),
+ central_header_start: 0,
+ external_attributes: permissions << 16,
+ large_file: options.large_file,
+ aes_mode: None,
+ };
+ write_local_file_header(writer, &file)?;
+
+ let header_end = writer.stream_position()?;
+ self.stats.start = header_end;
+ *file.data_start.get_mut() = header_end;
+
+ self.stats.bytes_written = 0;
+ self.stats.hasher = Hasher::new();
+
+ self.files.push(file);
+ }
+
+ Ok(())
+ }
+
+ fn finish_file(&mut self) -> ZipResult<()> {
+ if self.writing_to_extra_field {
+ // Implicitly calling [`ZipWriter::end_extra_data`] for empty files.
+ self.end_extra_data()?;
+ }
+ self.inner.switch_to(CompressionMethod::Stored, None)?;
+ let writer = self.inner.get_plain();
+
+ if !self.writing_raw {
+ let file = match self.files.last_mut() {
+ None => return Ok(()),
+ Some(f) => f,
+ };
+ file.crc32 = self.stats.hasher.clone().finalize();
+ file.uncompressed_size = self.stats.bytes_written;
+
+ let file_end = writer.stream_position()?;
+ file.compressed_size = file_end - self.stats.start;
+
+ update_local_file_header(writer, file)?;
+ writer.seek(io::SeekFrom::Start(file_end))?;
+ }
+
+ self.writing_to_file = false;
+ self.writing_raw = false;
+ Ok(())
+ }
+
+ /// Create a file in the archive and start writing its contents.
+ ///
+ /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`]
+ pub fn start_file<S>(&mut self, name: S, mut options: FileOptions) -> ZipResult<()>
+ where
+ S: Into<String>,
+ {
+ if options.permissions.is_none() {
+ options.permissions = Some(0o644);
+ }
+ *options.permissions.as_mut().unwrap() |= 0o100000;
+ self.start_entry(name, options, None)?;
+ self.inner
+ .switch_to(options.compression_method, options.compression_level)?;
+ self.writing_to_file = true;
+ Ok(())
+ }
+
+ /// Starts a file, taking a Path as argument.
+ ///
+ /// This function ensures that the '/' path separator is used. It also ignores all non-`Normal`
+ /// components, such as a leading '/', '..' and '.'.
+ #[deprecated(
+ since = "0.5.7",
+ note = "by stripping `..`s from the path, the meaning of paths can change. Use `start_file` instead."
+ )]
+ pub fn start_file_from_path(
+ &mut self,
+ path: &std::path::Path,
+ options: FileOptions,
+ ) -> ZipResult<()> {
+ self.start_file(path_to_string(path), options)
+ }
+
+ /// Create an aligned file in the archive and start writing its contents.
+ ///
+ /// Returns the number of padding bytes required to align the file.
+ ///
+ /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`]
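+ ///
+ /// A small sketch writing one stored entry aligned to 4096 bytes (the entry name
+ /// and alignment are illustrative):
+ ///
+ /// ```
+ /// # fn run() -> zip::result::ZipResult<()> {
+ /// use std::io::{Cursor, Write};
+ /// use zip::{write::FileOptions, CompressionMethod, ZipWriter};
+ /// let mut zip = ZipWriter::new(Cursor::new(Vec::new()));
+ /// let options = FileOptions::default().compression_method(CompressionMethod::Stored);
+ /// let padding = zip.start_file_aligned("aligned.bin", options, 4096)?;
+ /// println!("padding bytes: {}", padding);
+ /// zip.write_all(&[0u8; 16])?;
+ /// zip.finish()?;
+ /// # Ok(())
+ /// # }
+ /// # run().unwrap();
+ /// ```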
+ pub fn start_file_aligned<S>(
+ &mut self,
+ name: S,
+ options: FileOptions,
+ align: u16,
+ ) -> Result<u64, ZipError>
+ where
+ S: Into<String>,
+ {
+ let data_start = self.start_file_with_extra_data(name, options)?;
+ let align = align as u64;
+ if align > 1 && data_start % align != 0 {
+ let pad_length = (align - (data_start + 4) % align) % align;
+ let pad = vec![0; pad_length as usize];
+ self.write_all(b"za").map_err(ZipError::from)?; // 0x617a
+ self.write_u16::<LittleEndian>(pad.len() as u16)
+ .map_err(ZipError::from)?;
+ self.write_all(&pad).map_err(ZipError::from)?;
+ assert_eq!(self.end_local_start_central_extra_data()? % align, 0);
+ }
+ let extra_data_end = self.end_extra_data()?;
+ Ok(extra_data_end - data_start)
+ }
+
+ /// Create a file in the archive and start writing its extra data first.
+ ///
+ /// Finish writing extra data and start writing file data with [`ZipWriter::end_extra_data`].
+ /// Optionally, distinguish local from central extra data with
+ /// [`ZipWriter::end_local_start_central_extra_data`].
+ ///
+ /// Returns the preliminary starting offset of the file data, before any extra data is written.
+ /// This allows the caller to align the file data by computing a pad length that is then
+ /// prepended as part of the extra data.
+ ///
+ /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`]
+ ///
+ /// ```
+ /// use byteorder::{LittleEndian, WriteBytesExt};
+ /// use zip::{ZipArchive, ZipWriter, result::ZipResult};
+ /// use zip::{write::FileOptions, CompressionMethod};
+ /// use std::io::{Write, Cursor};
+ ///
+ /// # fn main() -> ZipResult<()> {
+ /// let mut archive = Cursor::new(Vec::new());
+ ///
+ /// {
+ /// let mut zip = ZipWriter::new(&mut archive);
+ /// let options = FileOptions::default()
+ /// .compression_method(CompressionMethod::Stored);
+ ///
+ /// zip.start_file_with_extra_data("identical_extra_data.txt", options)?;
+ /// let extra_data = b"local and central extra data";
+ /// zip.write_u16::<LittleEndian>(0xbeef)?;
+ /// zip.write_u16::<LittleEndian>(extra_data.len() as u16)?;
+ /// zip.write_all(extra_data)?;
+ /// zip.end_extra_data()?;
+ /// zip.write_all(b"file data")?;
+ ///
+ /// let data_start = zip.start_file_with_extra_data("different_extra_data.txt", options)?;
+ /// let extra_data = b"local extra data";
+ /// zip.write_u16::<LittleEndian>(0xbeef)?;
+ /// zip.write_u16::<LittleEndian>(extra_data.len() as u16)?;
+ /// zip.write_all(extra_data)?;
+ /// let data_start = data_start as usize + 4 + extra_data.len() + 4;
+ /// let align = 64;
+ /// let pad_length = (align - data_start % align) % align;
+ /// assert_eq!(pad_length, 19);
+ /// zip.write_u16::<LittleEndian>(0xdead)?;
+ /// zip.write_u16::<LittleEndian>(pad_length as u16)?;
+ /// zip.write_all(&vec![0; pad_length])?;
+ /// let data_start = zip.end_local_start_central_extra_data()?;
+ /// assert_eq!(data_start as usize % align, 0);
+ /// let extra_data = b"central extra data";
+ /// zip.write_u16::<LittleEndian>(0xbeef)?;
+ /// zip.write_u16::<LittleEndian>(extra_data.len() as u16)?;
+ /// zip.write_all(extra_data)?;
+ /// zip.end_extra_data()?;
+ /// zip.write_all(b"file data")?;
+ ///
+ /// zip.finish()?;
+ /// }
+ ///
+ /// let mut zip = ZipArchive::new(archive)?;
+ /// assert_eq!(&zip.by_index(0)?.extra_data()[4..], b"local and central extra data");
+ /// assert_eq!(&zip.by_index(1)?.extra_data()[4..], b"central extra data");
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn start_file_with_extra_data<S>(
+ &mut self,
+ name: S,
+ mut options: FileOptions,
+ ) -> ZipResult<u64>
+ where
+ S: Into<String>,
+ {
+ if options.permissions.is_none() {
+ options.permissions = Some(0o644);
+ }
+ *options.permissions.as_mut().unwrap() |= 0o100000;
+ self.start_entry(name, options, None)?;
+ self.writing_to_file = true;
+ self.writing_to_extra_field = true;
+ Ok(self.files.last().unwrap().data_start.load())
+ }
+
+ /// End local and start central extra data. Requires [`ZipWriter::start_file_with_extra_data`].
+ ///
+ /// Returns the final starting offset of the file data.
+ pub fn end_local_start_central_extra_data(&mut self) -> ZipResult<u64> {
+ let data_start = self.end_extra_data()?;
+ self.files.last_mut().unwrap().extra_field.clear();
+ self.writing_to_extra_field = true;
+ self.writing_to_central_extra_field_only = true;
+ Ok(data_start)
+ }
+
+ /// End extra data and start file data. Requires [`ZipWriter::start_file_with_extra_data`].
+ ///
+ /// Returns the final starting offset of the file data.
+ pub fn end_extra_data(&mut self) -> ZipResult<u64> {
+ // Requires `start_file_with_extra_data()`; this ensures `self.files` has a last entry.
+ if !self.writing_to_extra_field {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::Other,
+ "Not writing to extra field",
+ )));
+ }
+ let file = self.files.last_mut().unwrap();
+
+ validate_extra_data(file)?;
+
+ let data_start = file.data_start.get_mut();
+
+ if !self.writing_to_central_extra_field_only {
+ let writer = self.inner.get_plain();
+
+ // Append extra data to local file header and keep it for central file header.
+ writer.write_all(&file.extra_field)?;
+
+ // Update final `data_start`.
+ let header_end = *data_start + file.extra_field.len() as u64;
+ self.stats.start = header_end;
+ *data_start = header_end;
+
+ // Update extra field length in local file header.
+ let extra_field_length =
+ if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16;
+ writer.seek(io::SeekFrom::Start(file.header_start + 28))?;
+ writer.write_u16::<LittleEndian>(extra_field_length)?;
+ writer.seek(io::SeekFrom::Start(header_end))?;
+
+ self.inner
+ .switch_to(file.compression_method, file.compression_level)?;
+ }
+
+ self.writing_to_extra_field = false;
+ self.writing_to_central_extra_field_only = false;
+ Ok(*data_start)
+ }
+
+ /// Add a new file using the already compressed data from a ZIP file being read, while renaming it.
+ /// This allows faster copies of the `ZipFile` since there is no need to decompress and compress
+ /// it again. Any `ZipFile` metadata is copied and not checked, for example the file CRC.
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::{Read, Seek, Write};
+ /// use zip::{ZipArchive, ZipWriter};
+ ///
+ /// fn copy_rename<R, W>(
+ /// src: &mut ZipArchive<R>,
+ /// dst: &mut ZipWriter<W>,
+ /// ) -> zip::result::ZipResult<()>
+ /// where
+ /// R: Read + Seek,
+ /// W: Write + Seek,
+ /// {
+ /// // Retrieve file entry by name
+ /// let file = src.by_name("src_file.txt")?;
+ ///
+ /// // Copy and rename the previously obtained file entry to the destination zip archive
+ /// dst.raw_copy_file_rename(file, "new_name.txt")?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn raw_copy_file_rename<S>(&mut self, mut file: ZipFile, name: S) -> ZipResult<()>
+ where
+ S: Into<String>,
+ {
+ let mut options = FileOptions::default()
+ .large_file(file.compressed_size().max(file.size()) > spec::ZIP64_BYTES_THR)
+ .last_modified_time(file.last_modified())
+ .compression_method(file.compression());
+ if let Some(perms) = file.unix_mode() {
+ options = options.unix_permissions(perms);
+ }
+
+ let raw_values = ZipRawValues {
+ crc32: file.crc32(),
+ compressed_size: file.compressed_size(),
+ uncompressed_size: file.size(),
+ };
+
+ self.start_entry(name, options, Some(raw_values))?;
+ self.writing_to_file = true;
+ self.writing_raw = true;
+
+ io::copy(file.get_raw_reader(), self)?;
+
+ Ok(())
+ }
+
+ /// Add a new file using the already compressed data from a ZIP file being read. This allows faster
+ /// copies of the `ZipFile` since there is no need to decompress and compress it again. Any `ZipFile`
+ /// metadata is copied and not checked, for example the file CRC.
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::{Read, Seek, Write};
+ /// use zip::{ZipArchive, ZipWriter};
+ ///
+ /// fn copy<R, W>(src: &mut ZipArchive<R>, dst: &mut ZipWriter<W>) -> zip::result::ZipResult<()>
+ /// where
+ /// R: Read + Seek,
+ /// W: Write + Seek,
+ /// {
+ /// // Retrieve file entry by name
+ /// let file = src.by_name("src_file.txt")?;
+ ///
+ /// // Copy the previously obtained file entry to the destination zip archive
+ /// dst.raw_copy_file(file)?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn raw_copy_file(&mut self, file: ZipFile) -> ZipResult<()> {
+ let name = file.name().to_owned();
+ self.raw_copy_file_rename(file, name)
+ }
+
+ /// Add a directory entry.
+ ///
+ /// You can't write data to the file afterwards.
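+ ///
+ /// A minimal sketch, using illustrative names: add a directory entry, then create a file
+ /// under that directory path.
+ ///
+ /// ```
+ /// use zip::{ZipWriter, write::FileOptions};
+ /// use std::io::{Cursor, Write};
+ ///
+ /// # fn main() -> zip::result::ZipResult<()> {
+ /// let mut zip = ZipWriter::new(Cursor::new(Vec::new()));
+ /// // A trailing '/' is appended automatically if it is missing.
+ /// zip.add_directory("images", FileOptions::default())?;
+ /// // Files inside the directory are added with their full path.
+ /// zip.start_file("images/readme.txt", FileOptions::default())?;
+ /// zip.write_all(b"placeholder")?;
+ /// zip.finish()?;
+ /// # Ok(())
+ /// # }
+ /// ```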
+ pub fn add_directory<S>(&mut self, name: S, mut options: FileOptions) -> ZipResult<()>
+ where
+ S: Into<String>,
+ {
+ if options.permissions.is_none() {
+ options.permissions = Some(0o755);
+ }
+ *options.permissions.as_mut().unwrap() |= 0o40000;
+ options.compression_method = CompressionMethod::Stored;
+
+ let name_as_string = name.into();
+ // Append a slash to the filename if it does not end with it.
+ let name_with_slash = match name_as_string.chars().last() {
+ Some('/') | Some('\\') => name_as_string,
+ _ => name_as_string + "/",
+ };
+
+ self.start_entry(name_with_slash, options, None)?;
+ self.writing_to_file = false;
+ Ok(())
+ }
+
+ /// Add a directory entry, taking a Path as argument.
+ ///
+ /// This function ensures that the '/' path separator is used. It also ignores all non-`Normal`
+ /// path components, such as a leading '/', '..', and '.'.
+ #[deprecated(
+ since = "0.5.7",
+ note = "by stripping `..`s from the path, the meaning of paths can change. Use `add_directory` instead."
+ )]
+ pub fn add_directory_from_path(
+ &mut self,
+ path: &std::path::Path,
+ options: FileOptions,
+ ) -> ZipResult<()> {
+ self.add_directory(path_to_string(path), options)
+ }
+
+ /// Finish the last file and write all other zip structures.
+ ///
+ /// This will return the writer, but one should normally not append any data to the end of the file.
+ /// Note that the zipfile will also be finished on drop.
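+ ///
+ /// A minimal sketch of retrieving the finished archive bytes from an in-memory writer
+ /// (the buffer type and file contents are illustrative):
+ ///
+ /// ```
+ /// use zip::{ZipWriter, write::FileOptions};
+ /// use std::io::{Cursor, Write};
+ ///
+ /// # fn main() -> zip::result::ZipResult<()> {
+ /// let mut zip = ZipWriter::new(Cursor::new(Vec::new()));
+ /// zip.start_file("hello.txt", FileOptions::default())?;
+ /// zip.write_all(b"hello world")?;
+ /// let cursor = zip.finish()?;
+ /// let bytes = cursor.into_inner();
+ /// assert!(!bytes.is_empty());
+ /// # Ok(())
+ /// # }
+ /// ```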
+ pub fn finish(&mut self) -> ZipResult<W> {
+ self.finalize()?;
+ let inner = mem::replace(&mut self.inner, GenericZipWriter::Closed);
+ Ok(inner.unwrap())
+ }
+
+ /// Add a symlink entry.
+ ///
+ /// The zip archive will contain an entry for path `name` which is a symlink to `target`.
+ ///
+ /// No validation or normalization of the paths is performed. For best results,
+ /// callers should normalize `\` to `/` and ensure symlinks are relative to other
+ /// paths within the zip archive.
+ ///
+ /// WARNING: not all zip implementations preserve symlinks on extract. Some zip
+ /// implementations may materialize a symlink as a regular file, possibly with the
+ /// content incorrectly set to the symlink target. For maximum portability, consider
+ /// storing a regular file instead.
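+ ///
+ /// A minimal sketch, with illustrative entry and target names:
+ ///
+ /// ```
+ /// use zip::{ZipWriter, write::FileOptions};
+ /// use std::io::Cursor;
+ ///
+ /// # fn main() -> zip::result::ZipResult<()> {
+ /// let mut zip = ZipWriter::new(Cursor::new(Vec::new()));
+ /// // The entry "docs/current" is stored as a symlink pointing at "docs/v1.2".
+ /// zip.add_symlink("docs/current", "docs/v1.2", FileOptions::default())?;
+ /// zip.finish()?;
+ /// # Ok(())
+ /// # }
+ /// ```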
+ pub fn add_symlink<N, T>(
+ &mut self,
+ name: N,
+ target: T,
+ mut options: FileOptions,
+ ) -> ZipResult<()>
+ where
+ N: Into<String>,
+ T: Into<String>,
+ {
+ if options.permissions.is_none() {
+ options.permissions = Some(0o777);
+ }
+ *options.permissions.as_mut().unwrap() |= 0o120000;
+ // The symlink target is stored as the file's content, and compressing the short target
+ // path would likely waste space, so it is always stored uncompressed.
+ options.compression_method = CompressionMethod::Stored;
+
+ self.start_entry(name, options, None)?;
+ self.writing_to_file = true;
+ self.write_all(target.into().as_bytes())?;
+ self.writing_to_file = false;
+
+ Ok(())
+ }
+
+ fn finalize(&mut self) -> ZipResult<()> {
+ self.finish_file()?;
+
+ {
+ let writer = self.inner.get_plain();
+
+ let central_start = writer.stream_position()?;
+ for file in self.files.iter() {
+ write_central_directory_header(writer, file)?;
+ }
+ let central_size = writer.stream_position()? - central_start;
+
+ if self.files.len() > spec::ZIP64_ENTRY_THR
+ || central_size.max(central_start) > spec::ZIP64_BYTES_THR
+ {
+ let zip64_footer = spec::Zip64CentralDirectoryEnd {
+ version_made_by: DEFAULT_VERSION as u16,
+ version_needed_to_extract: DEFAULT_VERSION as u16,
+ disk_number: 0,
+ disk_with_central_directory: 0,
+ number_of_files_on_this_disk: self.files.len() as u64,
+ number_of_files: self.files.len() as u64,
+ central_directory_size: central_size,
+ central_directory_offset: central_start,
+ };
+
+ zip64_footer.write(writer)?;
+
+ let zip64_footer = spec::Zip64CentralDirectoryEndLocator {
+ disk_with_central_directory: 0,
+ end_of_central_directory_offset: central_start + central_size,
+ number_of_disks: 1,
+ };
+
+ zip64_footer.write(writer)?;
+ }
+
+ let number_of_files = self.files.len().min(spec::ZIP64_ENTRY_THR) as u16;
+ let footer = spec::CentralDirectoryEnd {
+ disk_number: 0,
+ disk_with_central_directory: 0,
+ zip_file_comment: self.comment.clone(),
+ number_of_files_on_this_disk: number_of_files,
+ number_of_files,
+ central_directory_size: central_size.min(spec::ZIP64_BYTES_THR) as u32,
+ central_directory_offset: central_start.min(spec::ZIP64_BYTES_THR) as u32,
+ };
+
+ footer.write(writer)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl<W: Write + io::Seek> Drop for ZipWriter<W> {
+ fn drop(&mut self) {
+ if !self.inner.is_closed() {
+ if let Err(e) = self.finalize() {
+ let _ = write!(io::stderr(), "ZipWriter drop failed: {:?}", e);
+ }
+ }
+ }
+}
+
+impl<W: Write + io::Seek> GenericZipWriter<W> {
+ fn switch_to(
+ &mut self,
+ compression: CompressionMethod,
+ compression_level: Option<i32>,
+ ) -> ZipResult<()> {
+ match self.current_compression() {
+ Some(method) if method == compression => return Ok(()),
+ None => {
+ return Err(io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "ZipWriter was already closed",
+ )
+ .into())
+ }
+ _ => {}
+ }
+
+ let bare = match mem::replace(self, GenericZipWriter::Closed) {
+ GenericZipWriter::Storer(w) => w,
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ GenericZipWriter::Deflater(w) => w.finish()?,
+ #[cfg(feature = "bzip2")]
+ GenericZipWriter::Bzip2(w) => w.finish()?,
+ #[cfg(feature = "zstd")]
+ GenericZipWriter::Zstd(w) => w.finish()?,
+ GenericZipWriter::Closed => {
+ return Err(io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "ZipWriter was already closed",
+ )
+ .into())
+ }
+ };
+
+ *self = {
+ #[allow(deprecated)]
+ match compression {
+ CompressionMethod::Stored => {
+ if compression_level.is_some() {
+ return Err(ZipError::UnsupportedArchive(
+ "Unsupported compression level",
+ ));
+ }
+
+ GenericZipWriter::Storer(bare)
+ }
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ CompressionMethod::Deflated => GenericZipWriter::Deflater(DeflateEncoder::new(
+ bare,
+ flate2::Compression::new(
+ clamp_opt(
+ compression_level
+ .unwrap_or(flate2::Compression::default().level() as i32),
+ deflate_compression_level_range(),
+ )
+ .ok_or(ZipError::UnsupportedArchive(
+ "Unsupported compression level",
+ ))? as u32,
+ ),
+ )),
+ #[cfg(feature = "bzip2")]
+ CompressionMethod::Bzip2 => GenericZipWriter::Bzip2(BzEncoder::new(
+ bare,
+ bzip2::Compression::new(
+ clamp_opt(
+ compression_level
+ .unwrap_or(bzip2::Compression::default().level() as i32),
+ bzip2_compression_level_range(),
+ )
+ .ok_or(ZipError::UnsupportedArchive(
+ "Unsupported compression level",
+ ))? as u32,
+ ),
+ )),
+ CompressionMethod::AES => {
+ return Err(ZipError::UnsupportedArchive(
+ "AES compression is not supported for writing",
+ ))
+ }
+ #[cfg(feature = "zstd")]
+ CompressionMethod::Zstd => GenericZipWriter::Zstd(
+ ZstdEncoder::new(
+ bare,
+ clamp_opt(
+ compression_level.unwrap_or(zstd::DEFAULT_COMPRESSION_LEVEL),
+ zstd::compression_level_range(),
+ )
+ .ok_or(ZipError::UnsupportedArchive(
+ "Unsupported compression level",
+ ))?,
+ )
+ .unwrap(),
+ ),
+ CompressionMethod::Unsupported(..) => {
+ return Err(ZipError::UnsupportedArchive("Unsupported compression"))
+ }
+ }
+ };
+
+ Ok(())
+ }
+
+ fn ref_mut(&mut self) -> Option<&mut dyn Write> {
+ match *self {
+ GenericZipWriter::Storer(ref mut w) => Some(w as &mut dyn Write),
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ GenericZipWriter::Deflater(ref mut w) => Some(w as &mut dyn Write),
+ #[cfg(feature = "bzip2")]
+ GenericZipWriter::Bzip2(ref mut w) => Some(w as &mut dyn Write),
+ #[cfg(feature = "zstd")]
+ GenericZipWriter::Zstd(ref mut w) => Some(w as &mut dyn Write),
+ GenericZipWriter::Closed => None,
+ }
+ }
+
+ fn is_closed(&self) -> bool {
+ matches!(*self, GenericZipWriter::Closed)
+ }
+
+ fn get_plain(&mut self) -> &mut W {
+ match *self {
+ GenericZipWriter::Storer(ref mut w) => w,
+ _ => panic!("Should have switched to stored beforehand"),
+ }
+ }
+
+ fn current_compression(&self) -> Option<CompressionMethod> {
+ match *self {
+ GenericZipWriter::Storer(..) => Some(CompressionMethod::Stored),
+ #[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+ ))]
+ GenericZipWriter::Deflater(..) => Some(CompressionMethod::Deflated),
+ #[cfg(feature = "bzip2")]
+ GenericZipWriter::Bzip2(..) => Some(CompressionMethod::Bzip2),
+ #[cfg(feature = "zstd")]
+ GenericZipWriter::Zstd(..) => Some(CompressionMethod::Zstd),
+ GenericZipWriter::Closed => None,
+ }
+ }
+
+ fn unwrap(self) -> W {
+ match self {
+ GenericZipWriter::Storer(w) => w,
+ _ => panic!("Should have switched to stored beforehand"),
+ }
+ }
+}
+
+#[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib"
+))]
+fn deflate_compression_level_range() -> std::ops::RangeInclusive<i32> {
+ let min = flate2::Compression::none().level() as i32;
+ let max = flate2::Compression::best().level() as i32;
+ min..=max
+}
+
+#[cfg(feature = "bzip2")]
+fn bzip2_compression_level_range() -> std::ops::RangeInclusive<i32> {
+ let min = bzip2::Compression::none().level() as i32;
+ let max = bzip2::Compression::best().level() as i32;
+ min..=max
+}
+
+#[cfg(any(
+ feature = "deflate",
+ feature = "deflate-miniz",
+ feature = "deflate-zlib",
+ feature = "bzip2",
+ feature = "zstd"
+))]
+fn clamp_opt<T: Ord + Copy>(value: T, range: std::ops::RangeInclusive<T>) -> Option<T> {
+ if range.contains(&value) {
+ Some(value)
+ } else {
+ None
+ }
+}
+
+fn write_local_file_header<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
+ // local file header signature
+ writer.write_u32::<LittleEndian>(spec::LOCAL_FILE_HEADER_SIGNATURE)?;
+ // version needed to extract
+ writer.write_u16::<LittleEndian>(file.version_needed())?;
+ // general purpose bit flag
+ let flag = if !file.file_name.is_ascii() {
+ 1u16 << 11
+ } else {
+ 0
+ };
+ writer.write_u16::<LittleEndian>(flag)?;
+ // Compression method
+ #[allow(deprecated)]
+ writer.write_u16::<LittleEndian>(file.compression_method.to_u16())?;
+ // last mod file time and last mod file date
+ writer.write_u16::<LittleEndian>(file.last_modified_time.timepart())?;
+ writer.write_u16::<LittleEndian>(file.last_modified_time.datepart())?;
+ // crc-32
+ writer.write_u32::<LittleEndian>(file.crc32)?;
+ // compressed size and uncompressed size
+ if file.large_file {
+ writer.write_u32::<LittleEndian>(spec::ZIP64_BYTES_THR as u32)?;
+ writer.write_u32::<LittleEndian>(spec::ZIP64_BYTES_THR as u32)?;
+ } else {
+ writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
+ writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
+ }
+ // file name length
+ writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
+ // extra field length
+ let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16;
+ writer.write_u16::<LittleEndian>(extra_field_length)?;
+ // file name
+ writer.write_all(file.file_name.as_bytes())?;
+ // zip64 extra field
+ if file.large_file {
+ write_local_zip64_extra_field(writer, file)?;
+ }
+
+ Ok(())
+}
+
+fn update_local_file_header<T: Write + io::Seek>(
+ writer: &mut T,
+ file: &ZipFileData,
+) -> ZipResult<()> {
+ const CRC32_OFFSET: u64 = 14;
+ writer.seek(io::SeekFrom::Start(file.header_start + CRC32_OFFSET))?;
+ writer.write_u32::<LittleEndian>(file.crc32)?;
+ if file.large_file {
+ update_local_zip64_extra_field(writer, file)?;
+ } else {
+ // Also check the compressed size, since it can be slightly larger than the uncompressed size.
+ if file.compressed_size > spec::ZIP64_BYTES_THR {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::Other,
+ "Large file option has not been set",
+ )));
+ }
+ writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
+ // uncompressed size is already checked on write to catch it as soon as possible
+ writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
+ }
+ Ok(())
+}
+
+fn write_central_directory_header<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
+ // buffer zip64 extra field to determine its variable length
+ let mut zip64_extra_field = [0; 28];
+ let zip64_extra_field_length =
+ write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?;
+
+ // central file header signature
+ writer.write_u32::<LittleEndian>(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?;
+ // version made by
+ let version_made_by = (file.system as u16) << 8 | (file.version_made_by as u16);
+ writer.write_u16::<LittleEndian>(version_made_by)?;
+ // version needed to extract
+ writer.write_u16::<LittleEndian>(file.version_needed())?;
+ // general purpose bit flag
+ let flag = if !file.file_name.is_ascii() {
+ 1u16 << 11
+ } else {
+ 0
+ };
+ writer.write_u16::<LittleEndian>(flag)?;
+ // compression method
+ #[allow(deprecated)]
+ writer.write_u16::<LittleEndian>(file.compression_method.to_u16())?;
+ // last mod file time + date
+ writer.write_u16::<LittleEndian>(file.last_modified_time.timepart())?;
+ writer.write_u16::<LittleEndian>(file.last_modified_time.datepart())?;
+ // crc-32
+ writer.write_u32::<LittleEndian>(file.crc32)?;
+ // compressed size
+ writer.write_u32::<LittleEndian>(file.compressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
+ // uncompressed size
+ writer.write_u32::<LittleEndian>(file.uncompressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
+ // file name length
+ writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
+ // extra field length
+ writer.write_u16::<LittleEndian>(zip64_extra_field_length + file.extra_field.len() as u16)?;
+ // file comment length
+ writer.write_u16::<LittleEndian>(0)?;
+ // disk number start
+ writer.write_u16::<LittleEndian>(0)?;
+ // internal file attributes
+ writer.write_u16::<LittleEndian>(0)?;
+ // external file attributes
+ writer.write_u32::<LittleEndian>(file.external_attributes)?;
+ // relative offset of local header
+ writer.write_u32::<LittleEndian>(file.header_start.min(spec::ZIP64_BYTES_THR) as u32)?;
+ // file name
+ writer.write_all(file.file_name.as_bytes())?;
+ // zip64 extra field
+ writer.write_all(&zip64_extra_field[..zip64_extra_field_length as usize])?;
+ // extra field
+ writer.write_all(&file.extra_field)?;
+ // file comment
+ // <none>
+
+ Ok(())
+}
+
+fn validate_extra_data(file: &ZipFileData) -> ZipResult<()> {
+ let mut data = file.extra_field.as_slice();
+
+ if data.len() > spec::ZIP64_ENTRY_THR {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "Extra data exceeds extra field",
+ )));
+ }
+
+ while !data.is_empty() {
+ let left = data.len();
+ if left < 4 {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::Other,
+ "Incomplete extra data header",
+ )));
+ }
+ let kind = data.read_u16::<LittleEndian>()?;
+ let size = data.read_u16::<LittleEndian>()? as usize;
+ let left = left - 4;
+
+ if kind == 0x0001 {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::Other,
+ "No custom ZIP64 extra data allowed",
+ )));
+ }
+
+ #[cfg(not(feature = "unreserved"))]
+ {
+ if kind <= 31 || EXTRA_FIELD_MAPPING.iter().any(|&mapped| mapped == kind) {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::Other,
+ format!(
+ "Extra data header ID {:#06} requires crate feature \"unreserved\"",
+ kind,
+ ),
+ )));
+ }
+ }
+
+ if size > left {
+ return Err(ZipError::Io(io::Error::new(
+ io::ErrorKind::Other,
+ "Extra data size exceeds extra field",
+ )));
+ }
+
+ data = &data[size..];
+ }
+
+ Ok(())
+}
+
+fn write_local_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
+ // This entry in the Local header MUST include BOTH original
+ // and compressed file size fields.
+ writer.write_u16::<LittleEndian>(0x0001)?;
+ writer.write_u16::<LittleEndian>(16)?;
+ writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
+ writer.write_u64::<LittleEndian>(file.compressed_size)?;
+ // Excluded fields:
+ // u32: disk start number
+ Ok(())
+}
+
+fn update_local_zip64_extra_field<T: Write + io::Seek>(
+ writer: &mut T,
+ file: &ZipFileData,
+) -> ZipResult<()> {
+ let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64;
+ writer.seek(io::SeekFrom::Start(zip64_extra_field + 4))?;
+ writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
+ writer.write_u64::<LittleEndian>(file.compressed_size)?;
+ // Excluded fields:
+ // u32: disk start number
+ Ok(())
+}
+
+fn write_central_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<u16> {
+ // The order of the fields in the zip64 extended
+ // information record is fixed, but the fields MUST
+ // only appear if the corresponding Local or Central
+ // directory record field is set to 0xFFFF or 0xFFFFFFFF.
+ let mut size = 0;
+ let uncompressed_size = file.uncompressed_size > spec::ZIP64_BYTES_THR;
+ let compressed_size = file.compressed_size > spec::ZIP64_BYTES_THR;
+ let header_start = file.header_start > spec::ZIP64_BYTES_THR;
+ if uncompressed_size {
+ size += 8;
+ }
+ if compressed_size {
+ size += 8;
+ }
+ if header_start {
+ size += 8;
+ }
+ if size > 0 {
+ writer.write_u16::<LittleEndian>(0x0001)?;
+ writer.write_u16::<LittleEndian>(size)?;
+ size += 4;
+
+ if uncompressed_size {
+ writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
+ }
+ if compressed_size {
+ writer.write_u64::<LittleEndian>(file.compressed_size)?;
+ }
+ if header_start {
+ writer.write_u64::<LittleEndian>(file.header_start)?;
+ }
+ // Excluded fields:
+ // u32: disk start number
+ }
+ Ok(size)
+}
+
+fn path_to_string(path: &std::path::Path) -> String {
+ let mut path_str = String::new();
+ for component in path.components() {
+ if let std::path::Component::Normal(os_str) = component {
+ if !path_str.is_empty() {
+ path_str.push('/');
+ }
+ path_str.push_str(&*os_str.to_string_lossy());
+ }
+ }
+ path_str
+}
+
+#[cfg(test)]
+mod test {
+ use super::{FileOptions, ZipWriter};
+ use crate::compression::CompressionMethod;
+ use crate::types::DateTime;
+ use std::io;
+ use std::io::Write;
+
+ #[test]
+ fn write_empty_zip() {
+ let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
+ writer.set_comment("ZIP");
+ let result = writer.finish().unwrap();
+ assert_eq!(result.get_ref().len(), 25);
+ assert_eq!(
+ *result.get_ref(),
+ [80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 90, 73, 80]
+ );
+ }
+
+ #[test]
+ fn unix_permissions_bitmask() {
+ // unix_permissions() throws away upper bits.
+ let options = FileOptions::default().unix_permissions(0o120777);
+ assert_eq!(options.permissions, Some(0o777));
+ }
+
+ #[test]
+ fn write_zip_dir() {
+ let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
+ writer
+ .add_directory(
+ "test",
+ FileOptions::default().last_modified_time(
+ DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(),
+ ),
+ )
+ .unwrap();
+ assert!(writer
+ .write(b"writing to a directory is not allowed, and will not write any data")
+ .is_err());
+ let result = writer.finish().unwrap();
+ assert_eq!(result.get_ref().len(), 108);
+ assert_eq!(
+ *result.get_ref(),
+ &[
+ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 5, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0,
+ 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 237, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0,
+ 1, 0, 51, 0, 0, 0, 35, 0, 0, 0, 0, 0,
+ ] as &[u8]
+ );
+ }
+
+ #[test]
+ fn write_symlink_simple() {
+ let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
+ writer
+ .add_symlink(
+ "name",
+ "target",
+ FileOptions::default().last_modified_time(
+ DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(),
+ ),
+ )
+ .unwrap();
+ assert!(writer
+ .write(b"writing to a symlink is not allowed and will not write any data")
+ .is_err());
+ let result = writer.finish().unwrap();
+ assert_eq!(result.get_ref().len(), 112);
+ assert_eq!(
+ *result.get_ref(),
+ &[
+ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0,
+ 6, 0, 0, 0, 4, 0, 0, 0, 110, 97, 109, 101, 116, 97, 114, 103, 101, 116, 80, 75, 1,
+ 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0, 6, 0,
+ 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 161, 0, 0, 0, 0, 110, 97, 109, 101,
+ 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 50, 0, 0, 0, 40, 0, 0, 0, 0, 0
+ ] as &[u8],
+ );
+ }
+
+ #[test]
+ fn write_symlink_wonky_paths() {
+ let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
+ writer
+ .add_symlink(
+ "directory\\link",
+ "/absolute/symlink\\with\\mixed/slashes",
+ FileOptions::default().last_modified_time(
+ DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(),
+ ),
+ )
+ .unwrap();
+ assert!(writer
+ .write(b"writing to a symlink is not allowed and will not write any data")
+ .is_err());
+ let result = writer.finish().unwrap();
+ assert_eq!(result.get_ref().len(), 162);
+ assert_eq!(
+ *result.get_ref(),
+ &[
+ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95, 41, 81, 245, 36, 0, 0, 0,
+ 36, 0, 0, 0, 14, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105,
+ 110, 107, 47, 97, 98, 115, 111, 108, 117, 116, 101, 47, 115, 121, 109, 108, 105,
+ 110, 107, 92, 119, 105, 116, 104, 92, 109, 105, 120, 101, 100, 47, 115, 108, 97,
+ 115, 104, 101, 115, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95,
+ 41, 81, 245, 36, 0, 0, 0, 36, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255,
+ 161, 0, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105, 110,
+ 107, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 60, 0, 0, 0, 80, 0, 0, 0, 0, 0
+ ] as &[u8],
+ );
+ }
+
+ #[test]
+ fn write_mimetype_zip() {
+ let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
+ let options = FileOptions {
+ compression_method: CompressionMethod::Stored,
+ compression_level: None,
+ last_modified_time: DateTime::default(),
+ permissions: Some(33188),
+ large_file: false,
+ };
+ writer.start_file("mimetype", options).unwrap();
+ writer
+ .write_all(b"application/vnd.oasis.opendocument.text")
+ .unwrap();
+ let result = writer.finish().unwrap();
+
+ assert_eq!(result.get_ref().len(), 153);
+ let mut v = Vec::new();
+ v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
+ assert_eq!(result.get_ref(), &v);
+ }
+
+ #[test]
+ fn path_to_string() {
+ let mut path = std::path::PathBuf::new();
+ #[cfg(windows)]
+ path.push(r"C:\");
+ #[cfg(unix)]
+ path.push("/");
+ path.push("windows");
+ path.push("..");
+ path.push(".");
+ path.push("system32");
+ let path_str = super::path_to_string(&path);
+ assert_eq!(path_str, "windows/system32");
+ }
+}
+
+#[cfg(not(feature = "unreserved"))]
+const EXTRA_FIELD_MAPPING: [u16; 49] = [
+ 0x0001, 0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015, 0x0016,
+ 0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690, 0x07c8, 0x2605,
+ 0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41, 0x4d49, 0x4f4c, 0x5356,
+ 0x5455, 0x554e, 0x5855, 0x6375, 0x6542, 0x7075, 0x756e, 0x7855, 0xa11e, 0xa220, 0xfd4a, 0x9901,
+ 0x9902,
+];
diff --git a/third_party/rust/zip/src/zipcrypto.rs b/third_party/rust/zip/src/zipcrypto.rs
new file mode 100644
index 0000000000..91d403951b
--- /dev/null
+++ b/third_party/rust/zip/src/zipcrypto.rs
@@ -0,0 +1,184 @@
+//! Implementation of the ZipCrypto algorithm
+//!
+//! The following paper was used to implement the ZipCrypto algorithm:
+//! [https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf](https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf)
+
+use std::num::Wrapping;
+
+/// A container to hold the current key state
+struct ZipCryptoKeys {
+ key_0: Wrapping<u32>,
+ key_1: Wrapping<u32>,
+ key_2: Wrapping<u32>,
+}
+
+impl ZipCryptoKeys {
+ fn new() -> ZipCryptoKeys {
+ ZipCryptoKeys {
+ key_0: Wrapping(0x12345678),
+ key_1: Wrapping(0x23456789),
+ key_2: Wrapping(0x34567890),
+ }
+ }
+
+ fn update(&mut self, input: u8) {
+ // Update the three keys as described in the ZipCrypto specification:
+ // key_0 is CRC32-updated with the input byte, key_1 is advanced with a linear
+ // congruential step fed with the low byte of key_0, and key_2 is CRC32-updated
+ // with the high byte of key_1.
+ self.key_0 = ZipCryptoKeys::crc32(self.key_0, input);
+ self.key_1 =
+ (self.key_1 + (self.key_0 & Wrapping(0xff))) * Wrapping(0x08088405) + Wrapping(1);
+ self.key_2 = ZipCryptoKeys::crc32(self.key_2, (self.key_1 >> 24).0 as u8);
+ }
+
+ fn stream_byte(&mut self) -> u8 {
+ let temp: Wrapping<u16> = Wrapping(self.key_2.0 as u16) | Wrapping(3);
+ ((temp * (temp ^ Wrapping(1))) >> 8).0 as u8
+ }
+
+ fn decrypt_byte(&mut self, cipher_byte: u8) -> u8 {
+ let plain_byte: u8 = self.stream_byte() ^ cipher_byte;
+ self.update(plain_byte);
+ plain_byte
+ }
+
+ #[allow(dead_code)]
+ fn encrypt_byte(&mut self, plain_byte: u8) -> u8 {
+ let cipher_byte: u8 = self.stream_byte() ^ plain_byte;
+ self.update(plain_byte);
+ cipher_byte
+ }
+
+ fn crc32(crc: Wrapping<u32>, input: u8) -> Wrapping<u32> {
+ (crc >> 8) ^ Wrapping(CRCTABLE[((crc & Wrapping(0xff)).0 as u8 ^ input) as usize])
+ }
+}
+
+/// A ZipCrypto reader with unverified password
+pub struct ZipCryptoReader<R> {
+ file: R,
+ keys: ZipCryptoKeys,
+}
+
+pub enum ZipCryptoValidator {
+ PkzipCrc32(u32),
+ InfoZipMsdosTime(u16),
+}
+
+impl<R: std::io::Read> ZipCryptoReader<R> {
+ /// Note: The password is `&[u8]` and not `&str` because the
+ /// [zip specification](https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.3.TXT)
+ /// does not specify password encoding (see function `update_keys` in the specification).
+ /// Therefore, if `&str` was used, the password would be UTF-8 and it
+ /// would be impossible to decrypt files that were encrypted with a
+ /// password byte sequence that is unrepresentable in UTF-8.
+ pub fn new(file: R, password: &[u8]) -> ZipCryptoReader<R> {
+ let mut result = ZipCryptoReader {
+ file,
+ keys: ZipCryptoKeys::new(),
+ };
+
+ // Key the cipher by updating the keys with the password.
+ for byte in password.iter() {
+ result.keys.update(*byte);
+ }
+
+ result
+ }
+
+ /// Read the ZipCrypto header bytes and validate the password.
+ pub fn validate(
+ mut self,
+ validator: ZipCryptoValidator,
+ ) -> Result<Option<ZipCryptoReaderValid<R>>, std::io::Error> {
+ // ZipCrypto prefixes a file with a 12 byte header
+ let mut header_buf = [0u8; 12];
+ self.file.read_exact(&mut header_buf)?;
+ for byte in header_buf.iter_mut() {
+ *byte = self.keys.decrypt_byte(*byte);
+ }
+
+ match validator {
+ ZipCryptoValidator::PkzipCrc32(crc32_plaintext) => {
+ // PKZIP before 2.0 used a 2-byte CRC check.
+ // PKZIP 2.0+ uses a 1-byte CRC check, which is more secure.
+ // We also use a 1-byte CRC check.
+
+ if (crc32_plaintext >> 24) as u8 != header_buf[11] {
+ return Ok(None); // Wrong password
+ }
+ }
+ ZipCryptoValidator::InfoZipMsdosTime(last_mod_time) => {
+ // Info-ZIP modification to the ZipCrypto format:
+ // if bit 3 of the general purpose bit flag is set
+ // (indicating that the file uses a data-descriptor section),
+ // the high byte of the 16-bit File Time is used instead of the CRC.
+ // Info-ZIP code probably writes 2 bytes of File Time;
+ // we check only 1 byte.
+
+ if (last_mod_time >> 8) as u8 != header_buf[11] {
+ return Ok(None); // Wrong password
+ }
+ }
+ }
+
+ Ok(Some(ZipCryptoReaderValid { reader: self }))
+ }
+}
+
+/// A ZipCrypto reader with verified password
+pub struct ZipCryptoReaderValid<R> {
+ reader: ZipCryptoReader<R>,
+}
+
+impl<R: std::io::Read> std::io::Read for ZipCryptoReaderValid<R> {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ // Note: There might be potential for optimization. Inspiration can be found at:
+ // https://github.com/kornelski/7z/blob/master/CPP/7zip/Crypto/ZipCrypto.cpp
+
+ // Decrypt only the bytes that were actually read, so the key stream stays in
+ // sync with the ciphertext across short reads.
+ let n = self.reader.file.read(buf)?;
+ for byte in buf.iter_mut().take(n) {
+ *byte = self.reader.keys.decrypt_byte(*byte);
+ }
+ Ok(n)
+ }
+}
+
+impl<R: std::io::Read> ZipCryptoReaderValid<R> {
+ /// Consumes this decoder, returning the underlying reader.
+ pub fn into_inner(self) -> R {
+ self.reader.file
+ }
+}
+
+static CRCTABLE: [u32; 256] = [
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
+];