author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /vendor/sha-1-0.8.2/src
parent     Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/sha-1-0.8.2/src')
-rw-r--r--  vendor/sha-1-0.8.2/src/aarch64.rs |   9
-rw-r--r--  vendor/sha-1-0.8.2/src/consts.rs  |  19
-rw-r--r--  vendor/sha-1-0.8.2/src/lib.rs     | 141
-rw-r--r--  vendor/sha-1-0.8.2/src/utils.rs   | 300
4 files changed, 469 insertions(+), 0 deletions(-)
diff --git a/vendor/sha-1-0.8.2/src/aarch64.rs b/vendor/sha-1-0.8.2/src/aarch64.rs
new file mode 100644
index 000000000..d3cffecd7
--- /dev/null
+++ b/vendor/sha-1-0.8.2/src/aarch64.rs
@@ -0,0 +1,9 @@
+// TODO: Import those from libc, see https://github.com/rust-lang/libc/pull/1638
+const AT_HWCAP: u64 = 16;
+const HWCAP_SHA1: u64 = 32;
+
+#[inline(always)]
+pub fn sha1_supported() -> bool {
+ let hwcaps: u64 = unsafe { ::libc::getauxval(AT_HWCAP) };
+ (hwcaps & HWCAP_SHA1) != 0
+}
diff --git a/vendor/sha-1-0.8.2/src/consts.rs b/vendor/sha-1-0.8.2/src/consts.rs
new file mode 100644
index 000000000..628d4d6e0
--- /dev/null
+++ b/vendor/sha-1-0.8.2/src/consts.rs
@@ -0,0 +1,19 @@
+#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))]
+
+pub const STATE_LEN: usize = 5;
+
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+pub const BLOCK_LEN: usize = 16;
+
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+pub const K0: u32 = 0x5A827999u32;
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+pub const K1: u32 = 0x6ED9EBA1u32;
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+pub const K2: u32 = 0x8F1BBCDCu32;
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+pub const K3: u32 = 0xCA62C1D6u32;
+
+pub const H: [u32; STATE_LEN] = [
+ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+];
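
Editor's note (not part of the vendored source): the four round constants above are the standard SHA-1 constants from FIPS 180-4, namely ⌊2^30·√n⌋ for n = 2, 3, 5 and 10, and `H` is the specified initial state. A minimal sketch verifying that derivation:

```rust
// Editor's sketch: reproduce the SHA-1 round constants K0..K3 as
// floor(2^30 * sqrt(n)) for n = 2, 3, 5, 10 (FIPS 180-4).
fn k(n: f64) -> u32 {
    ((1u64 << 30) as f64 * n.sqrt()) as u32 // `as` truncates toward zero
}

#[test]
fn round_constants_match_fips_180_4() {
    assert_eq!(k(2.0), 0x5A827999); // K0, rounds 0..20
    assert_eq!(k(3.0), 0x6ED9EBA1); // K1, rounds 20..40
    assert_eq!(k(5.0), 0x8F1BBCDC); // K2, rounds 40..60
    assert_eq!(k(10.0), 0xCA62C1D6); // K3, rounds 60..80
}
```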
diff --git a/vendor/sha-1-0.8.2/src/lib.rs b/vendor/sha-1-0.8.2/src/lib.rs
new file mode 100644
index 000000000..374c234c2
--- /dev/null
+++ b/vendor/sha-1-0.8.2/src/lib.rs
@@ -0,0 +1,141 @@
+//! An implementation of the [SHA-1][1] cryptographic hash algorithm.
+//!
+//! # Usage
+//!
+//! ```rust
+//! # #[macro_use] extern crate hex_literal;
+//! # extern crate sha1;
+//! # fn main() {
+//! use sha1::{Sha1, Digest};
+//!
+//! // create a Sha1 object
+//! let mut hasher = Sha1::new();
+//!
+//! // process input message
+//! hasher.input(b"hello world");
+//!
+//! // acquire hash digest in the form of GenericArray,
+//! // which in this case is equivalent to [u8; 20]
+//! let result = hasher.result();
+//! assert_eq!(result[..], hex!("2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"));
+//! # }
+//! ```
+//!
+//! Also see [RustCrypto/hashes][2] readme.
+//!
+//! [1]: https://en.wikipedia.org/wiki/SHA-1
+//! [2]: https://github.com/RustCrypto/hashes
+#![no_std]
+#![doc(html_logo_url =
+ "https://raw.githubusercontent.com/RustCrypto/meta/master/logo_small.png")]
+
+// Give relevant error messages if the user tries to enable AArch64 asm on unsupported platforms.
+#[cfg(all(feature = "asm-aarch64", target_arch = "aarch64", not(target_os = "linux")))]
+compile_error!("Your OS isn’t yet supported for runtime-checking of AArch64 features.");
+#[cfg(all(feature = "asm-aarch64", target_os = "linux", not(target_arch = "aarch64")))]
+compile_error!("Enable the \"asm\" feature instead of \"asm-aarch64\" on non-AArch64 Linux systems.");
+#[cfg(all(not(feature = "asm-aarch64"), feature = "asm", target_arch = "aarch64", target_os = "linux"))]
+compile_error!("Enable the \"asm-aarch64\" feature on AArch64 if you want to use asm.");
+
+extern crate block_buffer;
+#[macro_use] extern crate opaque_debug;
+#[macro_use] pub extern crate digest;
+#[cfg(feature = "std")]
+extern crate std;
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+extern crate fake_simd as simd;
+#[cfg(feature = "asm-aarch64")]
+extern crate libc;
+
+#[cfg(feature = "asm")]
+extern crate sha1_asm;
+#[cfg(all(feature = "asm", not(feature = "asm-aarch64")))]
+#[inline(always)]
+fn compress(state: &mut [u32; 5], block: &GenericArray<u8, U64>) {
+ let block: &[u8; 64] = unsafe { core::mem::transmute(block) };
+ sha1_asm::compress(state, block);
+}
+#[cfg(feature = "asm-aarch64")]
+mod aarch64;
+#[cfg(feature = "asm-aarch64")]
+#[inline(always)]
+fn compress(state: &mut [u32; 5], block: &GenericArray<u8, U64>) {
+ // TODO: Replace this platform-specific call with is_aarch64_feature_detected!("sha1") once
+ // that macro is stabilised and https://github.com/rust-lang/rfcs/pull/2725 is implemented
+ // to let us use it on no_std.
+ if aarch64::sha1_supported() {
+ let block: &[u8; 64] = unsafe { core::mem::transmute(block) };
+ sha1_asm::compress(state, block);
+ } else {
+ utils::compress(state, block);
+ }
+}
+
+#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
+mod utils;
+#[cfg(not(feature = "asm"))]
+use utils::compress;
+
+pub use digest::Digest;
+use digest::{Input, BlockInput, FixedOutput, Reset};
+use digest::generic_array::GenericArray;
+use digest::generic_array::typenum::{U20, U64};
+use block_buffer::BlockBuffer;
+use block_buffer::byteorder::{BE, ByteOrder};
+
+mod consts;
+use consts::{STATE_LEN, H};
+
+/// Structure representing the state of a SHA-1 computation
+#[derive(Clone)]
+pub struct Sha1 {
+ h: [u32; STATE_LEN],
+ len: u64,
+ buffer: BlockBuffer<U64>,
+}
+
+impl Default for Sha1 {
+ fn default() -> Self {
+ Sha1{ h: H, len: 0u64, buffer: Default::default() }
+ }
+}
+
+impl BlockInput for Sha1 {
+ type BlockSize = U64;
+}
+
+impl Input for Sha1 {
+ fn input<B: AsRef<[u8]>>(&mut self, input: B) {
+ let input = input.as_ref();
+ // Assumes that the total message length in bits (`self.len << 3`) will not overflow a u64
+ self.len += input.len() as u64;
+ let state = &mut self.h;
+ self.buffer.input(input, |d| compress(state, d));
+ }
+}
+
+impl FixedOutput for Sha1 {
+ type OutputSize = U20;
+
+ fn fixed_result(mut self) -> GenericArray<u8, Self::OutputSize> {
+ {
+ let state = &mut self.h;
+ let l = self.len << 3;
+ self.buffer.len64_padding::<BE, _>(l, |d| compress(state, d));
+ }
+ let mut out = GenericArray::default();
+ BE::write_u32_into(&self.h, &mut out);
+ out
+ }
+}
+
+impl Reset for Sha1 {
+ fn reset(&mut self) {
+ self.h = H;
+ self.len = 0;
+ self.buffer.reset();
+ }
+}
+
+impl_opaque_debug!(Sha1);
+impl_write!(Sha1);
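
Editor's note (illustrative, not part of the vendored source): because `Sha1` implements the `digest` 0.8 traits shown above, input can be fed in chunks. The sketch below assumes the same dependencies as the crate-root doc example and checks that chunked and one-shot hashing agree.

```rust
// Editor's sketch: incremental hashing through the `Input`/`FixedOutput`
// traits implemented above; chunked input matches a one-shot hash.
use sha1::{Digest, Sha1};

fn main() {
    let mut chunked = Sha1::new();
    chunked.input(b"hello ");
    chunked.input(b"world");

    let mut oneshot = Sha1::new();
    oneshot.input(b"hello world");

    // `result()` consumes the hasher and returns a GenericArray<u8, U20>.
    assert_eq!(chunked.result(), oneshot.result());
}
```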
diff --git a/vendor/sha-1-0.8.2/src/utils.rs b/vendor/sha-1-0.8.2/src/utils.rs
new file mode 100644
index 000000000..e8b941a6e
--- /dev/null
+++ b/vendor/sha-1-0.8.2/src/utils.rs
@@ -0,0 +1,300 @@
+#![cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
+
+use consts::{BLOCK_LEN, K0, K1, K2, K3};
+use block_buffer::byteorder::{BE, ByteOrder};
+use simd::u32x4;
+use digest::generic_array::GenericArray;
+use digest::generic_array::typenum::U64;
+
+type Block = GenericArray<u8, U64>;
+
+/// Not an intrinsic, but gets the first element of a vector.
+#[inline]
+pub fn sha1_first(w0: u32x4) -> u32 {
+ w0.0
+}
+
+/// Not an intrinsic, but adds a word to the first element of a vector.
+#[inline]
+pub fn sha1_first_add(e: u32, w0: u32x4) -> u32x4 {
+ let u32x4(a, b, c, d) = w0;
+ u32x4(e.wrapping_add(a), b, c, d)
+}
+
+/// Emulates `llvm.x86.sha1msg1` intrinsic.
+fn sha1msg1(a: u32x4, b: u32x4) -> u32x4 {
+ let u32x4(_, _, w2, w3) = a;
+ let u32x4(w4, w5, _, _) = b;
+ a ^ u32x4(w2, w3, w4, w5)
+}
+
+/// Emulates `llvm.x86.sha1msg2` intrinsic.
+fn sha1msg2(a: u32x4, b: u32x4) -> u32x4 {
+ let u32x4(x0, x1, x2, x3) = a;
+ let u32x4(_, w13, w14, w15) = b;
+
+ let w16 = (x0 ^ w13).rotate_left(1);
+ let w17 = (x1 ^ w14).rotate_left(1);
+ let w18 = (x2 ^ w15).rotate_left(1);
+ let w19 = (x3 ^ w16).rotate_left(1);
+
+ u32x4(w16, w17, w18, w19)
+}
+
+/// Performs 4 rounds of the message schedule update.
+/*
+pub fn sha1_schedule_x4(v0: u32x4, v1: u32x4, v2: u32x4, v3: u32x4) -> u32x4 {
+ sha1msg2(sha1msg1(v0, v1) ^ v2, v3)
+}
+*/
+
+/// Emulates `llvm.x86.sha1nexte` intrinsic.
+#[inline]
+fn sha1_first_half(abcd: u32x4, msg: u32x4) -> u32x4 {
+ sha1_first_add(sha1_first(abcd).rotate_left(30), msg)
+}
+
+/// Emulates `llvm.x86.sha1rnds4` intrinsic.
+/// Performs 4 rounds of the message block digest.
+fn sha1_digest_round_x4(abcd: u32x4, work: u32x4, i: i8) -> u32x4 {
+ const K0V: u32x4 = u32x4(K0, K0, K0, K0);
+ const K1V: u32x4 = u32x4(K1, K1, K1, K1);
+ const K2V: u32x4 = u32x4(K2, K2, K2, K2);
+ const K3V: u32x4 = u32x4(K3, K3, K3, K3);
+
+ match i {
+ 0 => sha1rnds4c(abcd, work + K0V),
+ 1 => sha1rnds4p(abcd, work + K1V),
+ 2 => sha1rnds4m(abcd, work + K2V),
+ 3 => sha1rnds4p(abcd, work + K3V),
+ _ => unreachable!("unknown icosaround index"),
+ }
+}
+
+/// Not an intrinsic, but helps emulate `llvm.x86.sha1rnds4` intrinsic.
+fn sha1rnds4c(abcd: u32x4, msg: u32x4) -> u32x4 {
+ let u32x4(mut a, mut b, mut c, mut d) = abcd;
+ let u32x4(t, u, v, w) = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_202 {
+ ($a:expr, $b:expr, $c:expr) => ($c ^ ($a & ($b ^ $c)))
+ } // Choose, MD5F, SHA1C
+
+ e = e.wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_202!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d.wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_202!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c.wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_202!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b.wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_202!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ u32x4(b, c, d, e)
+}
+
+/// Not an intrinsic, but helps emulate `llvm.x86.sha1rnds4` intrinsic.
+fn sha1rnds4p(abcd: u32x4, msg: u32x4) -> u32x4 {
+ let u32x4(mut a, mut b, mut c, mut d) = abcd;
+ let u32x4(t, u, v, w) = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_150 {
+ ($a:expr, $b:expr, $c:expr) => ($a ^ $b ^ $c)
+ } // Parity, XOR, MD5H, SHA1P
+
+ e = e.wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_150!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d.wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_150!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c.wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_150!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b.wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_150!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ u32x4(b, c, d, e)
+}
+
+/// Not an intrinsic, but helps emulate `llvm.x86.sha1rnds4` intrinsic.
+fn sha1rnds4m(abcd: u32x4, msg: u32x4) -> u32x4 {
+ let u32x4(mut a, mut b, mut c, mut d) = abcd;
+ let u32x4(t, u, v, w) = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_232 {
+ ($a:expr, $b:expr, $c:expr) => (($a & $b) ^ ($a & $c) ^ ($b & $c))
+ } // Majority, SHA1M
+
+ e = e.wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_232!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d.wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_232!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c.wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_232!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b.wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_232!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ u32x4(b, c, d, e)
+}
+
+/// Process a block with the SHA-1 algorithm.
+fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
+
+ macro_rules! schedule {
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => (
+ sha1msg2(sha1msg1($v0, $v1) ^ $v2, $v3)
+ )
+ }
+
+ macro_rules! rounds4 {
+ ($h0:ident, $h1:ident, $wk:expr, $i:expr) => (
+ sha1_digest_round_x4($h0, sha1_first_half($h1, $wk), $i)
+ )
+ }
+
+ // Rounds 0..20
+ // TODO: replace with `u32x4::load`
+ let mut h0 = u32x4(state[0], state[1], state[2], state[3]);
+ let mut w0 = u32x4(block[0], block[1], block[2], block[3]);
+ let mut h1 = sha1_digest_round_x4(h0, sha1_first_add(state[4], w0), 0);
+ let mut w1 = u32x4(block[4], block[5], block[6], block[7]);
+ h0 = rounds4!(h1, h0, w1, 0);
+ let mut w2 = u32x4(block[8], block[9], block[10], block[11]);
+ h1 = rounds4!(h0, h1, w2, 0);
+ let mut w3 = u32x4(block[12], block[13], block[14], block[15]);
+ h0 = rounds4!(h1, h0, w3, 0);
+ let mut w4 = schedule!(w0, w1, w2, w3);
+ h1 = rounds4!(h0, h1, w4, 0);
+
+ // Rounds 20..40
+ w0 = schedule!(w1, w2, w3, w4);
+ h0 = rounds4!(h1, h0, w0, 1);
+ w1 = schedule!(w2, w3, w4, w0);
+ h1 = rounds4!(h0, h1, w1, 1);
+ w2 = schedule!(w3, w4, w0, w1);
+ h0 = rounds4!(h1, h0, w2, 1);
+ w3 = schedule!(w4, w0, w1, w2);
+ h1 = rounds4!(h0, h1, w3, 1);
+ w4 = schedule!(w0, w1, w2, w3);
+ h0 = rounds4!(h1, h0, w4, 1);
+
+ // Rounds 40..60
+ w0 = schedule!(w1, w2, w3, w4);
+ h1 = rounds4!(h0, h1, w0, 2);
+ w1 = schedule!(w2, w3, w4, w0);
+ h0 = rounds4!(h1, h0, w1, 2);
+ w2 = schedule!(w3, w4, w0, w1);
+ h1 = rounds4!(h0, h1, w2, 2);
+ w3 = schedule!(w4, w0, w1, w2);
+ h0 = rounds4!(h1, h0, w3, 2);
+ w4 = schedule!(w0, w1, w2, w3);
+ h1 = rounds4!(h0, h1, w4, 2);
+
+ // Rounds 60..80
+ w0 = schedule!(w1, w2, w3, w4);
+ h0 = rounds4!(h1, h0, w0, 3);
+ w1 = schedule!(w2, w3, w4, w0);
+ h1 = rounds4!(h0, h1, w1, 3);
+ w2 = schedule!(w3, w4, w0, w1);
+ h0 = rounds4!(h1, h0, w2, 3);
+ w3 = schedule!(w4, w0, w1, w2);
+ h1 = rounds4!(h0, h1, w3, 3);
+ w4 = schedule!(w0, w1, w2, w3);
+ h0 = rounds4!(h1, h0, w4, 3);
+
+ let e = sha1_first(h1).rotate_left(30);
+ let u32x4(a, b, c, d) = h0;
+
+ state[0] = state[0].wrapping_add(a);
+ state[1] = state[1].wrapping_add(b);
+ state[2] = state[2].wrapping_add(c);
+ state[3] = state[3].wrapping_add(d);
+ state[4] = state[4].wrapping_add(e);
+}
+
+/// Process a block with the SHA-1 algorithm. (See more...)
+///
+/// SHA-1 is a cryptographic hash function, and as such, it operates
+/// on an arbitrary number of bytes. This function operates on exactly
+/// one fixed-size block: the type system only admits 64-byte blocks,
+/// so a wrong-sized input cannot be passed. This function takes two arguments:
+///
+/// * `state` is a reference to an **array** of 5 words.
+/// * `block` is a reference to a 64-byte **block** (`GenericArray<u8, U64>`).
+///
+/// If you want the function that performs a message digest on an arbitrary
+/// number of bytes, then see also the `Sha1` struct above.
+///
+/// # Implementation
+///
+/// First, some background. Both ARM and Intel have announced instruction set
+/// extensions for SHA-1 and SHA-256 that are expected in the near future.
+/// Second, LLVM won't lower these intrinsics yet, so these functions were
+/// written to emulate those instructions. Finally,
+/// the block function implemented with these emulated intrinsics turned out
+/// to be quite fast! What follows is a discussion of this CPU-level view
+/// of the SHA-1 algorithm and how it relates to the mathematical definition.
+///
+/// The SHA instruction set extensions can be divided up into two categories:
+///
+/// * message work schedule update calculation ("schedule" v., "work" n.)
+/// * message block 80-round digest calculation ("digest" v., "block" n.)
+///
+/// The schedule-related functions can be used to easily perform 4 rounds
+/// of the message work schedule update calculation, as shown below:
+///
+/// ```ignore
+/// macro_rules! schedule_x4 {
+/// ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => (
+/// sha1msg2(sha1msg1($v0, $v1) ^ $v2, $v3)
+/// )
+/// }
+///
+/// macro_rules! round_x4 {
+/// ($h0:ident, $h1:ident, $wk:expr, $i:expr) => (
+/// sha1rnds4($h0, sha1_first_half($h1, $wk), $i)
+/// )
+/// }
+/// ```
+///
+/// As also shown above, the digest-related functions can be used to perform
+/// 4 rounds of the message block digest calculation.
+///
+pub fn compress(state: &mut [u32; 5], block: &Block) {
+ let mut block_u32 = [0u32; BLOCK_LEN];
+ BE::read_u32_into(block, &mut block_u32[..]);
+ sha1_digest_block_u32(state, &block_u32);
+}
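
Editor's note (illustrative, not part of the vendored source): `compress` consumes exactly one padded 64-byte block; buffering, padding and the length suffix are handled by the public `Sha1` type in lib.rs. The sketch below, written as if inside the crate (the `utils` module is private), pads the three-byte message "abc" by hand and checks the well-known test vector.

```rust
// Editor's sketch: drive the block function directly on one hand-padded block.
use digest::generic_array::GenericArray;
use consts::H;

fn sha1_abc() -> [u32; 5] {
    let mut state = H; // 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0

    // "abc", then the 0x80 terminator, zero padding, and the message length
    // in bits (24) as a big-endian u64 in the last eight bytes.
    let mut block = [0u8; 64];
    block[..3].copy_from_slice(b"abc");
    block[3] = 0x80;
    block[63] = 24;

    compress(&mut state, GenericArray::from_slice(&block));

    // SHA-1("abc") = a9993e36 4706816a ba3e2571 7850c26c 9cd0d89d
    assert_eq!(state, [0xa9993e36, 0x4706816a, 0xba3e2571, 0x7850c26c, 0x9cd0d89d]);
    state
}
```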