Diffstat (limited to 'vendor/sha1/src/compress')
-rw-r--r--  vendor/sha1/src/compress/aarch64.rs   18
-rw-r--r--  vendor/sha1/src/compress/soft.rs     260
-rw-r--r--  vendor/sha1/src/compress/x86.rs      112
3 files changed, 390 insertions, 0 deletions
diff --git a/vendor/sha1/src/compress/aarch64.rs b/vendor/sha1/src/compress/aarch64.rs
new file mode 100644
index 000000000..5952d1f62
--- /dev/null
+++ b/vendor/sha1/src/compress/aarch64.rs
@@ -0,0 +1,18 @@
+//! SHA-1 `aarch64` backend.
+
+// Per rustc target feature docs for `aarch64-unknown-linux-gnu` and
+// `aarch64-apple-darwin` platforms, the `sha2` target feature enables
+// SHA-1 as well:
+//
+// > Enable SHA1 and SHA256 support.
+cpufeatures::new!(sha1_hwcap, "sha2");
+
+pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
+ // TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
+ // after stabilization
+ if sha1_hwcap::get() {
+ sha1_asm::compress(state, blocks);
+ } else {
+ super::soft::compress(state, blocks);
+ }
+}
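The `cpufeatures::new!` invocation above expands to a `sha1_hwcap` module whose `get()` probes the CPU once and caches the result. As a caller-side illustration only (not part of this diff), here is a minimal sketch of hoisting the probe out of a loop via the crate's `InitToken`, assuming the documented `cpufeatures` API; `hash_streams` is a hypothetical helper:

    // Hypothetical caller-side sketch, not part of the vendored file.
    // `init()` probes the CPU once; `token.get()` is a cheap cached read.
    fn hash_streams(state: &mut [u32; 5], streams: &[&[[u8; 64]]]) {
        let token = sha1_hwcap::init();
        for blocks in streams {
            if token.get() {
                sha1_asm::compress(state, blocks);
            } else {
                super::soft::compress(state, blocks);
            }
        }
    }

Either way the detection cost is paid once per process; the token only makes the cached read explicit at the call site.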
diff --git a/vendor/sha1/src/compress/soft.rs b/vendor/sha1/src/compress/soft.rs
new file mode 100644
index 000000000..0b9fb2701
--- /dev/null
+++ b/vendor/sha1/src/compress/soft.rs
@@ -0,0 +1,260 @@
+#![allow(clippy::many_single_char_names)]
+use super::BLOCK_SIZE;
+use core::convert::TryInto;
+
+const K: [u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6];
+
+#[inline(always)]
+fn add(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ [
+ a[0].wrapping_add(b[0]),
+ a[1].wrapping_add(b[1]),
+ a[2].wrapping_add(b[2]),
+ a[3].wrapping_add(b[3]),
+ ]
+}
+
+#[inline(always)]
+fn xor(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ [a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]]
+}
+
+#[inline]
+pub fn sha1_first_add(e: u32, w0: [u32; 4]) -> [u32; 4] {
+ let [a, b, c, d] = w0;
+ [e.wrapping_add(a), b, c, d]
+}
+
+fn sha1msg1(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ let [_, _, w2, w3] = a;
+ let [w4, w5, _, _] = b;
+ [a[0] ^ w2, a[1] ^ w3, a[2] ^ w4, a[3] ^ w5]
+}
+
+fn sha1msg2(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ let [x0, x1, x2, x3] = a;
+ let [_, w13, w14, w15] = b;
+
+ let w16 = (x0 ^ w13).rotate_left(1);
+ let w17 = (x1 ^ w14).rotate_left(1);
+ let w18 = (x2 ^ w15).rotate_left(1);
+ let w19 = (x3 ^ w16).rotate_left(1);
+
+ [w16, w17, w18, w19]
+}
+
+#[inline]
+fn sha1_first_half(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ sha1_first_add(abcd[0].rotate_left(30), msg)
+}
+
+fn sha1_digest_round_x4(abcd: [u32; 4], work: [u32; 4], i: i8) -> [u32; 4] {
+ match i {
+ 0 => sha1rnds4c(abcd, add(work, [K[0]; 4])),
+ 1 => sha1rnds4p(abcd, add(work, [K[1]; 4])),
+ 2 => sha1rnds4m(abcd, add(work, [K[2]; 4])),
+ 3 => sha1rnds4p(abcd, add(work, [K[3]; 4])),
+ _ => unreachable!("unknown icosaround index"),
+ }
+}
+
+fn sha1rnds4c(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ let [mut a, mut b, mut c, mut d] = abcd;
+ let [t, u, v, w] = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_202 {
+ ($a:expr, $b:expr, $c:expr) => {
+ $c ^ ($a & ($b ^ $c))
+ };
+ } // Choose, MD5F, SHA1C
+
+ e = e
+ .wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_202!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d
+ .wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_202!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c
+ .wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_202!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b
+ .wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_202!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ [b, c, d, e]
+}
+
+fn sha1rnds4p(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ let [mut a, mut b, mut c, mut d] = abcd;
+ let [t, u, v, w] = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_150 {
+ ($a:expr, $b:expr, $c:expr) => {
+ $a ^ $b ^ $c
+ };
+ } // Parity, XOR, MD5H, SHA1P
+
+ e = e
+ .wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_150!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d
+ .wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_150!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c
+ .wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_150!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b
+ .wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_150!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ [b, c, d, e]
+}
+
+fn sha1rnds4m(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ let [mut a, mut b, mut c, mut d] = abcd;
+ let [t, u, v, w] = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_232 {
+ ($a:expr, $b:expr, $c:expr) => {
+ ($a & $b) ^ ($a & $c) ^ ($b & $c)
+ };
+ } // Majority, SHA1M
+
+ e = e
+ .wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_232!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d
+ .wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_232!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c
+ .wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_232!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b
+ .wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_232!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ [b, c, d, e]
+}
+
+macro_rules! rounds4 {
+ ($h0:ident, $h1:ident, $wk:expr, $i:expr) => {
+ sha1_digest_round_x4($h0, sha1_first_half($h1, $wk), $i)
+ };
+}
+
+macro_rules! schedule {
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {
+ sha1msg2(xor(sha1msg1($v0, $v1), $v2), $v3)
+ };
+}
+
+macro_rules! schedule_rounds4 {
+ (
+ $h0:ident, $h1:ident,
+ $w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr,
+ $i:expr
+ ) => {
+ $w4 = schedule!($w0, $w1, $w2, $w3);
+ $h1 = rounds4!($h0, $h1, $w4, $i);
+ };
+}
+
+#[inline(always)]
+fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
+ let mut w0 = [block[0], block[1], block[2], block[3]];
+ let mut w1 = [block[4], block[5], block[6], block[7]];
+ let mut w2 = [block[8], block[9], block[10], block[11]];
+ let mut w3 = [block[12], block[13], block[14], block[15]];
+ #[allow(clippy::needless_late_init)]
+ let mut w4;
+
+ let mut h0 = [state[0], state[1], state[2], state[3]];
+ let mut h1 = sha1_first_add(state[4], w0);
+
+ // Rounds 0..20
+ h1 = sha1_digest_round_x4(h0, h1, 0);
+ h0 = rounds4!(h1, h0, w1, 0);
+ h1 = rounds4!(h0, h1, w2, 0);
+ h0 = rounds4!(h1, h0, w3, 0);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 0);
+
+ // Rounds 20..40
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 1);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 1);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 1);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 1);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 1);
+
+ // Rounds 40..60
+ schedule_rounds4!(h0, h1, w1, w2, w3, w4, w0, 2);
+ schedule_rounds4!(h1, h0, w2, w3, w4, w0, w1, 2);
+ schedule_rounds4!(h0, h1, w3, w4, w0, w1, w2, 2);
+ schedule_rounds4!(h1, h0, w4, w0, w1, w2, w3, 2);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 2);
+
+ // Rounds 60..80
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 3);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 3);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 3);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 3);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 3);
+
+ let e = h1[0].rotate_left(30);
+ let [a, b, c, d] = h0;
+
+ state[0] = state[0].wrapping_add(a);
+ state[1] = state[1].wrapping_add(b);
+ state[2] = state[2].wrapping_add(c);
+ state[3] = state[3].wrapping_add(d);
+ state[4] = state[4].wrapping_add(e);
+}
+
+pub fn compress(state: &mut [u32; 5], blocks: &[[u8; BLOCK_SIZE]]) {
+ let mut block_u32 = [0u32; BLOCK_SIZE / 4];
+ // LLVM cannot yet take full advantage of the aliasing guarantees here, so
+ // without this local copy it would emit unnecessary stores to `state`
+ let mut state_cpy = *state;
+ for block in blocks.iter() {
+ for (o, chunk) in block_u32.iter_mut().zip(block.chunks_exact(4)) {
+ *o = u32::from_be_bytes(chunk.try_into().unwrap());
+ }
+ sha1_digest_block_u32(&mut state_cpy, &block_u32);
+ }
+ *state = state_cpy;
+}
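`sha1msg1` and `sha1msg2` mirror the x86 SHA-NI message-schedule intrinsics: one `schedule!` step derives W[16..20] from W[0..16], and must agree with the scalar FIPS 180-4 recurrence W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]). A sanity-check sketch (not part of this diff) that could sit in a `#[cfg(test)]` module inside soft.rs:

    // Sketch: verify one vectorized schedule step against the scalar
    // recurrence. The input words are arbitrary; the identity holds for any.
    #[cfg(test)]
    mod schedule_tests {
        use super::*;

        #[test]
        fn schedule_matches_scalar_recurrence() {
            let mut w = [0u32; 20];
            for (i, x) in w[..16].iter_mut().enumerate() {
                *x = (i as u32).wrapping_mul(0x9E37_79B9);
            }
            for t in 16..20 {
                w[t] = (w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16]).rotate_left(1);
            }
            let w4 = sha1msg2(
                xor(
                    sha1msg1([w[0], w[1], w[2], w[3]], [w[4], w[5], w[6], w[7]]),
                    [w[8], w[9], w[10], w[11]],
                ),
                [w[12], w[13], w[14], w[15]],
            );
            assert_eq!(w4, [w[16], w[17], w[18], w[19]]);
        }
    }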
diff --git a/vendor/sha1/src/compress/x86.rs b/vendor/sha1/src/compress/x86.rs
new file mode 100644
index 000000000..4dcd56b8a
--- /dev/null
+++ b/vendor/sha1/src/compress/x86.rs
@@ -0,0 +1,112 @@
+//! SHA-1 `x86`/`x86_64` backend.
+
+#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+macro_rules! rounds4 {
+ ($h0:ident, $h1:ident, $wk:expr, $i:expr) => {
+ _mm_sha1rnds4_epu32($h0, _mm_sha1nexte_epu32($h1, $wk), $i)
+ };
+}
+
+macro_rules! schedule {
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {
+ _mm_sha1msg2_epu32(_mm_xor_si128(_mm_sha1msg1_epu32($v0, $v1), $v2), $v3)
+ };
+}
+
+macro_rules! schedule_rounds4 {
+ (
+ $h0:ident, $h1:ident,
+ $w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr,
+ $i:expr
+ ) => {
+ $w4 = schedule!($w0, $w1, $w2, $w3);
+ $h1 = rounds4!($h0, $h1, $w4, $i);
+ };
+}
+
+#[target_feature(enable = "sha,sse2,ssse3,sse4.1")]
+unsafe fn digest_blocks(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
+ #[allow(non_snake_case)]
+ let MASK: __m128i = _mm_set_epi64x(0x0001_0203_0405_0607, 0x0809_0A0B_0C0D_0E0F);
+
+ let mut state_abcd = _mm_set_epi32(
+ state[0] as i32,
+ state[1] as i32,
+ state[2] as i32,
+ state[3] as i32,
+ );
+ let mut state_e = _mm_set_epi32(state[4] as i32, 0, 0, 0);
+
+ for block in blocks {
+ // SAFETY: we use only unaligned loads with this pointer
+ #[allow(clippy::cast_ptr_alignment)]
+ let block_ptr = block.as_ptr() as *const __m128i;
+
+ let mut w0 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(0)), MASK);
+ let mut w1 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(1)), MASK);
+ let mut w2 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(2)), MASK);
+ let mut w3 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(3)), MASK);
+ #[allow(clippy::needless_late_init)]
+ let mut w4;
+
+ let mut h0 = state_abcd;
+ let mut h1 = _mm_add_epi32(state_e, w0);
+
+ // Rounds 0..20
+ h1 = _mm_sha1rnds4_epu32(h0, h1, 0);
+ h0 = rounds4!(h1, h0, w1, 0);
+ h1 = rounds4!(h0, h1, w2, 0);
+ h0 = rounds4!(h1, h0, w3, 0);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 0);
+
+ // Rounds 20..40
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 1);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 1);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 1);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 1);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 1);
+
+ // Rounds 40..60
+ schedule_rounds4!(h0, h1, w1, w2, w3, w4, w0, 2);
+ schedule_rounds4!(h1, h0, w2, w3, w4, w0, w1, 2);
+ schedule_rounds4!(h0, h1, w3, w4, w0, w1, w2, 2);
+ schedule_rounds4!(h1, h0, w4, w0, w1, w2, w3, 2);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 2);
+
+ // Rounds 60..80
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 3);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 3);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 3);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 3);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 3);
+
+ state_abcd = _mm_add_epi32(state_abcd, h0);
+ state_e = _mm_sha1nexte_epu32(h1, state_e);
+ }
+
+ state[0] = _mm_extract_epi32(state_abcd, 3) as u32;
+ state[1] = _mm_extract_epi32(state_abcd, 2) as u32;
+ state[2] = _mm_extract_epi32(state_abcd, 1) as u32;
+ state[3] = _mm_extract_epi32(state_abcd, 0) as u32;
+ state[4] = _mm_extract_epi32(state_e, 3) as u32;
+}
+
+cpufeatures::new!(shani_cpuid, "sha", "sse2", "ssse3", "sse4.1");
+
+pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
+ // TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
+ // after stabilization
+ if shani_cpuid::get() {
+ unsafe {
+ digest_blocks(state, blocks);
+ }
+ } else {
+ super::soft::compress(state, blocks);
+ }
+}
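Note the lane order: `_mm_set_epi32(state[0], ..., state[3])` places `a` in the highest lane, which is why extraction reads the indices back in reverse. Both backends consume whole 64-byte blocks, so padding is the caller's job. A usage sketch (not part of this diff; `sha1_abc` is a hypothetical helper) that pads the one-block message b"abc" by hand and checks the well-known test vector:

    // Sketch: one-shot SHA-1 of b"abc" through `compress`, with manual
    // FIPS 180-4 padding (0x80 terminator, zero fill, 64-bit bit length).
    fn sha1_abc() -> [u32; 5] {
        // Initial hash value H(0) for SHA-1.
        let mut state = [0x6745_2301u32, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0];
        let mut block = [0u8; 64];
        block[..3].copy_from_slice(b"abc");
        block[3] = 0x80; // mandatory `1` bit after the message
        block[56..].copy_from_slice(&24u64.to_be_bytes()); // length in bits, big-endian
        compress(&mut state, &[block]);
        // a9993e36 4706816a ba3e2571 7850c26c 9cd0d89d
        assert_eq!(
            state,
            [0xA999_3E36, 0x4706_816A, 0xBA3E_2571, 0x7850_C26C, 0x9CD0_D89D]
        );
        state
    }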