author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/sha1/src
parent    Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/sha1/src')
-rw-r--r--  third_party/rust/sha1/src/compress.rs          |  37
-rw-r--r--  third_party/rust/sha1/src/compress/aarch64.rs  |  18
-rw-r--r--  third_party/rust/sha1/src/compress/soft.rs     | 260
-rw-r--r--  third_party/rust/sha1/src/compress/x86.rs      | 112
-rw-r--r--  third_party/rust/sha1/src/lib.rs               | 154
5 files changed, 581 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/sha1/src/compress.rs b/third_party/rust/sha1/src/compress.rs
new file mode 100644
index 0000000000..da4a10a987
--- /dev/null
+++ b/third_party/rust/sha1/src/compress.rs
@@ -0,0 +1,37 @@
+use crate::{Block, BlockSizeUser, Sha1Core};
+use digest::typenum::Unsigned;
+
+cfg_if::cfg_if! {
+ if #[cfg(feature = "force-soft")] {
+ mod soft;
+ use soft::compress as compress_inner;
+ } else if #[cfg(all(feature = "asm", target_arch = "aarch64"))] {
+ mod soft;
+ mod aarch64;
+ use aarch64::compress as compress_inner;
+ } else if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
+ #[cfg(not(feature = "asm"))]
+ mod soft;
+ #[cfg(feature = "asm")]
+ mod soft {
+ pub use sha1_asm::compress;
+ }
+ mod x86;
+ use x86::compress as compress_inner;
+ } else {
+ mod soft;
+ use soft::compress as compress_inner;
+ }
+}
+
+const BLOCK_SIZE: usize = <Sha1Core as BlockSizeUser>::BlockSize::USIZE;
+
+/// SHA-1 compression function
+#[cfg_attr(docsrs, doc(cfg(feature = "compress")))]
+pub fn compress(state: &mut [u32; 5], blocks: &[Block<Sha1Core>]) {
+ // SAFETY: GenericArray<u8, U64> and [u8; 64] have
+ // exactly the same memory layout
+ let blocks: &[[u8; BLOCK_SIZE]] =
+ unsafe { &*(blocks as *const _ as *const [[u8; BLOCK_SIZE]]) };
+ compress_inner(state, blocks);
+}
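
With the crate's `compress` feature enabled, the function above is exported as
`sha1::compress` and can be driven directly. A minimal sketch under that
assumption: the starting state is the standard SHA-1 IV from FIPS 180-1, and
the single block is the padded empty message, so the result is the well-known
digest of the empty string.

    use sha1::digest::core_api::Block;
    use sha1::Sha1Core;

    fn main() {
        // Standard SHA-1 initialization vector (FIPS 180-1).
        let mut state = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0];
        // Padding for the empty message: the 0x80 marker, then zeros,
        // then the 64-bit big-endian bit length (zero here).
        let mut block = Block::<Sha1Core>::default();
        block[0] = 0x80;
        sha1::compress(&mut state, core::slice::from_ref(&block));
        // SHA-1("") = da39a3ee5e6b4b0d3255bfef95601890afd80709
        assert_eq!(state[0], 0xda39a3ee);
    }
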
diff --git a/third_party/rust/sha1/src/compress/aarch64.rs b/third_party/rust/sha1/src/compress/aarch64.rs
new file mode 100644
index 0000000000..5952d1f624
--- /dev/null
+++ b/third_party/rust/sha1/src/compress/aarch64.rs
@@ -0,0 +1,18 @@
+//! SHA-1 `aarch64` backend.
+
+// Per rustc target feature docs for `aarch64-unknown-linux-gnu` and
+// `aarch64-apple-darwin` platforms, the `sha2` target feature enables
+// SHA-1 as well:
+//
+// > Enable SHA1 and SHA256 support.
+cpufeatures::new!(sha1_hwcap, "sha2");
+
+pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
+ // TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
+ // after stabilization
+ if sha1_hwcap::get() {
+ sha1_asm::compress(state, blocks);
+ } else {
+ super::soft::compress(state, blocks);
+ }
+}
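
The `cpufeatures::new!` invocation above expands to a module whose first
`get()` call probes the CPU/OS and caches the result atomically, so the branch
in `compress` costs roughly one relaxed atomic load per call. A sketch of the
same probe done from user code; the module name `sha1_probe` is an
illustrative choice, not part of this crate's API.

    #[cfg(target_arch = "aarch64")]
    mod probe {
        // Same target-feature string as the backend: on aarch64 the
        // "sha2" feature covers the SHA-1 instructions as well.
        cpufeatures::new!(sha1_probe, "sha2");

        /// Returns true when the SHA-1/SHA-256 instructions are usable.
        pub fn hw_sha1_available() -> bool {
            sha1_probe::get()
        }
    }
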
diff --git a/third_party/rust/sha1/src/compress/soft.rs b/third_party/rust/sha1/src/compress/soft.rs
new file mode 100644
index 0000000000..0b9fb27014
--- /dev/null
+++ b/third_party/rust/sha1/src/compress/soft.rs
@@ -0,0 +1,260 @@
+#![allow(clippy::many_single_char_names)]
+use super::BLOCK_SIZE;
+use core::convert::TryInto;
+
+const K: [u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6];
+
+#[inline(always)]
+fn add(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ [
+ a[0].wrapping_add(b[0]),
+ a[1].wrapping_add(b[1]),
+ a[2].wrapping_add(b[2]),
+ a[3].wrapping_add(b[3]),
+ ]
+}
+
+#[inline(always)]
+fn xor(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ [a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]]
+}
+
+#[inline]
+pub fn sha1_first_add(e: u32, w0: [u32; 4]) -> [u32; 4] {
+ let [a, b, c, d] = w0;
+ [e.wrapping_add(a), b, c, d]
+}
+
+fn sha1msg1(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ let [_, _, w2, w3] = a;
+ let [w4, w5, _, _] = b;
+ [a[0] ^ w2, a[1] ^ w3, a[2] ^ w4, a[3] ^ w5]
+}
+
+fn sha1msg2(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
+ let [x0, x1, x2, x3] = a;
+ let [_, w13, w14, w15] = b;
+
+ let w16 = (x0 ^ w13).rotate_left(1);
+ let w17 = (x1 ^ w14).rotate_left(1);
+ let w18 = (x2 ^ w15).rotate_left(1);
+ let w19 = (x3 ^ w16).rotate_left(1);
+
+ [w16, w17, w18, w19]
+}
+
+#[inline]
+fn sha1_first_half(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ sha1_first_add(abcd[0].rotate_left(30), msg)
+}
+
+fn sha1_digest_round_x4(abcd: [u32; 4], work: [u32; 4], i: i8) -> [u32; 4] {
+ match i {
+ 0 => sha1rnds4c(abcd, add(work, [K[0]; 4])),
+ 1 => sha1rnds4p(abcd, add(work, [K[1]; 4])),
+ 2 => sha1rnds4m(abcd, add(work, [K[2]; 4])),
+ 3 => sha1rnds4p(abcd, add(work, [K[3]; 4])),
+ _ => unreachable!("unknown icosaround index"),
+ }
+}
+
+fn sha1rnds4c(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ let [mut a, mut b, mut c, mut d] = abcd;
+ let [t, u, v, w] = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_202 {
+ ($a:expr, $b:expr, $c:expr) => {
+ $c ^ ($a & ($b ^ $c))
+ };
+ } // Choose, MD5F, SHA1C
+
+ e = e
+ .wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_202!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d
+ .wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_202!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c
+ .wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_202!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b
+ .wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_202!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ [b, c, d, e]
+}
+
+fn sha1rnds4p(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ let [mut a, mut b, mut c, mut d] = abcd;
+ let [t, u, v, w] = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_150 {
+ ($a:expr, $b:expr, $c:expr) => {
+ $a ^ $b ^ $c
+ };
+ } // Parity, XOR, MD5H, SHA1P
+
+ e = e
+ .wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_150!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d
+ .wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_150!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c
+ .wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_150!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b
+ .wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_150!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ [b, c, d, e]
+}
+
+fn sha1rnds4m(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
+ let [mut a, mut b, mut c, mut d] = abcd;
+ let [t, u, v, w] = msg;
+ let mut e = 0u32;
+
+ macro_rules! bool3ary_232 {
+ ($a:expr, $b:expr, $c:expr) => {
+ ($a & $b) ^ ($a & $c) ^ ($b & $c)
+ };
+ } // Majority, SHA1M
+
+ e = e
+ .wrapping_add(a.rotate_left(5))
+ .wrapping_add(bool3ary_232!(b, c, d))
+ .wrapping_add(t);
+ b = b.rotate_left(30);
+
+ d = d
+ .wrapping_add(e.rotate_left(5))
+ .wrapping_add(bool3ary_232!(a, b, c))
+ .wrapping_add(u);
+ a = a.rotate_left(30);
+
+ c = c
+ .wrapping_add(d.rotate_left(5))
+ .wrapping_add(bool3ary_232!(e, a, b))
+ .wrapping_add(v);
+ e = e.rotate_left(30);
+
+ b = b
+ .wrapping_add(c.rotate_left(5))
+ .wrapping_add(bool3ary_232!(d, e, a))
+ .wrapping_add(w);
+ d = d.rotate_left(30);
+
+ [b, c, d, e]
+}
+
+macro_rules! rounds4 {
+ ($h0:ident, $h1:ident, $wk:expr, $i:expr) => {
+ sha1_digest_round_x4($h0, sha1_first_half($h1, $wk), $i)
+ };
+}
+
+macro_rules! schedule {
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {
+ sha1msg2(xor(sha1msg1($v0, $v1), $v2), $v3)
+ };
+}
+
+macro_rules! schedule_rounds4 {
+ (
+ $h0:ident, $h1:ident,
+ $w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr,
+ $i:expr
+ ) => {
+ $w4 = schedule!($w0, $w1, $w2, $w3);
+ $h1 = rounds4!($h0, $h1, $w4, $i);
+ };
+}
+
+#[inline(always)]
+fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
+ let mut w0 = [block[0], block[1], block[2], block[3]];
+ let mut w1 = [block[4], block[5], block[6], block[7]];
+ let mut w2 = [block[8], block[9], block[10], block[11]];
+ let mut w3 = [block[12], block[13], block[14], block[15]];
+ #[allow(clippy::needless_late_init)]
+ let mut w4;
+
+ let mut h0 = [state[0], state[1], state[2], state[3]];
+ let mut h1 = sha1_first_add(state[4], w0);
+
+ // Rounds 0..20
+ h1 = sha1_digest_round_x4(h0, h1, 0);
+ h0 = rounds4!(h1, h0, w1, 0);
+ h1 = rounds4!(h0, h1, w2, 0);
+ h0 = rounds4!(h1, h0, w3, 0);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 0);
+
+ // Rounds 20..40
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 1);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 1);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 1);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 1);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 1);
+
+ // Rounds 40..60
+ schedule_rounds4!(h0, h1, w1, w2, w3, w4, w0, 2);
+ schedule_rounds4!(h1, h0, w2, w3, w4, w0, w1, 2);
+ schedule_rounds4!(h0, h1, w3, w4, w0, w1, w2, 2);
+ schedule_rounds4!(h1, h0, w4, w0, w1, w2, w3, 2);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 2);
+
+ // Rounds 60..80
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 3);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 3);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 3);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 3);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 3);
+
+ let e = h1[0].rotate_left(30);
+ let [a, b, c, d] = h0;
+
+ state[0] = state[0].wrapping_add(a);
+ state[1] = state[1].wrapping_add(b);
+ state[2] = state[2].wrapping_add(c);
+ state[3] = state[3].wrapping_add(d);
+ state[4] = state[4].wrapping_add(e);
+}
+
+pub fn compress(state: &mut [u32; 5], blocks: &[[u8; BLOCK_SIZE]]) {
+ let mut block_u32 = [0u32; BLOCK_SIZE / 4];
+    // LLVM cannot yet reason about the aliasing here, so without this
+    // local copy it would emit unnecessary stores to `state`
+ let mut state_cpy = *state;
+ for block in blocks.iter() {
+ for (o, chunk) in block_u32.iter_mut().zip(block.chunks_exact(4)) {
+ *o = u32::from_be_bytes(chunk.try_into().unwrap());
+ }
+ sha1_digest_block_u32(&mut state_cpy, &block_u32);
+ }
+ *state = state_cpy;
+}
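
The four-lane helpers above (`sha1msg1`, `sha1msg2`, and the `schedule!`
macro) compute the standard SHA-1 message schedule four words at a time; for
instance, the first lane of `schedule!(w0, w1, w2, w3)` is
`rotl1(w[0] ^ w[2] ^ w[8] ^ w[13])`, i.e. `w[16]`. For comparison, a sketch of
the scalar recurrence from FIPS 180-1 that they implement:

    /// Scalar SHA-1 message schedule, expanding one 16-word block into
    /// the 80 words consumed by the 80 rounds.
    fn scalar_schedule(block: &[u32; 16]) -> [u32; 80] {
        let mut w = [0u32; 80];
        w[..16].copy_from_slice(block);
        for t in 16..80 {
            w[t] = (w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16]).rotate_left(1);
        }
        w
    }
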
diff --git a/third_party/rust/sha1/src/compress/x86.rs b/third_party/rust/sha1/src/compress/x86.rs
new file mode 100644
index 0000000000..4dcd56b8a7
--- /dev/null
+++ b/third_party/rust/sha1/src/compress/x86.rs
@@ -0,0 +1,112 @@
+//! SHA-1 `x86`/`x86_64` backend
+
+#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+
+macro_rules! rounds4 {
+ ($h0:ident, $h1:ident, $wk:expr, $i:expr) => {
+ _mm_sha1rnds4_epu32($h0, _mm_sha1nexte_epu32($h1, $wk), $i)
+ };
+}
+
+macro_rules! schedule {
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {
+ _mm_sha1msg2_epu32(_mm_xor_si128(_mm_sha1msg1_epu32($v0, $v1), $v2), $v3)
+ };
+}
+
+macro_rules! schedule_rounds4 {
+ (
+ $h0:ident, $h1:ident,
+ $w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr,
+ $i:expr
+ ) => {
+ $w4 = schedule!($w0, $w1, $w2, $w3);
+ $h1 = rounds4!($h0, $h1, $w4, $i);
+ };
+}
+
+#[target_feature(enable = "sha,sse2,ssse3,sse4.1")]
+unsafe fn digest_blocks(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
+ #[allow(non_snake_case)]
+ let MASK: __m128i = _mm_set_epi64x(0x0001_0203_0405_0607, 0x0809_0A0B_0C0D_0E0F);
+
+ let mut state_abcd = _mm_set_epi32(
+ state[0] as i32,
+ state[1] as i32,
+ state[2] as i32,
+ state[3] as i32,
+ );
+ let mut state_e = _mm_set_epi32(state[4] as i32, 0, 0, 0);
+
+ for block in blocks {
+ // SAFETY: we use only unaligned loads with this pointer
+ #[allow(clippy::cast_ptr_alignment)]
+ let block_ptr = block.as_ptr() as *const __m128i;
+
+ let mut w0 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(0)), MASK);
+ let mut w1 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(1)), MASK);
+ let mut w2 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(2)), MASK);
+ let mut w3 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(3)), MASK);
+ #[allow(clippy::needless_late_init)]
+ let mut w4;
+
+ let mut h0 = state_abcd;
+ let mut h1 = _mm_add_epi32(state_e, w0);
+
+ // Rounds 0..20
+ h1 = _mm_sha1rnds4_epu32(h0, h1, 0);
+ h0 = rounds4!(h1, h0, w1, 0);
+ h1 = rounds4!(h0, h1, w2, 0);
+ h0 = rounds4!(h1, h0, w3, 0);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 0);
+
+ // Rounds 20..40
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 1);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 1);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 1);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 1);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 1);
+
+ // Rounds 40..60
+ schedule_rounds4!(h0, h1, w1, w2, w3, w4, w0, 2);
+ schedule_rounds4!(h1, h0, w2, w3, w4, w0, w1, 2);
+ schedule_rounds4!(h0, h1, w3, w4, w0, w1, w2, 2);
+ schedule_rounds4!(h1, h0, w4, w0, w1, w2, w3, 2);
+ schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 2);
+
+ // Rounds 60..80
+ schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 3);
+ schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 3);
+ schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 3);
+ schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 3);
+ schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 3);
+
+ state_abcd = _mm_add_epi32(state_abcd, h0);
+ state_e = _mm_sha1nexte_epu32(h1, state_e);
+ }
+
+ state[0] = _mm_extract_epi32(state_abcd, 3) as u32;
+ state[1] = _mm_extract_epi32(state_abcd, 2) as u32;
+ state[2] = _mm_extract_epi32(state_abcd, 1) as u32;
+ state[3] = _mm_extract_epi32(state_abcd, 0) as u32;
+ state[4] = _mm_extract_epi32(state_e, 3) as u32;
+}
+
+cpufeatures::new!(shani_cpuid, "sha", "sse2", "ssse3", "sse4.1");
+
+pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
+ // TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
+ // after stabilization
+ if shani_cpuid::get() {
+ unsafe {
+ digest_blocks(state, blocks);
+ }
+ } else {
+ super::soft::compress(state, blocks);
+ }
+}
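
Note the reversed indexing when the state vector is packed and unpacked:
`_mm_set_epi32(a, b, c, d)` places its first argument in the highest 32-bit
lane, which is why `state[0]` comes back out via
`_mm_extract_epi32(state_abcd, 3)`. A minimal illustration of that lane
ordering; callers must guarantee SSE4.1 is available, as `digest_blocks` does
via `cpufeatures`.

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sse4.1")]
    unsafe fn lane_order_demo() {
        use core::arch::x86_64::*;
        let v = _mm_set_epi32(3, 2, 1, 0);
        // The first argument of _mm_set_epi32 lands in the highest lane.
        assert_eq!(_mm_extract_epi32(v, 3), 3);
        assert_eq!(_mm_extract_epi32(v, 0), 0);
    }
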
diff --git a/third_party/rust/sha1/src/lib.rs b/third_party/rust/sha1/src/lib.rs
new file mode 100644
index 0000000000..e5fbf2a7bf
--- /dev/null
+++ b/third_party/rust/sha1/src/lib.rs
@@ -0,0 +1,154 @@
+//! Pure Rust implementation of the [SHA-1][1] cryptographic hash algorithm
+//! with optional hardware-specific optimizations.
+//!
+//! # 🚨 Warning: Cryptographically Broken! 🚨
+//!
+//! The SHA-1 hash function should be considered cryptographically broken and
+//! unsuitable for further use in any security critical capacity, as it is
+//! [practically vulnerable to chosen-prefix collisions][2].
+//!
+//! We provide this crate for legacy interoperability purposes only.
+//!
+//! # Usage
+//!
+//! ```rust
+//! use hex_literal::hex;
+//! use sha1::{Sha1, Digest};
+//!
+//! // create a Sha1 object
+//! let mut hasher = Sha1::new();
+//!
+//! // process input message
+//! hasher.update(b"hello world");
+//!
+//! // acquire hash digest in the form of GenericArray,
+//! // which in this case is equivalent to [u8; 20]
+//! let result = hasher.finalize();
+//! assert_eq!(result[..], hex!("2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"));
+//! ```
+//!
+//! Also see the [RustCrypto/hashes][3] readme.
+//!
+//! # Note for users of `sha1 v0.6`
+//!
+//! This crate has been transferred to the RustCrypto organization and uses
+//! the implementation previously published as the `sha-1` crate. The previous
+//! zero-dependency version is now published as the [`sha1_smol`] crate.
+//!
+//! [1]: https://en.wikipedia.org/wiki/SHA-1
+//! [2]: https://sha-mbles.github.io/
+//! [3]: https://github.com/RustCrypto/hashes
+//! [`sha1_smol`]: https://github.com/mitsuhiko/sha1-smol/
+
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
+ html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
+)]
+#![warn(missing_docs, rust_2018_idioms)]
+
+pub use digest::{self, Digest};
+
+use core::{fmt, slice::from_ref};
+#[cfg(feature = "oid")]
+use digest::const_oid::{AssociatedOid, ObjectIdentifier};
+use digest::{
+ block_buffer::Eager,
+ core_api::{
+ AlgorithmName, Block, BlockSizeUser, Buffer, BufferKindUser, CoreWrapper, FixedOutputCore,
+ OutputSizeUser, Reset, UpdateCore,
+ },
+ typenum::{Unsigned, U20, U64},
+ HashMarker, Output,
+};
+
+mod compress;
+
+#[cfg(feature = "compress")]
+pub use compress::compress;
+#[cfg(not(feature = "compress"))]
+use compress::compress;
+
+const STATE_LEN: usize = 5;
+
+/// Core SHA-1 hasher state.
+#[derive(Clone)]
+pub struct Sha1Core {
+ h: [u32; STATE_LEN],
+ block_len: u64,
+}
+
+impl HashMarker for Sha1Core {}
+
+impl BlockSizeUser for Sha1Core {
+ type BlockSize = U64;
+}
+
+impl BufferKindUser for Sha1Core {
+ type BufferKind = Eager;
+}
+
+impl OutputSizeUser for Sha1Core {
+ type OutputSize = U20;
+}
+
+impl UpdateCore for Sha1Core {
+ #[inline]
+ fn update_blocks(&mut self, blocks: &[Block<Self>]) {
+ self.block_len += blocks.len() as u64;
+ compress(&mut self.h, blocks);
+ }
+}
+
+impl FixedOutputCore for Sha1Core {
+ #[inline]
+ fn finalize_fixed_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>) {
+ let bs = Self::BlockSize::U64;
+ let bit_len = 8 * (buffer.get_pos() as u64 + bs * self.block_len);
+
+ let mut h = self.h;
+ buffer.len64_padding_be(bit_len, |b| compress(&mut h, from_ref(b)));
+ for (chunk, v) in out.chunks_exact_mut(4).zip(h.iter()) {
+ chunk.copy_from_slice(&v.to_be_bytes());
+ }
+ }
+}
+
+impl Default for Sha1Core {
+ #[inline]
+ fn default() -> Self {
+ Self {
+ h: [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0],
+ block_len: 0,
+ }
+ }
+}
+
+impl Reset for Sha1Core {
+ #[inline]
+ fn reset(&mut self) {
+ *self = Default::default();
+ }
+}
+
+impl AlgorithmName for Sha1Core {
+ fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("Sha1")
+ }
+}
+
+impl fmt::Debug for Sha1Core {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("Sha1Core { ... }")
+ }
+}
+
+#[cfg(feature = "oid")]
+#[cfg_attr(docsrs, doc(cfg(feature = "oid")))]
+impl AssociatedOid for Sha1Core {
+ const OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.14.3.2.26");
+}
+
+/// SHA-1 hasher state.
+pub type Sha1 = CoreWrapper<Sha1Core>;
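
A short sketch of the two usual entry points exposed through the re-exported
`Digest` trait: one-shot hashing via `Sha1::digest`, and incremental hashing
with hasher reuse via `finalize_reset`.

    use sha1::{Digest, Sha1};

    fn main() {
        // One-shot convenience method.
        let one_shot = Sha1::digest(b"hello world");

        // Incremental: feed data in pieces; finalize_reset() returns
        // the digest and restores the hasher to its initial state.
        let mut hasher = Sha1::new();
        hasher.update(b"hello ");
        hasher.update(b"world");
        let incremental = hasher.finalize_reset();

        assert_eq!(one_shot, incremental);
    }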