diff options
Diffstat (limited to 'third_party/rust/httparse/src/simd')
-rw-r--r-- | third_party/rust/httparse/src/simd/avx2.rs | 181 | ||||
-rw-r--r-- | third_party/rust/httparse/src/simd/fallback.rs | 8 | ||||
-rw-r--r-- | third_party/rust/httparse/src/simd/mod.rs | 291 | ||||
-rw-r--r-- | third_party/rust/httparse/src/simd/sse42.rs | 157 |
4 files changed, 637 insertions, 0 deletions
diff --git a/third_party/rust/httparse/src/simd/avx2.rs b/third_party/rust/httparse/src/simd/avx2.rs new file mode 100644 index 0000000000..6bea358640 --- /dev/null +++ b/third_party/rust/httparse/src/simd/avx2.rs @@ -0,0 +1,181 @@ +use crate::iter::Bytes; + +pub enum Scan { + /// Returned when an implementation finds a noteworthy token. + Found, + /// Returned when an implementation couldn't keep running because the input was too short. + TooShort, +} + + +pub unsafe fn parse_uri_batch_32(bytes: &mut Bytes) -> Scan { + while bytes.as_ref().len() >= 32 { + let advance = match_url_char_32_avx(bytes.as_ref()); + bytes.advance(advance); + + if advance != 32 { + return Scan::Found; + } + } + Scan::TooShort +} + +#[cfg(target_arch = "x86_64")] +#[target_feature(enable = "avx2")] +#[inline] +#[allow(non_snake_case, overflowing_literals)] +unsafe fn match_url_char_32_avx(buf: &[u8]) -> usize { + debug_assert!(buf.len() >= 32); + + /* + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + */ + use core::arch::x86_64::*; + + let ptr = buf.as_ptr(); + + let LSH: __m256i = _mm256_set1_epi8(0x0f); + + // See comment in sse42::match_url_char_16_sse. 
+ + let URI: __m256i = _mm256_setr_epi8( + 0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, + 0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c, + 0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, + 0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c, + ); + let ARF: __m256i = _mm256_setr_epi8( + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ); + + let data = _mm256_lddqu_si256(ptr as *const _); + let rbms = _mm256_shuffle_epi8(URI, data); + let cols = _mm256_and_si256(LSH, _mm256_srli_epi16(data, 4)); + let bits = _mm256_and_si256(_mm256_shuffle_epi8(ARF, cols), rbms); + + let v = _mm256_cmpeq_epi8(bits, _mm256_setzero_si256()); + let r = 0xffff_ffff_0000_0000 | _mm256_movemask_epi8(v) as u64; + + _tzcnt_u64(r) as usize +} + +#[cfg(target_arch = "x86")] +unsafe fn match_url_char_32_avx(_: &[u8]) -> usize { + unreachable!("AVX2 detection should be disabled for x86"); +} + +pub unsafe fn match_header_value_batch_32(bytes: &mut Bytes) -> Scan { + while bytes.as_ref().len() >= 32 { + let advance = match_header_value_char_32_avx(bytes.as_ref()); + bytes.advance(advance); + + if advance != 32 { + return Scan::Found; + } + } + Scan::TooShort +} + +#[cfg(target_arch = "x86_64")] +#[target_feature(enable = "avx2")] +#[inline] +#[allow(non_snake_case)] +unsafe fn match_header_value_char_32_avx(buf: &[u8]) -> usize { + debug_assert!(buf.len() >= 32); + + /* + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + */ + use core::arch::x86_64::*; + + let ptr = buf.as_ptr(); + + // %x09 %x20-%x7e %x80-%xff + let TAB: __m256i = _mm256_set1_epi8(0x09); + let DEL: __m256i = _mm256_set1_epi8(0x7f); + let LOW: __m256i = _mm256_set1_epi8(0x20); + + let dat = _mm256_lddqu_si256(ptr as *const _); + // unsigned comparison dat >= LOW + let low = _mm256_cmpeq_epi8(_mm256_max_epu8(dat, LOW), dat); + let tab = 
_mm256_cmpeq_epi8(dat, TAB); + let del = _mm256_cmpeq_epi8(dat, DEL); + let bit = _mm256_andnot_si256(del, _mm256_or_si256(low, tab)); + let rev = _mm256_cmpeq_epi8(bit, _mm256_setzero_si256()); + let res = 0xffff_ffff_0000_0000 | _mm256_movemask_epi8(rev) as u64; + + _tzcnt_u64(res) as usize +} + +#[cfg(target_arch = "x86")] +unsafe fn match_header_value_char_32_avx(_: &[u8]) -> usize { + unreachable!("AVX2 detection should be disabled for x86"); +} + +#[test] +fn avx2_code_matches_uri_chars_table() { + match super::detect() { + super::AVX_2 | super::AVX_2_AND_SSE_42 => {}, + _ => return, + } + + unsafe { + assert!(byte_is_allowed(b'_', parse_uri_batch_32)); + + for (b, allowed) in crate::URI_MAP.iter().cloned().enumerate() { + assert_eq!( + byte_is_allowed(b as u8, parse_uri_batch_32), allowed, + "byte_is_allowed({:?}) should be {:?}", b, allowed, + ); + } + } +} + +#[test] +fn avx2_code_matches_header_value_chars_table() { + match super::detect() { + super::AVX_2 | super::AVX_2_AND_SSE_42 => {}, + _ => return, + } + + unsafe { + assert!(byte_is_allowed(b'_', match_header_value_batch_32)); + + for (b, allowed) in crate::HEADER_VALUE_MAP.iter().cloned().enumerate() { + assert_eq!( + byte_is_allowed(b as u8, match_header_value_batch_32), allowed, + "byte_is_allowed({:?}) should be {:?}", b, allowed, + ); + } + } +} + +#[cfg(test)] +unsafe fn byte_is_allowed(byte: u8, f: unsafe fn(bytes: &mut Bytes<'_>) -> Scan) -> bool { + let slice = [ + b'_', b'_', b'_', b'_', + b'_', b'_', b'_', b'_', + b'_', b'_', b'_', b'_', + b'_', b'_', b'_', b'_', + b'_', b'_', b'_', b'_', + b'_', b'_', b'_', b'_', + b'_', b'_', byte, b'_', + b'_', b'_', b'_', b'_', + ]; + let mut bytes = Bytes::new(&slice); + + f(&mut bytes); + + match bytes.pos() { + 32 => true, + 26 => false, + _ => unreachable!(), + } +} diff --git a/third_party/rust/httparse/src/simd/fallback.rs b/third_party/rust/httparse/src/simd/fallback.rs new file mode 100644 index 0000000000..871cd01f1a --- /dev/null +++ 
b/third_party/rust/httparse/src/simd/fallback.rs @@ -0,0 +1,8 @@ +use crate::iter::Bytes; + +// Fallbacks that do nothing... + +#[inline(always)] +pub fn match_uri_vectored(_: &mut Bytes<'_>) {} +#[inline(always)] +pub fn match_header_value_vectored(_: &mut Bytes<'_>) {} diff --git a/third_party/rust/httparse/src/simd/mod.rs b/third_party/rust/httparse/src/simd/mod.rs new file mode 100644 index 0000000000..b1cd85ce09 --- /dev/null +++ b/third_party/rust/httparse/src/simd/mod.rs @@ -0,0 +1,291 @@ +#[cfg(not(all( + httparse_simd, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +)))] +mod fallback; + +#[cfg(not(all( + httparse_simd, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +)))] +pub use self::fallback::*; + +#[cfg(all( + httparse_simd, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +mod sse42; + +#[cfg(all( + httparse_simd, + any( + httparse_simd_target_feature_avx2, + not(httparse_simd_target_feature_sse42), + ), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +mod avx2; + +#[cfg(all( + httparse_simd, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +pub const SSE_42: usize = 1; +#[cfg(all( + httparse_simd, + any(not(httparse_simd_target_feature_sse42), httparse_simd_target_feature_avx2), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +pub const AVX_2: usize = 2; +#[cfg(all( + httparse_simd, + any( + not(httparse_simd_target_feature_sse42), + httparse_simd_target_feature_avx2, + test, + ), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +pub const AVX_2_AND_SSE_42: usize = 3; + +#[cfg(all( + httparse_simd, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +const NONE: usize = std::usize::MAX; +#[cfg(all( + httparse_simd, + not(any( + httparse_simd_target_feature_sse42, + httparse_simd_target_feature_avx2, + )), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +mod runtime { + //! 
Runtime detection of simd features. Used when the build script + //! doesn't notice any target features at build time. + //! + //! While `is_x86_feature_detected!` has its own caching built-in, + //! at least in 1.27.0, the functions don't inline, leaving using it + //! actually *slower* than just using the scalar fallback. + + use core::sync::atomic::{AtomicUsize, Ordering}; + + static FEATURE: AtomicUsize = AtomicUsize::new(0); + + const INIT: usize = 0; + + pub fn detect() -> usize { + let feat = FEATURE.load(Ordering::Relaxed); + if feat == INIT { + if cfg!(target_arch = "x86_64") && is_x86_feature_detected!("avx2") { + if is_x86_feature_detected!("sse4.2") { + FEATURE.store(super::AVX_2_AND_SSE_42, Ordering::Relaxed); + return super::AVX_2_AND_SSE_42; + } else { + FEATURE.store(super::AVX_2, Ordering::Relaxed); + return super::AVX_2; + } + } else if is_x86_feature_detected!("sse4.2") { + FEATURE.store(super::SSE_42, Ordering::Relaxed); + return super::SSE_42; + } else { + FEATURE.store(super::NONE, Ordering::Relaxed); + } + } + feat + } + + pub fn match_uri_vectored(bytes: &mut crate::iter::Bytes) { + unsafe { + match detect() { + super::SSE_42 => super::sse42::parse_uri_batch_16(bytes), + super::AVX_2 => { super::avx2::parse_uri_batch_32(bytes); }, + super::AVX_2_AND_SSE_42 => { + if let super::avx2::Scan::Found = super::avx2::parse_uri_batch_32(bytes) { + return; + } + super::sse42::parse_uri_batch_16(bytes) + }, + _ => () + } + } + + // else do nothing + } + + pub fn match_header_value_vectored(bytes: &mut crate::iter::Bytes) { + unsafe { + match detect() { + super::SSE_42 => super::sse42::match_header_value_batch_16(bytes), + super::AVX_2 => { super::avx2::match_header_value_batch_32(bytes); }, + super::AVX_2_AND_SSE_42 => { + if let super::avx2::Scan::Found = super::avx2::match_header_value_batch_32(bytes) { + return; + } + super::sse42::match_header_value_batch_16(bytes) + }, + _ => () + } + } + + // else do nothing + } +} + +#[cfg(all( + httparse_simd, 
+ not(any( + httparse_simd_target_feature_sse42, + httparse_simd_target_feature_avx2, + )), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +pub use self::runtime::*; + +#[cfg(all( + httparse_simd, + httparse_simd_target_feature_sse42, + not(httparse_simd_target_feature_avx2), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +mod sse42_compile_time { + pub fn match_uri_vectored(bytes: &mut crate::iter::Bytes) { + if detect() == super::SSE_42 { + unsafe { + super::sse42::parse_uri_batch_16(bytes); + } + } + + // else do nothing + } + + pub fn match_header_value_vectored(bytes: &mut crate::iter::Bytes) { + if detect() == super::SSE_42 { + unsafe { + super::sse42::match_header_value_batch_16(bytes); + } + } + + // else do nothing + } + + pub fn detect() -> usize { + if is_x86_feature_detected!("sse4.2") { + super::SSE_42 + } else { + super::NONE + } + } +} + +#[cfg(all( + httparse_simd, + httparse_simd_target_feature_sse42, + not(httparse_simd_target_feature_avx2), + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +pub use self::sse42_compile_time::*; + +#[cfg(all( + httparse_simd, + httparse_simd_target_feature_avx2, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +mod avx2_compile_time { + pub fn match_uri_vectored(bytes: &mut crate::iter::Bytes) { + // do both, since avx2 only works when bytes.len() >= 32 + if detect() == super::AVX_2_AND_SSE_42 { + unsafe { + super::avx2::parse_uri_batch_32(bytes); + } + + } + if detect() == super::SSE_42 { + unsafe { + super::sse42::parse_uri_batch_16(bytes); + } + } + + // else do nothing + } + + pub fn match_header_value_vectored(bytes: &mut crate::iter::Bytes) { + // do both, since avx2 only works when bytes.len() >= 32 + if detect() == super::AVX_2_AND_SSE_42 { + let scanned = unsafe { + super::avx2::match_header_value_batch_32(bytes) + }; + + if let super::avx2::Scan::Found = scanned { + return; + } + } + if detect() == super::SSE_42 { + unsafe { + 
super::sse42::match_header_value_batch_16(bytes); + } + } + + // else do nothing + } + + pub fn detect() -> usize { + if cfg!(target_arch = "x86_64") && is_x86_feature_detected!("avx2") { + super::AVX_2_AND_SSE_42 + } else if is_x86_feature_detected!("sse4.2") { + super::SSE_42 + } else { + super::NONE + } + } +} + +#[cfg(all( + httparse_simd, + httparse_simd_target_feature_avx2, + any( + target_arch = "x86", + target_arch = "x86_64", + ), +))] +pub use self::avx2_compile_time::*; diff --git a/third_party/rust/httparse/src/simd/sse42.rs b/third_party/rust/httparse/src/simd/sse42.rs new file mode 100644 index 0000000000..8caf8f779e --- /dev/null +++ b/third_party/rust/httparse/src/simd/sse42.rs @@ -0,0 +1,157 @@ +use crate::iter::Bytes; + +pub unsafe fn parse_uri_batch_16(bytes: &mut Bytes) { + while bytes.as_ref().len() >= 16 { + let advance = match_url_char_16_sse(bytes.as_ref()); + bytes.advance(advance); + + if advance != 16 { + break; + } + } +} + +#[target_feature(enable = "sse4.2")] +#[allow(non_snake_case, overflowing_literals)] +unsafe fn match_url_char_16_sse(buf: &[u8]) -> usize { + debug_assert!(buf.len() >= 16); + + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + + let ptr = buf.as_ptr(); + + let LSH: __m128i = _mm_set1_epi8(0x0f); + + // The first 0xf8 corresponds to the 8 first rows of the first column + // of URI_MAP in the crate's root, with the first row corresponding to bit 0 + // and the 8th row corresponding to bit 7. + // The 8 first rows give 0 0 0 1 1 1 1 1, which is 0xf8 (with least + // significant digit on the left). + // + // Another example just to drive the point home: in column 15, '>' is + // rejected, so the values are 0 0 1 0 1 1 1 1, which gives us 0xf4. + // + // Thanks to Vlad Krasnov for explaining this stuff to us mere mortals in + // a GitHub comment! 
+ // + // https://github.com/seanmonstar/httparse/pull/89#issuecomment-807039219 + + let URI: __m128i = _mm_setr_epi8( + 0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, + 0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c, + ); + let ARF: __m128i = _mm_setr_epi8( + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ); + + let data = _mm_lddqu_si128(ptr as *const _); + let rbms = _mm_shuffle_epi8(URI, data); + let cols = _mm_and_si128(LSH, _mm_srli_epi16(data, 4)); + let bits = _mm_and_si128(_mm_shuffle_epi8(ARF, cols), rbms); + + let v = _mm_cmpeq_epi8(bits, _mm_setzero_si128()); + let r = 0xffff_0000 | _mm_movemask_epi8(v) as u32; + + _tzcnt_u32(r) as usize +} + +pub unsafe fn match_header_value_batch_16(bytes: &mut Bytes) { + while bytes.as_ref().len() >= 16 { + let advance = match_header_value_char_16_sse(bytes.as_ref()); + bytes.advance(advance); + + if advance != 16 { + break; + } + } +} + +#[target_feature(enable = "sse4.2")] +#[allow(non_snake_case)] +unsafe fn match_header_value_char_16_sse(buf: &[u8]) -> usize { + debug_assert!(buf.len() >= 16); + + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + + let ptr = buf.as_ptr(); + + // %x09 %x20-%x7e %x80-%xff + let TAB: __m128i = _mm_set1_epi8(0x09); + let DEL: __m128i = _mm_set1_epi8(0x7f); + let LOW: __m128i = _mm_set1_epi8(0x20); + + let dat = _mm_lddqu_si128(ptr as *const _); + // unsigned comparison dat >= LOW + let low = _mm_cmpeq_epi8(_mm_max_epu8(dat, LOW), dat); + let tab = _mm_cmpeq_epi8(dat, TAB); + let del = _mm_cmpeq_epi8(dat, DEL); + let bit = _mm_andnot_si128(del, _mm_or_si128(low, tab)); + let rev = _mm_cmpeq_epi8(bit, _mm_setzero_si128()); + let res = 0xffff_0000 | _mm_movemask_epi8(rev) as u32; + + _tzcnt_u32(res) as usize +} + +#[test] +fn sse_code_matches_uri_chars_table() { + match super::detect() { + super::SSE_42 | super::AVX_2_AND_SSE_42 => {}, + _ => return, + } + + unsafe { 
+ assert!(byte_is_allowed(b'_', parse_uri_batch_16)); + + for (b, allowed) in crate::URI_MAP.iter().cloned().enumerate() { + assert_eq!( + byte_is_allowed(b as u8, parse_uri_batch_16), allowed, + "byte_is_allowed({:?}) should be {:?}", b, allowed, + ); + } + } +} + +#[test] +fn sse_code_matches_header_value_chars_table() { + match super::detect() { + super::SSE_42 | super::AVX_2_AND_SSE_42 => {}, + _ => return, + } + + unsafe { + assert!(byte_is_allowed(b'_', match_header_value_batch_16)); + + for (b, allowed) in crate::HEADER_VALUE_MAP.iter().cloned().enumerate() { + assert_eq!( + byte_is_allowed(b as u8, match_header_value_batch_16), allowed, + "byte_is_allowed({:?}) should be {:?}", b, allowed, + ); + } + } +} + +#[cfg(test)] +unsafe fn byte_is_allowed(byte: u8, f: unsafe fn(bytes: &mut Bytes<'_>)) -> bool { + let slice = [ + b'_', b'_', b'_', b'_', + b'_', b'_', b'_', b'_', + b'_', b'_', byte, b'_', + b'_', b'_', b'_', b'_', + ]; + let mut bytes = Bytes::new(&slice); + + f(&mut bytes); + + match bytes.pos() { + 16 => true, + 10 => false, + _ => unreachable!(), + } +} |