#![cfg(exhaustive)]

use std::str;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

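// Exhaustively checks that every finite f32 round-trips: each bit pattern is
// formatted by ryu, and the output must parse back to exactly the original value.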
#[test]
fn test_exhaustive() {
    const BATCH_SIZE: u32 = 1_000_000;

    let counter = Arc::new(AtomicUsize::new(0));
    let finished = Arc::new(AtomicUsize::new(0));

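    // One worker thread per logical CPU; together the workers sweep all 2^32
    // f32 bit patterns in fixed-size batches.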
    let mut workers = Vec::new();
    for _ in 0..num_cpus::get() {
        let counter = counter.clone();
        let finished = finished.clone();
        workers.push(thread::spawn(move || loop {
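            // Atomically claim the next batch index; exit once every batch
            // has been handed out.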
            let batch = counter.fetch_add(1, Ordering::Relaxed) as u32;
            if batch > u32::max_value() / BATCH_SIZE {
                return;
            }

            let min = batch * BATCH_SIZE;
            let max = if batch == u32::max_value() / BATCH_SIZE {
                u32::max_value()
            } else {
                min + BATCH_SIZE - 1
            };

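            // Round-trip every value in this batch through both the raw API
            // and the safe Buffer API.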
            let mut bytes = [0u8; 24];
            let mut buffer = ryu::Buffer::new();
            for u in min..=max {
                let f = f32::from_bits(u);
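                // Skip NaN and the infinities: only finite values are
                // formatted, and NaN would not compare equal after parsing.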
                if !f.is_finite() {
                    continue;
                }
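                // The raw API writes the decimal text into `bytes` and
                // returns the number of bytes written.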
                let n = unsafe { ryu::raw::format32(f, &mut bytes[0]) };
                assert_eq!(Ok(Ok(f)), str::from_utf8(&bytes[..n]).map(str::parse));
                assert_eq!(Ok(f), buffer.format_finite(f).parse());
            }

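            // Report cumulative progress: the total number of bit patterns
            // handled so far across all workers.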
            let increment = (max - min + 1) as usize;
            let update = finished.fetch_add(increment, Ordering::Relaxed);
            println!("{}", update + increment);
        }));
    }

    for w in workers {
        w.join().unwrap();
    }
}