// library/std/src/sys/sgx/abi/tls/sync_bitset.rs
#[cfg(test)]
mod tests;

use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS};
use crate::iter::{Enumerate, Peekable};
use crate::slice::Iter;
use crate::sync::atomic::{AtomicUsize, Ordering};

/// A bitset that can be used concurrently from multiple threads.
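///
/// Single-bit operations (`get`, `clear`, and each individual claim inside
/// `set`) are atomic; `set` and `iter` scan multiple words and are therefore
/// not atomic as a whole.
///
/// A minimal usage sketch, assuming the bitset is held in a `static` (marked
/// `ignore` because the types are module-private):
///
/// ```ignore (module-private)
/// static BITSET: SyncBitset = SYNC_BITSET_INIT;
///
/// // Claim the lowest free bit, check it, then release it again.
/// let key = BITSET.set().expect("a bit should be free");
/// assert!(BITSET.get(key));
/// BITSET.clear(key);
/// assert!(!BITSET.get(key));
/// ```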
pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]);

// The initializer writes out each word by hand, so this only compiles while
// `TLS_KEYS_BITSET_SIZE == 2`.
pub(super) const SYNC_BITSET_INIT: SyncBitset =
    SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]);

impl SyncBitset {
    pub fn get(&self, index: usize) -> bool {
        let (hi, lo) = Self::split(index);
        (self.0[hi].load(Ordering::Relaxed) & lo) != 0
    }

    /// Not atomic: the iterator loads each word separately, so bits that are
    /// set or cleared concurrently may or may not be observed.
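    ///
    /// A hypothetical scan, assuming a `static BITSET: SyncBitset` as in the
    /// type-level example:
    ///
    /// ```ignore (module-private)
    /// for key in BITSET.iter() {
    ///     // `key` is the index of a bit that was observed to be set,
    ///     // yielded in increasing order.
    /// }
    /// ```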
    pub fn iter(&self) -> SyncBitsetIter<'_> {
        SyncBitsetIter { iter: self.0.iter().enumerate().peekable(), elem_idx: 0 }
    }

    pub fn clear(&self, index: usize) {
        let (hi, lo) = Self::split(index);
        self.0[hi].fetch_and(!lo, Ordering::Relaxed);
    }

    /// Sets any unset bit. Not atomic. Returns `None` if all bits were
    /// observed to be set.
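    ///
    /// Sketch of the two outcomes, again assuming a `static BITSET: SyncBitset`:
    ///
    /// ```ignore (module-private)
    /// match BITSET.set() {
    ///     Some(key) => assert!(BITSET.get(key)), // the claimed bit is now set
    ///     None => { /* every bit was already in use */ }
    /// }
    /// ```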
    pub fn set(&self) -> Option<usize> {
        'elems: for (idx, elem) in self.0.iter().enumerate() {
            let mut current = elem.load(Ordering::Relaxed);
            loop {
                // Every bit in this word is already set; move on to the next word.
                if 0 == !current {
                    continue 'elems;
                }
                // Index of the lowest clear bit: the number of trailing one-bits
                // in `current`.
                let trailing_ones = (!current).trailing_zeros() as usize;
                // Try to claim that bit; if another thread changed the word in the
                // meantime, retry with the freshly observed value.
                match elem.compare_exchange(
                    current,
                    current | (1 << trailing_ones),
                    Ordering::AcqRel,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return Some(idx * USIZE_BITS + trailing_ones),
                    Err(previous) => current = previous,
                }
            }
        }
        None
    }

    /// Splits a bit index into a word index and a single-bit mask within that
    /// word.
    fn split(index: usize) -> (usize, usize) {
        (index / USIZE_BITS, 1 << (index % USIZE_BITS))
    }
}

/// Iterator over the indices of the set bits in a [`SyncBitset`], yielded in
/// increasing order. Created by [`SyncBitset::iter`].
pub(super) struct SyncBitsetIter<'a> {
    iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>,
    elem_idx: usize,
}

impl<'a> Iterator for SyncBitsetIter<'a> {
    type Item = usize;

    fn next(&mut self) -> Option<usize> {
        self.iter.peek().cloned().and_then(|(idx, elem)| {
            let elem = elem.load(Ordering::Relaxed);
            // Mask off the bits of this word that have already been yielded.
            let low_mask = (1 << self.elem_idx) - 1;
            let next = elem & !low_mask;
            // Position of the next set bit in this word, or 64 if there is none
            // (64 == USIZE_BITS on this target).
            let next_idx = next.trailing_zeros() as usize;
            self.elem_idx = next_idx + 1;
            if self.elem_idx >= 64 {
                // This word is exhausted; restart at bit 0 of the next word.
                self.elem_idx = 0;
                self.iter.next();
            }
            match next_idx {
                // No set bit was left in this word; continue with the next one.
                64 => self.next(),
                _ => Some(idx * USIZE_BITS + next_idx),
            }
        })
    }
}