summaryrefslogtreecommitdiffstats
path: root/vendor/sharded-slab/src/page/stack.rs
blob: e28d9b1a758cd44a95c68bb77e15348edc127f58 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
use crate::cfg;
use crate::sync::atomic::{AtomicUsize, Ordering};
use std::{fmt, marker::PhantomData};

/// A lock-free, intrusive stack of slot indices.
///
/// The entire state is the `head` word: it holds either `Addr::NULL` (stack
/// is empty) or the index of the topmost slot; each slot stores the index of
/// the next one via `Slot::set_next` (see the `FreeList` impl below).
/// NOTE(review): judging by the name and the `pop_all`/`push` API, this
/// appears to collect slots freed remotely so the owning page can reclaim
/// them in one batch — confirm against the callers in `page/mod.rs`.
pub(super) struct TransferStack<C = cfg::DefaultConfig> {
    // Index of the first slot on the stack, or `Addr::<C>::NULL` when empty.
    head: AtomicUsize,
    // Ties the stack to a config type without storing a `C`; `fn(C)` avoids
    // requiring `C` itself to satisfy auto-trait bounds.
    _cfg: PhantomData<fn(C)>,
}

impl<C: cfg::Config> TransferStack<C> {
    /// Returns a new, empty stack whose head is the null address.
    pub(super) fn new() -> Self {
        Self {
            head: AtomicUsize::new(super::Addr::<C>::NULL),
            _cfg: PhantomData,
        }
    }

    /// Atomically takes the whole stack, leaving it empty.
    ///
    /// Returns the index of the topmost slot, or `None` if the stack was
    /// empty. The `Acquire` on the swap pairs with the `Release` in `push`,
    /// so writes performed by `before` in a pusher are visible here.
    pub(super) fn pop_all(&self) -> Option<usize> {
        let head = self.head.swap(super::Addr::<C>::NULL, Ordering::Acquire);
        test_println!("-> pop {:#x}", head);
        if head == super::Addr::<C>::NULL {
            None
        } else {
            Some(head)
        }
    }

    /// Pushes `new_head` onto the stack via a CAS loop.
    ///
    /// On every attempt, `before` is called with the head we expect to
    /// replace *prior* to the CAS, so the caller can link the new slot to
    /// the rest of the stack before it becomes reachable.
    fn push(&self, new_head: usize, before: impl Fn(usize)) {
        // The head we expect to replace; refreshed from the CAS failure
        // value whenever another thread wins the race.
        let mut expected = self.head.load(Ordering::Relaxed);
        loop {
            test_println!("-> next {:#x}", expected);
            // Link the new node before publishing it.
            before(expected);

            match self
                .head
                .compare_exchange(expected, new_head, Ordering::Release, Ordering::Relaxed)
            {
                Ok(_) => {
                    test_println!("-> successful; next={:#x}", expected);
                    return;
                }
                // Lost the race — retry against the head we actually saw.
                Err(actual) => {
                    test_println!("-> retry!");
                    expected = actual;
                }
            }
        }
    }
}

impl<C: cfg::Config> super::FreeList<C> for TransferStack<C> {
    /// Pushes `slot` (located at index `new_head`) onto the stack, recording
    /// the previous head as the slot's `next` link before it is published.
    fn push<T>(&self, new_head: usize, slot: &super::Slot<T, C>) {
        self.push(new_head, |prev| slot.set_next(prev))
    }
}

impl<C> fmt::Debug for TransferStack<C> {
    /// Formats the stack as `TransferStack { head: 0x… }`, showing the raw
    /// head index in hexadecimal. `Relaxed` is sufficient: this is a
    /// best-effort diagnostic snapshot, not a synchronization point.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let head = self.head.load(Ordering::Relaxed);
        let mut dbg = f.debug_struct("TransferStack");
        dbg.field("head", &format_args!("{:#0x}", head));
        dbg.finish()
    }
}

#[cfg(all(loom, test))]
mod test {
    use super::*;
    use crate::{sync::UnsafeCell, test_util};
    use loom::thread;
    use std::sync::Arc;

    /// Loom model: two threads each write to a `UnsafeCell` inside the
    /// `before` callback and then push the matching index; the main thread
    /// pops and asserts the write happened-before the push was observed.
    #[test]
    fn transfer_stack() {
        test_util::run_model("transfer_stack", || {
            // 999 is a sentinel: observing it after a pop would mean the
            // causal write was not visible.
            let causalities = [UnsafeCell::new(999), UnsafeCell::new(999)];
            let shared = Arc::new((causalities, TransferStack::<cfg::DefaultConfig>::new()));
            let shared1 = shared.clone();
            let shared2 = shared.clone();

            let t1 = thread::spawn(move || {
                let (causalities, stack) = &*shared1;
                stack.push(0, |prev| {
                    causalities[0].with_mut(|c| unsafe {
                        *c = 0;
                    });
                    test_println!("prev={:#x}", prev)
                });
            });
            let t2 = thread::spawn(move || {
                let (causalities, stack) = &*shared2;
                stack.push(1, |prev| {
                    causalities[1].with_mut(|c| unsafe {
                        *c = 1;
                    });
                    test_println!("prev={:#x}", prev)
                });
            });

            let (causalities, stack) = &*shared;
            // Spin (yielding to the loom scheduler) until a pusher's element
            // becomes visible. `loop`/`if let`/`break` replaces the original
            // non-idiomatic `while idx == None` comparison (clippy
            // `partialeq_to_none`) and its redundant pop-then-yield shape.
            let idx = loop {
                if let Some(idx) = stack.pop_all() {
                    break idx;
                }
                thread::yield_now();
            };
            causalities[idx].with(|val| unsafe {
                assert_eq!(
                    *val, idx,
                    "UnsafeCell write must happen-before index is pushed to the stack!"
                );
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}