path: root/library/std/src/sys/itron/spin.rs
use super::abi;
use crate::{
    cell::UnsafeCell,
    mem::MaybeUninit,
    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};

/// A mutex implemented by `dis_dsp` (for intra-core synchronization) and a
/// spinlock (for inter-core synchronization).
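///
/// # Example
///
/// A minimal usage sketch; `with_locked` gives the closure exclusive access
/// to the protected data and returns the closure's result:
///
/// ```ignore
/// let counter = SpinMutex::new(0u32);
/// let new_value = counter.with_locked(|c| {
///     *c += 1;
///     *c
/// });
/// assert_eq!(new_value, 1);
/// ```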
pub struct SpinMutex<T = ()> {
    locked: AtomicBool,
    data: UnsafeCell<T>,
}

impl<T> SpinMutex<T> {
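    /// Creates a new `SpinMutex` wrapping `x`.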
    #[inline]
    pub const fn new(x: T) -> Self {
        Self { locked: AtomicBool::new(false), data: UnsafeCell::new(x) }
    }

    /// Acquires the lock and runs `f` with exclusive access to the inner
    /// data, releasing the lock when `f` returns.
    #[inline]
    pub fn with_locked<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        struct SpinMutexGuard<'a>(&'a AtomicBool);

        impl Drop for SpinMutexGuard<'_> {
            #[inline]
            fn drop(&mut self) {
                self.0.store(false, Ordering::Release);
                unsafe { abi::ena_dsp() };
            }
        }

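        // Take the lock only if dispatching is currently enabled. If it is
        // already disabled, this call is assumed to be nested inside another
        // `with_locked` on the same processor, which already holds the lock.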
        let _guard;
        if unsafe { abi::sns_dsp() } == 0 {
            let er = unsafe { abi::dis_dsp() };
            debug_assert!(er >= 0);

            // Spin until the current processor acquires the lock.
            while self.locked.swap(true, Ordering::Acquire) {}

            _guard = SpinMutexGuard(&self.locked);
        }

        f(unsafe { &mut *self.data.get() })
    }
}

/// `OnceCell<(abi::ID, T)>` implemented by `dis_dsp` (for intra-core
/// synchronization) and a spinlock (for inter-core synchronization).
///
/// It's assumed that `0` is not a valid ID, and that all kernel
/// object IDs fall into the range `1..=usize::MAX`.
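///
/// # Example
///
/// A minimal sketch of the cell's life cycle; the ID `1` below is a
/// placeholder standing in for an ID returned by a kernel call:
///
/// ```ignore
/// let cell = SpinIdOnceCell::<u32>::new();
/// assert!(cell.get().is_none()); // `id` is still `ID_UNINIT` (i.e., `0`)
///
/// // Safety: the cell is uninitialized, and no other thread is
/// // initializing it.
/// unsafe { cell.set_unchecked((1, 500)) };
/// assert_eq!(cell.get(), Some((1, &500)));
/// ```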
pub struct SpinIdOnceCell<T = ()> {
    id: AtomicUsize,
    spin: SpinMutex<()>,
    extra: UnsafeCell<MaybeUninit<T>>,
}

const ID_UNINIT: usize = 0;

impl<T> SpinIdOnceCell<T> {
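    /// Creates a new, uninitialized cell.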
    #[inline]
    pub const fn new() -> Self {
        Self {
            id: AtomicUsize::new(ID_UNINIT),
            extra: UnsafeCell::new(MaybeUninit::uninit()),
            spin: SpinMutex::new(()),
        }
    }

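    /// Gets the contents of the cell, or `None` if the cell is empty.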
    #[inline]
    pub fn get(&self) -> Option<(abi::ID, &T)> {
        match self.id.load(Ordering::Acquire) {
            ID_UNINIT => None,
            id => Some((id as abi::ID, unsafe { (&*self.extra.get()).assume_init_ref() })),
        }
    }

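    /// Gets the contents of the cell mutably, or `None` if the cell is
    /// empty. Takes `&mut self`, so no synchronization is needed.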
    #[inline]
    pub fn get_mut(&mut self) -> Option<(abi::ID, &mut T)> {
        match *self.id.get_mut() {
            ID_UNINIT => None,
            id => Some((id as abi::ID, unsafe { (&mut *self.extra.get()).assume_init_mut() })),
        }
    }

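    /// Gets the contents of the cell, assuming it has already been
    /// initialized.
    ///
    /// # Safety
    ///
    /// The cell must be fully initialized.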
    #[inline]
    pub unsafe fn get_unchecked(&self) -> (abi::ID, &T) {
        (self.id.load(Ordering::Acquire) as abi::ID, unsafe {
            (&*self.extra.get()).assume_init_ref()
        })
    }

    /// Assigns the contents without checking whether the cell is already
    /// initialized or currently being initialized.
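    ///
    /// # Safety
    ///
    /// The cell must be uninitialized, and no other thread may be
    /// concurrently initializing it.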
    pub unsafe fn set_unchecked(&self, (id, extra): (abi::ID, T)) {
        debug_assert!(self.get().is_none());

        // Assumption: A positive `abi::ID` fits in `usize`.
        debug_assert!(id >= 0);
        debug_assert!(usize::try_from(id).is_ok());
        let id = id as usize;

        unsafe { *self.extra.get() = MaybeUninit::new(extra) };
        self.id.store(id, Ordering::Release);
    }

    /// Gets the contents of the cell, initializing it with `f` if
    /// the cell was empty. If the cell was empty and `f` failed, an
    /// error is returned.
    ///
    /// Warning: `f` must not perform a blocking operation, which
    /// includes panicking.
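    ///
    /// # Example
    ///
    /// A minimal sketch; the initializer below returns a placeholder ID
    /// where real code would create a kernel object:
    ///
    /// ```ignore
    /// let cell = SpinIdOnceCell::new();
    ///
    /// // The first call runs the initializer; subsequent calls return the
    /// // cached pair without running it again.
    /// let (id, _) = cell.get_or_try_init(|| Ok::<_, ()>((1, ()))).unwrap();
    /// assert_eq!(id, 1);
    /// ```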
    #[inline]
    pub fn get_or_try_init<F, E>(&self, f: F) -> Result<(abi::ID, &T), E>
    where
        F: FnOnce() -> Result<(abi::ID, T), E>,
    {
        // Fast path
        if let Some(x) = self.get() {
            return Ok(x);
        }

        self.initialize(f)?;

        debug_assert!(self.get().is_some());

        // Safety: The inner value has been initialized
        Ok(unsafe { self.get_unchecked() })
    }

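    /// The slow path of `get_or_try_init`: runs `f` under the spin lock and
    /// stores the result if the cell is still uninitialized.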
    fn initialize<F, E>(&self, f: F) -> Result<(), E>
    where
        F: FnOnce() -> Result<(abi::ID, T), E>,
    {
        self.spin.with_locked(|_| {
            if self.id.load(Ordering::Relaxed) == ID_UNINIT {
                let (initialized_id, initialized_extra) = f()?;

                // Assumption: A positive `abi::ID` fits in `usize`.
                debug_assert!(initialized_id >= 0);
                debug_assert!(usize::try_from(initialized_id).is_ok());
                let initialized_id = initialized_id as usize;

                // Store the initialized contents. Use the release ordering to
                // make sure the write is visible to the callers of `get`.
                unsafe { *self.extra.get() = MaybeUninit::new(initialized_extra) };
                self.id.store(initialized_id, Ordering::Release);
            }
            Ok(())
        })
    }
}

impl<T> Drop for SpinIdOnceCell<T> {
    #[inline]
    fn drop(&mut self) {
        if self.get_mut().is_some() {
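            // Safety: `get_mut` returned `Some`, so `extra` is initialized.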
            unsafe { (&mut *self.extra.get()).assume_init_drop() };
        }
    }
}