path: root/library/std/src/sys/sgx/rwlock.rs

#[cfg(test)]
mod tests;

use crate::num::NonZeroUsize;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};

use super::waitqueue::{
    try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
};
use crate::mem;

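/// SGX read-write lock built from two spin-locked wait queues: `readers`
/// tracks the number of active readers (`None` when there are none), and
/// `writer` tracks whether a writer currently holds the lock. On unlock, the
/// lock is handed directly to waiting threads through the wait queues instead
/// of being released and re-acquired.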
pub struct RwLock {
    readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
    writer: SpinMutex<WaitVariable<bool>>,
}

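// The user-facing lock type is a lazily heap-allocated `RwLock`, giving it a
// stable address once threads may be waiting on its queues.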
pub(crate) type MovableRwLock = LazyBox<RwLock>;

impl LazyInit for RwLock {
    fn init() -> Box<Self> {
        Box::new(Self::new())
    }
}

// Check at compile time that the `RwLock` size matches the C definition (see
// `test_c_rwlock_initializer` in the tests module).
//
// # Safety
// Never called, as it is a compile time check.
#[allow(dead_code)]
unsafe fn rw_lock_size_assert(r: RwLock) {
    unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
}

impl RwLock {
    pub const fn new() -> RwLock {
        RwLock {
            readers: SpinMutex::new(WaitVariable::new(None)),
            writer: SpinMutex::new(WaitVariable::new(false)),
        }
    }

    #[inline]
    pub unsafe fn read(&self) {
        let mut rguard = self.readers.lock();
        let wguard = self.writer.lock();
        if *wguard.lock_var() || !wguard.queue_empty() {
            // Another thread has the write lock or is waiting for it; wait
            drop(wguard);
            WaitQueue::wait(rguard, || {});
        // Another thread has passed the lock to us
        } else {
            // No waiting writers, acquire the read lock
            *rguard.lock_var_mut() =
                NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
        }
    }

    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let mut rguard = try_lock_or_false!(self.readers);
        let wguard = try_lock_or_false!(self.writer);
        if *wguard.lock_var() || !wguard.queue_empty() {
            // Another thread has the write lock or is waiting for it
            false
        } else {
            // No waiting writers, acquire the read lock
            *rguard.lock_var_mut() =
                NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
            true
        }
    }

    #[inline]
    pub unsafe fn write(&self) {
        let rguard = self.readers.lock();
        let mut wguard = self.writer.lock();
        if *wguard.lock_var() || rguard.lock_var().is_some() {
            // Another thread has the lock, wait
            drop(rguard);
            WaitQueue::wait(wguard, || {});
        // Another thread has passed the lock to us
        } else {
            // We are just now obtaining the lock
            *wguard.lock_var_mut() = true;
        }
    }

    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let rguard = try_lock_or_false!(self.readers);
        let mut wguard = try_lock_or_false!(self.writer);
        if *wguard.lock_var() || rguard.lock_var().is_some() {
            // Another thread has the lock
            false
        } else {
            // We are just now obtaining the lock
            *wguard.lock_var_mut() = true;
            true
        }
    }

    #[inline]
    unsafe fn __read_unlock(
        &self,
        mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>,
        wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
    ) {
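        // Decrement the reader count; when the last reader leaves, hand the
        // lock to a queued writer if one is waiting.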
        *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1);
        if rguard.lock_var().is_some() {
            // There are other active readers
        } else {
            if let Ok(mut wguard) = WaitQueue::notify_one(wguard) {
                // A writer was waiting, pass the lock
                *wguard.lock_var_mut() = true;
                wguard.drop_after(rguard);
            } else {
                // No writers were waiting, the lock is released
                rtassert!(rguard.queue_empty());
            }
        }
    }

    #[inline]
    pub unsafe fn read_unlock(&self) {
        let rguard = self.readers.lock();
        let wguard = self.writer.lock();
        unsafe { self.__read_unlock(rguard, wguard) };
    }

    #[inline]
    unsafe fn __write_unlock(
        &self,
        rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>,
        wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
    ) {
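        // Writer handoff has priority: a queued writer receives the lock
        // directly; otherwise all queued readers are woken and handed the
        // read lock.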
        match WaitQueue::notify_one(wguard) {
            Err(mut wguard) => {
                // No writers waiting, release the write lock
                *wguard.lock_var_mut() = false;
                if let Ok(mut rguard) = WaitQueue::notify_all(rguard) {
                    // One or more readers were waiting, pass the lock to them
                    if let NotifiedTcs::All { count } = rguard.notified_tcs() {
                        *rguard.lock_var_mut() = Some(count)
                    } else {
                        unreachable!() // called notify_all
                    }
                    rguard.drop_after(wguard);
                } else {
                    // No readers waiting, the lock is released
                }
            }
            Ok(wguard) => {
                // There was a thread waiting for write, just pass the lock
                wguard.drop_after(rguard);
            }
        }
    }

    #[inline]
    pub unsafe fn write_unlock(&self) {
        let rguard = self.readers.lock();
        let wguard = self.writer.lock();
        unsafe { self.__write_unlock(rguard, wguard) };
    }

    // Only used by __rust_rwlock_unlock below.
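    // Releases whichever lock (read or write) is currently held.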
    #[inline]
    #[cfg_attr(test, allow(dead_code))]
    unsafe fn unlock(&self) {
        let rguard = self.readers.lock();
        let wguard = self.writer.lock();
        if *wguard.lock_var() {
            unsafe { self.__write_unlock(rguard, wguard) };
        } else {
            unsafe { self.__read_unlock(rguard, wguard) };
        }
    }
}

// The following functions are needed by libunwind. These symbols are named
// in pre-link args for the target specification, so keep that in sync.
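// Each entry point returns 0 on success, or EINVAL when passed a null pointer.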
#[cfg(not(test))]
const EINVAL: i32 = 22;

#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }
    unsafe { (*p).read() };
    return 0;
}

#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }
    unsafe { (*p).write() };
    return 0;
}

#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }
    unsafe { (*p).unlock() };
    return 0;
}