// vendor/redox_syscall/src/flag.rs
use bitflags::bitflags as inner_bitflags;
use core::{mem, ops::Deref, slice};

macro_rules! bitflags {
    (
        $(#[$outer:meta])*
        pub struct $BitFlags:ident: $T:ty {
            $(
                $(#[$inner:ident $($args:tt)*])*
                const $Flag:ident = $value:expr;
            )+
        }
    ) => {
        // First, use the inner bitflags
        inner_bitflags! {
            #[derive(Default)]
            $(#[$outer])*
            pub struct $BitFlags: $T {
                $(
                    $(#[$inner $($args)*])*
                    const $Flag = $value;
                )+
            }
        }

        // Secondly, re-export all inner constants
        // (`pub use self::Struct::*` doesn't work)
        $(
            $(#[$inner $($args)*])*
            pub const $Flag: $BitFlags = $BitFlags::$Flag;
        )+
    }
}

pub const CLOCK_REALTIME: usize = 1;
pub const CLOCK_MONOTONIC: usize = 4;

bitflags! {
    pub struct EventFlags: usize {
        const EVENT_NONE = 0;
        const EVENT_READ = 1;
        const EVENT_WRITE = 2;
    }
}
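
// A minimal sketch (not part of the upstream file) illustrating the effect of the
// wrapper macro above: every flag defined through it is also re-exported as a
// module-level constant, so `EVENT_READ` and `EventFlags::EVENT_READ` name the
// same value.
#[cfg(test)]
mod event_flag_reexport_example {
    use super::*;

    #[test]
    fn reexported_constant_matches_associated_constant() {
        assert_eq!(EVENT_READ, EventFlags::EVENT_READ);
        assert_eq!(EVENT_READ | EVENT_WRITE, EventFlags::EVENT_READ | EventFlags::EVENT_WRITE);
    }
}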

pub const F_DUPFD: usize = 0;
pub const F_GETFD: usize = 1;
pub const F_SETFD: usize = 2;
pub const F_GETFL: usize = 3;
pub const F_SETFL: usize = 4;

pub const FUTEX_WAIT: usize = 0;
pub const FUTEX_WAKE: usize = 1;
pub const FUTEX_REQUEUE: usize = 2;
pub const FUTEX_WAIT64: usize = 3;

// packet.c = fd
pub const SKMSG_FRETURNFD: usize = 0;

// packet.uid:packet.gid = offset, packet.c = base address, packet.d = page count
pub const SKMSG_PROVIDE_MMAP: usize = 1;

bitflags! {
    pub struct MapFlags: usize {
        // TODO: Downgrade PROT_NONE to global constant? (bitflags specifically states zero flags
        // can cause buggy behavior).
        const PROT_NONE = 0x0000_0000;

        const PROT_EXEC = 0x0001_0000;
        const PROT_WRITE = 0x0002_0000;
        const PROT_READ = 0x0004_0000;

        const MAP_SHARED = 0x0001;
        const MAP_PRIVATE = 0x0002;

        const MAP_FIXED = 0x0004;
        const MAP_FIXED_NOREPLACE = 0x000C;

        /// For *userspace-backed mmaps*, return from the mmap call before all pages have been
        /// provided by the scheme. This requires the scheme to be trusted, since the current
        /// context can block indefinitely if the scheme never responds when the page fault
        /// handler requests the missing page from it.
        ///
        /// In some cases, however, such as the program loader, the data has to be trusted to the
        /// same extent with or without MAP_LAZY, and in those cases mapping lazily does not by
        /// itself make anything less secure.
        ///
        /// For kernel-backed mmaps, this flag has no effect at all. It is unspecified whether
        /// kernel mmaps are lazy or not.
        const MAP_LAZY = 0x0010;
    }
}
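
// A minimal sketch (not part of the upstream file) showing how MapFlags are composed
// for a shared, readable and writable mapping. Adding MAP_LAZY would only make sense
// when the backing scheme is trusted, per the doc comment on that flag; the
// combination chosen here is purely illustrative.
#[cfg(test)]
mod map_flags_example {
    use super::*;

    #[test]
    fn compose_and_inspect_map_flags() {
        let flags = MapFlags::MAP_SHARED | MapFlags::PROT_READ | MapFlags::PROT_WRITE;
        assert!(flags.contains(MapFlags::PROT_WRITE));
        assert!(!flags.contains(MapFlags::MAP_PRIVATE));
    }
}
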
bitflags! {
    pub struct MunmapFlags: usize {
        /// Indicates that the funmap call must implicitly do an msync so that the changes become
        /// visible later.
        ///
        /// This flag is currently set if and only if both MAP_SHARED and PROT_WRITE are set.
        const NEEDS_SYNC = 1;
    }
}

pub const MODE_TYPE: u16 = 0xF000;
pub const MODE_DIR: u16 = 0x4000;
pub const MODE_FILE: u16 = 0x8000;
pub const MODE_SYMLINK: u16 = 0xA000;
pub const MODE_FIFO: u16 = 0x1000;
pub const MODE_CHR: u16 = 0x2000;

pub const MODE_PERM: u16 = 0x0FFF;
pub const MODE_SETUID: u16 = 0o4000;
pub const MODE_SETGID: u16 = 0o2000;

pub const O_RDONLY: usize =     0x0001_0000;
pub const O_WRONLY: usize =     0x0002_0000;
pub const O_RDWR: usize =       0x0003_0000;
pub const O_NONBLOCK: usize =   0x0004_0000;
pub const O_APPEND: usize =     0x0008_0000;
pub const O_SHLOCK: usize =     0x0010_0000;
pub const O_EXLOCK: usize =     0x0020_0000;
pub const O_ASYNC: usize =      0x0040_0000;
pub const O_FSYNC: usize =      0x0080_0000;
pub const O_CLOEXEC: usize =    0x0100_0000;
pub const O_CREAT: usize =      0x0200_0000;
pub const O_TRUNC: usize =      0x0400_0000;
pub const O_EXCL: usize =       0x0800_0000;
pub const O_DIRECTORY: usize =  0x1000_0000;
pub const O_STAT: usize =       0x2000_0000;
pub const O_SYMLINK: usize =    0x4000_0000;
pub const O_NOFOLLOW: usize =   0x8000_0000;
pub const O_ACCMODE: usize =    O_RDONLY | O_WRONLY | O_RDWR;
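
// A minimal sketch (not part of the upstream file): O_ACCMODE covers exactly the
// access-mode bits, so masking a combined open-flags word with it recovers whether
// the file was opened read-only, write-only, or read-write.
#[cfg(test)]
mod open_flags_example {
    use super::*;

    #[test]
    fn accmode_masks_out_access_mode() {
        let flags = O_RDWR | O_CLOEXEC | O_CREAT;
        assert_eq!(flags & O_ACCMODE, O_RDWR);
        assert_ne!(flags & O_ACCMODE, O_RDONLY);
    }
}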

bitflags! {
    pub struct PhysmapFlags: usize {
        const PHYSMAP_WRITE = 0x0000_0001;
        const PHYSMAP_WRITE_COMBINE = 0x0000_0002;
        const PHYSMAP_NO_CACHE = 0x0000_0004;
    }
}
bitflags! {
    /// Extra flags for [`physalloc2`] or [`physalloc3`].
    ///
    /// [`physalloc2`]: ../call/fn.physalloc2.html
    /// [`physalloc3`]: ../call/fn.physalloc3.html
    pub struct PhysallocFlags: usize {
        /// Only allocate memory within the 32-bit physical memory space. This is necessary for
        /// some devices that do not support 64-bit memory addresses.
        const SPACE_32 =        0x0000_0001;

        /// The allocated frame may reside anywhere in the 64-bit physical address space. This
        /// flag is redundant for the most part, except when overriding some other default.
        const SPACE_64 =        0x0000_0002;

        /// Do a "partial allocation", which means that not all of the frames specified in the
        /// frame count `size` actually have to be allocated. This means that if the allocator was
        /// unable to find a physical memory range large enough, it can instead return whatever
        /// range it decides is optimal. Thus, instead of letting one driver get an expensive
        /// 128MiB physical memory range when the physical memory has become fragmented, and
        /// failing, it can instead be given a more optimal range. If the device supports
        /// scatter-gather lists, then the driver only has to allocate more ranges, and the device
        /// will do vectored I/O.
        ///
        /// PARTIAL_ALLOC supports different allocation strategies, refer to
        /// [`Optimal`], [`GreatestRange`].
        ///
        /// [`Optimal`]: ./enum.PartialAllocStrategy.html
        /// [`GreatestRange`]: ./enum.PartialAllocStrategy.html
        const PARTIAL_ALLOC =   0x0000_0004;
    }
}

/// The bitmask of the partial allocation strategy. The supported strategies are the variants of
/// `PartialAllocStrategy`. If [`PARTIAL_ALLOC`] is not set, these bits are not reserved.
pub const PARTIAL_ALLOC_STRATEGY_MASK: usize = 0x0003_0000;

#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[repr(usize)]
pub enum PartialAllocStrategy {
    /// The allocator decides itself the size of the memory range, based on e.g. free memory ranges
    /// and other processes which require large physical memory chunks.
    Optimal = 0x0001_0000,

    /// The allocator returns the absolute greatest range it can find.
    GreatestRange = 0x0002_0000,

    /// The allocator returns the first range that fits the minimum count, without searching
    /// further.
    Greedy = 0x0003_0000,
}
impl Default for PartialAllocStrategy {
    fn default() -> Self {
        Self::Optimal
    }
}

impl PartialAllocStrategy {
    pub fn from_raw(raw: usize) -> Option<Self> {
        match raw {
            0x0001_0000 => Some(Self::Optimal),
            0x0002_0000 => Some(Self::GreatestRange),
            0x0003_0000 => Some(Self::Greedy),
            _ => None,
        }
    }
}
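
// A minimal sketch (not part of the upstream file), assuming the strategy is encoded
// in the bits covered by PARTIAL_ALLOC_STRATEGY_MASK alongside PARTIAL_ALLOC: the
// strategy can then be recovered by masking the raw flags word and decoding it with
// PartialAllocStrategy::from_raw.
#[cfg(test)]
mod physalloc_strategy_example {
    use super::*;

    #[test]
    fn encode_and_decode_partial_alloc_strategy() {
        // Hypothetical flags word combining PARTIAL_ALLOC with the GreatestRange strategy.
        let raw = PhysallocFlags::PARTIAL_ALLOC.bits() | PartialAllocStrategy::GreatestRange as usize;
        let strategy = PartialAllocStrategy::from_raw(raw & PARTIAL_ALLOC_STRATEGY_MASK);
        assert_eq!(strategy, Some(PartialAllocStrategy::GreatestRange));
    }
}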

// The top 48 bits of PTRACE_* are reserved, for now

bitflags! {
    pub struct PtraceFlags: u64 {
        /// Stop before a syscall is handled. Send PTRACE_FLAG_IGNORE to not
        /// handle the syscall.
        const PTRACE_STOP_PRE_SYSCALL = 0x0000_0000_0000_0001;
        /// Stop after a syscall is handled.
        const PTRACE_STOP_POST_SYSCALL = 0x0000_0000_0000_0002;
        /// Stop after exactly one instruction. TODO: This may not handle
        /// fexec/signal boundaries. Should it?
        const PTRACE_STOP_SINGLESTEP = 0x0000_0000_0000_0004;
        /// Stop before a signal is handled. Send PTRACE_FLAG_IGNORE to not
        /// handle the signal.
        const PTRACE_STOP_SIGNAL = 0x0000_0000_0000_0008;
        /// Stop on a software breakpoint, such as the int3 instruction for
        /// x86_64.
        const PTRACE_STOP_BREAKPOINT = 0x0000_0000_0000_0010;
        /// Stop just before exiting for good.
        const PTRACE_STOP_EXIT = 0x0000_0000_0000_0020;

        const PTRACE_STOP_MASK = 0x0000_0000_0000_00FF;


        /// Sent when a child is cloned, giving you the opportunity to trace it.
        /// If you don't catch this, the child is started as normal.
        const PTRACE_EVENT_CLONE = 0x0000_0000_0000_0100;

        /// Sent when current-addrspace is changed, allowing the tracer to reopen the memory file.
        const PTRACE_EVENT_ADDRSPACE_SWITCH = 0x0000_0000_0000_0200;

        const PTRACE_EVENT_MASK = 0x0000_0000_0000_0F00;

        /// Special meaning, depending on the event. Usually, when fired before
        /// an action, it will skip performing that action.
        const PTRACE_FLAG_IGNORE = 0x0000_0000_0000_1000;

        const PTRACE_FLAG_MASK = 0x0000_0000_0000_F000;
    }
}
impl Deref for PtraceFlags {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        // Same as to_ne_bytes but in-place
        unsafe {
            slice::from_raw_parts(
                &self.bits as *const _ as *const u8,
                mem::size_of::<u64>()
            )
        }
    }
}
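
// A minimal sketch (not part of the upstream file): the Deref impl above exposes the
// flag bits as a native-endian byte slice of the same length as a u64, equivalent to
// calling to_ne_bytes() on the raw bits but without an intermediate buffer.
#[cfg(test)]
mod ptrace_flags_deref_example {
    use super::*;

    #[test]
    fn deref_yields_native_endian_bytes() {
        let flags = PTRACE_STOP_PRE_SYSCALL | PTRACE_FLAG_IGNORE;
        let bytes: &[u8] = &flags;
        assert_eq!(bytes.len(), core::mem::size_of::<u64>());
        assert_eq!(bytes, &flags.bits().to_ne_bytes()[..]);
    }
}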

pub const SEEK_SET: usize = 0;
pub const SEEK_CUR: usize = 1;
pub const SEEK_END: usize = 2;

pub const SIGHUP: usize =   1;
pub const SIGINT: usize =   2;
pub const SIGQUIT: usize =  3;
pub const SIGILL: usize =   4;
pub const SIGTRAP: usize =  5;
pub const SIGABRT: usize =  6;
pub const SIGBUS: usize =   7;
pub const SIGFPE: usize =   8;
pub const SIGKILL: usize =  9;
pub const SIGUSR1: usize =  10;
pub const SIGSEGV: usize =  11;
pub const SIGUSR2: usize =  12;
pub const SIGPIPE: usize =  13;
pub const SIGALRM: usize =  14;
pub const SIGTERM: usize =  15;
pub const SIGSTKFLT: usize= 16;
pub const SIGCHLD: usize =  17;
pub const SIGCONT: usize =  18;
pub const SIGSTOP: usize =  19;
pub const SIGTSTP: usize =  20;
pub const SIGTTIN: usize =  21;
pub const SIGTTOU: usize =  22;
pub const SIGURG: usize =   23;
pub const SIGXCPU: usize =  24;
pub const SIGXFSZ: usize =  25;
pub const SIGVTALRM: usize= 26;
pub const SIGPROF: usize =  27;
pub const SIGWINCH: usize = 28;
pub const SIGIO: usize =    29;
pub const SIGPWR: usize =   30;
pub const SIGSYS: usize =   31;

pub const SIG_DFL: usize = 0;
pub const SIG_IGN: usize = 1;

pub const SIG_BLOCK: usize = 0;
pub const SIG_UNBLOCK: usize = 1;
pub const SIG_SETMASK: usize = 2;

bitflags! {
    pub struct SigActionFlags: usize {
        const SA_NOCLDSTOP = 0x00000001;
        const SA_NOCLDWAIT = 0x00000002;
        const SA_SIGINFO =   0x00000004;
        const SA_RESTORER =  0x04000000;
        const SA_ONSTACK =   0x08000000;
        const SA_RESTART =   0x10000000;
        const SA_NODEFER =   0x40000000;
        const SA_RESETHAND = 0x80000000;
    }
}

bitflags! {
    pub struct WaitFlags: usize {
        const WNOHANG =    0x01;
        const WUNTRACED =  0x02;
        const WCONTINUED = 0x08;
    }
}

pub const ADDRSPACE_OP_MMAP: usize = 0;
pub const ADDRSPACE_OP_MUNMAP: usize = 1;
pub const ADDRSPACE_OP_MPROTECT: usize = 2;
pub const ADDRSPACE_OP_TRANSFER: usize = 3;

/// True if status indicates the child is stopped.
pub fn wifstopped(status: usize) -> bool {
    (status & 0xff) == 0x7f
}

/// If wifstopped(status), the signal that stopped the child.
pub fn wstopsig(status: usize) -> usize {
    (status >> 8) & 0xff
}

/// True if status indicates the child continued after a stop.
pub fn wifcontinued(status: usize) -> bool {
    status == 0xffff
}

/// True if status indicates termination by a signal.
pub fn wifsignaled(status: usize) -> bool {
    ((status & 0x7f) + 1) as i8 >= 2
}

/// If wifsignaled(status), the terminating signal.
pub fn wtermsig(status: usize) -> usize {
    status & 0x7f
}

/// True if status indicates normal termination.
pub fn wifexited(status: usize) -> bool {
    wtermsig(status) == 0
}

/// If wifexited(status), the exit status.
pub fn wexitstatus(status: usize) -> usize {
    (status >> 8) & 0xff
}

/// True if status indicates a core dump was created.
pub fn wcoredump(status: usize) -> bool {
    (status & 0x80) != 0
}
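
// A minimal sketch (not part of the upstream file) decoding a wait status with the
// helpers above. The status value is constructed by hand for the example, following
// the layout these helpers assume: 0x7f in the low byte means "stopped" and the
// stopping signal sits in bits 8..16.
#[cfg(test)]
mod wait_status_example {
    use super::*;

    #[test]
    fn decode_stopped_status() {
        let status = (SIGTSTP << 8) | 0x7f;
        assert!(wifstopped(status));
        assert_eq!(wstopsig(status), SIGTSTP);
        assert!(!wifcontinued(status));
        assert!(!wifsignaled(status));
    }
}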

bitflags! {
    pub struct MremapFlags: usize {
        const FIXED = 1;
        const FIXED_REPLACE = 3;
        // TODO: MAYMOVE, DONTUNMAP
    }
}