/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
 * Based on code
 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
 */

#ifndef _ASM_X86_SUSPEND_64_H
#define _ASM_X86_SUSPEND_64_H

#include <asm/desc.h>
#include <asm/fpu/api.h>

/*
 * Image of the saved processor state, used by the low level ACPI suspend to
 * RAM code and by the low level hibernation code.
 *
 * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
 * and make sure that __save/__restore_processor_state(), defined in
 * arch/x86/power/cpu.c, still work as required.
 *
 * Because the structure is packed, make sure to avoid unaligned members:
 * keep members aligned both for optimisation purposes and because tools
 * like kmemleak only search for pointers stored at aligned addresses.
 */
struct saved_context {
	struct pt_regs regs;

	/*
	 * User CS and SS are saved in current_pt_regs(). The rest of the
	 * segment selectors need to be saved and restored here.
	 */
	u16 ds, es, fs, gs;

	/*
	 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
	 * so we save them separately. We save the kernelmode GSBASE to
	 * restore percpu access after resume.
	 */
	unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;

	unsigned long cr0, cr2, cr3, cr4;
	u64 misc_enable;
	struct saved_msrs saved_msrs;
	unsigned long efer;
	u16 gdt_pad;			/* Unused */
	struct desc_ptr gdt_desc;
	u16 idt_pad;
	struct desc_ptr idt;
	u16 ldt;
	u16 tss;
	unsigned long tr;
	unsigned long safety;
	unsigned long return_address;
	bool misc_enable_saved;
} __attribute__((packed));
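
/*
 * Illustrative sketch only: the comment above says that
 * __save_processor_state()/__restore_processor_state() in
 * arch/x86/power/cpu.c operate on this structure. The guarded-out snippet
 * below, with the hypothetical name example_save_processor_state(), merely
 * hints at what the save side might look like; the helpers used here
 * (store_idt(), savesegment(), read_cr0(), read_cr2(), __read_cr3(),
 * __read_cr4()) would additionally need <asm/segment.h> and
 * <asm/special_insns.h>, and the authoritative code lives in cpu.c.
 */
#if 0	/* example only, never compiled */
static void example_save_processor_state(struct saved_context *ctxt)
{
	store_idt(&ctxt->idt);		/* IDT base and limit */

	/* Segment selectors not covered by current_pt_regs() */
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);

	/* Control registers */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
}
#endif
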
#define loaddebug(thread,register) \
	set_debugreg((thread)->debugreg##register, register)
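
/*
 * Usage sketch (illustrative): loaddebug() pastes its second argument onto
 * "debugreg", so a hypothetical call such as
 *
 *	loaddebug(&current->thread, 3);
 *
 * expands to
 *
 *	set_debugreg((&current->thread)->debugreg3, 3);
 *
 * i.e. it reloads hardware debug register 3 from a thread_struct field
 * assumed here to be named debugreg3.
 */
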
/* routines for saving/restoring kernel state */
extern char core_restore_code[];
extern char restore_registers[];
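
/*
 * These appear to be assembly labels used by the hibernation image restore
 * path, defined in arch/x86/power/hibernate_asm_64.S in current kernels.
 */
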
#endif /* _ASM_X86_SUSPEND_64_H */