Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/suspend_64.c     | 16 ++++++++++++----
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S |  8 +++++---
2 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index 01fbfb018ca..da10eef4c3e 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -156,6 +156,12 @@ extern int restore_image(void);
  */
 unsigned long restore_jump_address;
 
+/*
+ * Value of the cr3 register from before the hibernation (this value is passed
+ * in the image header).
+ */
+unsigned long restore_cr3;
+
 pgd_t *temp_level4_pgt;
 
 void *relocated_restore_code;
@@ -254,7 +260,8 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
 	unsigned long jump_address;
-	unsigned long control;
+	unsigned long cr3;
+	unsigned long magic;
 };
 
 #define RESTORE_MAGIC	0x0123456789ABCDEFUL
@@ -271,7 +278,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 	if (max_size < sizeof(struct restore_data_record))
 		return -EOVERFLOW;
 	rdr->jump_address = restore_jump_address;
-	rdr->control = (restore_jump_address ^ RESTORE_MAGIC);
+	rdr->cr3 = restore_cr3;
+	rdr->magic = RESTORE_MAGIC;
 	return 0;
 }
 
@@ -285,7 +293,7 @@ int arch_hibernation_header_restore(void *addr)
 	struct restore_data_record *rdr = addr;
 
 	restore_jump_address = rdr->jump_address;
-	return (rdr->control == (restore_jump_address ^ RESTORE_MAGIC)) ?
-			0 : -EINVAL;
+	restore_cr3 = rdr->cr3;
+	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
 #endif /* CONFIG_HIBERNATION */
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 40a209e0525..48344b666d2 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -39,6 +39,9 @@ ENTRY(swsusp_arch_suspend)
 	/* save the address of restore_registers */
 	movq	$restore_registers, %rax
 	movq	%rax, restore_jump_address(%rip)
+	/* save cr3 */
+	movq	%cr3, %rax
+	movq	%rax, restore_cr3(%rip)
 
 	call swsusp_save
 	ret
@@ -60,6 +63,7 @@ ENTRY(restore_image)
 
 	/* prepare to jump to the image kernel */
 	movq	restore_jump_address(%rip), %rax
+	movq	restore_cr3(%rip), %rbx
 
 	/* prepare to copy image data to their original locations */
 	movq	restore_pblist(%rip), %rdx
@@ -98,9 +102,7 @@ done:
 
 ENTRY(restore_registers)
 	/* go back to the original page tables */
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
-	addq	phys_base(%rip), %rax
-	movq	%rax, %cr3
+	movq	%rbx, %cr3
 
 	/* Flush TLB, including "global" things (vmalloc) */
 	movq	mmu_cr4_features(%rip), %rax
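
For illustration only: a standalone userspace C sketch of the header round trip the hunks above implement. The struct layout, RESTORE_MAGIC value, and the save/restore checks come from the patch; the helper names, error values, and example CR3/jump values are made up for this sketch and are not kernel code.

/* Sketch: model the restore_data_record round trip, assuming nothing
 * beyond what the patch shows. Replaces the old jump_address ^ MAGIC
 * checksum with an explicit cr3 field plus a fixed magic word. */
#include <stdio.h>

#define RESTORE_MAGIC 0x0123456789ABCDEFUL

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

/* Mirrors arch_hibernation_header_save(): fill the image header. */
static int header_save(void *addr, unsigned int max_size,
		       unsigned long jump_address, unsigned long cr3)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(*rdr))
		return -1;		/* kernel returns -EOVERFLOW */
	rdr->jump_address = jump_address;
	rdr->cr3 = cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/* Mirrors arch_hibernation_header_restore(): reject headers without the magic. */
static int header_restore(const void *addr,
			  unsigned long *jump_address, unsigned long *cr3)
{
	const struct restore_data_record *rdr = addr;

	*jump_address = rdr->jump_address;
	*cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -1;	/* -EINVAL in the kernel */
}

int main(void)
{
	unsigned char header[64];
	unsigned long jump = 0xffffffff80200000UL;	/* example values only */
	unsigned long cr3  = 0x1000000UL;
	unsigned long out_jump, out_cr3;

	header_save(header, sizeof(header), jump, cr3);
	if (header_restore(header, &out_jump, &out_cr3) == 0)
		printf("restored: jump=%#lx cr3=%#lx\n", out_jump, out_cr3);
	return 0;
}

The point of the change is visible in restore_registers: instead of recomputing the boot kernel's page tables from init_level4_pgt and phys_base, the restore path reloads the CR3 value that the image kernel saved into its header, carried across the jump in %rbx.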