author    Ard Biesheuvel <ardb@kernel.org>  2024-02-27 16:19:11 +0100
committer Borislav Petkov (AMD) <bp@alien8.de>  2024-03-04 18:12:16 +0100
commit    d6a41f184dcea0814260af2780e147022c11dca8 (patch)
tree      9e031b5b8a82e03dc1a071cf138a3e8ba5e14c3c /arch
parent    63bed96604205fa0b23c91d268df5f1f1b26faf6 (diff)
download  linux-d6a41f184dcea0814260af2780e147022c11dca8.tar.gz
x86/startup_64: Simplify calculation of initial page table address
Determining the address of the initial page table to program into CR3
involves:

- taking the physical address
- adding the SME encryption mask

On the primary entry path, the code is mapped using a 1:1 virtual to
physical translation, so the physical address can be taken directly
using a RIP-relative LEA instruction.

On the secondary entry path, the address can be obtained by taking the
offset from the virtual kernel base (__START_KERNEL_map) and adding the
physical kernel base. This is implemented in a slightly confusing way,
so clean this up.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20240227151907.387873-14-ardb+git@google.com
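For orientation, the two resulting code paths boil down to the sketch below,
condensed from the hunks that follow. Register usage matches the patch; the
final CR3 write lives further down in head_64.S and is shown here only to
indicate where both paths converge, so treat this as an illustration rather
than the patch itself.

	/*
	 * Primary (boot CPU) path: startup_64 runs from the 1:1 mapping,
	 * so a RIP-relative LEA yields the physical address of
	 * early_top_pgt directly; %rax already holds the CR3 modifier
	 * (the SME encryption mask when SME is active).
	 */
	leaq	early_top_pgt(%rip), %rcx
	addq	%rcx, %rax

	/*
	 * Secondary CPU path: convert the kernel-virtual address of
	 * init_top_pgt to a physical one by taking its offset from
	 * __START_KERNEL_map and adding phys_base, then add the SME
	 * mask if memory encryption is configured in.
	 */
	movq	phys_base(%rip), %rax
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
#ifdef CONFIG_AMD_MEM_ENCRYPT
	addq	sme_me_mask(%rip), %rax
#endif

	/* Both paths end up here: program the page table into CR3. */
	movq	%rax, %cr3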
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/head_64.S | 25
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 54207e7e5d3323..b8b7118b69761d 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -111,13 +111,11 @@ SYM_CODE_START_NOALIGN(startup_64)
call __startup_64
/* Form the CR3 value being sure to include the CR3 modifier */
- addq $(early_top_pgt - __START_KERNEL_map), %rax
+ leaq early_top_pgt(%rip), %rcx
+ addq %rcx, %rax
#ifdef CONFIG_AMD_MEM_ENCRYPT
mov %rax, %rdi
- mov %rax, %r14
-
- addq phys_base(%rip), %rdi
/*
* For SEV guests: Verify that the C-bit is correct. A malicious
@@ -126,12 +124,6 @@ SYM_CODE_START_NOALIGN(startup_64)
* the next RET instruction.
*/
call sev_verify_cbit
-
- /*
- * Restore CR3 value without the phys_base which will be added
- * below, before writing %cr3.
- */
- mov %r14, %rax
#endif
jmp 1f
@@ -171,18 +163,18 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
/* Clear %R15 which holds the boot_params pointer on the boot CPU */
xorl %r15d, %r15d
+ /* Derive the runtime physical address of init_top_pgt[] */
+ movq phys_base(%rip), %rax
+ addq $(init_top_pgt - __START_KERNEL_map), %rax
+
/*
* Retrieve the modifier (SME encryption mask if SME is active) to be
* added to the initial pgdir entry that will be programmed into CR3.
*/
#ifdef CONFIG_AMD_MEM_ENCRYPT
- movq sme_me_mask, %rax
-#else
- xorl %eax, %eax
+ addq sme_me_mask(%rip), %rax
#endif
- /* Form the CR3 value being sure to include the CR3 modifier */
- addq $(init_top_pgt - __START_KERNEL_map), %rax
1:
/*
@@ -212,9 +204,6 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
btsl $X86_CR4_PSE_BIT, %ecx
movq %rcx, %cr4
- /* Setup early boot stage 4-/5-level pagetables. */
- addq phys_base(%rip), %rax
-
/*
* Switch to new page-table
*