author    Ard Biesheuvel <ardb@kernel.org>      2023-08-07 18:27:03 +0200
committer Borislav Petkov (AMD) <bp@alien8.de>  2023-08-07 20:40:36 +0200
commit    d7156b986d4cc0657fa6dc05c9fcf51c3d55a0fe
tree      9810091771de88189b6be31a3a3f79c9fc1c9cc3 /arch/x86/boot/compressed
parent    12792064587623065250069d1df980e2c9ac3e67
x86/efistub: Clear BSS in EFI handover protocol entrypoint
The so-called EFI handover protocol is value-add from the distros that permits a loader to simply copy a PE kernel image into memory and call an alternative entrypoint that is described by an embedded boot_params structure.

Most implementations of this protocol do not bother to check the PE header for minimum alignment, section placement, etc, and therefore also don't clear the image's BSS, or even allocate enough memory for it.

Allocating more memory on the fly is rather difficult, but at least clear the BSS region explicitly when entering in this manner, so that the EFI stub code does not get confused by global variables that were not zero-initialized correctly.

When booting in mixed mode, this BSS clearing must occur before any global state is created, so clear it in the 32-bit asm entry point.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230807162720.545787-7-ardb@kernel.org
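For illustration, a minimal sketch of the loader behavior the message describes (the function and parameter names, and the fixed handover offset, are hypothetical, not part of this patch): the raw PE file is copied into memory byte for byte, so the BSS, which occupies no space in the file, is neither allocated separately nor zeroed before the handover entrypoint runs.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct boot_params;	/* arch/x86/include/uapi/asm/bootparam.h */

    /* Handover entrypoint: EFI image handle, EFI system table, and the
     * boot_params structure prepared by the loader. */
    typedef void (*handover_fn)(void *handle, void *systab,
                                struct boot_params *bp);

    /* Hypothetical distro loader following the protocol as described:
     * the file image is copied verbatim and control jumps to the
     * handover entry, without walking the PE section headers, so
     * nothing sizes or clears the kernel's BSS on its behalf. */
    static void handover_boot(void *handle, void *systab,
                              struct boot_params *bp,
                              const uint8_t *pe_file, size_t file_size,
                              uint8_t *load_addr, uint32_t handover_offset)
    {
            memcpy(load_addr, pe_file, file_size);  /* file-backed bytes only */

            handover_fn entry = (handover_fn)(load_addr + handover_offset);
            entry(handle, systab, bp);      /* BSS may still hold garbage */
    }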
Diffstat (limited to 'arch/x86/boot/compressed')
-rw-r--r--  arch/x86/boot/compressed/efi_mixed.S | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
index 9308b595f6f0..8a02a151806d 100644
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ b/arch/x86/boot/compressed/efi_mixed.S
@@ -142,6 +142,18 @@ SYM_FUNC_END(__efi64_thunk)
.code32
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
SYM_FUNC_START(efi32_stub_entry)
+ call 1f
+1: popl %ecx
+
+ /* Clear BSS */
+ xorl %eax, %eax
+ leal (_bss - 1b)(%ecx), %edi
+ leal (_ebss - 1b)(%ecx), %ecx
+ subl %edi, %ecx
+ shrl $2, %ecx
+ cld
+ rep stosl
+
add $0x4, %esp /* Discard return address */
popl %ecx
popl %edx
@@ -334,7 +346,7 @@ SYM_FUNC_END(efi32_pe_entry)
.org efi32_stub_entry + 0x200
.code64
SYM_FUNC_START_NOALIGN(efi64_stub_entry)
- jmp efi_stub_entry
+ jmp efi_handover_entry
SYM_FUNC_END(efi64_stub_entry)
#endif
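The BSS-clearing hunk uses a classic 32-bit position-independent idiom: since efi32_stub_entry may execute at an address other than its link address, "call 1f; popl %ecx" loads the runtime address of label 1 into %ecx, the link-time offsets (_bss - 1b) and (_ebss - 1b) are rebased onto it, and "rep stosl" then stores %eax (zero) a dword at a time across the span. A rough C rendering, as a sketch only (_bss/_ebss are the linker symbols the asm references; the helper and its parameters are hypothetical):

    #include <stdint.h>
    #include <string.h>

    static void clear_bss(uintptr_t runtime_1,   /* call 1f; popl %ecx */
                          uintptr_t link_1,      /* address of 1: at link time */
                          uintptr_t link_bss,    /* _bss */
                          uintptr_t link_ebss)   /* _ebss */
    {
            /* leal (_bss - 1b)(%ecx): link-time offset plus runtime base */
            char *start = (char *)(runtime_1 + (link_bss - link_1));
            char *end   = (char *)(runtime_1 + (link_ebss - link_1));

            /* subl/shrl compute the dword count; cld; rep stosl zeroes it */
            memset(start, 0, end - start);
    }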