author		Linus Torvalds <torvalds@linux-foundation.org>	2024-09-17 13:00:12 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-17 13:00:12 +0200
commit		0279aa780df4362f218b5645c07e5265859937f6 (patch)
tree		a04865295e17838fa86126d320bb04936689accd /arch/x86/kernel/relocate_kernel_64.S
parent		5ba202a7c986fc58dd2fd1571c99667ab2699995 (diff)
parent		a678164aadbf68d80f7ab79b8bd5cfe3711e42fa (diff)
Merge tag 'x86-cleanups-2024-09-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Thomas Gleixner:
 "A set of cleanups across x86:

   - Use memremap() for the EISA probe instead of ioremap(). EISA is
     strictly memory and not MMIO

   - Cleanups and enhancement all over the place"

* tag 'x86-cleanups-2024-09-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/EISA: Dereference memory directly instead of using readl()
  x86/extable: Remove unused declaration fixup_bug()
  x86/boot/64: Strip percpu address space when setting up GDT descriptors
  x86/cpu: Clarify the error message when BIOS does not support SGX
  x86/kexec: Add comments around swap_pages() assembly to improve readability
  x86/kexec: Fix a comment of swap_pages() assembly
  x86/sgx: Fix a W=1 build warning in function comment
  x86/EISA: Use memremap() to probe for the EISA BIOS signature
  x86/mtrr: Remove obsolete declaration for mtrr_bp_restore()
  x86/cpu_entry_area: Annotate percpu_setup_exception_stacks() as __init
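The EISA change in this pull replaces ioremap() with memremap(): the EISA signature lives in ordinary memory rather than MMIO space, so it can be dereferenced directly instead of being read through readl(). A minimal sketch of that probe pattern, assuming the standard memremap()/memunmap() kernel API; the function name is hypothetical and the exact address constant and surrounding checks in arch/x86/kernel/eisa.c may differ:

	#include <linux/init.h>
	#include <linux/io.h>		/* memremap(), memunmap(), MEMREMAP_WB */
	#include <linux/eisa.h>		/* EISA_bus */

	static int __init eisa_probe_sketch(void)
	{
		/* 0x0FFFD9 is the conventional EISA BIOS signature address;
		 * treat the exact offset here as illustrative. */
		void *p = memremap(0x0FFFD9, 4, MEMREMAP_WB);

		/* Plain memory, not MMIO: dereference directly, no readl() needed */
		if (p && *(u32 *)p == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
			EISA_bus = 1;

		memunmap(p);
		return 0;
	}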
Diffstat (limited to 'arch/x86/kernel/relocate_kernel_64.S')
-rw-r--r--	arch/x86/kernel/relocate_kernel_64.S	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 042c9a0334e9..e9e88c342f75 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -170,6 +170,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	wbinvd
 .Lsme_off:
 
+	/* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
 	movq	%rcx, %r11
 	call	swap_pages
 
@@ -258,7 +259,7 @@ SYM_CODE_END(virtual_mapped)
 	/* Do the copies */
 SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	UNWIND_HINT_END_OF_STACK
-	movq	%rdi, %rcx	/* Put the page_list in %rcx */
+	movq	%rdi, %rcx	/* Put the indirection_page in %rcx */
 	xorl	%edi, %edi
 	xorl	%esi, %esi
 	jmp	1f
@@ -289,18 +290,21 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	movq	%rcx,	%rsi  /* For ever source page do a copy */
 	andq	$0xfffffffffffff000, %rsi
 
-	movq	%rdi, %rdx
-	movq	%rsi, %rax
+	movq	%rdi, %rdx    /* Save destination page to %rdx */
+	movq	%rsi, %rax    /* Save source page to %rax */
 
+	/* copy source page to swap page */
 	movq	%r10, %rdi
 	movl	$512, %ecx
 	rep ; movsq
 
+	/* copy destination page to source page */
 	movq	%rax, %rdi
 	movq	%rdx, %rsi
 	movl	$512, %ecx
 	rep ; movsq
 
+	/* copy swap page to destination page */
 	movq	%rdx, %rdi
 	movq	%r10, %rsi
 	movl	$512, %ecx
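Taken together, the three commented rep ; movsq blocks implement a three-way exchange through a scratch page: %r10 holds the swap page, and each movl $512, %ecx / rep ; movsq pair copies 512 quadwords, i.e. one 4 KiB page. A minimal C sketch of the same sequence (the helper name is hypothetical; the kernel does this in assembly because swap_pages runs from an identity-mapped trampoline during kexec):

	#include <string.h>

	#define PAGE_SIZE 4096	/* 512 quadwords, matching the movl $512, %ecx loops */

	/* Hypothetical helper mirroring the swap_pages copy sequence */
	static void swap_one_page(void *dest, void *src, void *swap_page)
	{
		memcpy(swap_page, src, PAGE_SIZE);	/* copy source page to swap page */
		memcpy(src, dest, PAGE_SIZE);		/* copy destination page to source page */
		memcpy(dest, swap_page, PAGE_SIZE);	/* copy swap page to destination page */
	}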