diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-09-07 10:52:13 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-09-07 10:52:13 -0700 |
commit | 4a0fc73da97efd23a383ca839e6fe86410268f6b (patch) | |
tree | 45440eb0cfa2905dcdbca809cc95e23d16da6ec7 /arch | |
parent | ac2224a467b499730057525924f6be3f4fdb0da5 (diff) | |
parent | 06fc3b0d2251b550f530a1c42e0f9c5d022476dd (diff) |
Merge tag 's390-6.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Heiko Carstens:
- A couple of virtual vs physical address confusion fixes
- Rework locking in dcssblk driver to address a lockdep warning
- Remove support for "noexec" kernel command line option since there is
no use case where it would make sense
- Simplify kernel mapping setup and get rid of quite a bit of code
- Add architecture specific __set_memory_yy() functions which allow us
to modify kernel mappings. Unlike the set_memory_xx() variants they
take void pointer start and end parameters, which allows using them
without the usual casts, and also to use them on areas larger than
8TB.
Note that the set_memory_xx() family comes with an int num_pages
parameter which overflows with 8TB. This could be addressed by
changing the num_pages parameter to unsigned long; however, this
requires changing all architectures, since the module code expects an int
parameter (see module_set_memory()).
This was indeed an issue since for debug_pagealloc() we call
set_memory_4k() on the whole identity mapping. Therefore address this
for now with the __set_memory_yy() variant, and address common code
later
- Use dev_set_name() and also fix memory leak in zcrypt driver error
handling
- Remove unused lsi_mask from airq_struct
- Add warning for invalid kernel mapping requests
* tag 's390-6.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/vmem: do not silently ignore mapping limit
s390/zcrypt: utilize dev_set_name() ability to use a formatted string
s390/zcrypt: don't leak memory if dev_set_name() fails
s390/mm: fix MAX_DMA_ADDRESS physical vs virtual confusion
s390/airq: remove lsi_mask from airq_struct
s390/mm: use __set_memory() variants where useful
s390/set_memory: add __set_memory() variant
s390/set_memory: generate all set_memory() functions
s390/mm: improve description of mapping permissions of prefix pages
s390/amode31: change type of __samode31, __eamode31, etc
s390/mm: simplify kernel mapping setup
s390: remove "noexec" option
s390/vmem: fix virtual vs physical address confusion
s390/dcssblk: fix lockdep warning
s390/monreader: fix virtual vs physical address confusion
Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/boot/ipl_parm.c | 7 | ||||
-rw-r--r-- | arch/s390/boot/startup.c | 4 | ||||
-rw-r--r-- | arch/s390/boot/vmem.c | 12 | ||||
-rw-r--r-- | arch/s390/include/asm/airq.h | 1 | ||||
-rw-r--r-- | arch/s390/include/asm/dma.h | 2 | ||||
-rw-r--r-- | arch/s390/include/asm/sections.h | 4 | ||||
-rw-r--r-- | arch/s390/include/asm/set_memory.h | 62 | ||||
-rw-r--r-- | arch/s390/include/asm/setup.h | 1 | ||||
-rw-r--r-- | arch/s390/kernel/early.c | 5 | ||||
-rw-r--r-- | arch/s390/kernel/machine_kexec.c | 4 | ||||
-rw-r--r-- | arch/s390/kernel/setup.c | 17 | ||||
-rw-r--r-- | arch/s390/kvm/interrupt.c | 3 | ||||
-rw-r--r-- | arch/s390/mm/dump_pagetables.c | 4 | ||||
-rw-r--r-- | arch/s390/mm/init.c | 4 | ||||
-rw-r--r-- | arch/s390/mm/pageattr.c | 2 | ||||
-rw-r--r-- | arch/s390/mm/vmem.c | 148 |
16 files changed, 82 insertions, 198 deletions
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 8753cb0339e5..7b7521762633 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -19,7 +19,6 @@ struct parmarea parmarea __section(".parmarea") = { }; char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; -int __bootdata(noexec_disabled); unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL; struct ipl_parameter_block __bootdata_preserved(ipl_block); @@ -290,12 +289,6 @@ void parse_boot_command_line(void) zlib_dfltcc_support = ZLIB_DFLTCC_FULL_DEBUG; } - if (!strcmp(param, "noexec")) { - rc = kstrtobool(val, &enabled); - if (!rc && !enabled) - noexec_disabled = 1; - } - if (!strcmp(param, "facilities") && val) modify_fac_list(val); diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index b9681cb22753..d3e48bd9c394 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -53,10 +53,8 @@ static void detect_facilities(void) } if (test_facility(78)) machine.has_edat2 = 1; - if (!noexec_disabled && test_facility(130)) { + if (test_facility(130)) machine.has_nx = 1; - __ctl_set_bit(0, 20); - } } static void setup_lpp(void) diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c index c67f59db7a51..01257ce3b89c 100644 --- a/arch/s390/boot/vmem.c +++ b/arch/s390/boot/vmem.c @@ -287,7 +287,9 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e if (kasan_pte_populate_zero_shadow(pte, mode)) continue; entry = __pte(_pa(addr, PAGE_SIZE, mode)); - entry = set_pte_bit(entry, PAGE_KERNEL_EXEC); + entry = set_pte_bit(entry, PAGE_KERNEL); + if (!machine.has_nx) + entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC)); set_pte(pte, entry); pages++; } @@ -311,7 +313,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e continue; if (can_large_pmd(pmd, addr, next)) { entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode)); - entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC); + entry = 
set_pmd_bit(entry, SEGMENT_KERNEL); + if (!machine.has_nx) + entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC)); set_pmd(pmd, entry); pages++; continue; @@ -342,7 +346,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e continue; if (can_large_pud(pud, addr, next)) { entry = __pud(_pa(addr, _REGION3_SIZE, mode)); - entry = set_pud_bit(entry, REGION3_KERNEL_EXEC); + entry = set_pud_bit(entry, REGION3_KERNEL); + if (!machine.has_nx) + entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC)); set_pud(pud, entry); pages++; continue; diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h index e82e5626e139..c4c28c2609a5 100644 --- a/arch/s390/include/asm/airq.h +++ b/arch/s390/include/asm/airq.h @@ -18,7 +18,6 @@ struct airq_struct { struct hlist_node list; /* Handler queueing. */ void (*handler)(struct airq_struct *airq, struct tpi_info *tpi_info); u8 *lsi_ptr; /* Local-Summary-Indicator pointer */ - u8 lsi_mask; /* Local-Summary-Indicator mask */ u8 isc; /* Interrupt-subclass */ u8 flags; }; diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h index c260adb25997..7fe3e31956d7 100644 --- a/arch/s390/include/asm/dma.h +++ b/arch/s390/include/asm/dma.h @@ -9,6 +9,6 @@ * to DMA. It _is_ used for the s390 memory zone split at 2GB caused * by the 31 bit heritage. */ -#define MAX_DMA_ADDRESS 0x80000000 +#define MAX_DMA_ADDRESS __va(0x80000000) #endif /* _ASM_S390_DMA_H */ diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h index 3fecaa4e8b74..0486e6ef62bf 100644 --- a/arch/s390/include/asm/sections.h +++ b/arch/s390/include/asm/sections.h @@ -23,7 +23,7 @@ */ #define __bootdata_preserved(var) __section(".boot.preserved.data." 
#var) var -extern unsigned long __samode31, __eamode31; -extern unsigned long __stext_amode31, __etext_amode31; +extern char *__samode31, *__eamode31; +extern char *__stext_amode31, *__etext_amode31; #endif diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h index 7a3eefd7a242..06fbabe2f66c 100644 --- a/arch/s390/include/asm/set_memory.h +++ b/arch/s390/include/asm/set_memory.h @@ -24,43 +24,41 @@ enum { #define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT) #define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT) -int __set_memory(unsigned long addr, int numpages, unsigned long flags); - -static inline int set_memory_ro(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_RO); -} - -static inline int set_memory_rw(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_RW); -} - -static inline int set_memory_nx(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_NX); -} - -static inline int set_memory_x(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_X); -} +int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags); #define set_memory_rox set_memory_rox -static inline int set_memory_rox(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X); -} -static inline int set_memory_rwnx(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX); +/* + * Generate two variants of each set_memory() function: + * + * set_memory_yy(unsigned long addr, int numpages); + * __set_memory_yy(void *start, void *end); + * + * The second variant exists for both convenience to avoid the usual + * (unsigned long) casts, but unlike the first variant it can also be used + * for areas larger than 8TB, which may happen at memory initialization. 
+ */ +#define __SET_MEMORY_FUNC(fname, flags) \ +static inline int fname(unsigned long addr, int numpages) \ +{ \ + return __set_memory(addr, numpages, (flags)); \ +} \ + \ +static inline int __##fname(void *start, void *end) \ +{ \ + unsigned long numpages; \ + \ + numpages = (end - start) >> PAGE_SHIFT; \ + return __set_memory((unsigned long)start, numpages, (flags)); \ } -static inline int set_memory_4k(unsigned long addr, int numpages) -{ - return __set_memory(addr, numpages, SET_MEMORY_4K); -} +__SET_MEMORY_FUNC(set_memory_ro, SET_MEMORY_RO) +__SET_MEMORY_FUNC(set_memory_rw, SET_MEMORY_RW) +__SET_MEMORY_FUNC(set_memory_nx, SET_MEMORY_NX) +__SET_MEMORY_FUNC(set_memory_x, SET_MEMORY_X) +__SET_MEMORY_FUNC(set_memory_rox, SET_MEMORY_RO | SET_MEMORY_X) +__SET_MEMORY_FUNC(set_memory_rwnx, SET_MEMORY_RW | SET_MEMORY_NX) +__SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K) int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index b30fe91166e3..25cadc2b9cff 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -72,7 +72,6 @@ extern unsigned int zlib_dfltcc_support; #define ZLIB_DFLTCC_INFLATE_ONLY 3 #define ZLIB_DFLTCC_FULL_DEBUG 4 -extern int noexec_disabled; extern unsigned long ident_map_size; extern unsigned long max_mappable; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 2dd5976a55ac..442ce0489e1a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -44,7 +44,6 @@ early_param(#param, ignore_decompressor_param_##param) decompressor_handled_param(mem); decompressor_handled_param(vmalloc); decompressor_handled_param(dfltcc); -decompressor_handled_param(noexec); decompressor_handled_param(facilities); decompressor_handled_param(nokaslr); #if IS_ENABLED(CONFIG_KVM) @@ -233,10 +232,8 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= 
MACHINE_FLAG_VX; __ctl_set_bit(0, 17); } - if (test_facility(130) && !noexec_disabled) { + if (test_facility(130)) S390_lowcore.machine_flags |= MACHINE_FLAG_NX; - __ctl_set_bit(0, 20); - } if (test_facility(133)) S390_lowcore.machine_flags |= MACHINE_FLAG_GS; if (test_facility(139) && (tod_clock_base.tod >> 63)) { diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 12a2bd4fc88c..ce65fc01671f 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -216,8 +216,8 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_SYMBOL(lowcore_ptr); VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); - vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); - vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); + vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31); + vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); abs_lc = get_abs_lowcore(); abs_lc->vmcore_info = paddr_vmcoreinfo_note(); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c744104e4a9c..de6ad0fb2328 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -97,10 +97,10 @@ EXPORT_SYMBOL(console_irq); * relocated above 2 GB, because it has to use 31 bit addresses. * Such code and data is part of the .amode31 section. 
*/ -unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31; -unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31; -unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31; -unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31; +char __amode31_ref *__samode31 = _samode31; +char __amode31_ref *__eamode31 = _eamode31; +char __amode31_ref *__stext_amode31 = _stext_amode31; +char __amode31_ref *__etext_amode31 = _etext_amode31; struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table; struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table; @@ -145,7 +145,6 @@ static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31; static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31; static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31; -int __bootdata(noexec_disabled); unsigned long __bootdata_preserved(max_mappable); unsigned long __bootdata(ident_map_size); struct physmem_info __bootdata(physmem_info); @@ -771,15 +770,15 @@ static void __init setup_memory(void) static void __init relocate_amode31_section(void) { unsigned long amode31_size = __eamode31 - __samode31; - long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31; - long *ptr; + long amode31_offset, *ptr; + amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31; pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); /* Move original AMODE31 section to the new one */ - memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size); + memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size); /* Zero out the old AMODE31 section to catch invalid accesses within it */ - memset((void *)__samode31, 0, amode31_size); + memset(__samode31, 0, amode31_size); /* Update all AMODE31 region references */ for (ptr = _start_amode31_refs; ptr != 
_end_amode31_refs; ptr++) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 9bd0a873f3b1..6fa6a4b0b9a8 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -3398,7 +3398,6 @@ static void gib_alert_irq_handler(struct airq_struct *airq, static struct airq_struct gib_alert_irq = { .handler = gib_alert_irq_handler, - .lsi_ptr = &gib_alert_irq.lsi_mask, }; void kvm_s390_gib_destroy(void) @@ -3438,6 +3437,8 @@ int __init kvm_s390_gib_init(u8 nisc) rc = -EIO; goto out_free_gib; } + /* adapter interrupts used for AP (applicable here) don't use the LSI */ + *gib_alert_irq.lsi_ptr = 0xff; gib->nisc = nisc; gib_origin = virt_to_phys(gib); diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index afa5db750d92..b51666967aa1 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -290,8 +290,8 @@ static int pt_dump_init(void) max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = 1UL << (max_addr * 11 + 31); address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; - address_markers[AMODE31_START_NR].start_address = __samode31; - address_markers[AMODE31_END_NR].start_address = __eamode31; + address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; + address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31; address_markers[MODULES_NR].start_address = MODULES_VADDR; address_markers[MODULES_END_NR].start_address = MODULES_END; address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 8d94e29adcdb..8b94d2212d33 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -98,7 +98,7 @@ void __init paging_init(void) sparse_init(); zone_dma_bits = 31; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); - max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); + max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS); max_zone_pfns[ZONE_NORMAL] = 
max_low_pfn; free_area_init(max_zone_pfns); } @@ -107,7 +107,7 @@ void mark_rodata_ro(void) { unsigned long size = __end_ro_after_init - __start_ro_after_init; - set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT); + __set_memory_ro(__start_ro_after_init, __end_ro_after_init); pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); debug_checkwx(); } diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index e5ec76271b16..b87e96c64b61 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -373,7 +373,7 @@ static int change_page_attr_alias(unsigned long addr, unsigned long end, return rc; } -int __set_memory(unsigned long addr, int numpages, unsigned long flags) +int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags) { unsigned long end; int rc; diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index e44243b9c0a4..6957d2ed97bf 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -5,7 +5,6 @@ #include <linux/memory_hotplug.h> #include <linux/memblock.h> -#include <linux/kasan.h> #include <linux/pfn.h> #include <linux/mm.h> #include <linux/init.h> @@ -291,14 +290,9 @@ out: static void try_free_pmd_table(pud_t *pud, unsigned long start) { - const unsigned long end = start + PUD_SIZE; pmd_t *pmd; int i; - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ - if (end > VMALLOC_START) - return; - pmd = pmd_offset(pud, start); for (i = 0; i < PTRS_PER_PMD; i++, pmd++) if (!pmd_none(*pmd)) @@ -363,14 +357,9 @@ out: static void try_free_pud_table(p4d_t *p4d, unsigned long start) { - const unsigned long end = start + P4D_SIZE; pud_t *pud; int i; - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ - if (end > VMALLOC_START) - return; - pud = pud_offset(p4d, start); for (i = 0; i < PTRS_PER_PUD; i++, pud++) { if (!pud_none(*pud)) @@ -413,14 +402,9 @@ out: static void try_free_p4d_table(pgd_t *pgd, unsigned long start) { - const 
unsigned long end = start + PGDIR_SIZE; p4d_t *p4d; int i; - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ - if (end > VMALLOC_START) - return; - p4d = p4d_offset(pgd, start); for (i = 0; i < PTRS_PER_P4D; i++, p4d++) { if (!p4d_none(*p4d)) @@ -440,6 +424,9 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add, if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end))) return -EINVAL; + /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ + if (WARN_ON_ONCE(end > VMALLOC_START)) + return -EINVAL; for (addr = start; addr < end; addr = next) { next = pgd_addr_end(addr, end); pgd = pgd_offset_k(addr); @@ -650,122 +637,29 @@ void vmem_unmap_4k_page(unsigned long addr) mutex_unlock(&vmem_mutex); } -static int __init memblock_region_cmp(const void *a, const void *b) -{ - const struct memblock_region *r1 = a; - const struct memblock_region *r2 = b; - - if (r1->base < r2->base) - return -1; - if (r1->base > r2->base) - return 1; - return 0; -} - -static void __init memblock_region_swap(void *a, void *b, int size) -{ - swap(*(struct memblock_region *)a, *(struct memblock_region *)b); -} - -#ifdef CONFIG_KASAN -#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x)) - -static inline int set_memory_kasan(unsigned long start, unsigned long end) -{ - start = PAGE_ALIGN_DOWN(__sha(start)); - end = PAGE_ALIGN(__sha(end)); - return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT); -} -#endif - -/* - * map whole physical memory to virtual memory (identity mapping) - * we reserve enough space in the vmalloc area for vmemmap to hotplug - * additional memory segments. 
- */ void __init vmem_map_init(void) { - struct memblock_region memory_rwx_regions[] = { - { - .base = 0, - .size = sizeof(struct lowcore), - .flags = MEMBLOCK_NONE, -#ifdef CONFIG_NUMA - .nid = NUMA_NO_NODE, -#endif - }, - { - .base = __pa(_stext), - .size = _etext - _stext, - .flags = MEMBLOCK_NONE, -#ifdef CONFIG_NUMA - .nid = NUMA_NO_NODE, -#endif - }, - { - .base = __pa(_sinittext), - .size = _einittext - _sinittext, - .flags = MEMBLOCK_NONE, -#ifdef CONFIG_NUMA - .nid = NUMA_NO_NODE, -#endif - }, - { - .base = __stext_amode31, - .size = __etext_amode31 - __stext_amode31, - .flags = MEMBLOCK_NONE, -#ifdef CONFIG_NUMA - .nid = NUMA_NO_NODE, -#endif - }, - }; - struct memblock_type memory_rwx = { - .regions = memory_rwx_regions, - .cnt = ARRAY_SIZE(memory_rwx_regions), - .max = ARRAY_SIZE(memory_rwx_regions), - }; - phys_addr_t base, end; - u64 i; - + __set_memory_rox(_stext, _etext); + __set_memory_ro(_etext, __end_rodata); + __set_memory_rox(_sinittext, _einittext); + __set_memory_rox(__stext_amode31, __etext_amode31); /* - * Set RW+NX attribute on all memory, except regions enumerated with - * memory_rwx exclude type. These regions need different attributes, - * which are enforced afterwards. - * - * __for_each_mem_range() iterate and exclude types should be sorted. - * The relative location of _stext and _sinittext is hardcoded in the - * linker script. However a location of __stext_amode31 and the kernel - * image itself are chosen dynamically. Thus, sort the exclude type. + * If the BEAR-enhancement facility is not installed the first + * prefix page is used to return to the previous context with + * an LPSWE instruction and therefore must be executable. 
*/ - sort(&memory_rwx_regions, - ARRAY_SIZE(memory_rwx_regions), sizeof(memory_rwx_regions[0]), - memblock_region_cmp, memblock_region_swap); - __for_each_mem_range(i, &memblock.memory, &memory_rwx, - NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) { - set_memory_rwnx((unsigned long)__va(base), - (end - base) >> PAGE_SHIFT); + if (!static_key_enabled(&cpu_has_bear)) + set_memory_x(0, 1); + if (debug_pagealloc_enabled()) { + /* + * Use RELOC_HIDE() as long as __va(0) translates to NULL, + * since performing pointer arithmetic on a NULL pointer + * has undefined behavior and generates compiler warnings. + */ + __set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size)); } - -#ifdef CONFIG_KASAN - for_each_mem_range(i, &base, &end) - set_memory_kasan(base, end); -#endif - set_memory_rox((unsigned long)_stext, - (unsigned long)(_etext - _stext) >> PAGE_SHIFT); - set_memory_ro((unsigned long)_etext, - (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT); - set_memory_rox((unsigned long)_sinittext, - (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT); - set_memory_rox(__stext_amode31, - (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT); - - /* lowcore must be executable for LPSWE */ - if (static_key_enabled(&cpu_has_bear)) - set_memory_nx(0, 1); - set_memory_nx(PAGE_SIZE, 1); - if (debug_pagealloc_enabled()) - set_memory_4k(0, ident_map_size >> PAGE_SHIFT); - + if (MACHINE_HAS_NX) + ctl_set_bit(0, 20); pr_info("Write protected kernel read-only data: %luk\n", (unsigned long)(__end_rodata - _stext) >> 10); } |