author		Linus Torvalds <torvalds@linux-foundation.org>	2018-10-31 09:25:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-31 09:25:15 -0700
commit		59fc453b21f767f2fb0ff4dc0a947e9b9c9e6d14 (patch)
tree		42029c432982ebabb462bd16a413a033125fd793 /include
parent		310c7585e8300ddc46211df0757c11e4299ec482 (diff)
parent		2ebe82288b3278b8e538ee8adce4142dbdedd8f6 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- the rest of MM
- lib/bitmap updates
- hfs updates
- fatfs updates
- various other misc things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (94 commits)
mm/gup.c: fix __get_user_pages_fast() comment
mm: Fix warning in insert_pfn()
memory-hotplug.rst: add some details about locking internals
powerpc/powernv: hold device_hotplug_lock when calling memtrace_offline_pages()
powerpc/powernv: hold device_hotplug_lock when calling device_online()
mm/memory_hotplug: fix online/offline_pages called w.o. mem_hotplug_lock
mm/memory_hotplug: make add_memory() take the device_hotplug_lock
mm/memory_hotplug: make remove_memory() take the device_hotplug_lock
mm/memblock.c: warn if zero alignment was requested
memblock: stop using implicit alignment to SMP_CACHE_BYTES
docs/boot-time-mm: remove bootmem documentation
mm: remove include/linux/bootmem.h
memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants
mm: remove nobootmem
memblock: rename __free_pages_bootmem to memblock_free_pages
memblock: rename free_all_bootmem to memblock_free_all
memblock: replace free_bootmem_late with memblock_free_late
memblock: replace free_bootmem{_node} with memblock_free
mm: nobootmem: remove bootmem allocation APIs
memblock: replace alloc_bootmem with memblock_alloc
...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/percpu.h	4
-rw-r--r--	include/linux/bitmap.h	37
-rw-r--r--	include/linux/bootmem.h	404
-rw-r--r--	include/linux/compat.h	3
-rw-r--r--	include/linux/hmm.h	33
-rw-r--r--	include/linux/memblock.h	165
-rw-r--r--	include/linux/memory_hotplug.h	4
-rw-r--r--	include/linux/mm.h	2
-rw-r--r--	include/linux/mmzone.h	5
-rw-r--r--	include/linux/percpu-defs.h	6
-rw-r--r--	include/linux/rbtree_augmented.h	4
-rw-r--r--	include/linux/signal.h	6
12 files changed, 207 insertions, 466 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a8415a5e..c2de013b2cf4 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -62,10 +62,6 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_ATTRIBUTES
 #endif
 
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
-#endif
-
 #define raw_cpu_generic_read(pcp)					\
 ({									\
 	*raw_cpu_ptr(&(pcp));						\
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acf5e8df3504..f58e97446abc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -28,8 +28,8 @@
  * The available bitmap operations and their rough meaning in the
  * case that the bitmap is a single unsigned long are thus:
  *
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
  *
  * ::
  *
@@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
 
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
 #define small_const_nbits(nbits) \
-	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
 
 static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
 {
-	if (small_const_nbits(nbits))
-		*dst = 0UL;
-	else {
-		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-		memset(dst, 0, len);
-	}
+	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+	memset(dst, 0, len);
 }
 
 static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
 {
-	if (small_const_nbits(nbits))
-		*dst = ~0UL;
-	else {
-		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-		memset(dst, 0xff, len);
-	}
+	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+	memset(dst, 0xff, len);
 }
 
 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 			unsigned int nbits)
 {
-	if (small_const_nbits(nbits))
-		*dst = *src;
-	else {
-		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-		memcpy(dst, src, len);
-	}
+	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+	memcpy(dst, src, len);
 }
 
 /*
@@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
 }
 
 static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
-				unsigned int shift, int nbits)
+				unsigned int shift, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
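The small_const_nbits() change above is subtle: the macro now also rejects a constant nbits of 0, routing such degenerate callers to the out-of-line implementations rather than the single-word fast path. A minimal user-space sketch of the new predicate (BITS_PER_LONG and bitmap_zero() are re-derived here for a stand-alone build; this is illustrative code, not text from the diff):

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	((int)(8 * sizeof(unsigned long)))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Mirrors the new predicate: constant, non-zero, at most one word. */
#define small_const_nbits(nbits) \
	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)

/* The simplified bitmap_zero(): one unconditional memset. */
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
	memset(dst, 0, len);
}

int main(void)
{
	unsigned long map[2] = { ~0UL, ~0UL };

	bitmap_zero(map, 100);	/* clears both words */
	printf("%d %d %d\n",
	       small_const_nbits(0),			/* 0: now excluded */
	       small_const_nbits(BITS_PER_LONG),	/* 1 */
	       small_const_nbits(BITS_PER_LONG + 1));	/* 0: multi-word */
	return 0;
}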
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index 42515195d7d8..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/**
- * struct bootmem_data - per-node information used by the bootmem allocator
- * @node_min_pfn: the starting physical address of the node's memory
- * @node_low_pfn: the end physical address of the directly addressable memory
- * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
- *		      memory pages (including holes) on the node.
- * @last_end_off: the offset within the page of the end of the last allocation;
- *		  if 0, the page used is full
- * @hint_idx: the PFN of the page used with the last allocation;
- *	      together with using this with the @last_end_offset field,
- *	      a test can be made to see if allocations can be merged
- *	      with the page used for the last allocation rather than
- *	      using up a full new page.
- * @list: list entry in the linked list ordered by the memory addresses
- */
-typedef struct bootmem_data {
-	unsigned long node_min_pfn;
-	unsigned long node_low_pfn;
-	void *node_bootmem_map;
-	unsigned long last_end_off;
-	unsigned long hint_idx;
-	struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
-				       unsigned long freepfn,
-				       unsigned long startpfn,
-				       unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
-			      unsigned long addr,
-			      unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */
-#define BOOTMEM_DEFAULT		0
-#define BOOTMEM_EXCLUSIVE	(1<<0)
-
-extern int reserve_bootmem(unsigned long addr,
-			   unsigned long size,
-			   int flags);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
-				unsigned long physaddr,
-				unsigned long size,
-				int flags);
-
-extern void *__alloc_bootmem(unsigned long size,
-			     unsigned long align,
-			     unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-				     unsigned long align,
-				     unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
-				  unsigned long size,
-				  unsigned long align,
-				  unsigned long goal) __malloc;
-void *__alloc_bootmem_node_high(pg_data_t *pgdat,
-				unsigned long size,
-				unsigned long align,
-				unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
-					  unsigned long size,
-					  unsigned long align,
-					  unsigned long goal) __malloc;
-void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
-				    unsigned long size,
-				    unsigned long align,
-				    unsigned long goal,
-				    unsigned long limit) __malloc;
-extern void *__alloc_bootmem_low(unsigned long size,
-				 unsigned long align,
-				 unsigned long goal) __malloc;
-void *__alloc_bootmem_low_nopanic(unsigned long size,
-				  unsigned long align,
-				  unsigned long goal) __malloc;
-extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
-				      unsigned long size,
-				      unsigned long align,
-				      unsigned long goal) __malloc;
-
-#ifdef CONFIG_NO_BOOTMEM
-/* We are using top down, so it is safe to use 0 here */
-#define BOOTMEM_LOW_LIMIT 0
-#else
-#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
-#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
-#endif
-
-#define alloc_bootmem(x) \
-	__alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_align(x, align) \
-	__alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_nopanic(x) \
-	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages(x) \
-	__alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_nopanic(x) \
-	__alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node(pgdat, x) \
-	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node_nopanic(pgdat, x) \
-	__alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node(pgdat, x) \
-	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
-	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_low_pages_nopanic(x) \
-	__alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages_node(pgdat, x) \
-	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-
-
-#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
-
-/* FIXME: use MEMBLOCK_ALLOC_* variants here */
-#define BOOTMEM_ALLOC_ACCESSIBLE	0
-#define BOOTMEM_ALLOC_ANYWHERE		(~(phys_addr_t)0)
-
-/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
-void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
-				      phys_addr_t min_addr,
-				      phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
-		phys_addr_t align, phys_addr_t min_addr,
-		phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
-		phys_addr_t min_addr, phys_addr_t max_addr, int nid);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
-static inline void * __init memblock_virt_alloc(
-					phys_addr_t size, phys_addr_t align)
-{
-	return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
-					   BOOTMEM_ALLOC_ACCESSIBLE,
-					   NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
-					phys_addr_t size, phys_addr_t align)
-{
-	return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
-					       BOOTMEM_ALLOC_ACCESSIBLE,
-					       NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
-					phys_addr_t size, phys_addr_t align)
-{
-	return memblock_virt_alloc_try_nid_nopanic(size, align,
-						   BOOTMEM_LOW_LIMIT,
-						   BOOTMEM_ALLOC_ACCESSIBLE,
-						   NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_low(
-					phys_addr_t size, phys_addr_t align)
-{
-	return memblock_virt_alloc_try_nid(size, align,
-					   BOOTMEM_LOW_LIMIT,
-					   ARCH_LOW_ADDRESS_LIMIT,
-					   NUMA_NO_NODE);
-}
-static inline void * __init memblock_virt_alloc_low_nopanic(
-					phys_addr_t size, phys_addr_t align)
-{
-	return memblock_virt_alloc_try_nid_nopanic(size, align,
-						   BOOTMEM_LOW_LIMIT,
-						   ARCH_LOW_ADDRESS_LIMIT,
-						   NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
-	phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
-	return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
-						   BOOTMEM_ALLOC_ACCESSIBLE,
-						   NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_node(
-						phys_addr_t size, int nid)
-{
-	return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
-					   BOOTMEM_ALLOC_ACCESSIBLE, nid);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
-						phys_addr_t size, int nid)
-{
-	return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
-						   BOOTMEM_ALLOC_ACCESSIBLE,
-						   nid);
-}
-
-static inline void __init memblock_free_early(
-					phys_addr_t base, phys_addr_t size)
-{
-	__memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
-				phys_addr_t base, phys_addr_t size, int nid)
-{
-	__memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_late(
-					phys_addr_t base, phys_addr_t size)
-{
-	__memblock_free_late(base, size);
-}
-
-#else
-
-#define BOOTMEM_ALLOC_ACCESSIBLE	0
-
-
-/* Fall back to all the existing bootmem APIs */
-static inline void * __init memblock_virt_alloc(
-					phys_addr_t size, phys_addr_t align)
-{
-	if (!align)
-		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
-					phys_addr_t size, phys_addr_t align)
-{
-	if (!align)
-		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
-					phys_addr_t size, phys_addr_t align)
-{
-	if (!align)
-		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_low(
-					phys_addr_t size, phys_addr_t align)
-{
-	if (!align)
-		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_low(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_low_nopanic(
-					phys_addr_t size, phys_addr_t align)
-{
-	if (!align)
-		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_low_nopanic(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
-		phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
-	return __alloc_bootmem_nopanic(size, align, min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_node(
-						phys_addr_t size, int nid)
-{
-	return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
-				    BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
-						phys_addr_t size, int nid)
-{
-	return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
-					    SMP_CACHE_BYTES,
-					    BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
-	phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
-	return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
-					 min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_raw(
-			phys_addr_t size, phys_addr_t align,
-			phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
-	return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
-				min_addr, max_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_nopanic(
-			phys_addr_t size, phys_addr_t align,
-			phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
-	return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
-				min_addr, max_addr);
-}
-
-static inline void __init memblock_free_early(
-					phys_addr_t base, phys_addr_t size)
-{
-	free_bootmem(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
-				phys_addr_t base, phys_addr_t size, int nid)
-{
-	free_bootmem_node(NODE_DATA(nid), base, size);
-}
-
-static inline void __init memblock_free_late(
-					phys_addr_t base, phys_addr_t size)
-{
-	free_bootmem_late(base, size);
-}
-#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-
-extern void *alloc_large_system_hash(const char *tablename,
-				     unsigned long bucketsize,
-				     unsigned long numentries,
-				     int scale,
-				     int flags,
-				     unsigned int *_hash_shift,
-				     unsigned int *_hash_mask,
-				     unsigned long low_limit,
-				     unsigned long high_limit);
-
-#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
-#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
-					 * shift passed via *_hash_shift */
-#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
-
-/* Only NUMA needs hash distribution. 64bit NUMA architectures have
- * sufficient vmalloc space.
- */
-#ifdef CONFIG_NUMA
-#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist;		/* Distribute hashes across NUMA nodes? */
-#else
-#define hashdist (0)
-#endif
-
-
-#endif /* _LINUX_BOOTMEM_H */
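With bootmem.h deleted, callers move to the memblock equivalents declared later in this diff. A hedged before/after sketch of the conversion — early_setup_example() is a hypothetical caller, not code from this merge; the new names follow the commit titles above, and note the now-explicit SMP_CACHE_BYTES alignment:

#include <linux/memblock.h>

static void __init early_setup_example(void)
{
	void *tbl;

	/* Before: tbl = alloc_bootmem(PAGE_SIZE);  (panics on failure) */
	tbl = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

	/* Before: free_bootmem(__pa(tbl), PAGE_SIZE); */
	memblock_free(__pa(tbl), PAGE_SIZE);

	/* Before: free_all_bootmem();  (release boot memory to the buddy allocator) */
	memblock_free_all();
}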
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d30e4dbd4be2..06e77473f175 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -488,8 +488,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
 	compat_sigset_t v;
 	switch (_NSIG_WORDS) {
 	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+		/* fall through */
 	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+		/* fall through */
 	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+		/* fall through */
 	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
 	}
 	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
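The new /* fall through */ comments in put_compat_sigset() mark each case's fallthrough as intentional, which is what GCC's -Wimplicit-fallthrough checks for. A stand-alone user-space illustration of the pattern (not kernel code):

#include <stdio.h>

/*
 * Compile with: gcc -Wimplicit-fallthrough fall.c
 * Without the comments, each unterminated case draws a warning.
 */
static void clear_words(int words, unsigned long sig[4])
{
	switch (words) {
	case 4: sig[3] = 0;
		/* fall through */
	case 3: sig[2] = 0;
		/* fall through */
	case 2: sig[1] = 0;
		/* fall through */
	case 1: sig[0] = 0;
	}
}

int main(void)
{
	unsigned long sig[4] = { 1, 2, 3, 4 };

	clear_words(2, sig);	/* clears sig[1], then falls into sig[0] */
	printf("%lu %lu %lu %lu\n", sig[0], sig[1], sig[2], sig[3]);
	return 0;
}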
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dde947083d4e..c6fb869a81c0 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -11,7 +11,7 @@
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
- * Authors: Jérôme Glisse <jglisse@redhat.com>
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
 /*
 * Heterogeneous Memory Management (HMM)
@@ -274,14 +274,29 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
 struct hmm_mirror;
 
 /*
- * enum hmm_update_type - type of update
+ * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
-enum hmm_update_type {
+enum hmm_update_event {
 	HMM_UPDATE_INVALIDATE,
 };
 
 /*
+ * struct hmm_update - HMM update informations for callback
+ *
+ * @start: virtual start address of the range to update
+ * @end: virtual end address of the range to update
+ * @event: event triggering the update (what is happening)
+ * @blockable: can the callback block/sleep ?
+ */
+struct hmm_update {
+	unsigned long start;
+	unsigned long end;
+	enum hmm_update_event event;
+	bool blockable;
+};
+
+/*
 * struct hmm_mirror_ops - HMM mirror device operations callback
 *
 * @update: callback to update range on a device
@@ -300,9 +315,9 @@ struct hmm_mirror_ops {
 	/* sync_cpu_device_pagetables() - synchronize page tables
 	 *
 	 * @mirror: pointer to struct hmm_mirror
-	 * @update_type: type of update that occurred to the CPU page table
-	 * @start: virtual start address of the range to update
-	 * @end: virtual end address of the range to update
+	 * @update: update informations (see struct hmm_update)
+	 * Returns: -EAGAIN if update.blockable false and callback need to
+	 *          block, 0 otherwise.
 	 *
 	 * This callback ultimately originates from mmu_notifiers when the CPU
 	 * page table is updated. The device driver must update its page table
@@ -313,10 +328,8 @@ struct hmm_mirror_ops {
 	 * page tables are completely updated (TLBs flushed, etc); this is a
 	 * synchronous call.
 	 */
-	void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
-					   enum hmm_update_type update_type,
-					   unsigned long start,
-					   unsigned long end);
+	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
+					  const struct hmm_update *update);
 };
 
 /*
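Under the new hmm.h signature, the callback receives a single struct hmm_update and must report whether it could honor a non-blocking invalidation. A hedged sketch of a driver-side implementation — struct my_mirror, my_mirror_ops, and my_invalidate_range() are hypothetical names; only hmm_mirror_ops and struct hmm_update come from the diff above:

struct my_mirror {
	struct hmm_mirror mirror;
	struct mutex lock;
};

static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct my_mirror *m = container_of(mirror, struct my_mirror, mirror);

	/* Honor update->blockable: never sleep when the caller cannot. */
	if (!update->blockable) {
		if (!mutex_trylock(&m->lock))
			return -EAGAIN;
	} else {
		mutex_lock(&m->lock);
	}

	my_invalidate_range(m, update->start, update->end); /* hypothetical */
	mutex_unlock(&m->lock);
	return 0;
}

static const struct hmm_mirror_ops my_mirror_ops = {
	.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
};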
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 2acdd046df2d..aee299a6aa76 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,7 +2,6 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
-#ifdef CONFIG_HAVE_MEMBLOCK
 /*
 * Logical memory blocks.
 *
@@ -16,6 +15,19 @@
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <asm/dma.h>
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
 
 #define INIT_MEMBLOCK_REGIONS	128
 #define INIT_PHYSMEM_REGIONS	4
@@ -120,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
 enum memblock_flags choose_memblock_flags(void);
 
+unsigned long memblock_free_all(void);
+void reset_node_managed_pages(pg_data_t *pgdat);
+void reset_all_zones_managed_pages(void);
+
 /* Low level functions */
 int memblock_add_range(struct memblock_type *type,
 		       phys_addr_t base, phys_addr_t size,
@@ -301,10 +317,116 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 }
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE	0
+
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);
+
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+				 phys_addr_t min_addr, phys_addr_t max_addr,
+				 int nid);
+void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
+				     phys_addr_t min_addr, phys_addr_t max_addr,
+				     int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+			     phys_addr_t min_addr, phys_addr_t max_addr,
+			     int nid);
+
+static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_raw(phys_addr_t size,
+					       phys_addr_t align)
+{
+	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+					  MEMBLOCK_ALLOC_ACCESSIBLE,
+					  NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from(phys_addr_t size,
+						phys_addr_t align,
+						phys_addr_t min_addr)
+{
+	return memblock_alloc_try_nid(size, align, min_addr,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
+						   phys_addr_t align)
+{
+	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+					      MEMBLOCK_ALLOC_ACCESSIBLE,
+					      NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_low(phys_addr_t size,
+					       phys_addr_t align)
+{
+	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
+static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
+							phys_addr_t align)
+{
+	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+					      ARCH_LOW_ADDRESS_LIMIT,
+					      NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
+							phys_addr_t align,
+							phys_addr_t min_addr)
+{
+	return memblock_alloc_try_nid_nopanic(size, align, min_addr,
+					      MEMBLOCK_ALLOC_ACCESSIBLE,
+					      NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_node(phys_addr_t size,
+						phys_addr_t align, int nid)
+{
+	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
+							 int nid)
+{
+	return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
+					      MEMBLOCK_LOW_LIMIT,
+					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void __init memblock_free_early(phys_addr_t base,
+					      phys_addr_t size)
+{
+	__memblock_free_early(base, size);
+}
+
+static inline void __init memblock_free_early_nid(phys_addr_t base,
+						  phys_addr_t size, int nid)
+{
+	__memblock_free_early(base, size);
+}
 
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+	__memblock_free_late(base, size);
+}
 
 /*
 * Set the allocation direction to bottom-up or top-down.
@@ -324,10 +446,6 @@ static inline bool memblock_bottom_up(void)
 	return memblock.bottom_up;
 }
 
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
-#define MEMBLOCK_ALLOC_ACCESSIBLE	0
-
 phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
 					phys_addr_t start, phys_addr_t end,
 					enum memblock_flags flags);
@@ -433,6 +551,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 	     i < memblock_type->cnt;					\
 	     i++, rgn = &memblock_type->regions[i])
 
+extern void *alloc_large_system_hash(const char *tablename,
+				     unsigned long bucketsize,
+				     unsigned long numentries,
+				     int scale,
+				     int flags,
+				     unsigned int *_hash_shift,
+				     unsigned int *_hash_mask,
+				     unsigned long low_limit,
+				     unsigned long high_limit);
+
+#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
+#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
+					 * shift passed via *_hash_shift */
+#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist;		/* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
+
 #ifdef CONFIG_MEMTEST
 extern void early_memtest(phys_addr_t start, phys_addr_t end);
 #else
@@ -440,12 +583,6 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 {
 }
 #endif
 
-#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
-	return 0;
-}
-#endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
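The renamed allocators above drop the memblock_virt_ prefix and move the physical-address API under a memblock_phys_ prefix, with alignment now always passed explicitly. A hedged usage sketch — early_table_init() is a hypothetical __init caller, not code from this merge:

#include <linux/memblock.h>

static void __init early_table_init(int nid)
{
	void *v;
	phys_addr_t pa;

	/* Was memblock_virt_alloc(); panics on failure as before. */
	v = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

	/* Node-preferring variant that returns NULL instead of panicking. */
	v = memblock_alloc_node_nopanic(PAGE_SIZE, nid);
	if (!v)
		v = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

	/* Was memblock_alloc(); returns a physical address. */
	pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	(void)pa;
}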
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 34a28227068d..ffd9cd10fcf3 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -301,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
 extern void try_offline_node(int nid);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern void remove_memory(int nid, u64 start, u64 size);
+extern void __remove_memory(int nid, u64 start, u64 size);
 
 #else
 static inline bool is_mem_section_removable(unsigned long pfn,
@@ -317,11 +318,13 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(int nid, u64 start, u64 size) {}
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
 extern void __ref free_area_init_core_hotplug(int nid);
 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 		void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource, bool online);
 extern int arch_add_memory(int nid, u64 start, u64 size,
@@ -330,7 +333,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
 extern int sparse_add_one_section(struct pglist_data *pgdat,
 		unsigned long start_pfn, struct vmem_altmap *altmap);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
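Per the memory-hotplug commits in this merge, add_memory() and remove_memory() now take device_hotplug_lock internally, while the new __add_memory()/__remove_memory() variants are for callers that already hold it (the powernv memtrace path, for instance). A hedged sketch of the convention — probe_example() is a hypothetical caller; lock_device_hotplug()/unlock_device_hotplug() are the existing helpers from linux/device.h:

#include <linux/device.h>
#include <linux/memory_hotplug.h>

static int probe_example(int nid, u64 start, u64 size)
{
	int rc;

	/* Simple callers just use the self-locking wrapper: */
	rc = add_memory(nid, start, size);
	if (rc)
		return rc;

	/* Callers holding the lock across more work use __add_memory(): */
	lock_device_hotplug();
	rc = __add_memory(nid, start + size, size);
	/* ... e.g. walk_memory_range()/device_online() under the same lock ... */
	unlock_device_hotplug();

	return rc;
}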
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1e52b8fd1685..fcf9cc9d535f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2163,7 +2163,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state);
 #endif
 
-#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
 void zero_resv_unavail(void);
 #else
 static inline void zero_resv_unavail(void) {}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f0caccd5833..847705a6d0ec 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -633,9 +633,6 @@ typedef struct pglist_data {
 	struct page_ext *node_page_ext;
 #endif
 #endif
-#ifndef CONFIG_NO_BOOTMEM
-	struct bootmem_data *bdata;
-#endif
 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 	/*
 	 * Must be held any time you expect node_start_pfn, node_present_pages
@@ -869,7 +866,7 @@ static inline int is_highmem_idx(enum zone_type idx)
 }
 
 /**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not. This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 2d2096ba1cfe..1ce8e264a269 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -91,8 +91,7 @@
 	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
 	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
 	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
-	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
-	__typeof__(type) name
+	__PCPU_ATTRS(sec) __weak __typeof__(type) name
 #else
 /*
 * Normal declaration and definition macros.
@@ -101,8 +100,7 @@
 	extern __PCPU_ATTRS(sec) __typeof__(type) name
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
-	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
-	__typeof__(type) name
+	__PCPU_ATTRS(sec) __typeof__(type) name
 #endif
 
 /*
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index af8a61be2d8d..9510c677ac70 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -51,8 +51,8 @@ extern void __rb_insert_augmented(struct rb_node *node,
 *
 * On insertion, the user must update the augmented information on the path
 * leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
 * a user provided function to update the augmented information on the
 * affected subtrees.
 */
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 200ed96a05af..f428e86f4800 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -129,9 +129,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
 		b3 = b->sig[3]; b2 = b->sig[2];				\
 		r->sig[3] = op(a3, b3);					\
 		r->sig[2] = op(a2, b2);					\
+		/* fall through */					\
 	case 2:								\
 		a1 = a->sig[1]; b1 = b->sig[1];				\
 		r->sig[1] = op(a1, b1);					\
+		/* fall through */					\
 	case 1:								\
 		a0 = a->sig[0]; b0 = b->sig[0];				\
 		r->sig[0] = op(a0, b0);					\
@@ -161,7 +163,9 @@ static inline void name(sigset_t *set)				\
 	switch (_NSIG_WORDS) {						\
 	case 4: set->sig[3] = op(set->sig[3]);				\
 		set->sig[2] = op(set->sig[2]);				\
+		/* fall through */					\
 	case 2: set->sig[1] = op(set->sig[1]);				\
+		/* fall through */					\
 	case 1: set->sig[0] = op(set->sig[0]);				\
 		break;							\
 	default:							\
@@ -182,6 +186,7 @@ static inline void sigemptyset(sigset_t *set)
 		memset(set, 0, sizeof(sigset_t));
 		break;
 	case 2: set->sig[1] = 0;
+		/* fall through */
 	case 1:	set->sig[0] = 0;
 		break;
 	}
@@ -194,6 +199,7 @@ static inline void sigfillset(sigset_t *set)
 		memset(set, -1, sizeof(sigset_t));
 		break;
 	case 2: set->sig[1] = -1;
+		/* fall through */
 	case 1:	set->sig[0] = -1;
 		break;
 	}
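The rbtree_augmented.h hunk above corrects a function name in the documentation: the call that replaces rb_insert_color() for augmented trees is rb_insert_augmented(). A hedged sketch of the documented insertion pattern — struct mynode, its max-tracking augment, and my_insert() are illustrative names, while rb_link_node(), rb_insert_augmented(), and RB_DECLARE_CALLBACKS() are the existing lib/rbtree API:

#include <linux/rbtree_augmented.h>

struct mynode {
	struct rb_node rb;
	unsigned long val;
	unsigned long subtree_max;	/* augmented: max val in this subtree */
};

static unsigned long compute_subtree_max(struct mynode *n)
{
	unsigned long max = n->val, v;

	if (n->rb.rb_left) {
		v = rb_entry(n->rb.rb_left, struct mynode, rb)->subtree_max;
		if (v > max)
			max = v;
	}
	if (n->rb.rb_right) {
		v = rb_entry(n->rb.rb_right, struct mynode, rb)->subtree_max;
		if (v > max)
			max = v;
	}
	return max;
}

RB_DECLARE_CALLBACKS(static, my_augment, struct mynode, rb,
		     unsigned long, subtree_max, compute_subtree_max);

static void my_insert(struct mynode *node, struct rb_root *root)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	unsigned long val = node->val;

	/* Update the augmented data on the way down... */
	while (*link) {
		struct mynode *p = rb_entry(*link, struct mynode, rb);

		parent = *link;
		if (p->subtree_max < val)
			p->subtree_max = val;
		link = val < p->val ? &parent->rb_left : &parent->rb_right;
	}

	/* ...then link and rebalance with the augmented variant. */
	node->subtree_max = val;
	rb_link_node(&node->rb, parent, link);
	rb_insert_augmented(&node->rb, root, &my_augment);
}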