Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  29
-rw-r--r--  lib/bcd.c            |   8
-rw-r--r--  lib/crc32.c          |   9
-rw-r--r--  lib/decompress.c     |   9
-rw-r--r--  lib/dynamic_debug.c  |  56
-rw-r--r--  lib/gcd.c            |   3
-rw-r--r--  lib/gen_crc32table.c |   6
-rw-r--r--  lib/genalloc.c       |  88
-rw-r--r--  lib/idr.c            |  32
-rw-r--r--  lib/kobject_uevent.c |   5
-rw-r--r--  lib/nlattr.c         |   4
-rw-r--r--  lib/parser.c         |  10
-rw-r--r--  lib/plist.c          |   4
-rw-r--r--  lib/scatterlist.c    |  16
-rw-r--r--  lib/spinlock_debug.c |  32
-rw-r--r--  lib/swiotlb.c        |  33
-rw-r--r--  lib/vsprintf.c       | 139
17 files changed, 321 insertions(+), 162 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2403a63b5da5..7fba3a98967f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
 config HARDLOCKUP_DETECTOR
-	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
-		 !HAVE_NMI_WATCHDOG
+	def_bool y
+	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
+	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
-	depends on LOCKUP_DETECTOR
+	depends on HARDLOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "hard lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
 
 config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
 	int
-	depends on LOCKUP_DETECTOR
+	depends on HARDLOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
 	default 1 if BOOTPARAM_HARDLOCKUP_PANIC
@@ -452,7 +453,8 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && \
-		(X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
+		(X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || \
+		 MICROBLAZE || TILE || ARM64)
 
 	select DEBUG_FS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -629,6 +631,20 @@ config PROVE_RCU_REPEATEDLY
 
 	 Say N if you are unsure.
 
+config PROVE_RCU_DELAY
+	bool "RCU debugging: preemptible RCU race provocation"
+	depends on DEBUG_KERNEL && PREEMPT_RCU
+	default n
+	help
+	  There is a class of races that involve an unlikely preemption
+	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+	  been set to INT_MIN.  This feature inserts a delay at that
+	  point to increase the probability of these races.
+
+	  Say Y to increase probability of preemption of __rcu_read_unlock().
+
+	  Say N if you are unsure.
+
 config SPARSE_RCU_POINTER
 	bool "RCU debugging: sparse-based checks for pointer usage"
 	default n
@@ -739,7 +755,8 @@ config DEBUG_BUGVERBOSE
 	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
 	depends on BUG
 	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
-		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
+		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || \
+		   TILE || ARM64
 	default y
 	help
 	  Say Y here to make BUG() panics output the file name and line number
diff --git a/lib/bcd.c b/lib/bcd.c
index 55efaf742346..40d304efe272 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -1,14 +1,14 @@
 #include <linux/bcd.h>
 #include <linux/export.h>
 
-unsigned bcd2bin(unsigned char val)
+unsigned _bcd2bin(unsigned char val)
 {
 	return (val & 0x0f) + (val >> 4) * 10;
 }
-EXPORT_SYMBOL(bcd2bin);
+EXPORT_SYMBOL(_bcd2bin);
 
-unsigned char bin2bcd(unsigned val)
+unsigned char _bin2bcd(unsigned val)
 {
 	return ((val / 10) << 4) + val % 10;
 }
-EXPORT_SYMBOL(bin2bcd);
+EXPORT_SYMBOL(_bin2bcd);
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4de..072fbd8234d5 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 #else
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
+	return crc32_le_generic(crc, p, len,
+			(const u32 (*)[256])crc32table_le, CRCPOLY_LE);
 }
 
 u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+	return crc32_le_generic(crc, p, len,
+			(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
 EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 #else
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
+	return crc32_be_generic(crc, p, len,
+			(const u32 (*)[256])crc32table_be, CRCPOLY_BE);
 }
 #endif
 EXPORT_SYMBOL(crc32_be);
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60ab..31a804277282 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/init.h>
 
 #ifndef CONFIG_DECOMPRESS_GZIP
 # define gunzip NULL
@@ -31,11 +32,13 @@
 # define unlzo NULL
 #endif
 
-static const struct compress_format {
+struct compress_format {
 	unsigned char magic[2];
 	const char *name;
 	decompress_fn decompressor;
-} compressed_formats[] = {
+};
+
+static const struct compress_format compressed_formats[] __initdata = {
 	{ {037, 0213}, "gzip", gunzip },
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
 	{ {0, 0}, NULL, NULL }
 };
 
-decompress_fn decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
 				const char **name)
 {
 	const struct compress_format *cf;
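
The lib/bcd.c hunk above only renames the out-of-line helpers (the header side, not shown here, lets constant arguments fold at compile time and falls back to these for runtime values); the conversion arithmetic itself is untouched. A quick standalone userspace check of that arithmetic — a minimal sketch, not kernel code, with the function names borrowed for illustration:

    #include <assert.h>

    /* Same arithmetic as lib/bcd.c: a BCD byte stores one decimal digit
     * per nibble, so 0x42 encodes decimal 42. */
    static unsigned bcd2bin(unsigned char val)
    {
        return (val & 0x0f) + (val >> 4) * 10;
    }

    static unsigned char bin2bcd(unsigned val)
    {
        return ((val / 10) << 4) + val % 10;
    }

    int main(void)
    {
        assert(bcd2bin(0x42) == 42);
        assert(bin2bcd(59) == 0x59);              /* typical RTC seconds field */
        for (unsigned v = 0; v < 100; v++)
            assert(bcd2bin(bin2bcd(v)) == v);     /* round-trips for 0..99 */
        return 0;
    }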
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7ca29a0a3019..e7f7d993357a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -521,25 +521,25 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
 	int pos_after_tid;
 	int pos = 0;
 
-	pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+	*buf = '\0';
+
 	if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
 		if (in_interrupt())
-			pos += snprintf(buf + pos, remaining(pos), "%s ",
-						"<intr>");
+			pos += snprintf(buf + pos, remaining(pos), "<intr> ");
 		else
 			pos += snprintf(buf + pos, remaining(pos), "[%d] ",
-						task_pid_vnr(current));
+					task_pid_vnr(current));
 	}
 	pos_after_tid = pos;
 	if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
 		pos += snprintf(buf + pos, remaining(pos), "%s:",
-					desc->modname);
+				desc->modname);
 	if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
 		pos += snprintf(buf + pos, remaining(pos), "%s:",
-					desc->function);
+				desc->function);
 	if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
 		pos += snprintf(buf + pos, remaining(pos), "%d:",
-					desc->lineno);
+				desc->lineno);
 	if (pos - pos_after_tid)
 		pos += snprintf(buf + pos, remaining(pos), " ");
 	if (pos >= PREFIX_SIZE)
@@ -559,9 +559,13 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
+
+	res = printk(KERN_DEBUG "%s%pV",
+		     dynamic_emit_prefix(descriptor, buf), &vaf);
+
 	va_end(args);
 
 	return res;
@@ -574,15 +578,26 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
 	struct va_format vaf;
 	va_list args;
 	int res;
-	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+	if (!dev) {
+		res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+	} else {
+		char buf[PREFIX_SIZE];
+
+		res = dev_printk_emit(7, dev, "%s%s %s: %pV",
+				      dynamic_emit_prefix(descriptor, buf),
+				      dev_driver_string(dev), dev_name(dev),
+				      &vaf);
+	}
+
 	va_end(args);
 
 	return res;
@@ -592,20 +607,35 @@ EXPORT_SYMBOL(__dynamic_dev_dbg);
 #ifdef CONFIG_NET
 
 int __dynamic_netdev_dbg(struct _ddebug *descriptor,
-			  const struct net_device *dev, const char *fmt, ...)
+			 const struct net_device *dev, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
 	int res;
-	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+	if (dev && dev->dev.parent) {
+		char buf[PREFIX_SIZE];
+
+		res = dev_printk_emit(7, dev->dev.parent,
+				      "%s%s %s %s: %pV",
+				      dynamic_emit_prefix(descriptor, buf),
+				      dev_driver_string(dev->dev.parent),
+				      dev_name(dev->dev.parent),
+				      netdev_name(dev), &vaf);
+	} else if (dev) {
+		res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+	} else {
+		res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+	}
+
 	va_end(args);
 
 	return res;
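
With KERN_DEBUG dropped from the generated prefix and the device paths routed through dev_printk_emit() at level 7, the text a driver sees in dmesg is unchanged; what changes is that the structured fields (driver string, device name) now reach /dev/kmsg as a dictionary. A hedged kernel-context sketch of an ordinary call site (the driver and message are hypothetical):

    #include <linux/device.h>

    /* With CONFIG_DYNAMIC_DEBUG, dev_dbg() compiles to a call through
     * __dynamic_dev_dbg(); after this patch that lands in
     * dev_printk_emit(7, dev, ...) rather than __dev_printk(). */
    static int example_probe(struct device *dev)
    {
        dev_dbg(dev, "probe: using default parameters\n");
        return 0;
    }

The call site is still enabled at runtime the usual way, e.g. echo 'func example_probe +p' > <debugfs>/dynamic_debug/control.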
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
 
 	if (a < b)
 		swap(a, b);
+
+	if (!b)
+		return a;
 	while ((r = a % b) != 0) {
 		a = b;
 		b = r;
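
The new !b guard matters because a zero operand otherwise reaches the modulo: gcd(0, n) swaps to gcd(n, 0) and then divides by zero. A standalone userspace check of the fixed algorithm — a sketch mirroring lib/gcd.c, not the kernel build itself:

    #include <assert.h>

    /* Euclid's algorithm as in lib/gcd.c after the fix, with an explicit
     * guard so gcd(x, 0) and gcd(0, x) return x instead of faulting. */
    static unsigned long gcd(unsigned long a, unsigned long b)
    {
        unsigned long r;

        if (a < b) {
            unsigned long t = a; a = b; b = t;   /* swap() in the kernel */
        }
        if (!b)
            return a;
        while ((r = a % b) != 0) {
            a = b;
            b = r;
        }
        return b;
    }

    int main(void)
    {
        assert(gcd(12, 18) == 6);
        assert(gcd(0, 7) == 7);   /* divided by zero before the !b guard */
        assert(gcd(7, 0) == 7);
        return 0;
    }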
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d9..71fcfcd96410 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
 
 	if (CRC_LE_BITS > 1) {
 		crc32init_le();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32table_le[%d][%d] = {",
 		       LE_TABLE_ROWS, LE_TABLE_SIZE);
 		output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
 
 	if (CRC_BE_BITS > 1) {
 		crc32init_be();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32table_be[%d][%d] = {",
 		       BE_TABLE_ROWS, BE_TABLE_SIZE);
 		output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@
 	}
 	if (CRC_LE_BITS > 1) {
 		crc32cinit_le();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32ctable_le[%d][%d] = {",
 		       LE_TABLE_ROWS, LE_TABLE_SIZE);
 		output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..ca208a92628c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
+		pool->algo = gen_pool_first_fit;
+		pool->data = NULL;
 	}
 	return pool;
 }
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
  *
  * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
 retry:
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
-						       start_bit, nbits, 0);
+		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+				pool->data);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
 	return size;
 }
 EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL use gen_pool_first_fit as default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+	rcu_read_lock();
+
+	pool->algo = algo;
+	if (!pool->algo)
+		pool->algo = gen_pool_first_fit;
+
+	pool->data = data;
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * which we can allocate the memory.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	unsigned long start_bit = size;
+	unsigned long len = size + 1;
+	unsigned long index;
+
+	index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+	while (index < size) {
+		int next_bit = find_next_bit(map, size, index + nr);
+		if ((next_bit - index) < len) {
+			len = next_bit - index;
+			start_bit = index;
+			if (len == nr)
+				return start_bit;
+		}
+		index = bitmap_find_next_zero_area(map, size,
+						   next_bit + 1, nr, 0);
+	}
+
+	return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
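
A hedged sketch of how a driver might use the new genalloc hook — the pool, its backing region, and the sizes here are invented for illustration:

    #include <linux/genalloc.h>

    /* Hypothetical setup: a pool over some driver-private SRAM region,
     * switched from the default first-fit policy to the new best-fit one. */
    static int example_pool_setup(unsigned long sram_virt, size_t sram_size)
    {
        unsigned long block;
        struct gen_pool *pool;

        pool = gen_pool_create(5, -1);   /* 2^5 = 32-byte allocation granule */
        if (!pool)
            return -ENOMEM;
        if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
            gen_pool_destroy(pool);
            return -ENOMEM;
        }

        /* switch from the default first-fit policy to best-fit */
        gen_pool_set_algo(pool, gen_pool_best_fit, NULL);

        block = gen_pool_alloc(pool, 128);   /* smallest free run >= 128 bytes */
        if (block)
            gen_pool_free(pool, block, 128);
        return 0;
    }

Since gen_pool_create() now initializes pool->algo to gen_pool_first_fit, existing callers that never touch gen_pool_set_algo() keep their old behavior.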
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a99..648239079dd2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
  * that id to this code and it returns your pointer.
  * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
  * don't need to go to the memory "store" during an id allocate, just
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
-	while (idp->id_free_cnt < IDR_FREE_MAX) {
+	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			sh = IDR_BITS*l;
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
-		if ((id >= MAX_ID_BIT) || (id < 0))
+		if ((id >= MAX_IDR_BIT) || (id < 0))
 			return IDR_NOMORE_SPACE;
 		if (l == 0)
 			break;
@@ -223,7 +223,7 @@ build_up:
 	 * Add a new layer to the top of the tree if the requested
 	 * id is larger than the currently allocated space.
 	 */
-	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+	while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
 		if (!p->count) {
 			/* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
 
 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	int id;
 
 	id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
 static void sub_remove(struct idr *idp, int shift, int id)
 {
 	struct idr_layer *p = idp->top;
-	struct idr_layer **pa[MAX_LEVEL];
+	struct idr_layer **pa[MAX_IDR_LEVEL];
 	struct idr_layer ***paa = &pa[0];
 	struct idr_layer *to_free;
 	int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *to_free;
 
 	/* Mask off upper bits we don't use for the search. */
-	id &= MAX_ID_MASK;
+	id &= MAX_IDR_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
 		to_free->bitmap = to_free->count = 0;
 		free_layer(to_free);
 	}
-	while (idp->id_free_cnt >= IDR_FREE_MAX) {
+	while (idp->id_free_cnt >= MAX_IDR_FREE) {
 		p = get_from_free_list(idp);
 		/*
 		 * Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
 	int n, id, max;
 	int bt_mask;
 	struct idr_layer *p;
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
 	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
-	id &= MAX_ID_MASK;
+	id &= MAX_IDR_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
 {
 	int n, id, max, error = 0;
 	struct idr_layer *p;
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
  */
 void *idr_get_next(struct idr *idp, int *nextidp)
 {
-	struct idr_layer *p, *pa[MAX_LEVEL];
+	struct idr_layer *p, *pa[MAX_IDR_LEVEL];
 	struct idr_layer **paa = &pa[0];
 	int id = *nextidp;
 	int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 
 	n = (p->layer+1) * IDR_BITS;
 
-	id &= MAX_ID_MASK;
+	id &= MAX_IDR_MASK;
 
 	if (id >= (1 << n))
 		return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
 */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
-	struct idr_layer *pa[MAX_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL];
 	struct ida_bitmap *bitmap;
 	unsigned long flags;
 	int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	if (t < 0)
 		return _idr_rc_to_errno(t);
 
-	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
 		return -ENOSPC;
 
 	if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	}
 
 	id = idr_id * IDA_BITMAP_BITS + t;
-	if (id >= MAX_ID_BIT)
+	if (id >= MAX_IDR_BIT)
 		return -ENOSPC;
 
 	__set_bit(t, bitmap->bitmap);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 0401d2916d9f..52e5abbc41db 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
 	struct uevent_sock *ue_sk;
 	struct netlink_kernel_cfg cfg = {
 		.groups	= 1,
+		.flags	= NL_CFG_F_NONROOT_RECV,
 	};
 
 	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
 	if (!ue_sk)
 		return -ENOMEM;
 
-	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
-					  THIS_MODULE, &cfg);
+	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
 	if (!ue_sk->sk) {
 		printk(KERN_ERR
 		       "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
 
 static int __init kobject_uevent_init(void)
 {
-	netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
 	return register_pernet_subsys(&uevent_net_ops);
 }
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..18eca7809b08 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
 	[NLA_U64]	= sizeof(u64),
 	[NLA_MSECS]	= sizeof(u64),
 	[NLA_NESTED]	= NLA_HDRLEN,
+	[NLA_S8]	= sizeof(s8),
+	[NLA_S16]	= sizeof(s16),
+	[NLA_S32]	= sizeof(s32),
+	[NLA_S64]	= sizeof(s64),
 };
 
 static int validate_nla(const struct nlattr *nla, int maxtype,
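
The new NLA_S* entries give signed attribute types the same minimum-length validation the unsigned ones already had. A hedged sketch of a netlink policy that relies on it (the attribute enum is hypothetical):

    #include <net/netlink.h>

    enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_TEMP, __EXAMPLE_ATTR_MAX };
    #define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

    /* A signed 32-bit attribute, e.g. a temperature in millidegrees that
     * can legitimately be negative; nla_parse() will now reject payloads
     * shorter than sizeof(s32) thanks to nla_attr_minlen[NLA_S32] above. */
    static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_TEMP] = { .type = NLA_S32 },
    };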
diff --git a/lib/parser.c b/lib/parser.c
index c43410084838..52cfa69f73df 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[])
 *
 * Description: Given a &substring_t and a base, attempts to parse the substring
 * as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
 */
 static int match_number(substring_t *s, int *result, int base)
 {
 	char *endp;
 	char *buf;
 	int ret;
+	long val;
 	size_t len = s->to - s->from;
 
 	buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
 		return -ENOMEM;
 	memcpy(buf, s->from, len);
 	buf[len] = '\0';
-	*result = simple_strtol(buf, &endp, base);
+
+	ret = 0;
+	val = simple_strtol(buf, &endp, base);
 	if (endp == buf)
 		ret = -EINVAL;
+	else if (val < (long)INT_MIN || val > (long)INT_MAX)
+		ret = -ERANGE;
+	else
+		*result = (int) val;
 	kfree(buf);
 	return ret;
 }
diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
 	int nr_expect = 0, i, loop;
 	unsigned int r = local_clock();
 
-	printk(KERN_INFO "start plist test\n");
+	pr_debug("start plist test\n");
 	plist_head_init(&test_head);
 	for (i = 0; i < ARRAY_SIZE(test_node); i++)
 		plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
 		plist_test_check(nr_expect);
 	}
 
-	printk(KERN_INFO "end plist test\n");
+	pr_debug("end plist test\n");
 	return 0;
 }
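
The parser fix parses into a long first and rejects anything that will not fit an int, instead of silently truncating on 64-bit. A standalone userspace check of the same logic — a sketch in which strtol stands in for the kernel's simple_strtol:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Mirrors the fixed match_number(): parse as long, then reject values
     * out of int range rather than truncating them. */
    static int parse_int(const char *buf, int *result, int base)
    {
        char *endp;
        long val = strtol(buf, &endp, base);

        if (endp == buf)
            return -EINVAL;
        if (val < (long)INT_MIN || val > (long)INT_MAX)
            return -ERANGE;
        *result = (int)val;
        return 0;
    }

    int main(void)
    {
        int v;
        printf("%d\n", parse_int("42", &v, 10));          /* 0, v == 42 */
        printf("%d\n", parse_int("9999999999", &v, 10));  /* -ERANGE with 64-bit long */
        return 0;
    }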
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..e76d85cf3175 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -404,14 +404,13 @@ EXPORT_SYMBOL(sg_miter_start);
 * @miter: sg mapping iter to proceed
 *
 * Description:
- *   Proceeds @miter@ to the next mapping. @miter@ should have been
- *   started using sg_miter_start().  On successful return,
- *   @miter@->page, @miter@->addr and @miter@->length point to the
- *   current mapping.
+ *   Proceeds @miter to the next mapping. @miter should have been started
+ *   using sg_miter_start().  On successful return, @miter->page,
+ *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
- *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
- *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
+ *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
@@ -465,7 +464,8 @@ EXPORT_SYMBOL(sg_miter_next);
 *   resources (kmap) need to be released during iteration.
 *
 * Context:
- *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
+ *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
+ *   otherwise.
 */
 void sg_miter_stop(struct sg_mapping_iter *miter)
 {
@@ -479,7 +479,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
-			WARN_ON(!irqs_disabled());
+			WARN_ON_ONCE(preemptible());
 			kunmap_atomic(miter->addr);
 		} else
 			kunmap(miter->page);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 {
 	u64 i;
 	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
 
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_spin_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			spin_dump(lock, "lockup suspected");
+	for (i = 0; i < loops; i++) {
+		if (arch_spin_trylock(&lock->raw_lock))
+			return;
+		__delay(1);
+	}
+	/* lockup suspected: */
+	spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
-			trigger_all_cpu_backtrace();
+	trigger_all_cpu_backtrace();
 #endif
-		}
-	}
+
+	/*
+	 * The trylock above was causing a livelock.  Give the lower level arch
+	 * specific lock code a chance to acquire the lock. We have already
+	 * printed a warning/backtrace at this point. The non-debug arch
+	 * specific code might actually succeed in acquiring the lock. If it is
+	 * not successful, the end-result is the same - there is no forward
+	 * progress.
+	 */
+	arch_spin_lock(&lock->raw_lock);
 }
 
 void do_raw_spin_lock(raw_spinlock_t *lock)
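
The scatterlist assertion change tracks kmap_atomic(): it only requires preemption to be disabled, not IRQs, so the old WARN_ON fired spuriously for callers holding a plain spinlock. A hedged kernel-context sketch of an atomic iteration (the helper itself is hypothetical):

    #include <linux/scatterlist.h>

    /* Zero the first `len` bytes described by an sg list.  SG_MITER_ATOMIC
     * makes the iterator use kmap_atomic(), so this loop must not sleep;
     * preemption (not IRQs) stays disabled between sg_miter_next() and
     * sg_miter_stop(). */
    static void example_sg_zero(struct scatterlist *sgl, unsigned int nents,
                                size_t len)
    {
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
        while (len && sg_miter_next(&miter)) {
            size_t n = min(miter.length, len);

            memset(miter.addr, 0, n);
            len -= n;
        }
        sg_miter_stop(&miter);
    }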
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 45bc1f83a5ad..f114bf6a8e13 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
-void __init
+static void __init
 swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
 	unsigned long bytes;
@@ -206,8 +206,9 @@ swiotlb_init(int verbose)
 int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
-	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
+	unsigned long bytes, req_nslabs = io_tlb_nslabs;
 	unsigned int order;
+	int rc = 0;
 
 	if (!io_tlb_nslabs) {
 		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
@@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		order--;
 	}
 
-	if (!io_tlb_start)
-		goto cleanup1;
-
+	if (!io_tlb_start) {
+		io_tlb_nslabs = req_nslabs;
+		return -ENOMEM;
+	}
 	if (order != get_order(bytes)) {
 		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
 		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
 		io_tlb_nslabs = SLABS_PER_PAGE << order;
-		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	}
+	rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
+	if (rc)
+		free_pages((unsigned long)io_tlb_start, order);
+	return rc;
+}
+
+int
+swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+{
+	unsigned long i, bytes;
+
+	bytes = nslabs << IO_TLB_SHIFT;
+
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
+
 	memset(io_tlb_start, 0, bytes);
 
 	/*
@@ -288,10 +305,8 @@ cleanup3:
 	io_tlb_list = NULL;
 cleanup2:
 	io_tlb_end = NULL;
-	free_pages((unsigned long)io_tlb_start, order);
 	io_tlb_start = NULL;
-cleanup1:
-	io_tlb_nslabs = req_nslabs;
+	io_tlb_nslabs = 0;
	return -ENOMEM;
 }
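
Splitting the late path into swiotlb_late_init_with_tbl() lets a caller hand in a bounce buffer it allocated (and possibly translated) itself, instead of letting swiotlb_late_init_with_default_size() do the allocation. A hedged sketch of that calling pattern — the setup function and its fixups are hypothetical:

    #include <linux/swiotlb.h>
    #include <linux/gfp.h>

    /* Hypothetical late setup: allocate the IO TLB ourselves, run any
     * platform-specific fixups on it, then register it with swiotlb. */
    static int example_swiotlb_setup(unsigned long nslabs)
    {
        unsigned long bytes = nslabs << IO_TLB_SHIFT;
        char *tlb;

        tlb = (char *)__get_free_pages(GFP_DMA, get_order(bytes));
        if (!tlb)
            return -ENOMEM;

        /* ... platform-specific fixups of the buffer would go here ... */

        return swiotlb_late_init_with_tbl(tlb, nslabs);
    }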
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0e337541f005..39c99fea7c03 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -174,35 +174,25 @@ char *put_dec_trunc8(char *buf, unsigned r)
 	unsigned q;
 
 	/* Copy of previous function's body with added early returns */
-	q = (r * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (r - 10 * q) + '0'; /* 2 */
-	if (q == 0)
-		return buf;
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 3 */
-	if (r == 0)
-		return buf;
-	q = (r * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (r - 10 * q) + '0'; /* 4 */
-	if (q == 0)
-		return buf;
-	r = (q * (uint64_t)0x1999999a) >> 32;
-	*buf++ = (q - 10 * r) + '0'; /* 5 */
-	if (r == 0)
-		return buf;
-	q = (r * 0x199a) >> 16;
-	*buf++ = (r - 10 * q) + '0'; /* 6 */
+	while (r >= 10000) {
+		q = r + '0';
+		r = (r * (uint64_t)0x1999999a) >> 32;
+		*buf++ = q - 10*r;
+	}
+
+	q = (r * 0x199a) >> 16;	/* r <= 9999 */
+	*buf++ = (r - 10 * q) + '0';
 	if (q == 0)
 		return buf;
-	r = (q * 0xcd) >> 11;
-	*buf++ = (q - 10 * r) + '0'; /* 7 */
+	r = (q * 0xcd) >> 11;	/* q <= 999 */
+	*buf++ = (q - 10 * r) + '0';
 	if (r == 0)
 		return buf;
-	q = (r * 0xcd) >> 11;
-	*buf++ = (r - 10 * q) + '0'; /* 8 */
+	q = (r * 0xcd) >> 11;	/* r <= 99 */
+	*buf++ = (r - 10 * q) + '0';
 	if (q == 0)
 		return buf;
-	*buf++ = q + '0'; /* 9 */
+	*buf++ = q + '0';	/* q <= 9 */
 	return buf;
 }
@@ -243,18 +233,34 @@ char *put_dec(char *buf, unsigned long long n)
 
 /* Second algorithm: valid only for 64-bit long longs */
 
+/* See comment in put_dec_full9 for choice of constants */
 static noinline_for_stack
-char *put_dec_full4(char *buf, unsigned q)
+void put_dec_full4(char *buf, unsigned q)
 {
 	unsigned r;
-	r = (q * 0xcccd) >> 19;
-	*buf++ = (q - 10 * r) + '0';
-	q = (r * 0x199a) >> 16;
-	*buf++ = (r - 10 * q) + '0';
+	r = (q * 0xccd) >> 15;
+	buf[0] = (q - 10 * r) + '0';
+	q = (r * 0xcd) >> 11;
+	buf[1] = (r - 10 * q) + '0';
 	r = (q * 0xcd) >> 11;
-	*buf++ = (q - 10 * r) + '0';
-	*buf++ = r + '0';
-	return buf;
+	buf[2] = (q - 10 * r) + '0';
+	buf[3] = r + '0';
+}
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999.  The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (d1 in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned put_dec_helper4(char *buf, unsigned x)
+{
+	uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
+
+	put_dec_full4(buf, x - q * 10000);
+	return q;
 }
 
 /* Based on code by Douglas W. Jones found at
@@ -276,28 +282,19 @@ char *put_dec(char *buf, unsigned long long n)
 	d3  = (h >> 16); /* implicit "& 0xffff" */
 
 	q   = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+	q = put_dec_helper4(buf, q);
+
+	q += 7671 * d3 + 9496 * d2 + 6 * d1;
+	q = put_dec_helper4(buf+4, q);
+
+	q += 4749 * d3 + 42 * d2;
+	q = put_dec_helper4(buf+8, q);
 
-	buf = put_dec_full4(buf, q % 10000);
-	q   = q / 10000;
-
-	d1  = q + 7671 * d3 + 9496 * d2 + 6 * d1;
-	buf = put_dec_full4(buf, d1 % 10000);
-	q   = d1 / 10000;
-
-	d2  = q + 4749 * d3 + 42 * d2;
-	buf = put_dec_full4(buf, d2 % 10000);
-	q   = d2 / 10000;
-
-	d3  = q + 281 * d3;
-	if (!d3)
-		goto done;
-	buf = put_dec_full4(buf, d3 % 10000);
-	q   = d3 / 10000;
-	if (!q)
-		goto done;
-	buf = put_dec_full4(buf, q);
- done:
-	while (buf[-1] == '0')
+	q += 281 * d3;
+	buf += 12;
+	if (q)
+		buf = put_dec_trunc8(buf, q);
+	else while (buf[-1] == '0')
 		--buf;
 
 	return buf;
@@ -990,7 +987,7 @@ int kptr_restrict __read_mostly;
 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
 * - 'MF' For a 6-byte MAC FDDI address, it prints the address
 *       with a dash-separated hex notation
- * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth)
+ * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
 *       IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
 *       IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -1341,7 +1338,10 @@ qualifier:
 *  %pR output the address range in a struct resource with decoded flags
 *  %pr output the address range in a struct resource with raw flags
 *  %pM output a 6-byte MAC address with colons
+ *  %pMR output a 6-byte MAC address with colons in reversed order
+ *  %pMF output a 6-byte MAC address with dashes
 *  %pm output a 6-byte MAC address without colons
+ *  %pmR output a 6-byte MAC address without colons in reversed order
 *  %pI4 print an IPv4 address without leading zeros
 *  %pi4 print an IPv4 address with leading zeros
 *  %pI6 print an IPv6 address with colons
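
The new helper leans on a fixed-point reciprocal: 0x346DC5D7 / 2^43 is just slightly more than 1/10000, so the multiply-and-shift reproduces x/10000 exactly for every value the put_dec() pipeline can feed it. The bound quoted in the patch comment can be checked exhaustively — a standalone sketch (the loop takes a few seconds):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* The patch claims the approximation holds for all
         * x < 1,128,869,999; verify that, and that it first fails there. */
        for (uint64_t x = 0; x < 1128869999ULL; x++)
            assert(((x * 0x346DC5D7ULL) >> 43) == x / 10000);

        uint64_t x = 1128869999ULL;
        printf("first mismatch at %llu: %llu != %llu\n",
               (unsigned long long)x,
               (unsigned long long)((x * 0x346DC5D7ULL) >> 43),
               (unsigned long long)(x / 10000));
        return 0;
    }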
@@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 	s16 field_width;
 	bool is_sign;
 
-	while (*fmt && *str) {
+	while (*fmt) {
 		/* skip any white space in format */
 		/* white space in format matchs any amount of
		 * white space, including none, in the input.
		 */
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
		 * advance both strings to next white space
		 */
 		if (*fmt == '*') {
+			if (!*str)
+				break;
 			while (!isspace(*fmt) && *fmt != '%' && *fmt)
 				fmt++;
 			while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@
 			}
 		}
 
-		if (!*fmt || !*str)
+		if (!*fmt)
+			break;
+
+		if (*fmt == 'n') {
+			/* return number of characters read so far */
+			*va_arg(args, int *) = str - buf;
+			++fmt;
+			continue;
+		}
+
+		if (!*str)
 			break;
 
 		base = 10;
@@ -2103,13 +2115,6 @@
 				num++;
 			}
 			continue;
-		case 'n':
-			/* return number of characters read so far */
-		{
-			int *i = (int *)va_arg(args, int*);
-			*i = str - buf;
-		}
-			continue;
 		case 'o':
 			base = 8;
 			break;
@@ -2210,16 +2215,6 @@
 		str = next;
 	}
 
-	/*
-	 * Now we've come all the way through so either the input string or the
-	 * format ended. In the former case, there can be a %n at the current
-	 * position in the format that needs to be filled.
-	 */
-	if (*fmt == '%' && *(fmt + 1) == 'n') {
-		int *p = (int *)va_arg(args, int *);
-		*p = str - buf;
-	}
-
 	return num;
 }
 EXPORT_SYMBOL(vsscanf);