From f6789593d5cea42a4ecb1cbeab6a23ade5ebbba7 Mon Sep 17 00:00:00 2001
From: Maxim Patlasov
Date: Wed, 30 Jul 2014 16:08:21 -0700
Subject: mm/page-writeback.c: fix divide by zero in bdi_dirty_limits()

Under memory pressure, it is possible for dirty_thresh, calculated by
global_dirty_limits() in balance_dirty_pages(), to equal zero.  Then, if
strictlimit is true, bdi_dirty_limits() tries to resolve the proportion:

  bdi_bg_thresh : bdi_thresh = background_thresh : dirty_thresh

by dividing by zero.

Signed-off-by: Maxim Patlasov
Acked-by: Rik van Riel
Cc: Michal Hocko
Cc: KOSAKI Motohiro
Cc: Wu Fengguang
Cc: Johannes Weiner
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page-writeback.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3f4c75..e0c943014eb7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
 	*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 	if (bdi_bg_thresh)
-		*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
-					 background_thresh,
-					 dirty_thresh);
+		*bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+							background_thresh,
+							dirty_thresh) : 0;
 
 	/*
 	 * In order to avoid the stacked BDI deadlock we need
--
cgit v1.2.3

From b104a35d32025ca740539db2808aa3385d0f30eb Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Wed, 30 Jul 2014 16:08:24 -0700
Subject: mm, thp: do not allow thp faults to avoid cpuset restrictions

The page allocator relies on __GFP_WAIT to determine if ALLOC_CPUSET
should be set in allocflags.  ALLOC_CPUSET controls if a page allocation
should be restricted only to the set of allowed cpuset mems.

Transparent hugepages clears __GFP_WAIT when defrag is disabled to
prevent the fault path from using memory compaction or direct reclaim.
Thus, it is unfairly able to allocate outside of its cpuset mems
restriction as a side-effect.

This patch ensures that ALLOC_CPUSET is only cleared when the gfp mask
is truly GFP_ATOMIC by verifying it is also not a thp allocation.

Signed-off-by: David Rientjes
Reported-by: Alex Thorlton
Tested-by: Alex Thorlton
Cc: Bob Liu
Cc: Dave Hansen
Cc: Hedi Berriche
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Kirill A. Shutemov
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Srivatsa S. Bhat
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8bcfe3ae20cb..ef44ad736ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-	const gfp_t wait = gfp_mask & __GFP_WAIT;
+	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	 * The caller may dip into page reserves a bit more if the caller
 	 * cannot run direct reclaim, or if the caller has realtime scheduling
 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
 	 */
 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-	if (!wait) {
+	if (atomic) {
 		/*
-		 * Not worth trying to allocate harder for
-		 * __GFP_NOMEMALLOC even if it can't schedule.
+		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+		 * if it can't schedule.
 		 */
-		if (!(gfp_mask & __GFP_NOMEMALLOC))
+		if (!(gfp_mask & __GFP_NOMEMALLOC))
 			alloc_flags |= ALLOC_HARDER;
 		/*
-		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+		 * comment for __cpuset_node_allowed_softwall().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
--
cgit v1.2.3
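For illustration, here is a minimal userspace model of the classification the
patch above changes.  It is a sketch, not kernel code: the flag values are
invented for the example, and the real logic lives in gfp_to_alloc_flags() in
mm/page_alloc.c.  It only demonstrates that a THP fault, which clears
__GFP_WAIT but carries __GFP_NO_KSWAPD, is no longer treated as atomic and
therefore keeps ALLOC_CPUSET:

  #include <stdio.h>

  /* Illustrative flag values only; the real kernel masks differ. */
  #define __GFP_WAIT            0x01u
  #define __GFP_NO_KSWAPD       0x02u
  #define __GFP_HIGH            0x04u
  #define __GFP_NOMEMALLOC      0x08u

  #define ALLOC_WMARK_MIN       0x10
  #define ALLOC_CPUSET          0x20
  #define ALLOC_HIGH            0x40
  #define ALLOC_HARDER          0x80

  /* Model of the fixed classification in gfp_to_alloc_flags(). */
  static int gfp_to_alloc_flags(unsigned int gfp_mask)
  {
          int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
          /* Truly atomic: can neither direct-reclaim nor defer to kswapd. */
          const int atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));

          if (gfp_mask & __GFP_HIGH)
                  alloc_flags |= ALLOC_HIGH;

          if (atomic) {
                  if (!(gfp_mask & __GFP_NOMEMALLOC))
                          alloc_flags |= ALLOC_HARDER;
                  /* Only genuinely atomic callers may escape cpuset mems. */
                  alloc_flags &= ~ALLOC_CPUSET;
          }
          return alloc_flags;
  }

  int main(void)
  {
          /* GFP_ATOMIC-like request: the cpuset restriction is dropped. */
          printf("atomic: cpuset kept = %d\n",
                 !!(gfp_to_alloc_flags(__GFP_HIGH) & ALLOC_CPUSET));
          /* THP fault with defrag disabled: stays bound to its cpuset. */
          printf("thp:    cpuset kept = %d\n",
                 !!(gfp_to_alloc_flags(__GFP_NO_KSWAPD) & ALLOC_CPUSET));
          return 0;
  }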
From 0193ed8225e1a79ed64632106ec3cc81798cb13c Mon Sep 17 00:00:00 2001
From: Alexandre Bounine
Date: Wed, 30 Jul 2014 16:08:26 -0700
Subject: rapidio/tsi721_dma: fix failure to obtain transaction descriptor

This is a bug fix for the situation when the function tsi721_desc_get()
fails to obtain a free transaction descriptor.

The bug usually results in a memory access crash dump when a data
transfer's scatter-gather list has more entries than the hardware buffer
descriptor ring can hold.  This fix ensures that an error is properly
returned to the caller instead of an invalid entry.

This patch is applicable to kernel versions starting from v3.5.

Signed-off-by: Alexandre Bounine
Cc: Matt Porter
Cc: Andre van Herk
Cc: Stef van Os
Cc: Vinod Koul
Cc: Dan Williams
Cc: [3.5+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/rapidio/devices/tsi721_dma.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261c..44341dc5b148 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
 			"desc %p not ACKed\n", tx_desc);
 	}
 
+	if (ret == NULL) {
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"%s: unable to obtain tx descriptor\n", __func__);
+		goto err_out;
+	}
+
 	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
 	if (i == bdma_chan->bd_num - 1) {
 		i = 0;
@@ -297,7 +303,7 @@
 	tx_desc->txd.phys = bdma_chan->bd_phys +
 				i * sizeof(struct tsi721_dma_desc);
 	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
 	spin_unlock_bh(&bdma_chan->lock);
 
 	return ret;
--
cgit v1.2.3
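The shape of this fix is the classic single-exit unlock pattern.  Below is a
standalone userspace sketch of the same pattern; the ring and descriptor
types are hypothetical stand-ins (pthread mutex instead of the kernel
spinlock, malloc instead of the driver's descriptor pool), not the tsi721
driver's real structures:

  #include <stdio.h>
  #include <stdlib.h>
  #include <pthread.h>

  struct ring {
          pthread_mutex_t lock;
          int free_slots;
  };

  struct desc { int id; };

  /* Models tsi721_desc_get(): it may legitimately find no free descriptor. */
  static struct desc *desc_get(struct ring *r)
  {
          struct desc *ret = NULL;

          pthread_mutex_lock(&r->lock);

          if (r->free_slots > 0) {
                  ret = malloc(sizeof(*ret));
                  if (ret)
                          r->free_slots--;
          }

          /*
           * The fix: bail out through the common unlock path instead of
           * falling through to the ring-indexing code with no descriptor.
           */
          if (ret == NULL) {
                  fprintf(stderr, "%s: unable to obtain descriptor\n", __func__);
                  goto err_out;
          }

          ret->id = r->free_slots;
  err_out:
          pthread_mutex_unlock(&r->lock);
          return ret;     /* NULL propagates to the caller as an error */
  }

  int main(void)
  {
          struct ring r = { PTHREAD_MUTEX_INITIALIZER, 1 };

          free(desc_get(&r));     /* succeeds: one slot available */
          free(desc_get(&r));     /* fails cleanly: ring exhausted */
          return 0;
  }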
From 93a9eb39fad1b5fc9077776caa3af207883b254d Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Wed, 30 Jul 2014 16:08:28 -0700
Subject: hwpoison: fix hugetlbfs/thp precheck in hwpoison_user_mappings()

A recent fix from Chen Yucong, commit 0bc1f8b0682c ("hwpoison: fix the
handling path of the victimized page frame that belong to non-LRU"),
rejects going into the unmapping operation for hugetlbfs/thp pages, which
results in failing error containment on such pages.  This patch fixes it.

With this patch, hwpoison functional tests in the mce-test testsuite pass.

Signed-off-by: Naoya Horiguchi
Cc: Andi Kleen
Cc: Chen Yucong
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory-failure.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7211a73ba14d..3db261fdee4c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct page *hpage = *hpagep;
 	struct page *ppage;
 
-	if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
+	/*
+	 * Here we are interested only in user-mapped pages, so skip any
+	 * other types of pages.
+	 */
+	if (PageReserved(p) || PageSlab(p))
+		return SWAP_SUCCESS;
+	if (!(PageLRU(hpage) || PageHuge(p)))
 		return SWAP_SUCCESS;
 
 	/*
--
cgit v1.2.3

From 52089b14c08f6ee9886c40b031712383fa24ddd9 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Wed, 30 Jul 2014 16:08:30 -0700
Subject: hwpoison: call action_result() in failure path of hwpoison_user_mappings()

hwpoison_user_mappings() could fail for various reasons, so the printk()s
that report those reasons should be issued by each failure check inside
hwpoison_user_mappings().

Also, we currently don't call action_result() when hwpoison_user_mappings()
fails, which is inconsistent with the other exit points of the memory
error handler.  This patch fixes both messaging problems.

Signed-off-by: Naoya Horiguchi
Cc: Andi Kleen
Cc: Chen Yucong
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory-failure.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3db261fdee4c..a013bc94ebbe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -911,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (!page_mapped(hpage))
 		return SWAP_SUCCESS;
 
-	if (PageKsm(p))
+	if (PageKsm(p)) {
+		pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
 		return SWAP_FAIL;
+	}
 
 	if (PageSwapCache(p)) {
 		printk(KERN_ERR
@@ -1235,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 */
 	if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
 	    != SWAP_SUCCESS) {
-		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+		action_result(pfn, "unmapping failed", IGNORED);
 		res = -EBUSY;
 		goto out;
 	}
--
cgit v1.2.3

From 2bcf2e92c3918ce62ab4e934256e47e9a16d19c3 Mon Sep 17 00:00:00 2001
From: Michal Hocko
Date: Wed, 30 Jul 2014 16:08:33 -0700
Subject: memcg: oom_notify use-after-free fix

Paul Furtado has reported the following GPF:

  general protection fault: 0000 [#1] SMP
  Modules linked in: ipv6 dm_mod xen_netfront coretemp hwmon
  x86_pkg_temp_thermal crc32_pclmul crc32c_intel ghash_clmulni_intel
  aesni_intel ablk_helper cryptd lrw gf128mul glue_helper aes_x86_64
  microcode pcspkr ext4 jbd2 mbcache raid0 xen_blkfront
  CPU: 3 PID: 3062 Comm: java Not tainted 3.16.0-rc5 #1
  task: ffff8801cfe8f170 ti: ffff8801d2ec4000 task.ti: ffff8801d2ec4000
  RIP: e030:mem_cgroup_oom_synchronize+0x140/0x240
  RSP: e02b:ffff8801d2ec7d48  EFLAGS: 00010283
  RAX: 0000000000000001 RBX: ffff88009d633800 RCX: 000000000000000e
  RDX: fffffffffffffffe RSI: ffff88009d630200 RDI: ffff88009d630200
  RBP: ffff8801d2ec7da8 R08: 0000000000000012 R09: 00000000fffffffe
  R10: 0000000000000000 R11: 0000000000000000 R12: ffff88009d633800
  R13: ffff8801d2ec7d48 R14: dead000000100100 R15: ffff88009d633a30
  FS:  00007f1748bb4700(0000) GS:ffff8801def80000(0000) knlGS:0000000000000000
  CS:  e033 DS: 0000 ES: 0000 CR0: 000000008005003b
  CR2: 00007f4110300308 CR3: 00000000c05f7000 CR4: 0000000000002660
  Call Trace:
    pagefault_out_of_memory+0x18/0x90
    mm_fault_error+0xa9/0x1a0
    __do_page_fault+0x478/0x4c0
    do_page_fault+0x2c/0x40
    page_fault+0x28/0x30
  Code: 44 00 00 48 89 df e8 40 ca ff ff 48 85 c0 49 89 c4 74 35 4c 8b b0
  30 02 00 00 4c 8d b8 30 02 00 00 4d 39 fe 74 1b 0f 1f 44 00 00 <49> 8b
  7e 10 be 01 00 00 00 e8 42 d2 04 00 4d 8b 36 4d 39 fe 75
  RIP  mem_cgroup_oom_synchronize+0x140/0x240

Commit fb2a6fc56be6 ("mm: memcg: rework and document OOM waiting and
wakeup") moved mem_cgroup_oom_notify outside of memcg_oom_lock, assuming
it is protected by the hierarchical OOM-lock.

Although this is true for the notification part, the protection doesn't
cover unregistration of events, which can now happen in parallel, so
mem_cgroup_oom_notify can see an already unlinked and/or freed
mem_cgroup_eventfd_list.

Fix this by using memcg_oom_lock also in mem_cgroup_oom_notify.

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=80881

Fixes: fb2a6fc56be6 ("mm: memcg: rework and document OOM waiting and wakeup")
Signed-off-by: Michal Hocko
Reported-by: Paul Furtado
Tested-by: Paul Furtado
Acked-by: Johannes Weiner
Cc: [3.12+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb0e6eb..1f14a430c656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 {
 	struct mem_cgroup_eventfd_list *ev;
 
+	spin_lock(&memcg_oom_lock);
+
 	list_for_each_entry(ev, &memcg->oom_notify, list)
 		eventfd_signal(ev->eventfd, 1);
+
+	spin_unlock(&memcg_oom_lock);
 	return 0;
 }
--
cgit v1.2.3
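A userspace model of the race the memcg patch closes, assuming pthreads in
place of the kernel spinlock and a hand-rolled list in place of
list_for_each_entry(): once the notifier walk and unregistration serialize
on the same lock, the walk can no longer step on a freed entry:

  #include <pthread.h>
  #include <stdio.h>

  /* Minimal doubly-linked list of notification entries. */
  struct ev {
          struct ev *next, *prev;
          int fd;                 /* stands in for the eventfd */
  };

  static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;
  static struct ev head = { &head, &head, -1 };

  /* Registration/unregistration both take oom_lock, as memcg's do. */
  static void unregister_ev(struct ev *e)
  {
          pthread_mutex_lock(&oom_lock);
          e->prev->next = e->next;
          e->next->prev = e->prev;
          pthread_mutex_unlock(&oom_lock);
  }

  /*
   * The fixed notifier: walking the list under the same lock means a
   * concurrent unregister_ev() cannot unlink and free an entry out from
   * under the traversal.
   */
  static void oom_notify(void)
  {
          struct ev *e;

          pthread_mutex_lock(&oom_lock);
          for (e = head.next; e != &head; e = e->next)
                  printf("signal fd %d\n", e->fd);
          pthread_mutex_unlock(&oom_lock);
  }

  int main(void)
  {
          struct ev a = { &head, &head, 42 };

          head.next = head.prev = &a;
          oom_notify();
          unregister_ev(&a);
          oom_notify();   /* list is empty; nothing dangles */
          return 0;
  }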
From b4903d6e8408e6137ee4666ee67ec566b74a0f05 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin
Date: Wed, 30 Jul 2014 16:08:35 -0700
Subject: mm: debugfs: move rounddown_pow_of_two() out from do_fault path

do_fault_around() expects fault_around_bytes rounded down to the nearest
page order.  Instead of calling rounddown_pow_of_two() every time in
fault_around_pages()/fault_around_mask(), we can do the rounding once,
when the user changes fault_around_bytes via the debugfs interface.

This also fixes a bug when the user sets fault_around_bytes to 0.  The
result of rounddown_pow_of_two(0) is not defined, therefore
fault_around_bytes == 0 doesn't work without this patch.  Let's set
fault_around_bytes to PAGE_SIZE if the user sets it to something less
than PAGE_SIZE.

[akpm@linux-foundation.org: tweak code layout]
Fixes: a9b0f861 ("mm: nominate faultaround area in bytes rather than page order")
Signed-off-by: Andrey Ryabinin
Reported-by: Sasha Levin
Acked-by: Kirill A. Shutemov
Cc: [3.15.x]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 7e8d8205b610..8b44f765b645 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
 
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order.  It's what do_fault_around() expects to see.
- */
 static inline unsigned long fault_around_pages(void)
 {
-	return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+	return fault_around_bytes >> PAGE_SHIFT;
 }
 
 static inline unsigned long fault_around_mask(void)
 {
-	return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+	return ~(fault_around_bytes - 1) & PAGE_MASK;
 }
-
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
 	return 0;
 }
 
+/*
+ * fault_around_pages() and fault_around_mask() expects fault_around_bytes
+ * rounded down to nearest page order. It's what do_fault_around() expects to
+ * see.
+ */
 static int fault_around_bytes_set(void *data, u64 val)
 {
 	if (val / PAGE_SIZE > PTRS_PER_PTE)
 		return -EINVAL;
-	fault_around_bytes = val;
+	if (val > PAGE_SIZE)
+		fault_around_bytes = rounddown_pow_of_two(val);
+	else
+		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
--
cgit v1.2.3
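To make the before/after behavior concrete, here is a self-contained
userspace sketch; PAGE_SHIFT and the rounddown_pow_of_two() helper are
simplified stand-ins for the kernel's.  Rounding now happens once in the
setter, and a value of 0, for which rounddown_pow_of_two() is undefined,
is clamped to PAGE_SIZE:

  #include <stdio.h>

  #define PAGE_SHIFT      12
  #define PAGE_SIZE       (1UL << PAGE_SHIFT)

  static unsigned long fault_around_bytes = 1UL << 16;  /* 65536, a power of two */

  /* Userspace stand-in: largest power of two <= val (undefined for val == 0). */
  static unsigned long rounddown_pow_of_two(unsigned long val)
  {
          while (val & (val - 1))
                  val &= val - 1;   /* clear the lowest set bit */
          return val;
  }

  /* After the patch, the fault-path reader is trivial: no per-fault rounding. */
  static unsigned long fault_around_pages(void)
  {
          return fault_around_bytes >> PAGE_SHIFT;
  }

  /* Rounding happens once, at set time; 0 is clamped to PAGE_SIZE. */
  static void fault_around_bytes_set(unsigned long val)
  {
          if (val > PAGE_SIZE)
                  fault_around_bytes = rounddown_pow_of_two(val);
          else
                  fault_around_bytes = PAGE_SIZE;
  }

  int main(void)
  {
          fault_around_bytes_set(100000); /* rounds down to 65536 */
          printf("%lu pages\n", fault_around_pages());    /* 16 */
          fault_around_bytes_set(0);      /* clamped, no undefined rounding */
          printf("%lu pages\n", fault_around_pages());    /* 1 */
          return 0;
  }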
From 75325189c9e4df5f67eec5ec5b0d90759084887b Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Wed, 30 Jul 2014 16:08:37 -0700
Subject: mm: fix filemap.c pagecache_get_page() kernel-doc warnings

Fix kernel-doc warnings in mm/filemap.c: pagecache_get_page():

  Warning(..//mm/filemap.c:1054): No description found for parameter 'cache_gfp_mask'
  Warning(..//mm/filemap.c:1054): No description found for parameter 'radix_gfp_mask'
  Warning(..//mm/filemap.c:1054): Excess function parameter 'gfp_mask' description in 'pagecache_get_page'

Fixes: 2457aec63745 ("mm: non-atomically mark page accessed during page cache allocation where possible")

[mgorman@suse.de: change everything]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Randy Dunlap
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/filemap.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09..900edfaf6df5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
- * PCG flags modify how the page is returned
+ * PCG flags modify how the page is returned.
  *
  * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: Page is return locked
 * FGP_CREAT: If page is not present then a new page is allocated using
- *	@gfp_mask and added to the page cache and the VM's LRU
- *	list. The page is returned locked and with an increased
- *	refcount. Otherwise, %NULL is returned.
+ *	@cache_gfp_mask and added to the page cache and the VM's LRU
+ *	list. If radix tree nodes are allocated during page cache
+ *	insertion then @radix_gfp_mask is used. The page is returned
+ *	locked and with an increased refcount. Otherwise, %NULL is
+ *	returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
--
cgit v1.2.3
From 8f1d26d0e59b9676587c54578f976709b625d6e9 Mon Sep 17 00:00:00 2001
From: Atsushi Kumagai
Date: Wed, 30 Jul 2014 16:08:39 -0700
Subject: kexec: export free_huge_page to VMCOREINFO

PG_head_mask was added into VMCOREINFO to filter huge pages in commit
b3acc56bfe1 ("kexec: save PG_head_mask in VMCOREINFO"), but makedumpfile
still needs another symbol to filter *hugetlbfs* pages.

If a user hopes to filter user pages, makedumpfile tries to exclude them
by checking whether the page is anonymous, but hugetlbfs pages aren't
anonymous even though they, too, are user pages.

We know it's possible to detect them in the same way that PageHuge()
does, so we need the start address of free_huge_page():

  int PageHuge(struct page *page)
  {
          if (!PageCompound(page))
                  return 0;

          page = compound_head(page);
          return get_compound_page_dtor(page) == free_huge_page;
  }

For that reason, this patch makes free_huge_page() public in order to
export it to VMCOREINFO.

Signed-off-by: Atsushi Kumagai
Acked-by: Baoquan He
Cc: Vivek Goyal
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/hugetlb.h | 1 +
 kernel/kexec.c          | 2 ++
 mm/hugetlb.c            | 2 +-
 3 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc0754..a23c096b3080 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a94124..23a088fec3c0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -1619,6 +1620,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 #endif
 	VMCOREINFO_NUMBER(PG_head_mask);
 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+	VMCOREINFO_SYMBOL(free_huge_page);
 
 	arch_crash_save_vmcoreinfo();
 	update_vmcoreinfo_note();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9221c02ed9e2..7a0a73d2fcff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
 	return NULL;
 }
 
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
 {
 	/*
 	 * Can't pass hstate in here because it is called from the
--
cgit v1.2.3
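As a standalone model of the detection trick the message describes, the
sketch below uses a hypothetical, much-simplified struct page (a plain
function pointer in place of the kernel's encoded compound destructor).
Once free_huge_page's address is known, a hugetlbfs page, head or tail, can
be recognized by comparing the head page's destructor against it:

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-in for struct page; not the kernel's layout. */
  struct page {
          bool compound;                          /* PageCompound() */
          struct page *head;                      /* compound_head() */
          void (*compound_dtor)(struct page *);   /* get_compound_page_dtor() */
  };

  static void free_huge_page(struct page *page)
  {
          (void)page;     /* hugetlbfs teardown would go here */
  }

  /* Same shape as the PageHuge() quoted in the commit message. */
  static int page_huge(struct page *page)
  {
          if (!page->compound)
                  return 0;
          page = page->head;
          return page->compound_dtor == free_huge_page;
  }

  int main(void)
  {
          struct page head  = { true, &head, free_huge_page };
          struct page tail  = { true, &head, NULL };
          struct page plain = { false, NULL, NULL };

          printf("head: %d tail: %d plain: %d\n",
                 page_huge(&head), page_huge(&tail), page_huge(&plain));
          return 0;
  }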
From e0198b290dcd8313bdf313a0d083033d5c01d761 Mon Sep 17 00:00:00 2001
From: Josh Triplett
Date: Wed, 30 Jul 2014 16:08:42 -0700
Subject: Josh has moved

My IBM email addresses haven't worked for years; also map some
old-but-functional forwarding addresses to my canonical address.

Update my GPG key fingerprint; I moved to 4096R a long time ago.

Update description.

Signed-off-by: Josh Triplett
Cc: "Paul E. McKenney"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 .mailmap                | 5 +++++
 CREDITS                 | 7 ++++---
 MAINTAINERS             | 2 +-
 kernel/rcu/rcutorture.c | 4 ++--
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/.mailmap b/.mailmap
index df1baba43a64..1ad68731fb47 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik
 Jens Axboe
 Jens Osterkamp
 John Stultz
+
+
+
+
+
 Juha Yrjola
 Juha Yrjola
 Juha Yrjola
diff --git a/CREDITS b/CREDITS
index 28ee1514b9de..a80b66718f66 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
 S: Australia
 
 N: Josh Triplett
-E: josh@freedesktop.org
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+E: josh@joshtriplett.org
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
 D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
 
 N: Winfried Trümper
 E: winni@xpilot.org
diff --git a/MAINTAINERS b/MAINTAINERS
index 86efa7e213c2..95990dd2678c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7424,7 +7424,7 @@ S: Orphan
 F: drivers/net/wireless/ray*
 
 RCUTORTURE MODULE
-M: Josh Triplett
+M: Josh Triplett
 M: "Paul E. McKenney"
 L: linux-kernel@vger.kernel.org
 S: Supported
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f86e5ba..948a7693748e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
  * Copyright (C) IBM Corporation, 2005, 2006
  *
  * Authors: Paul E. McKenney
- *          Josh Triplett
+ *          Josh Triplett
  *
  * See also: Documentation/RCU/torture.txt
  */
@@ -51,7 +51,7 @@
 #include
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney and Josh Triplett ");
+MODULE_AUTHOR("Paul E. McKenney and Josh Triplett ");
 torture_param(int, fqs_duration, 0,
--
cgit v1.2.3