author     Jakub Kicinski <kuba@kernel.org>    2022-04-28 13:01:50 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2022-04-28 13:02:01 -0700
commit     0e55546b189fc5f1ce5149445d7df083f26d4f25 (patch)
tree       0ed58feac725c81dd27a4533bea24c328898e877 /kernel
parent     f3412b3879b4f7c4313b186b03940d4791345534 (diff)
parent     249aca0d3d631660aa3583c6a3559b75b6e971b4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
include/linux/netdevice.h
net/core/dev.c
  6510ea973d8d ("net: Use this_cpu_inc() to increment net->core_stats")
  794c24e9921f ("net-core: rx_otherhost_dropped to core_stats")
https://lore.kernel.org/all/20220428111903.5f4304e0@canb.auug.org.au/

drivers/net/wan/cosa.c
  d48fea8401cf ("net: cosa: fix error check return value of register_chrdev()")
  89fbca3307d4 ("net: wan: remove support for COSA and SRP synchronous serial boards")
https://lore.kernel.org/all/20220428112130.1f689e5e@canb.auug.org.au/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c         |  2
-rw-r--r--  kernel/events/internal.h     |  5
-rw-r--r--  kernel/events/ring_buffer.c  |  5
-rw-r--r--  kernel/kcov.c                |  7
-rw-r--r--  kernel/kprobes.c             |  2
-rw-r--r--  kernel/sched/fair.c          | 10
6 files changed, 17 insertions, 14 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 23bb19716ad3..7858bafffa9d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6247,7 +6247,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 again:
         mutex_lock(&event->mmap_mutex);
         if (event->rb) {
-                if (event->rb->nr_pages != nr_pages) {
+                if (data_page_nr(event->rb) != nr_pages) {
                         ret = -EINVAL;
                         goto unlock;
                 }
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 082832738c8f..5150d5f84c03 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
 }
 #endif
 
+static inline int data_page_nr(struct perf_buffer *rb)
+{
+        return rb->nr_pages << page_order(rb);
+}
+
 static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
         return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 52868716ec35..fb35b926024c 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
 }
 
 #else
-static int data_page_nr(struct perf_buffer *rb)
-{
-        return rb->nr_pages << page_order(rb);
-}
-
 static struct page *
 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 475524bd900a..b3732b210593 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -475,8 +475,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
         vma->vm_flags |= VM_DONTEXPAND;
         for (off = 0; off < size; off += PAGE_SIZE) {
                 page = vmalloc_to_page(kcov->area + off);
-                if (vm_insert_page(vma, vma->vm_start + off, page))
-                        WARN_ONCE(1, "vm_insert_page() failed");
+                res = vm_insert_page(vma, vma->vm_start + off, page);
+                if (res) {
+                        pr_warn_once("kcov: vm_insert_page() failed\n");
+                        return res;
+                }
         }
         return 0;
 exit:
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index dbe57df2e199..dd58c0be9ce2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
         struct kprobe_ctlblk *kcb;
 
         /* The data must NOT be null. This means rethook data structure is broken. */
-        if (WARN_ON_ONCE(!data))
+        if (WARN_ON_ONCE(!data) || !rp->handler)
                 return;
 
         __this_cpu_write(current_kprobe, &rp->kp);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d4bd299d67ab..a68482d66535 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3829,11 +3829,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
         se->avg.runnable_sum = se->avg.runnable_avg * divider;
 
-        se->avg.load_sum = divider;
-        if (se_weight(se)) {
-                se->avg.load_sum =
-                        div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
-        }
+        se->avg.load_sum = se->avg.load_avg * divider;
+        if (se_weight(se) < se->avg.load_sum)
+                se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
+        else
+                se->avg.load_sum = 1;
 
         enqueue_load_avg(cfs_rq, se);
         cfs_rq->avg.util_avg += se->avg.util_avg;
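
A note on the kernel/sched/fair.c hunk: with the old ordering, a small load_avg could make the division by the entity weight round down to zero, leaving load_sum == 0 while load_avg != 0. The new code computes load_avg * divider first and only divides when the weight is smaller, clamping the result to at least 1. Below is a minimal user-space sketch of that calculation; struct pelt_avg and attach_load_sum() are illustrative stand-ins rather than kernel APIs, and plain division replaces div_u64().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the sched_avg fields touched by the hunk above. */
struct pelt_avg {
        uint64_t load_avg;
        uint64_t load_sum;
};

/*
 * Sketch of the attach-time calculation: scale load_avg back up by the PELT
 * divider, then divide by the entity weight, clamping the result to at least
 * 1 so a non-zero load_avg is never paired with a zero load_sum.
 */
static void attach_load_sum(struct pelt_avg *avg, uint64_t weight, uint64_t divider)
{
        avg->load_sum = avg->load_avg * divider;
        if (weight < avg->load_sum)
                avg->load_sum = avg->load_sum / weight;   /* div_u64() in the kernel */
        else
                avg->load_sum = 1;
}

int main(void)
{
        struct pelt_avg avg = { .load_avg = 1, .load_sum = 0 };

        /* Corner case the patch targets: load_avg * divider smaller than the weight. */
        attach_load_sum(&avg, 1024, 100);
        printf("load_sum = %llu\n", (unsigned long long)avg.load_sum); /* prints 1, not 0 */

        return 0;
}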