Diffstat (limited to 'lib')
-rw-r--r--	lib/net_utils.c        |   3
-rw-r--r--	lib/ref_tracker.c      | 179
-rw-r--r--	lib/scatterlist.c      | 269
-rw-r--r--	lib/test_bpf.c         |   3
-rw-r--r--	lib/test_ref_tracker.c |   2
-rw-r--r--	lib/ts_bm.c            |   4
6 files changed, 425 insertions, 35 deletions
diff --git a/lib/net_utils.c b/lib/net_utils.c
index c17201df3d08..42bb0473fb22 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -2,7 +2,8 @@
 #include <linux/string.h>
 #include <linux/if_ether.h>
 #include <linux/ctype.h>
-#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/hex.h>
 
 bool mac_pton(const char *s, u8 *mac)
 {
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index dc7b14aa3431..cf5609b1ca79 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -1,11 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
+
+#define pr_fmt(fmt) "ref_tracker: " fmt
+
 #include <linux/export.h>
+#include <linux/list_sort.h>
 #include <linux/ref_tracker.h>
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
 
 #define REF_TRACKER_STACK_ENTRIES 16
+#define STACK_BUF_SIZE 1024
 
 struct ref_tracker {
 	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
@@ -14,6 +19,141 @@ struct ref_tracker {
 	depot_stack_handle_t	free_stack_handle;
 };
 
+struct ref_tracker_dir_stats {
+	int total;
+	int count;
+	struct {
+		depot_stack_handle_t stack_handle;
+		unsigned int count;
+	} stacks[];
+};
+
+static struct ref_tracker_dir_stats *
+ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
+{
+	struct ref_tracker_dir_stats *stats;
+	struct ref_tracker *tracker;
+
+	stats = kmalloc(struct_size(stats, stacks, limit),
+			GFP_NOWAIT | __GFP_NOWARN);
+	if (!stats)
+		return ERR_PTR(-ENOMEM);
+	stats->total = 0;
+	stats->count = 0;
+
+	list_for_each_entry(tracker, &dir->list, head) {
+		depot_stack_handle_t stack = tracker->alloc_stack_handle;
+		int i;
+
+		++stats->total;
+		for (i = 0; i < stats->count; ++i)
+			if (stats->stacks[i].stack_handle == stack)
+				break;
+		if (i >= limit)
+			continue;
+		if (i >= stats->count) {
+			stats->stacks[i].stack_handle = stack;
+			stats->stacks[i].count = 0;
+			++stats->count;
+		}
+		++stats->stacks[i].count;
+	}
+
+	return stats;
+}
+
+struct ostream {
+	char *buf;
+	int size, used;
+};
+
+#define pr_ostream(stream, fmt, args...) \
+({ \
+	struct ostream *_s = (stream); \
+\
+	if (!_s->buf) { \
+		pr_err(fmt, ##args); \
+	} else { \
+		int ret, len = _s->size - _s->used; \
+		ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
+		_s->used += min(ret, len); \
+	} \
+})
+
+static void
+__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
+			     unsigned int display_limit, struct ostream *s)
+{
+	struct ref_tracker_dir_stats *stats;
+	unsigned int i = 0, skipped;
+	depot_stack_handle_t stack;
+	char *sbuf;
+
+	lockdep_assert_held(&dir->lock);
+
+	if (list_empty(&dir->list))
+		return;
+
+	stats = ref_tracker_get_stats(dir, display_limit);
+	if (IS_ERR(stats)) {
+		pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
+			   dir->name, dir, stats);
+		return;
+	}
+
+	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+
+	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
+		stack = stats->stacks[i].stack_handle;
+		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
+			sbuf[0] = 0;
+		pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
+			   stats->stacks[i].count, stats->total, sbuf);
+		skipped -= stats->stacks[i].count;
+	}
+
+	if (skipped)
+		pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
+			   dir->name, dir, skipped, stats->total);
+
+	kfree(sbuf);
+
+	kfree(stats);
+}
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+				  unsigned int display_limit)
+{
+	struct ostream os = {};
+
+	__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print_locked);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+			   unsigned int display_limit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dir->lock, flags);
+	ref_tracker_dir_print_locked(dir, display_limit);
+	spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
+{
+	struct ostream os = { .buf = buf, .size = size };
+	unsigned long flags;
+
+	spin_lock_irqsave(&dir->lock, flags);
+	__ref_tracker_dir_pr_ostream(dir, 16, &os);
+	spin_unlock_irqrestore(&dir->lock, flags);
+
+	return os.used;
+}
+EXPORT_SYMBOL(ref_tracker_dir_snprint);
+
 void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 {
 	struct ref_tracker *tracker, *n;
@@ -27,13 +167,13 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 		kfree(tracker);
 		dir->quarantine_avail++;
 	}
-	list_for_each_entry_safe(tracker, n, &dir->list, head) {
-		pr_err("leaked reference.\n");
-		if (tracker->alloc_stack_handle)
-			stack_depot_print(tracker->alloc_stack_handle);
+	if (!list_empty(&dir->list)) {
+		ref_tracker_dir_print_locked(dir, 16);
 		leak = true;
-		list_del(&tracker->head);
-		kfree(tracker);
+		list_for_each_entry_safe(tracker, n, &dir->list, head) {
+			list_del(&tracker->head);
+			kfree(tracker);
+		}
 	}
 	spin_unlock_irqrestore(&dir->lock, flags);
 	WARN_ON_ONCE(leak);
@@ -42,28 +182,6 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
 }
 EXPORT_SYMBOL(ref_tracker_dir_exit);
 
-void ref_tracker_dir_print(struct ref_tracker_dir *dir,
-			   unsigned int display_limit)
-{
-	struct ref_tracker *tracker;
-	unsigned long flags;
-	unsigned int i = 0;
-
-	spin_lock_irqsave(&dir->lock, flags);
-	list_for_each_entry(tracker, &dir->list, head) {
-		if (i < display_limit) {
-			pr_err("leaked reference.\n");
-			if (tracker->alloc_stack_handle)
-				stack_depot_print(tracker->alloc_stack_handle);
-			i++;
-		} else {
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&dir->lock, flags);
-}
-EXPORT_SYMBOL(ref_tracker_dir_print);
-
 int ref_tracker_alloc(struct ref_tracker_dir *dir,
 		      struct ref_tracker **trackerp,
 		      gfp_t gfp)
@@ -71,7 +189,7 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
 	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
 	struct ref_tracker *tracker;
 	unsigned int nr_entries;
-	gfp_t gfp_mask = gfp;
+	gfp_t gfp_mask = gfp | __GFP_NOWARN;
 	unsigned long flags;
 
 	WARN_ON_ONCE(dir->dead);
@@ -119,7 +237,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
 		return -EEXIST;
 	}
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
-	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
+	stack_handle = stack_depot_save(entries, nr_entries,
+					GFP_NOWAIT | __GFP_NOWARN);
 
 	spin_lock_irqsave(&dir->lock, flags);
 	if (tracker->dead) {
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d7519a8f308..e97d7060329e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
 #include <linux/kmemleak.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -1095,3 +1097,270 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 	return offset;
 }
 EXPORT_SYMBOL(sg_zero_buffer);
+
+/*
+ * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
+ * iterators, and add them to the scatterlist.
+ */
+static ssize_t extract_user_to_sg(struct iov_iter *iter,
+				  ssize_t maxsize,
+				  struct sg_table *sgtable,
+				  unsigned int sg_max,
+				  iov_iter_extraction_t extraction_flags)
+{
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	struct page **pages;
+	unsigned int npages;
+	ssize_t ret = 0, res;
+	size_t len, off;
+
+	/* We decant the page list into the tail of the scatterlist */
+	pages = (void *)sgtable->sgl +
+		array_size(sg_max, sizeof(struct scatterlist));
+	pages -= sg_max;
+
+	do {
+		res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
+					     extraction_flags, &off);
+		if (res < 0)
+			goto failed;
+
+		len = res;
+		maxsize -= len;
+		ret += len;
+		npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
+		sg_max -= npages;
+
+		for (; npages > 0; npages--) {
+			struct page *page = *pages;
+			size_t seg = min_t(size_t, PAGE_SIZE - off, len);
+
+			*pages++ = NULL;
+			sg_set_page(sg, page, seg, off);
+			sgtable->nents++;
+			sg++;
+			len -= seg;
+			off = 0;
+		}
+	} while (maxsize > 0 && sg_max > 0);
+
+	return ret;
+
+failed:
+	while (sgtable->nents > sgtable->orig_nents)
+		put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+	return res;
+}
+
+/*
+ * Extract up to sg_max pages from a BVEC-type iterator and add them to the
+ * scatterlist.  The pages are not pinned.
+ */
+static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
+				  ssize_t maxsize,
+				  struct sg_table *sgtable,
+				  unsigned int sg_max,
+				  iov_iter_extraction_t extraction_flags)
+{
+	const struct bio_vec *bv = iter->bvec;
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	unsigned long start = iter->iov_offset;
+	unsigned int i;
+	ssize_t ret = 0;
+
+	for (i = 0; i < iter->nr_segs; i++) {
+		size_t off, len;
+
+		len = bv[i].bv_len;
+		if (start >= len) {
+			start -= len;
+			continue;
+		}
+
+		len = min_t(size_t, maxsize, len - start);
+		off = bv[i].bv_offset + start;
+
+		sg_set_page(sg, bv[i].bv_page, len, off);
+		sgtable->nents++;
+		sg++;
+		sg_max--;
+
+		ret += len;
+		maxsize -= len;
+		if (maxsize <= 0 || sg_max == 0)
+			break;
+		start = 0;
+	}
+
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract up to sg_max pages from a KVEC-type iterator and add them to the
+ * scatterlist.  This can deal with vmalloc'd buffers as well as kmalloc'd or
+ * static buffers.  The pages are not pinned.
+ */
+static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
+				  ssize_t maxsize,
+				  struct sg_table *sgtable,
+				  unsigned int sg_max,
+				  iov_iter_extraction_t extraction_flags)
+{
+	const struct kvec *kv = iter->kvec;
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	unsigned long start = iter->iov_offset;
+	unsigned int i;
+	ssize_t ret = 0;
+
+	for (i = 0; i < iter->nr_segs; i++) {
+		struct page *page;
+		unsigned long kaddr;
+		size_t off, len, seg;
+
+		len = kv[i].iov_len;
+		if (start >= len) {
+			start -= len;
+			continue;
+		}
+
+		kaddr = (unsigned long)kv[i].iov_base + start;
+		off = kaddr & ~PAGE_MASK;
+		len = min_t(size_t, maxsize, len - start);
+		kaddr &= PAGE_MASK;
+
+		maxsize -= len;
+		ret += len;
+		do {
+			seg = min_t(size_t, len, PAGE_SIZE - off);
+			if (is_vmalloc_or_module_addr((void *)kaddr))
+				page = vmalloc_to_page((void *)kaddr);
+			else
+				page = virt_to_page(kaddr);
+
+			sg_set_page(sg, page, len, off);
+			sgtable->nents++;
+			sg++;
+			sg_max--;
+
+			len -= seg;
+			kaddr += PAGE_SIZE;
+			off = 0;
+		} while (len > 0 && sg_max > 0);
+
+		if (maxsize <= 0 || sg_max == 0)
+			break;
+		start = 0;
+	}
+
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract up to sg_max folios from an XARRAY-type iterator and add them to
+ * the scatterlist.  The pages are not pinned.
+ */
+static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
+				    ssize_t maxsize,
+				    struct sg_table *sgtable,
+				    unsigned int sg_max,
+				    iov_iter_extraction_t extraction_flags)
+{
+	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+	struct xarray *xa = iter->xarray;
+	struct folio *folio;
+	loff_t start = iter->xarray_start + iter->iov_offset;
+	pgoff_t index = start / PAGE_SIZE;
+	ssize_t ret = 0;
+	size_t offset, len;
+	XA_STATE(xas, xa, index);
+
+	rcu_read_lock();
+
+	xas_for_each(&xas, folio, ULONG_MAX) {
+		if (xas_retry(&xas, folio))
+			continue;
+		if (WARN_ON(xa_is_value(folio)))
+			break;
+		if (WARN_ON(folio_test_hugetlb(folio)))
+			break;
+
+		offset = offset_in_folio(folio, start);
+		len = min_t(size_t, maxsize, folio_size(folio) - offset);
+
+		sg_set_page(sg, folio_page(folio, 0), len, offset);
+		sgtable->nents++;
+		sg++;
+		sg_max--;
+
+		maxsize -= len;
+		ret += len;
+		if (maxsize <= 0 || sg_max == 0)
+			break;
+	}
+
+	rcu_read_unlock();
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/**
+ * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
+ * @iter: The iterator to extract from
+ * @maxsize: The amount of iterator to copy
+ * @sgtable: The scatterlist table to fill in
+ * @sg_max: Maximum number of elements in @sgtable that may be filled
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * add them to a scatterlist that refers to all of those bits, to a maximum
+ * addition of @sg_max elements.
+ *
+ * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
+ * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE-
+ * and DISCARD-type are not supported.
+ *
+ * No end mark is placed on the scatterlist; that's left to the caller.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * If successful, @sgtable->nents is updated to include the number of elements
+ * added and the number of bytes added is returned.  @sgtable->orig_nents is
+ * left unaltered.
+ *
+ * The iov_iter_extract_mode() function should be used to query how cleanup
+ * should be performed.
+ */
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
+			   struct sg_table *sgtable, unsigned int sg_max,
+			   iov_iter_extraction_t extraction_flags)
+{
+	if (maxsize == 0)
+		return 0;
+
+	switch (iov_iter_type(iter)) {
+	case ITER_UBUF:
+	case ITER_IOVEC:
+		return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
+					  extraction_flags);
+	case ITER_BVEC:
+		return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
+					  extraction_flags);
+	case ITER_KVEC:
+		return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
+					  extraction_flags);
+	case ITER_XARRAY:
+		return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
+					    extraction_flags);
+	default:
+		pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
+		WARN_ON_ONCE(1);
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL_GPL(extract_iter_to_sg);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ade9ac672adb..fa0833410ac1 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -15056,8 +15056,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 	int which, err;
 
 	/* Allocate the table of programs to be used for tall calls */
-	progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
-			GFP_KERNEL);
+	progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
 	if (!progs)
 		goto out_nomem;
 
diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c
index 19d7dec70cc6..49970a7c96f3 100644
--- a/lib/test_ref_tracker.c
+++ b/lib/test_ref_tracker.c
@@ -64,7 +64,7 @@ static int __init test_ref_tracker_init(void)
 {
 	int i;
 
-	ref_tracker_dir_init(&ref_dir, 100);
+	ref_tracker_dir_init(&ref_dir, 100, "selftest");
 
 	timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
 	mod_timer(&test_ref_tracker_timer, jiffies + 1);
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 1f2234221dd1..c8ecbf74ef29 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -60,10 +60,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
 	struct ts_bm *bm = ts_config_priv(conf);
 	unsigned int i, text_len, consumed = state->offset;
 	const u8 *text;
-	int shift = bm->patlen - 1, bs;
+	int bs;
 	const u8 icase = conf->flags & TS_IGNORECASE;
 
 	for (;;) {
+		int shift = bm->patlen - 1;
+
 		text_len = conf->get_next_block(consumed, &text, conf, state);
 
 		if (unlikely(text_len == 0))
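
For context, a minimal sketch of how a caller might consume the reworked ref_tracker reporting API, based only on the signatures visible in this diff (the three-argument ref_tracker_dir_init() with a name, as used by the selftest hunk, and ref_tracker_dir_snprint()); the foo_* names are hypothetical:

/* Hypothetical consumer of the new reporting entry points. */
static struct ref_tracker_dir foo_dir;

static void foo_init(void)
{
	/* The name given here is what the "%s@%pK ..." reports print;
	 * 100 mirrors the quarantine count used by the selftest. */
	ref_tracker_dir_init(&foo_dir, 100, "foo");
}

/* A debugfs-style read handler: capture the same report that
 * ref_tracker_dir_print() would send to the log into a buffer
 * instead.  Returns the number of bytes written. */
static int foo_report(char *buf, size_t size)
{
	return ref_tracker_dir_snprint(&foo_dir, buf, size);
}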
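Likewise, a minimal sketch of driving the new extract_iter_to_sg() from a kernel buffer, assuming iov_iter_kvec(), sg_init_table(), sg_mark_end() and ITER_SOURCE are available in the target tree; foo_buf_to_sg() is hypothetical and error handling is trimmed:

/* Wrap a kmalloc'd buffer in a KVEC iterator and fill sgl[] from it.
 * KVEC pages are not pinned, so there is nothing to release per page;
 * per the kerneldoc above, placing the end mark is the caller's job. */
static int foo_buf_to_sg(void *buf, size_t len,
			 struct scatterlist *sgl, unsigned int max_sgs)
{
	struct sg_table sgt = { .sgl = sgl };	/* nents starts at 0 */
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;
	ssize_t n;

	sg_init_table(sgl, max_sgs);
	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);

	n = extract_iter_to_sg(&iter, len, &sgt, max_sgs, 0);
	if (n <= 0)
		return n;

	sg_mark_end(&sgt.sgl[sgt.nents - 1]);
	return sgt.nents;
}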
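Finally, a note on the two small fixes. The test_bpf.c hunk is behaviour-preserving when nothing overflows; the conversion relies on this equivalence:

/* Equivalent in the non-overflowing case:
 *
 *	kzalloc(sizeof(*progs) +
 *		(ntests + 1) * sizeof(progs->ptrs[0]), GFP_KERNEL);
 *	kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
 *
 * but struct_size() (from <linux/overflow.h>) saturates to SIZE_MAX if
 * the multiply or add would overflow, so the allocation fails cleanly
 * instead of coming back undersized. */

The ts_bm.c hunk moves the shift initialization inside the outer loop, so the Boyer-Moore shift restarts from patlen - 1 for each block returned by get_next_block() rather than carrying a stale value across block boundaries.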