author    Yonghong Song <yhs@fb.com>    2020-07-23 11:41:14 -0700
committer Alexei Starovoitov <ast@kernel.org>    2020-07-25 20:16:33 -0700
commit    d6c4503cc29638f328e1a6e6fefbdbda401c28fc (patch)
tree      eff79cbe41008eab2c86846965b56e583b3464ba /kernel/bpf/map_iter.c
parent    a5cbe05a6673b85bed2a63ffcfea6a96c6410cff (diff)
bpf: Implement bpf iterator for hash maps
The bpf iterators for hash, percpu hash, lru hash and lru percpu hash
are implemented. During link time, bpf_iter_reg->check_target() will
check the map type and ensure the program's key/value accesses stay
within the map's defined key/value size limits.

For percpu hash and lru percpu hash maps, the bpf program will receive
values for all cpus. The map element bpf iterator infrastructure will
prepare the value properly before passing the value pointer to the bpf
program.

This patch set supports readonly map keys and read/write map values. It
does not support deleting map elements, e.g., from hash tables. If a
use case for this arises, the following mechanism can be used to
support map deletion for hashtab, etc.:
  - permit a new bpf program return value, e.g., 2, to let the bpf
    iterator know the map element should be removed;
  - since the bucket lock is taken, the to-be-deleted map element is
    queued;
  - once the bucket lock is released after all elements under the
    bucket are traversed, all queued map elements can be deleted.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200723184114.590470-1-yhs@fb.com
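For illustration, a minimal sketch of an iterator program of the kind this
check validates (not part of this commit; the map hashmap1, its key/value
types, and the program name dump_bpf_map_elem are hypothetical). The verifier
records the widest key access in prog->aux->max_rdonly_access and the widest
value access in prog->aux->max_rdwr_access, which bpf_iter_check_map() in the
diff below compares against the map's sizes:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example, assuming a vmlinux.h generated from kernel BTF. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} hashmap1 SEC(".maps");

SEC("iter/bpf_map_elem")
int dump_bpf_map_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	static const char fmt[] = "key %u val %llu\n";
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;
	__u64 data[2];

	/* key/value are NULL for the begin/end markers of an iteration */
	if (!key || !val)
		return 0;

	/* key accesses count toward max_rdonly_access, value accesses
	 * toward max_rdwr_access; values may be updated in place */
	data[0] = *key;
	data[1] = *val;
	bpf_seq_printf(seq, fmt, sizeof(fmt), data, sizeof(data));
	*val += 1;
	return 0;
}

char _license[] SEC("license") = "GPL";

For the percpu variants, ctx->value instead points to a buffer holding one
value per possible cpu, each slot rounded up to 8 bytes, which is why the
check below scales value_size by num_possible_cpus().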
Diffstat (limited to 'kernel/bpf/map_iter.c')
-rw-r--r--  kernel/bpf/map_iter.c  24
1 file changed, 23 insertions(+), 1 deletion(-)
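bpf_iter_check_map() runs when the iterator link is created. A userspace
sketch of that flow with current libbpf (assuming libbpf v1.0+ error
conventions; "iter.bpf.o" and the program/map names match the hypothetical
sketch above):

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_link *link;
	int map_fd, iter_fd;
	char buf[256];
	ssize_t n;

	obj = bpf_object__open_file("iter.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "dump_bpf_map_elem");
	map_fd = bpf_object__find_map_fd_by_name(obj, "hashmap1");
	if (!prog || map_fd < 0)
		return 1;

	/* link creation is where the check runs: -EINVAL for a non-hash
	 * map type, -EACCES if the program accesses key/value beyond the
	 * map's key_size/value_size */
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!link)
		return 1;

	/* each read() of the iterator fd walks the map elements */
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(iter_fd);
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}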
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index 8a1f9b3355d0..bcb68b55bf65 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -101,7 +101,29 @@ static struct bpf_iter_reg bpf_map_reg_info = {
 static int bpf_iter_check_map(struct bpf_prog *prog,
 			      struct bpf_iter_aux_info *aux)
 {
-	return -EINVAL;
+	u32 key_acc_size, value_acc_size, key_size, value_size;
+	struct bpf_map *map = aux->map;
+	bool is_percpu = false;
+
+	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH)
+		is_percpu = true;
+	else if (map->map_type != BPF_MAP_TYPE_HASH &&
+		 map->map_type != BPF_MAP_TYPE_LRU_HASH)
+		return -EINVAL;
+
+	key_acc_size = prog->aux->max_rdonly_access;
+	value_acc_size = prog->aux->max_rdwr_access;
+	key_size = map->key_size;
+	if (!is_percpu)
+		value_size = map->value_size;
+	else
+		value_size = round_up(map->value_size, 8) * num_possible_cpus();
+
+	if (key_acc_size > key_size || value_acc_size > value_size)
+		return -EACCES;
+
+	return 0;
 }
 
 DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,