author      Roman Gushchin <guro@fb.com>            2019-05-29 18:03:59 -0700
committer   Alexei Starovoitov <ast@kernel.org>     2019-05-31 16:52:56 -0700
commit      c85d69135a9175c50a823d04d62d932312d037b3 (patch)
tree        edd6ec707ebbf68a89fc1c3fc2b2d06364978ad3 /kernel/bpf/arraymap.c
parent      bpf: rework memlock-based memory accounting for maps (diff)
download    linux-dev-c85d69135a9175c50a823d04d62d932312d037b3.tar.xz
            linux-dev-c85d69135a9175c50a823d04d62d932312d037b3.zip
bpf: move memory size checks to bpf_map_charge_init()
Most bpf map types do similar checks and the same bytes-to-pages conversion during memory allocation and charging. Let's unify these checks by moving them into bpf_map_charge_init().

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
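For context, a minimal sketch of what the unified overflow check and bytes-to-pages conversion can look like inside bpf_map_charge_init(). The helper bpf_charge_memlock(), the -E2BIG return value, and the struct bpf_map_memory fields used here are assumptions based on the surrounding series, not code copied verbatim from this commit:

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages;
	struct user_struct *user;
	int ret;

	/* single overflow check, shared by all map types */
	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	/* single bytes-to-pages conversion, shared by all map types */
	pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;
	return 0;
}

Callers such as array_map_alloc() can then pass the raw byte size and simply propagate the error, as the hunk below shows.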
Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--    kernel/bpf/arraymap.c    8
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3552da4407d9..0349cbf23cdb 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* make sure there is no u32 overflow later in round_up() */
 	cost = array_size;
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-ENOMEM);
-	if (percpu) {
+	if (percpu)
 		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
-		if (cost >= U32_MAX - PAGE_SIZE)
-			return ERR_PTR(-ENOMEM);
-	}
-	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	ret = bpf_map_charge_init(&mem, cost);
 	if (ret < 0)
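After the patch, the charging path in array_map_alloc() reduces to the following. This is reconstructed from the hunk above; the line after "if (ret < 0)" is an assumption about the unchanged surrounding code:

	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);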