From fc651001d2c5ca4f8b87efae2edb69fca94a6365 Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Wed, 22 May 2019 12:22:21 -0700
Subject: neighbor: Add tracepoint to __neigh_create

Add tracepoint to __neigh_create to enable debugging of new entries.

Signed-off-by: David Ahern
Signed-off-by: David S. Miller
---
 net/core/neighbour.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'net/core')

diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index dfa871061f14..a5556e4d3f96 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -587,6 +587,8 @@ static struct neighbour *___neigh_create(struct neigh_table *tbl,
 	int error;
 	struct neigh_hash_table *nht;
 
+	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
+
 	if (!n) {
 		rc = ERR_PTR(-ENOBUFS);
 		goto out;
--
cgit v1.2.3-59-g8ed1b


From 136bf27fc0e9376525b9b6d9a1aa08508a0d1ac2 Mon Sep 17 00:00:00 2001
From: Jiri Pirko
Date: Thu, 23 May 2019 10:43:35 +0200
Subject: devlink: add warning in case driver does not set port type

Prevent misbehavior of drivers that do not set the port type for a
longer period of time. Drivers should always set the port type; WARN
if that does not happen. Note that it is perfectly fine for the type
to be temporarily unset, during initialization and during a port type
change.

Signed-off-by: Jiri Pirko
Signed-off-by: David S. Miller
---
 include/net/devlink.h |  2 ++
 net/core/devlink.c    | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

(limited to 'net/core')

diff --git a/include/net/devlink.h b/include/net/devlink.h
index 1c4adfb4195a..151eb930d329 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -64,6 +65,7 @@ struct devlink_port {
 	enum devlink_port_type desired_type;
 	void *type_dev;
 	struct devlink_port_attrs attrs;
+	struct delayed_work type_warn_dw;
 };
 
 struct devlink_sb_pool_info {
diff --git a/net/core/devlink.c b/net/core/devlink.c
index d43bc52b8840..9716a7f382cb 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -5390,6 +5391,38 @@ void devlink_free(struct devlink *devlink)
 }
 EXPORT_SYMBOL_GPL(devlink_free);
 
+static void devlink_port_type_warn(struct work_struct *work)
+{
+	WARN(true, "Type was not set for devlink port.");
+}
+
+static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
+{
+	/* Ignore CPU and DSA flavours. */
+	return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
+	       devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA;
+}
+
+#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30)
+
+static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
+{
+	if (!devlink_port_type_should_warn(devlink_port))
+		return;
+	/* Schedule a work to WARN in case driver does not set port
+	 * type within timeout.
+	 */
+	schedule_delayed_work(&devlink_port->type_warn_dw,
+			      DEVLINK_PORT_TYPE_WARN_TIMEOUT);
+}
+
+static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
+{
+	if (!devlink_port_type_should_warn(devlink_port))
+		return;
+	cancel_delayed_work_sync(&devlink_port->type_warn_dw);
+}
+
 /**
  *	devlink_port_register - Register devlink port
  *
@@ -5419,6 +5452,8 @@ int devlink_port_register(struct devlink *devlink,
 	list_add_tail(&devlink_port->list, &devlink->port_list);
 	INIT_LIST_HEAD(&devlink_port->param_list);
 	mutex_unlock(&devlink->lock);
+	INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
+	devlink_port_type_warn_schedule(devlink_port);
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
 	return 0;
 }
@@ -5433,6 +5468,7 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
 {
 	struct devlink *devlink = devlink_port->devlink;
 
+	devlink_port_type_warn_cancel(devlink_port);
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
 	mutex_lock(&devlink->lock);
 	list_del(&devlink_port->list);
@@ -5446,6 +5482,7 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port,
 {
 	if (WARN_ON(!devlink_port->registered))
 		return;
+	devlink_port_type_warn_cancel(devlink_port);
 	spin_lock(&devlink_port->type_lock);
 	devlink_port->type = type;
 	devlink_port->type_dev = type_dev;
@@ -5519,6 +5556,7 @@ EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
 void devlink_port_type_clear(struct devlink_port *devlink_port)
 {
 	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
+	devlink_port_type_warn_schedule(devlink_port);
 }
 EXPORT_SYMBOL_GPL(devlink_port_type_clear);
--
cgit v1.2.3-59-g8ed1b


From 6dca9360a9d7a15cef215cea4a55575dbfd2ebdd Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva"
Date: Thu, 23 May 2019 17:56:53 -0500
Subject: flow_offload: use struct_size() in kzalloc()

One of the more common cases of allocation size calculations is finding
the size of a structure that has a zero-sized array at the end, along
with memory for some number of elements for that array. For example:

struct foo {
	int stuff;
	struct boo entry[];
};

instance = kzalloc(sizeof(struct foo) + count * sizeof(struct boo),
		   GFP_KERNEL);

Instead of leaving these open-coded and prone to type mistakes, we can
now use the new struct_size() helper:

instance = kzalloc(struct_size(instance, entry, count), GFP_KERNEL);

This code was detected with the help of Coccinelle.

Signed-off-by: Gustavo A. R. Silva
Signed-off-by: David S. Miller
---
 net/core/flow_offload.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'net/core')

diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 5ce7d47a960e..3d93e51b83e0 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -7,8 +7,7 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 {
 	struct flow_rule *rule;
 
-	rule = kzalloc(sizeof(struct flow_rule) +
-		       sizeof(struct flow_action_entry) * num_actions,
+	rule = kzalloc(struct_size(rule, action.entries, num_actions),
 		       GFP_KERNEL);
 	if (!rule)
 		return NULL;
--
cgit v1.2.3-59-g8ed1b


From 2544af0344bae65c51e350663ce95110445d7ec8 Mon Sep 17 00:00:00 2001
From: Matteo Croce
Date: Wed, 29 May 2019 17:13:48 +0200
Subject: net: avoid indirect calls in L4 checksum calculation

Commit 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up
indirect calls of builtin") introduces some macros to avoid doing
indirect calls. Use these helpers to remove two indirect calls in the
L4 checksum calculation for devices which don't have hardware support
for it.
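For readers new to these wrappers, here is a minimal sketch of the
mechanism, assuming the CONFIG_RETPOLINE variant of the macro in
include/linux/indirect_call_wrapper.h: if the function pointer matches
one expected builtin, the compiler can emit a direct, branch-predictable
call instead of a retpoline.

/* Sketch, not the verbatim kernel macro: when f is likely the known
 * builtin f1, call f1 directly; otherwise fall back to the indirect call.
 */
#define INDIRECT_CALL_1(f, f1, ...) \
	(likely((f) == (f1)) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

/* usage, as in the __skb_checksum() hunks below:
 * csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
 *			  skb->data + offset, copy, csum);
 */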
As a test, I generated packets with pktgen sent out to a dummy
interface with HW checksumming disabled, so that the checksum is
calculated for every transmitted packet. The packet rate measured with
an i7-6700K CPU and a single pktgen thread rose from 6143 to 6608 Kpps,
an increase of 7.5%.

Suggested-by: Davide Caratti
Signed-off-by: Matteo Croce
Signed-off-by: David S. Miller
---
 net/core/skbuff.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

(limited to 'net/core')

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e89be6282693..0c2e7d4946ef 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -76,6 +76,7 @@
 #include
 #include
 #include
+#include
 
 #include "datagram.h"
 
@@ -2507,7 +2508,8 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = ops->update(skb->data + offset, copy, csum);
+		csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
+				       skb->data + offset, copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
@@ -2534,9 +2536,13 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 					      frag->page_offset + offset - start,
 					      copy, p, p_off, p_len, copied) {
 			vaddr = kmap_atomic(p);
-			csum2 = ops->update(vaddr + p_off, p_len, 0);
+			csum2 = INDIRECT_CALL_1(ops->update,
+						csum_partial_ext,
+						vaddr + p_off, p_len, 0);
 			kunmap_atomic(vaddr);
-			csum = ops->combine(csum, csum2, pos, p_len);
+			csum = INDIRECT_CALL_1(ops->combine,
+					       csum_block_add_ext, csum,
+					       csum2, pos, p_len);
 			pos += p_len;
 		}
 
@@ -2559,7 +2565,8 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 				copy = len;
 			csum2 = __skb_checksum(frag_iter, offset - start,
 					       copy, 0, ops);
-			csum = ops->combine(csum, csum2, pos, copy);
+			csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
+					       csum, csum2, pos, copy);
 			if ((len -= copy) == 0)
 				return csum;
 			offset += copy;
--
cgit v1.2.3-59-g8ed1b


From d50836cda698f6966e63c2c7f718d7c2f687ec8a Mon Sep 17 00:00:00 2001
From: Roman Gushchin
Date: Wed, 29 May 2019 18:03:56 -0700
Subject: bpf: add memlock precharge for socket local storage

Socket local storage maps lack the memlock precharge check, which is
performed before the memory allocation for most other bpf map types.
Let's add it in order to unify all map types.
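For reference, the precharge helper this patch starts calling is quoted
below as it stands before the rework later in this series (it appears
verbatim in the syscall.c hunk that later removes it); note that it only
checks the RLIMIT_MEMLOCK headroom and does not charge anything yet:

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}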
Signed-off-by: Roman Gushchin Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- net/core/bpf_sk_storage.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'net/core') diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index cc9597a87770..9a8aaf8e235d 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -626,7 +626,9 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) struct bpf_sk_storage_map *smap; unsigned int i; u32 nbuckets; + u32 pages; u64 cost; + int ret; smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN); if (!smap) @@ -635,13 +637,19 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus())); nbuckets = 1U << smap->bucket_log; + cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); + pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + + ret = bpf_map_precharge_memlock(pages); + if (ret < 0) + return ERR_PTR(ret); + smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets, GFP_USER | __GFP_NOWARN); if (!smap->buckets) { kfree(smap); return ERR_PTR(-ENOMEM); } - cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); for (i = 0; i < nbuckets; i++) { INIT_HLIST_HEAD(&smap->buckets[i].list); @@ -651,7 +659,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size; smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) % BPF_SK_STORAGE_CACHE_SIZE; - smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + smap->map.pages = pages; return &smap->map; } -- cgit v1.2.3-59-g8ed1b From 3539b96e041c06e4317082816d90ec09160aeb11 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Wed, 29 May 2019 18:03:57 -0700 Subject: bpf: group memory related fields in struct bpf_map_memory Group "user" and "pages" fields of bpf_map into the bpf_map_memory structure. Later it can be extended with "memcg" and other related information. The main reason for a such change (beside cosmetics) is to pass bpf_map_memory structure to charging functions before the actual allocation of bpf_map. Signed-off-by: Roman Gushchin Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 10 +++++++--- kernel/bpf/arraymap.c | 2 +- kernel/bpf/cpumap.c | 4 ++-- kernel/bpf/devmap.c | 4 ++-- kernel/bpf/hashtab.c | 4 ++-- kernel/bpf/local_storage.c | 2 +- kernel/bpf/lpm_trie.c | 4 ++-- kernel/bpf/queue_stack_maps.c | 2 +- kernel/bpf/reuseport_array.c | 2 +- kernel/bpf/stackmap.c | 4 ++-- kernel/bpf/syscall.c | 19 ++++++++++--------- kernel/bpf/xskmap.c | 4 ++-- net/core/bpf_sk_storage.c | 2 +- net/core/sock_map.c | 4 ++-- 14 files changed, 36 insertions(+), 31 deletions(-) (limited to 'net/core') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2cc58fc0f413..2e7c1c40d949 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -66,6 +66,11 @@ struct bpf_map_ops { u64 imm, u32 *off); }; +struct bpf_map_memory { + u32 pages; + struct user_struct *user; +}; + struct bpf_map { /* The first two cachelines with read-mostly members of which some * are also accessed in fast-path (e.g. ops, max_entries). 
@@ -86,7 +91,7 @@ struct bpf_map { u32 btf_key_type_id; u32 btf_value_type_id; struct btf *btf; - u32 pages; + struct bpf_map_memory memory; bool unpriv_array; bool frozen; /* write-once */ /* 48 bytes hole */ @@ -94,8 +99,7 @@ struct bpf_map { /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. */ - struct user_struct *user ____cacheline_aligned; - atomic_t refcnt; + atomic_t refcnt ____cacheline_aligned; atomic_t usercnt; struct work_struct work; char name[BPF_OBJ_NAME_LEN]; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 584636c9e2eb..8fda24e78193 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -138,7 +138,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) /* copy mandatory map attributes */ bpf_map_init_from_attr(&array->map, attr); - array->map.pages = cost; + array->map.memory.pages = cost; array->elem_size = elem_size; if (percpu && bpf_array_alloc_percpu(array)) { diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index cf727d77c6c6..035268add724 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) cost += cpu_map_bitmap_size(attr) * num_possible_cpus(); if (cost >= U32_MAX - PAGE_SIZE) goto free_cmap; - cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; /* Notice returns -EPERM on if map size is larger than memlock limit */ - ret = bpf_map_precharge_memlock(cmap->map.pages); + ret = bpf_map_precharge_memlock(cmap->map.memory.pages); if (ret) { err = ret; goto free_cmap; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 1e525d70f833..f6c57efb1d0d 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -111,10 +111,10 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) goto free_dtab; - dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; /* if map size is larger than memlock limit, reject it early */ - err = bpf_map_precharge_memlock(dtab->map.pages); + err = bpf_map_precharge_memlock(dtab->map.memory.pages); if (err) goto free_dtab; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 0f2708fde5f7..15bf228d2e98 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -364,10 +364,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ goto free_htab; - htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; /* if map size is larger than memlock limit, reject it early */ - err = bpf_map_precharge_memlock(htab->map.pages); + err = bpf_map_precharge_memlock(htab->map.memory.pages); if (err) goto free_htab; diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index e48302ecb389..574325276650 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -303,7 +303,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) if (!map) return ERR_PTR(-ENOMEM); - map->map.pages = pages; + map->map.memory.pages = pages; /* copy mandatory map attributes */ bpf_map_init_from_attr(&map->map, attr); diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index e61630c2e50b..8e423a582760 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -578,9 +578,9 @@ static struct bpf_map *trie_alloc(union 
bpf_attr *attr) goto out_err; } - trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - ret = bpf_map_precharge_memlock(trie->map.pages); + ret = bpf_map_precharge_memlock(trie->map.memory.pages); if (ret) goto out_err; diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 0b140d236889..8a510e71d486 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -89,7 +89,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) bpf_map_init_from_attr(&qs->map, attr); - qs->map.pages = cost; + qs->map.memory.pages = cost; qs->size = size; raw_spin_lock_init(&qs->lock); diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 18e225de80ff..819515242739 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -176,7 +176,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) /* copy mandatory map attributes */ bpf_map_init_from_attr(&array->map, attr); - array->map.pages = cost; + array->map.memory.pages = cost; return &array->map; } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 950ab2f28922..08d4efff73ac 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -131,9 +131,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) bpf_map_init_from_attr(&smap->map, attr); smap->map.value_size = value_size; smap->n_buckets = n_buckets; - smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - err = bpf_map_precharge_memlock(smap->map.pages); + err = bpf_map_precharge_memlock(smap->map.memory.pages); if (err) goto free_smap; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1539774d78c7..8289a2ce14fc 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -222,19 +222,20 @@ static int bpf_map_init_memlock(struct bpf_map *map) struct user_struct *user = get_current_user(); int ret; - ret = bpf_charge_memlock(user, map->pages); + ret = bpf_charge_memlock(user, map->memory.pages); if (ret) { free_uid(user); return ret; } - map->user = user; + map->memory.user = user; return ret; } static void bpf_map_release_memlock(struct bpf_map *map) { - struct user_struct *user = map->user; - bpf_uncharge_memlock(user, map->pages); + struct user_struct *user = map->memory.user; + + bpf_uncharge_memlock(user, map->memory.pages); free_uid(user); } @@ -242,17 +243,17 @@ int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) { int ret; - ret = bpf_charge_memlock(map->user, pages); + ret = bpf_charge_memlock(map->memory.user, pages); if (ret) return ret; - map->pages += pages; + map->memory.pages += pages; return ret; } void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) { - bpf_uncharge_memlock(map->user, pages); - map->pages -= pages; + bpf_uncharge_memlock(map->memory.user, pages); + map->memory.pages -= pages; } static int bpf_map_alloc_id(struct bpf_map *map) @@ -395,7 +396,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) map->value_size, map->max_entries, map->map_flags, - map->pages * 1ULL << PAGE_SHIFT, + map->memory.pages * 1ULL << PAGE_SHIFT, map->id, READ_ONCE(map->frozen)); diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 686d244e798d..f816ee1a0fa0 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -40,10 +40,10 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) goto free_m; - 
m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	err = bpf_map_precharge_memlock(m->map.pages);
+	err = bpf_map_precharge_memlock(m->map.memory.pages);
 	if (err)
 		goto free_m;
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 9a8aaf8e235d..92581c3ff220 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -659,7 +659,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
 	smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
 			  BPF_SK_STORAGE_CACHE_SIZE;
-	smap->map.pages = pages;
+	smap->map.memory.pages = pages;
 
 	return &smap->map;
 }
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index be6092ac69f8..4eb5b6a1b29f 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -49,8 +49,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 		goto free_stab;
 	}
 
-	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	err = bpf_map_precharge_memlock(stab->map.pages);
+	stab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	err = bpf_map_precharge_memlock(stab->map.memory.pages);
 	if (err)
 		goto free_stab;
--
cgit v1.2.3-59-g8ed1b


From b936ca643ade11f265fa10e5fb71c20d9c5243f1 Mon Sep 17 00:00:00 2001
From: Roman Gushchin
Date: Wed, 29 May 2019 18:03:58 -0700
Subject: bpf: rework memlock-based memory accounting for maps

In order to unify the existing memlock charging code with the
memcg-based memory accounting, which will be added later, let's rework
the current scheme.

Currently the following design is used:
  1) .alloc() callback optionally checks if the allocation will likely
     succeed using bpf_map_precharge_memlock()
  2) .alloc() performs actual allocations
  3) .alloc() callback calculates map cost and sets map.memory.pages
  4) map_create() calls bpf_map_init_memlock() which sets
     map.memory.user and performs actual charging; in case of failure
     the map is destroyed
and on the free path:
  1) bpf_map_free_deferred() calls bpf_map_release_memlock(), which
     performs uncharge and releases the user
  2) .map_free() callback releases the memory

The scheme can be simplified and made more robust:
  1) .alloc() calculates map cost and calls bpf_map_charge_init()
  2) bpf_map_charge_init() sets map.memory.user and performs actual
     charge
  3) .alloc() performs actual allocations
and on the free path:
  1) .map_free() callback releases the memory
  2) bpf_map_charge_finish() performs uncharge and releases the user

The new scheme also allows reusing the bpf_map_charge_init()/finish()
functions for memcg-based accounting. Because charges are performed
before actual allocations and uncharges after freeing the memory, no
bogus memory pressure can be created.

In cases when the map structure is not available (e.g. it's not created
yet, or is already destroyed), an on-stack bpf_map_memory structure is
used. The charge can be transferred with the bpf_map_charge_move()
function.
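Put together, the new pattern in a map ->alloc() callback looks roughly
like the sketch below; 'example_map' is a hypothetical map type used
only for illustration, while the helpers and their ordering come from
the per-map hunks that follow:

/* sketch of the charge-before-allocate scheme, hypothetical map type */
static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;	/* on-stack charge holder */
	struct example_map *m;		/* hypothetical map structure */
	u64 cost = sizeof(*m);
	int err;

	/* charge before allocating, so a failed charge creates no
	 * bogus memory pressure
	 */
	err = bpf_map_charge_init(&mem,
				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
	if (err)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(cost, NUMA_NO_NODE);
	if (!m) {
		bpf_map_charge_finish(&mem);	/* undo the charge */
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	/* transfer the charge to the map once allocation succeeded */
	bpf_map_charge_move(&m->map.memory, &mem);
	return &m->map;
}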
Signed-off-by: Roman Gushchin Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 5 +++- kernel/bpf/arraymap.c | 10 +++++-- kernel/bpf/cpumap.c | 8 +++-- kernel/bpf/devmap.c | 13 ++++---- kernel/bpf/hashtab.c | 11 +++---- kernel/bpf/local_storage.c | 9 ++++-- kernel/bpf/lpm_trie.c | 5 ++-- kernel/bpf/queue_stack_maps.c | 9 ++++-- kernel/bpf/reuseport_array.c | 9 ++++-- kernel/bpf/stackmap.c | 30 +++++++++++-------- kernel/bpf/syscall.c | 69 +++++++++++++++++++++---------------------- kernel/bpf/xskmap.c | 9 +++--- net/core/bpf_sk_storage.c | 8 +++-- net/core/sock_map.c | 5 ++-- 14 files changed, 112 insertions(+), 88 deletions(-) (limited to 'net/core') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2e7c1c40d949..3c8f24f402bf 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -650,9 +650,12 @@ struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); -int bpf_map_precharge_memlock(u32 pages); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); +int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages); +void bpf_map_charge_finish(struct bpf_map_memory *mem); +void bpf_map_charge_move(struct bpf_map_memory *dst, + struct bpf_map_memory *src); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 8fda24e78193..3552da4407d9 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -83,6 +83,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) u32 elem_size, index_mask, max_entries; bool unpriv = !capable(CAP_SYS_ADMIN); u64 cost, array_size, mask64; + struct bpf_map_memory mem; struct bpf_array *array; elem_size = round_up(attr->value_size, 8); @@ -125,23 +126,26 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) } cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - ret = bpf_map_precharge_memlock(cost); + ret = bpf_map_charge_init(&mem, cost); if (ret < 0) return ERR_PTR(ret); /* allocate all map elements and zero-initialize them */ array = bpf_map_area_alloc(array_size, numa_node); - if (!array) + if (!array) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); + } array->index_mask = index_mask; array->map.unpriv_array = unpriv; /* copy mandatory map attributes */ bpf_map_init_from_attr(&array->map, attr); - array->map.memory.pages = cost; + bpf_map_charge_move(&array->map.memory, &mem); array->elem_size = elem_size; if (percpu && bpf_array_alloc_percpu(array)) { + bpf_map_charge_finish(&array->map.memory); bpf_map_area_free(array); return ERR_PTR(-ENOMEM); } diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 035268add724..c633c8d68023 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) cost += cpu_map_bitmap_size(attr) * num_possible_cpus(); if (cost >= U32_MAX - PAGE_SIZE) goto free_cmap; - cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; /* Notice returns -EPERM on if map size is larger than memlock limit */ - ret = bpf_map_precharge_memlock(cmap->map.memory.pages); + ret = bpf_map_charge_init(&cmap->map.memory, + round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); if (ret) { err = ret; goto free_cmap; @@ 
-121,7 +121,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) cmap->flush_needed = __alloc_percpu(cpu_map_bitmap_size(attr), __alignof__(unsigned long)); if (!cmap->flush_needed) - goto free_cmap; + goto free_charge; /* Alloc array for possible remote "destination" CPUs */ cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * @@ -133,6 +133,8 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) return &cmap->map; free_percpu: free_percpu(cmap->flush_needed); +free_charge: + bpf_map_charge_finish(&cmap->map.memory); free_cmap: kfree(cmap); return ERR_PTR(err); diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index f6c57efb1d0d..371bd880ed58 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -111,10 +111,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) goto free_dtab; - dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - - /* if map size is larger than memlock limit, reject it early */ - err = bpf_map_precharge_memlock(dtab->map.memory.pages); + /* if map size is larger than memlock limit, reject it */ + err = bpf_map_charge_init(&dtab->map.memory, + round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); if (err) goto free_dtab; @@ -125,19 +124,21 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) __alignof__(unsigned long), GFP_KERNEL | __GFP_NOWARN); if (!dtab->flush_needed) - goto free_dtab; + goto free_charge; dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *), dtab->map.numa_node); if (!dtab->netdev_map) - goto free_dtab; + goto free_charge; spin_lock(&dev_map_lock); list_add_tail_rcu(&dtab->list, &dev_map_list); spin_unlock(&dev_map_lock); return &dtab->map; +free_charge: + bpf_map_charge_finish(&dtab->map.memory); free_dtab: free_percpu(dtab->flush_needed); kfree(dtab); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 15bf228d2e98..b0bdc7b040ad 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -364,10 +364,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ goto free_htab; - htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - - /* if map size is larger than memlock limit, reject it early */ - err = bpf_map_precharge_memlock(htab->map.memory.pages); + /* if map size is larger than memlock limit, reject it */ + err = bpf_map_charge_init(&htab->map.memory, + round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); if (err) goto free_htab; @@ -376,7 +375,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) sizeof(struct bucket), htab->map.numa_node); if (!htab->buckets) - goto free_htab; + goto free_charge; if (htab->map.map_flags & BPF_F_ZERO_SEED) htab->hashrnd = 0; @@ -409,6 +408,8 @@ free_prealloc: prealloc_destroy(htab); free_buckets: bpf_map_area_free(htab->buckets); +free_charge: + bpf_map_charge_finish(&htab->map.memory); free_htab: kfree(htab); return ERR_PTR(err); diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 574325276650..e49bfd4f4f6d 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -272,6 +272,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) { int numa_node = bpf_map_attr_numa_node(attr); struct bpf_cgroup_storage_map *map; + struct bpf_map_memory mem; u32 pages; int ret; @@ -294,16 +295,18 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >> PAGE_SHIFT; - ret = 
bpf_map_precharge_memlock(pages); + ret = bpf_map_charge_init(&mem, pages); if (ret < 0) return ERR_PTR(ret); map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map), __GFP_ZERO | GFP_USER, numa_node); - if (!map) + if (!map) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); + } - map->map.memory.pages = pages; + bpf_map_charge_move(&map->map.memory, &mem); /* copy mandatory map attributes */ bpf_map_init_from_attr(&map->map, attr); diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 8e423a582760..6345a8d2dcd0 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -578,9 +578,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) goto out_err; } - trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - - ret = bpf_map_precharge_memlock(trie->map.memory.pages); + ret = bpf_map_charge_init(&trie->map.memory, + round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); if (ret) goto out_err; diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 8a510e71d486..224cb0fd8f03 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -67,6 +67,7 @@ static int queue_stack_map_alloc_check(union bpf_attr *attr) static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) { int ret, numa_node = bpf_map_attr_numa_node(attr); + struct bpf_map_memory mem = {0}; struct bpf_queue_stack *qs; u64 size, queue_size, cost; @@ -77,19 +78,21 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - ret = bpf_map_precharge_memlock(cost); + ret = bpf_map_charge_init(&mem, cost); if (ret < 0) return ERR_PTR(ret); qs = bpf_map_area_alloc(queue_size, numa_node); - if (!qs) + if (!qs) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); + } memset(qs, 0, sizeof(*qs)); bpf_map_init_from_attr(&qs->map, attr); - qs->map.memory.pages = cost; + bpf_map_charge_move(&qs->map.memory, &mem); qs->size = size; raw_spin_lock_init(&qs->lock); diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 819515242739..5c6e25b1b9b1 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -151,6 +151,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) { int err, numa_node = bpf_map_attr_numa_node(attr); struct reuseport_array *array; + struct bpf_map_memory mem; u64 cost, array_size; if (!capable(CAP_SYS_ADMIN)) @@ -165,18 +166,20 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) return ERR_PTR(-ENOMEM); cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - err = bpf_map_precharge_memlock(cost); + err = bpf_map_charge_init(&mem, cost); if (err) return ERR_PTR(err); /* allocate all map elements and zero-initialize them */ array = bpf_map_area_alloc(array_size, numa_node); - if (!array) + if (!array) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); + } /* copy mandatory map attributes */ bpf_map_init_from_attr(&array->map, attr); - array->map.memory.pages = cost; + bpf_map_charge_move(&array->map.memory, &mem); return &array->map; } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 08d4efff73ac..8da24ca65d97 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -89,6 +89,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) { u32 value_size = attr->value_size; struct bpf_stack_map *smap; + struct bpf_map_memory mem; u64 cost, n_buckets; int err; @@ -116,40 +117,43 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) n_buckets = 
roundup_pow_of_two(attr->max_entries); cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); + if (cost >= U32_MAX - PAGE_SIZE) + return ERR_PTR(-E2BIG); + cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); if (cost >= U32_MAX - PAGE_SIZE) return ERR_PTR(-E2BIG); + err = bpf_map_charge_init(&mem, + round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + if (err) + return ERR_PTR(err); + smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr)); - if (!smap) + if (!smap) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); - - err = -E2BIG; - cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); - if (cost >= U32_MAX - PAGE_SIZE) - goto free_smap; + } bpf_map_init_from_attr(&smap->map, attr); smap->map.value_size = value_size; smap->n_buckets = n_buckets; - smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - - err = bpf_map_precharge_memlock(smap->map.memory.pages); - if (err) - goto free_smap; err = get_callchain_buffers(sysctl_perf_event_max_stack); if (err) - goto free_smap; + goto free_charge; err = prealloc_elems_and_freelist(smap); if (err) goto put_buffers; + bpf_map_charge_move(&smap->map.memory, &mem); + return &smap->map; put_buffers: put_callchain_buffers(); -free_smap: +free_charge: + bpf_map_charge_finish(&mem); bpf_map_area_free(smap); return ERR_PTR(err); } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8289a2ce14fc..4a5ebad99154 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -188,19 +188,6 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) map->numa_node = bpf_map_attr_numa_node(attr); } -int bpf_map_precharge_memlock(u32 pages) -{ - struct user_struct *user = get_current_user(); - unsigned long memlock_limit, cur; - - memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - cur = atomic_long_read(&user->locked_vm); - free_uid(user); - if (cur + pages > memlock_limit) - return -EPERM; - return 0; -} - static int bpf_charge_memlock(struct user_struct *user, u32 pages) { unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; @@ -214,29 +201,40 @@ static int bpf_charge_memlock(struct user_struct *user, u32 pages) static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) { - atomic_long_sub(pages, &user->locked_vm); + if (user) + atomic_long_sub(pages, &user->locked_vm); } -static int bpf_map_init_memlock(struct bpf_map *map) +int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages) { struct user_struct *user = get_current_user(); int ret; - ret = bpf_charge_memlock(user, map->memory.pages); + ret = bpf_charge_memlock(user, pages); if (ret) { free_uid(user); return ret; } - map->memory.user = user; - return ret; + + mem->pages = pages; + mem->user = user; + + return 0; } -static void bpf_map_release_memlock(struct bpf_map *map) +void bpf_map_charge_finish(struct bpf_map_memory *mem) { - struct user_struct *user = map->memory.user; + bpf_uncharge_memlock(mem->user, mem->pages); + free_uid(mem->user); +} - bpf_uncharge_memlock(user, map->memory.pages); - free_uid(user); +void bpf_map_charge_move(struct bpf_map_memory *dst, + struct bpf_map_memory *src) +{ + *dst = *src; + + /* Make sure src will not be used for the redundant uncharging. 
*/ + memset(src, 0, sizeof(struct bpf_map_memory)); } int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) @@ -304,11 +302,13 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) static void bpf_map_free_deferred(struct work_struct *work) { struct bpf_map *map = container_of(work, struct bpf_map, work); + struct bpf_map_memory mem; - bpf_map_release_memlock(map); + bpf_map_charge_move(&mem, &map->memory); security_bpf_map_free(map); /* implementation dependent freeing */ map->ops->map_free(map); + bpf_map_charge_finish(&mem); } static void bpf_map_put_uref(struct bpf_map *map) @@ -550,6 +550,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, static int map_create(union bpf_attr *attr) { int numa_node = bpf_map_attr_numa_node(attr); + struct bpf_map_memory mem; struct bpf_map *map; int f_flags; int err; @@ -574,7 +575,7 @@ static int map_create(union bpf_attr *attr) err = bpf_obj_name_cpy(map->name, attr->map_name); if (err) - goto free_map_nouncharge; + goto free_map; atomic_set(&map->refcnt, 1); atomic_set(&map->usercnt, 1); @@ -584,20 +585,20 @@ static int map_create(union bpf_attr *attr) if (!attr->btf_value_type_id) { err = -EINVAL; - goto free_map_nouncharge; + goto free_map; } btf = btf_get_by_fd(attr->btf_fd); if (IS_ERR(btf)) { err = PTR_ERR(btf); - goto free_map_nouncharge; + goto free_map; } err = map_check_btf(map, btf, attr->btf_key_type_id, attr->btf_value_type_id); if (err) { btf_put(btf); - goto free_map_nouncharge; + goto free_map; } map->btf = btf; @@ -609,15 +610,11 @@ static int map_create(union bpf_attr *attr) err = security_bpf_map_alloc(map); if (err) - goto free_map_nouncharge; - - err = bpf_map_init_memlock(map); - if (err) - goto free_map_sec; + goto free_map; err = bpf_map_alloc_id(map); if (err) - goto free_map; + goto free_map_sec; err = bpf_map_new_fd(map, f_flags); if (err < 0) { @@ -633,13 +630,13 @@ static int map_create(union bpf_attr *attr) return err; -free_map: - bpf_map_release_memlock(map); free_map_sec: security_bpf_map_free(map); -free_map_nouncharge: +free_map: btf_put(map->btf); + bpf_map_charge_move(&mem, &map->memory); map->ops->map_free(map); + bpf_map_charge_finish(&mem); return err; } diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index f816ee1a0fa0..a329dab7c7a4 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -40,10 +40,9 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) goto free_m; - m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - /* Notice returns -EPERM on if map size is larger than memlock limit */ - err = bpf_map_precharge_memlock(m->map.memory.pages); + err = bpf_map_charge_init(&m->map.memory, + round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); if (err) goto free_m; @@ -51,7 +50,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) m->flush_list = alloc_percpu(struct list_head); if (!m->flush_list) - goto free_m; + goto free_charge; for_each_possible_cpu(cpu) INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu)); @@ -65,6 +64,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) free_percpu: free_percpu(m->flush_list); +free_charge: + bpf_map_charge_finish(&m->map.memory); free_m: kfree(m); return ERR_PTR(err); diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 92581c3ff220..621a0b07ff11 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -640,13 +640,16 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) cost = sizeof(*smap->buckets) * 
nbuckets + sizeof(*smap);
 	pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	ret = bpf_map_precharge_memlock(pages);
-	if (ret < 0)
+	ret = bpf_map_charge_init(&smap->map.memory, pages);
+	if (ret < 0) {
+		kfree(smap);
 		return ERR_PTR(ret);
+	}
 
 	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
 				 GFP_USER | __GFP_NOWARN);
 	if (!smap->buckets) {
+		bpf_map_charge_finish(&smap->map.memory);
 		kfree(smap);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -659,7 +662,6 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
 	smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
 			  BPF_SK_STORAGE_CACHE_SIZE;
-	smap->map.memory.pages = pages;
 
 	return &smap->map;
 }
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 4eb5b6a1b29f..1028c922a149 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -49,8 +49,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 		goto free_stab;
 	}
 
-	stab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	err = bpf_map_precharge_memlock(stab->map.memory.pages);
+	err = bpf_map_charge_init(&stab->map.memory,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
 	if (err)
 		goto free_stab;
 
@@ -60,6 +60,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	if (stab->sks)
 		return &stab->map;
 	err = -ENOMEM;
+	bpf_map_charge_finish(&stab->map.memory);
 free_stab:
 	kfree(stab);
 	return ERR_PTR(err);
--
cgit v1.2.3-59-g8ed1b


From c85d69135a9175c50a823d04d62d932312d037b3 Mon Sep 17 00:00:00 2001
From: Roman Gushchin
Date: Wed, 29 May 2019 18:03:59 -0700
Subject: bpf: move memory size checks to bpf_map_charge_init()

Most bpf map types do similar size checks and bytes-to-pages conversion
during memory allocation and charging. Let's unify these checks by
moving them into bpf_map_charge_init().
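Assembled from the syscall.c hunks below, the helper after this change
takes a byte size and centralizes both the overflow check and the
bytes-to-pages conversion:

int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
{
	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct user_struct *user;
	int ret;

	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;

	return 0;
}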
Signed-off-by: Roman Gushchin Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 2 +- kernel/bpf/arraymap.c | 8 +------- kernel/bpf/cpumap.c | 5 +---- kernel/bpf/devmap.c | 5 +---- kernel/bpf/hashtab.c | 7 +------ kernel/bpf/local_storage.c | 5 +---- kernel/bpf/lpm_trie.c | 7 +------ kernel/bpf/queue_stack_maps.c | 4 ---- kernel/bpf/reuseport_array.c | 10 ++-------- kernel/bpf/stackmap.c | 8 +------- kernel/bpf/syscall.c | 9 +++++++-- kernel/bpf/xskmap.c | 5 +---- net/core/bpf_sk_storage.c | 4 +--- net/core/sock_map.c | 8 +------- 14 files changed, 20 insertions(+), 67 deletions(-) (limited to 'net/core') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3c8f24f402bf..e5a309e6a400 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -652,7 +652,7 @@ void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); -int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages); +int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size); void bpf_map_charge_finish(struct bpf_map_memory *mem); void bpf_map_charge_move(struct bpf_map_memory *dst, struct bpf_map_memory *src); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 3552da4407d9..0349cbf23cdb 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) /* make sure there is no u32 overflow later in round_up() */ cost = array_size; - if (cost >= U32_MAX - PAGE_SIZE) - return ERR_PTR(-ENOMEM); - if (percpu) { + if (percpu) cost += (u64)attr->max_entries * elem_size * num_possible_cpus(); - if (cost >= U32_MAX - PAGE_SIZE) - return ERR_PTR(-ENOMEM); - } - cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; ret = bpf_map_charge_init(&mem, cost); if (ret < 0) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index c633c8d68023..b31a71909307 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -106,12 +106,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *); cost += cpu_map_bitmap_size(attr) * num_possible_cpus(); - if (cost >= U32_MAX - PAGE_SIZE) - goto free_cmap; /* Notice returns -EPERM on if map size is larger than memlock limit */ - ret = bpf_map_charge_init(&cmap->map.memory, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + ret = bpf_map_charge_init(&cmap->map.memory, cost); if (ret) { err = ret; goto free_cmap; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 371bd880ed58..5ae7cce5ef16 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -108,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); cost += dev_map_bitmap_size(attr) * num_possible_cpus(); - if (cost >= U32_MAX - PAGE_SIZE) - goto free_dtab; /* if map size is larger than memlock limit, reject it */ - err = bpf_map_charge_init(&dtab->map.memory, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + err = bpf_map_charge_init(&dtab->map.memory, cost); if (err) goto free_dtab; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b0bdc7b040ad..d92e05d9979b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -360,13 +360,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) 
else cost += (u64) htab->elem_size * num_possible_cpus(); - if (cost >= U32_MAX - PAGE_SIZE) - /* make sure page count doesn't overflow */ - goto free_htab; - /* if map size is larger than memlock limit, reject it */ - err = bpf_map_charge_init(&htab->map.memory, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + err = bpf_map_charge_init(&htab->map.memory, cost); if (err) goto free_htab; diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index e49bfd4f4f6d..addd6fdceec8 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -273,7 +273,6 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) int numa_node = bpf_map_attr_numa_node(attr); struct bpf_cgroup_storage_map *map; struct bpf_map_memory mem; - u32 pages; int ret; if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) @@ -293,9 +292,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) /* max_entries is not used and enforced to be 0 */ return ERR_PTR(-EINVAL); - pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >> - PAGE_SHIFT; - ret = bpf_map_charge_init(&mem, pages); + ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map)); if (ret < 0) return ERR_PTR(ret); diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 6345a8d2dcd0..09334f13a8a0 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -573,13 +573,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) cost_per_node = sizeof(struct lpm_trie_node) + attr->value_size + trie->data_size; cost += (u64) attr->max_entries * cost_per_node; - if (cost >= U32_MAX - PAGE_SIZE) { - ret = -E2BIG; - goto out_err; - } - ret = bpf_map_charge_init(&trie->map.memory, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + ret = bpf_map_charge_init(&trie->map.memory, cost); if (ret) goto out_err; diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 224cb0fd8f03..f697647ceb54 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -73,10 +73,6 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) size = (u64) attr->max_entries + 1; cost = queue_size = sizeof(*qs) + size * attr->value_size; - if (cost >= U32_MAX - PAGE_SIZE) - return ERR_PTR(-E2BIG); - - cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; ret = bpf_map_charge_init(&mem, cost); if (ret < 0) diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 5c6e25b1b9b1..50c083ba978c 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -152,7 +152,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) int err, numa_node = bpf_map_attr_numa_node(attr); struct reuseport_array *array; struct bpf_map_memory mem; - u64 cost, array_size; + u64 array_size; if (!capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); @@ -160,13 +160,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) array_size = sizeof(*array); array_size += (u64)attr->max_entries * sizeof(struct sock *); - /* make sure there is no u32 overflow later in round_up() */ - cost = array_size; - if (cost >= U32_MAX - PAGE_SIZE) - return ERR_PTR(-ENOMEM); - cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - - err = bpf_map_charge_init(&mem, cost); + err = bpf_map_charge_init(&mem, array_size); if (err) return ERR_PTR(err); diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 8da24ca65d97..3d86072d8e32 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -117,14 +117,8 @@ static struct bpf_map 
*stack_map_alloc(union bpf_attr *attr) n_buckets = roundup_pow_of_two(attr->max_entries); cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); - if (cost >= U32_MAX - PAGE_SIZE) - return ERR_PTR(-E2BIG); cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); - if (cost >= U32_MAX - PAGE_SIZE) - return ERR_PTR(-E2BIG); - - err = bpf_map_charge_init(&mem, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + err = bpf_map_charge_init(&mem, cost); if (err) return ERR_PTR(err); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 4a5ebad99154..4c53cbd3329d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -205,11 +205,16 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) atomic_long_sub(pages, &user->locked_vm); } -int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages) +int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size) { - struct user_struct *user = get_current_user(); + u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; + struct user_struct *user; int ret; + if (size >= U32_MAX - PAGE_SIZE) + return -E2BIG; + + user = get_current_user(); ret = bpf_charge_memlock(user, pages); if (ret) { free_uid(user); diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index a329dab7c7a4..22066c28ba61 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -37,12 +37,9 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *); cost += sizeof(struct list_head) * num_possible_cpus(); - if (cost >= U32_MAX - PAGE_SIZE) - goto free_m; /* Notice returns -EPERM on if map size is larger than memlock limit */ - err = bpf_map_charge_init(&m->map.memory, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + err = bpf_map_charge_init(&m->map.memory, cost); if (err) goto free_m; diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 621a0b07ff11..f40e3d35fd9c 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -626,7 +626,6 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) struct bpf_sk_storage_map *smap; unsigned int i; u32 nbuckets; - u32 pages; u64 cost; int ret; @@ -638,9 +637,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus())); nbuckets = 1U << smap->bucket_log; cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); - pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - ret = bpf_map_charge_init(&smap->map.memory, pages); + ret = bpf_map_charge_init(&smap->map.memory, cost); if (ret < 0) { kfree(smap); return ERR_PTR(ret); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 1028c922a149..52d4faeee18b 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -44,13 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) /* Make sure page count doesn't overflow. */ cost = (u64) stab->map.max_entries * sizeof(struct sock *); - if (cost >= U32_MAX - PAGE_SIZE) { - err = -EINVAL; - goto free_stab; - } - - err = bpf_map_charge_init(&stab->map.memory, - round_up(cost, PAGE_SIZE) >> PAGE_SHIFT); + err = bpf_map_charge_init(&stab->map.memory, cost); if (err) goto free_stab; -- cgit v1.2.3-59-g8ed1b From 2638eb8b50cfc16240e0bb080b9afbf541a9b39d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 31 May 2019 18:27:09 +0200 Subject: net: ipv4: provide __rcu annotation for ifa_list ifa_list is protected by rcu, yet code doesn't reflect this. 
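A short sketch of the reader-side pattern the conversions below settle
on; use_address() is a hypothetical consumer standing in for whatever a
given call site does with the address:

	struct in_device *in_dev;
	const struct in_ifaddr *ifa;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* sparse now enforces an RCU accessor here */
		ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa)
			use_address(ifa->ifa_address);
	}
	rcu_read_unlock();

	/* or, for code that already holds RTNL: */
	ifa = rtnl_dereference(in_dev->ifa_list);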
Add the __rcu annotations and fix up all places that are now reported by sparse. I've done this in the same commit to not add intermediate patches that result in new warnings. Reported-by: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/infiniband/hw/i40iw/i40iw_utils.c | 12 ++-- drivers/infiniband/hw/nes/nes.c | 8 ++- drivers/infiniband/hw/usnic/usnic_ib_main.c | 15 +++-- drivers/net/ethernet/via/via-velocity.h | 2 +- drivers/net/plip/plip.c | 4 +- drivers/net/vmxnet3/vmxnet3_drv.c | 19 ++++-- drivers/net/wireless/ath/ath6kl/cfg80211.c | 4 +- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 +- drivers/staging/isdn/hysdn/hysdn_net.c | 6 +- include/linux/inetdevice.h | 21 ++---- net/core/netpoll.c | 10 ++- net/core/pktgen.c | 8 ++- net/ipv4/devinet.c | 88 ++++++++++++++++--------- net/mac80211/main.c | 4 +- net/netfilter/nf_nat_redirect.c | 12 ++-- 15 files changed, 134 insertions(+), 81 deletions(-) (limited to 'net/core') diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 337410f40860..016524683e17 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -174,10 +174,14 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, rcu_read_lock(); in = __in_dev_get_rcu(upper_dev); - if (!in->ifa_list) - local_ipaddr = 0; - else - local_ipaddr = ntohl(in->ifa_list->ifa_address); + local_ipaddr = 0; + if (in) { + struct in_ifaddr *ifa; + + ifa = rcu_dereference(in->ifa_list); + if (ifa) + local_ipaddr = ntohl(ifa->ifa_address); + } rcu_read_unlock(); } else { diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index e00add6d78ec..29b324726ea6 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -183,7 +183,13 @@ static int nes_inetaddr_event(struct notifier_block *notifier, rcu_read_lock(); in = __in_dev_get_rcu(upper_dev); - nesvnic->local_ipaddr = in->ifa_list->ifa_address; + if (in) { + struct in_ifaddr *ifa; + + ifa = rcu_dereference(in->ifa_list); + if (ifa) + nesvnic->local_ipaddr = ifa->ifa_address; + } rcu_read_unlock(); } else { nesvnic->local_ipaddr = ifa->ifa_address; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index d88d9f8a7f9a..34c1f9d6c915 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -427,11 +427,16 @@ static void *usnic_ib_device_add(struct pci_dev *dev) if (netif_carrier_ok(us_ibdev->netdev)) usnic_fwd_carrier_up(us_ibdev->ufdev); - ind = in_dev_get(netdev); - if (ind->ifa_list) - usnic_fwd_add_ipaddr(us_ibdev->ufdev, - ind->ifa_list->ifa_address); - in_dev_put(ind); + rcu_read_lock(); + ind = __in_dev_get_rcu(netdev); + if (ind) { + const struct in_ifaddr *ifa; + + ifa = rcu_dereference(ind->ifa_list); + if (ifa) + usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address); + } + rcu_read_unlock(); usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr, us_ibdev->ufdev->inaddr, &gid.raw[0]); diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index c0ecc6c7b5e0..cdfe7809e3c1 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1509,7 +1509,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr) rcu_read_lock(); in_dev = __in_dev_get_rcu(vptr->netdev); if (in_dev != NULL) { - ifa = (struct in_ifaddr *) in_dev->ifa_list; + ifa = 
rcu_dereference(in_dev->ifa_list); if (ifa != NULL) { memcpy(vptr->ip_addr, &ifa->ifa_address, 4); res = 0; diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index feb92ecd1880..3e3ac2e496a1 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -1012,7 +1012,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth) in_dev = __in_dev_get_rcu(dev); if (in_dev) { /* Any address will do - we take the first */ - const struct in_ifaddr *ifa = in_dev->ifa_list; + const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list); if (ifa) { memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); memset(eth->h_dest, 0xfc, 2); @@ -1107,7 +1107,7 @@ plip_open(struct net_device *dev) /* Any address will do - we take the first. We already have the first two bytes filled with 0xfc, from plip_init_dev(). */ - struct in_ifaddr *ifa=in_dev->ifa_list; + const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list); if (ifa != NULL) { memcpy(dev->dev_addr+2, &ifa->ifa_local, 4); } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 89984fcab01e..1b2a18ea855c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3651,13 +3651,19 @@ vmxnet3_suspend(struct device *device) } if (adapter->wol & WAKE_ARP) { - in_dev = in_dev_get(netdev); - if (!in_dev) + rcu_read_lock(); + + in_dev = __in_dev_get_rcu(netdev); + if (!in_dev) { + rcu_read_unlock(); goto skip_arp; + } - ifa = (struct in_ifaddr *)in_dev->ifa_list; - if (!ifa) + ifa = rcu_dereference(in_dev->ifa_list); + if (!ifa) { + rcu_read_unlock(); goto skip_arp; + } pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ sizeof(struct arphdr) + /* ARP header */ @@ -3677,7 +3683,9 @@ vmxnet3_suspend(struct device *device) /* The Unicast IPv4 address in 'tip' field. */ arpreq += 2 * ETH_ALEN + sizeof(u32); - *(u32 *)arpreq = ifa->ifa_address; + *(__be32 *)arpreq = ifa->ifa_address; + + rcu_read_unlock(); /* The mask for the relevant bits. 
*/ pmConf->filters[i].mask[0] = 0x00; @@ -3686,7 +3694,6 @@ vmxnet3_suspend(struct device *device) pmConf->filters[i].mask[3] = 0x00; pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ - in_dev_put(in_dev); pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; i++; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 5477a014e1fb..37cf602d8adf 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -2194,13 +2194,13 @@ static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif, if (!in_dev) return 0; - ifa = in_dev->ifa_list; + ifa = rtnl_dereference(in_dev->ifa_list); memset(&ips, 0, sizeof(ips)); /* Configure IP addr only if IP address count < MAX_IP_ADDRS */ while (index < MAX_IP_ADDRS && ifa) { ips[index] = ifa->ifa_local; - ifa = ifa->ifa_next; + ifa = rtnl_dereference(ifa->ifa_next); index++; } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index e11a4bb67172..5a7cdb981789 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3268,7 +3268,7 @@ static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv, in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev); if (!in_dev) continue; - ifa = in_dev->ifa_list; + ifa = rtnl_dereference(in_dev->ifa_list); if (!ifa || !ifa->ifa_local) continue; ips[i] = ifa->ifa_local; diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c index 8e9c34f33d86..bea37ae30ebb 100644 --- a/drivers/staging/isdn/hysdn/hysdn_net.c +++ b/drivers/staging/isdn/hysdn/hysdn_net.c @@ -70,9 +70,13 @@ net_open(struct net_device *dev) for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = 0xfc; if ((in_dev = dev->ip_ptr) != NULL) { - struct in_ifaddr *ifa = in_dev->ifa_list; + const struct in_ifaddr *ifa; + + rcu_read_lock(); + ifa = rcu_dereference(in_dev->ifa_list); if (ifa != NULL) memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local)); + rcu_read_unlock(); } } else memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN); diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index d5d05503a04b..3515ca64e638 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -26,7 +26,7 @@ struct in_device { struct net_device *dev; refcount_t refcnt; int dead; - struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ struct ip_mc_list __rcu * __rcu *mc_hash; @@ -136,7 +136,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) struct in_ifaddr { struct hlist_node hash; - struct in_ifaddr *ifa_next; + struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; @@ -206,22 +206,13 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr) return false; } -#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ - for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next) - -#define for_ifa(in_dev) { struct in_ifaddr *ifa; \ - for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next) - - -#define endfor_ifa(in_dev) } - #define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ - for (ifa = (in_dev)->ifa_list; ifa; \ - ifa = ifa->ifa_next) + for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ + ifa 
= rtnl_dereference(ifa->ifa_next)) #define in_dev_for_each_ifa_rcu(ifa, in_dev) \ - for (ifa = (in_dev)->ifa_list; ifa; \ - ifa = ifa->ifa_next) + for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ + ifa = rcu_dereference(ifa->ifa_next)) static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { diff --git a/net/core/netpoll.c b/net/core/netpoll.c index dd8b1a460d64..2cf27da1baeb 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -696,16 +696,22 @@ int netpoll_setup(struct netpoll *np) if (!np->local_ip.ip) { if (!np->ipv6) { + const struct in_ifaddr *ifa; + in_dev = __in_dev_get_rtnl(ndev); + if (!in_dev) + goto put_noaddr; - if (!in_dev || !in_dev->ifa_list) { + ifa = rtnl_dereference(in_dev->ifa_list); + if (!ifa) { +put_noaddr: np_err(np, "no IP address for %s, aborting\n", np->dev_name); err = -EDESTADDRREQ; goto put; } - np->local_ip.ip = in_dev->ifa_list->ifa_local; + np->local_ip.ip = ifa->ifa_local; np_info(np, "local IP %pI4\n", &np->local_ip.ip); } else { #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 319ad5490fb3..4cd120dc30ad 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2125,9 +2125,11 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) rcu_read_lock(); in_dev = __in_dev_get_rcu(pkt_dev->odev); if (in_dev) { - if (in_dev->ifa_list) { - pkt_dev->saddr_min = - in_dev->ifa_list->ifa_address; + const struct in_ifaddr *ifa; + + ifa = rcu_dereference(in_dev->ifa_list); + if (ifa) { + pkt_dev->saddr_min = ifa->ifa_address; pkt_dev->saddr_max = pkt_dev->saddr_min; } } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index b45421b2b734..ebaea05b4033 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -194,7 +194,8 @@ static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32); static BLOCKING_NOTIFIER_HEAD(inetaddr_chain); static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain); -static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, +static void inet_del_ifa(struct in_device *in_dev, + struct in_ifaddr __rcu **ifap, int destroy); #ifdef CONFIG_SYSCTL static int devinet_sysctl_register(struct in_device *idev); @@ -300,8 +301,8 @@ static void in_dev_rcu_put(struct rcu_head *head) static void inetdev_destroy(struct in_device *in_dev) { - struct in_ifaddr *ifa; struct net_device *dev; + struct in_ifaddr *ifa; ASSERT_RTNL(); @@ -311,7 +312,7 @@ static void inetdev_destroy(struct in_device *in_dev) ip_mc_destroy_dev(in_dev); - while ((ifa = in_dev->ifa_list) != NULL) { + while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) { inet_del_ifa(in_dev, &in_dev->ifa_list, 0); inet_free_ifa(ifa); } @@ -342,17 +343,20 @@ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b) return 0; } -static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, - int destroy, struct nlmsghdr *nlh, u32 portid) +static void __inet_del_ifa(struct in_device *in_dev, + struct in_ifaddr __rcu **ifap, + int destroy, struct nlmsghdr *nlh, u32 portid) { struct in_ifaddr *promote = NULL; - struct in_ifaddr *ifa, *ifa1 = *ifap; - struct in_ifaddr *last_prim = in_dev->ifa_list; + struct in_ifaddr *ifa, *ifa1; + struct in_ifaddr *last_prim; struct in_ifaddr *prev_prom = NULL; int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev); ASSERT_RTNL(); + ifa1 = rtnl_dereference(*ifap); + last_prim = rtnl_dereference(in_dev->ifa_list); if (in_dev->dead) goto no_promotions; @@ -361,9 +365,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct 
in_ifaddr **ifap, **/ if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) { - struct in_ifaddr **ifap1 = &ifa1->ifa_next; + struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next; - while ((ifa = *ifap1) != NULL) { + while ((ifa = rtnl_dereference(*ifap1)) != NULL) { if (!(ifa->ifa_flags & IFA_F_SECONDARY) && ifa1->ifa_scope <= ifa->ifa_scope) last_prim = ifa; @@ -396,7 +400,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, * and later to add them back with new prefsrc. Do this * while all addresses are on the device list. */ - for (ifa = promote; ifa; ifa = ifa->ifa_next) { + for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) { if (ifa1->ifa_mask == ifa->ifa_mask && inet_ifa_match(ifa1->ifa_address, ifa)) fib_del_ifaddr(ifa, ifa1); @@ -422,19 +426,24 @@ no_promotions: blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); if (promote) { - struct in_ifaddr *next_sec = promote->ifa_next; + struct in_ifaddr *next_sec; + next_sec = rtnl_dereference(promote->ifa_next); if (prev_prom) { - prev_prom->ifa_next = promote->ifa_next; - promote->ifa_next = last_prim->ifa_next; - last_prim->ifa_next = promote; + struct in_ifaddr *last_sec; + + last_sec = rtnl_dereference(last_prim->ifa_next); + rcu_assign_pointer(prev_prom->ifa_next, next_sec); + rcu_assign_pointer(promote->ifa_next, last_sec); + rcu_assign_pointer(last_prim->ifa_next, promote); } promote->ifa_flags &= ~IFA_F_SECONDARY; rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote); - for (ifa = next_sec; ifa; ifa = ifa->ifa_next) { + for (ifa = next_sec; ifa; + ifa = rtnl_dereference(ifa->ifa_next)) { if (ifa1->ifa_mask != ifa->ifa_mask || !inet_ifa_match(ifa1->ifa_address, ifa)) continue; @@ -446,7 +455,8 @@ no_promotions: inet_free_ifa(ifa1); } -static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, +static void inet_del_ifa(struct in_device *in_dev, + struct in_ifaddr __rcu **ifap, int destroy) { __inet_del_ifa(in_dev, ifap, destroy, NULL, 0); @@ -459,9 +469,10 @@ static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime); static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 portid, struct netlink_ext_ack *extack) { + struct in_ifaddr __rcu **last_primary, **ifap; struct in_device *in_dev = ifa->ifa_dev; - struct in_ifaddr *ifa1, **ifap, **last_primary; struct in_validator_info ivi; + struct in_ifaddr *ifa1; int ret; ASSERT_RTNL(); @@ -474,8 +485,10 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, ifa->ifa_flags &= ~IFA_F_SECONDARY; last_primary = &in_dev->ifa_list; - for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL; - ifap = &ifa1->ifa_next) { + ifap = &in_dev->ifa_list; + ifa1 = rtnl_dereference(*ifap); + + while (ifa1) { if (!(ifa1->ifa_flags & IFA_F_SECONDARY) && ifa->ifa_scope <= ifa1->ifa_scope) last_primary = &ifa1->ifa_next; @@ -491,6 +504,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, } ifa->ifa_flags |= IFA_F_SECONDARY; } + + ifap = &ifa1->ifa_next; + ifa1 = rtnl_dereference(*ifap); } /* Allow any devices that wish to register ifaddr validtors to weigh @@ -516,8 +532,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, ifap = last_primary; } - ifa->ifa_next = *ifap; - *ifap = ifa; + rcu_assign_pointer(ifa->ifa_next, *ifap); + rcu_assign_pointer(*ifap, ifa); inet_hash_insert(dev_net(in_dev->dev), ifa); @@ -617,10 +633,12 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, 
struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); + struct in_ifaddr __rcu **ifap; struct nlattr *tb[IFA_MAX+1]; struct in_device *in_dev; struct ifaddrmsg *ifm; - struct in_ifaddr *ifa, **ifap; + struct in_ifaddr *ifa; + int err = -EINVAL; ASSERT_RTNL(); @@ -637,7 +655,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } - for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; + for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL])) @@ -725,15 +743,20 @@ static void check_lifetime(struct work_struct *work) if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && age >= ifa->ifa_valid_lft) { - struct in_ifaddr **ifap; - - for (ifap = &ifa->ifa_dev->ifa_list; - *ifap != NULL; ifap = &(*ifap)->ifa_next) { - if (*ifap == ifa) { + struct in_ifaddr __rcu **ifap; + struct in_ifaddr *tmp; + + ifap = &ifa->ifa_dev->ifa_list; + tmp = rtnl_dereference(*ifap); + while (tmp) { + tmp = rtnl_dereference(tmp->ifa_next); + if (rtnl_dereference(*ifap) == ifa) { inet_del_ifa(ifa->ifa_dev, ifap, 1); break; } + ifap = &tmp->ifa_next; + tmp = rtnl_dereference(*ifap); } } else if (ifa->ifa_preferred_lft != INFINITY_LIFE_TIME && @@ -977,8 +1000,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr) { struct sockaddr_in sin_orig; struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr; + struct in_ifaddr __rcu **ifap = NULL; struct in_device *in_dev; - struct in_ifaddr **ifap = NULL; struct in_ifaddr *ifa = NULL; struct net_device *dev; char *colon; @@ -1049,7 +1072,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr) /* note: we only do this for a limited set of ioctls and only if the original address family was AF_INET. This is checked above. 
*/ - for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; + + for (ifap = &in_dev->ifa_list; + (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) { if (!strcmp(ifr->ifr_name, ifa->ifa_label) && sin_orig.sin_addr.s_addr == @@ -1062,7 +1087,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr) 4.3BSD-style and passed in junk so we fall back to comparing just the label */ if (!ifa) { - for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; + for (ifap = &in_dev->ifa_list; + (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) if (!strcmp(ifr->ifr_name, ifa->ifa_label)) break; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 2b608044ae23..1f11907dc528 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -354,11 +354,11 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, sdata_lock(sdata); /* Copy the addresses to the bss_conf list */ - ifa = idev->ifa_list; + ifa = rtnl_dereference(idev->ifa_list); while (ifa) { if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) bss_conf->arp_addr_list[c] = ifa->ifa_address; - ifa = ifa->ifa_next; + ifa = rtnl_dereference(ifa->ifa_next); c++; } diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c index 78a9e6454ff3..8598e80968e0 100644 --- a/net/netfilter/nf_nat_redirect.c +++ b/net/netfilter/nf_nat_redirect.c @@ -47,15 +47,17 @@ nf_nat_redirect_ipv4(struct sk_buff *skb, if (hooknum == NF_INET_LOCAL_OUT) { newdst = htonl(0x7F000001); } else { - struct in_device *indev; - struct in_ifaddr *ifa; + const struct in_device *indev; newdst = 0; indev = __in_dev_get_rcu(skb->dev); - if (indev && indev->ifa_list) { - ifa = indev->ifa_list; - newdst = ifa->ifa_local; + if (indev) { + const struct in_ifaddr *ifa; + + ifa = rcu_dereference(indev->ifa_list); + if (ifa) + newdst = ifa->ifa_local; } if (!newdst) -- cgit v1.2.3-59-g8ed1b From 1cc26450a855aa35a6d515be14c539944d5f9648 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Fri, 31 May 2019 14:05:06 -0700 Subject: flow_dissector: remove unused FLOW_DISSECTOR_F_STOP_AT_L3 flag This flag is not used by any caller, remove it. Signed-off-by: Stanislav Fomichev Signed-off-by: David S. Miller --- include/net/flow_dissector.h | 5 ++--- net/core/flow_dissector.c | 10 +--------- 2 files changed, 3 insertions(+), 12 deletions(-) (limited to 'net/core') diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 7c5a8d9a8d2a..797e19c2fc40 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -229,9 +229,8 @@ enum flow_dissector_key_id { }; #define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0) -#define FLOW_DISSECTOR_F_STOP_AT_L3 BIT(1) -#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(2) -#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(3) +#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1) +#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2) struct flow_dissector_key { enum flow_dissector_key_id key_id; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index edd622956083..c0559af9e5e5 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -757,7 +757,7 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb) * @hlen: packet header length, if @data is NULL use skb_headlen(skb) * @flags: flags that control the dissection process, e.g. - * FLOW_DISSECTOR_F_STOP_AT_L3. + * FLOW_DISSECTOR_F_STOP_AT_ENCAP. 
* * The function will try to retrieve individual keys into target specified * by flow_dissector from either the skbuff or a raw buffer specified by the @@ -922,11 +922,6 @@ proto_again: __skb_flow_dissect_ipv4(skb, flow_dissector, target_container, data, iph); - if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) { - fdret = FLOW_DISSECT_RET_OUT_GOOD; - break; - } - break; } case htons(ETH_P_IPV6): { @@ -975,9 +970,6 @@ proto_again: __skb_flow_dissect_ipv6(skb, flow_dissector, target_container, data, iph); - if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) - fdret = FLOW_DISSECT_RET_OUT_GOOD; - break; } case htons(ETH_P_8021AD): -- cgit v1.2.3-59-g8ed1b From 191ed2024de9fcfaab24106f9dbf7e544b07d633 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 4 Jun 2019 15:40:40 +0200 Subject: devlink: allow driver to update progress of flash update Introduce a function to be called from drivers during flash. It sends notification to userspace about flash update progress. Signed-off-by: Jiri Pirko Reviewed-by: Jakub Kicinski Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- include/net/devlink.h | 8 ++++ include/uapi/linux/devlink.h | 5 +++ net/core/devlink.c | 102 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+) (limited to 'net/core') diff --git a/include/net/devlink.h b/include/net/devlink.h index 151eb930d329..8f65356132be 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -741,6 +741,14 @@ void devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, enum devlink_health_reporter_state state); +void devlink_flash_update_begin_notify(struct devlink *devlink); +void devlink_flash_update_end_notify(struct devlink *devlink); +void devlink_flash_update_status_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long done, + unsigned long total); + #if IS_ENABLED(CONFIG_NET_DEVLINK) void devlink_compat_running_version(struct net_device *dev, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 5bb4ea67d84f..5287b42c181f 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -104,6 +104,8 @@ enum devlink_command { DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR, DEVLINK_CMD_FLASH_UPDATE, + DEVLINK_CMD_FLASH_UPDATE_END, /* notification only */ + DEVLINK_CMD_FLASH_UPDATE_STATUS, /* notification only */ /* add new commands above here */ __DEVLINK_CMD_MAX, @@ -331,6 +333,9 @@ enum devlink_attr { DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME, /* string */ DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, /* string */ + DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG, /* string */ + DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE, /* u64 */ + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, /* u64 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 9716a7f382cb..963178d32dda 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2673,6 +2673,108 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return devlink->ops->reload(devlink, info->extack); } +static int devlink_nl_flash_update_fill(struct sk_buff *msg, + struct devlink *devlink, + enum devlink_command cmd, + const char *status_msg, + const char *component, + unsigned long done, unsigned long total) +{ + void *hdr; + + hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + + if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS) + goto out; + + if (status_msg 
&& + nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG, + status_msg)) + goto nla_put_failure; + if (component && + nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, + component)) + goto nla_put_failure; + if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE, + done, DEVLINK_ATTR_PAD)) + goto nla_put_failure; + if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, + total, DEVLINK_ATTR_PAD)) + goto nla_put_failure; + +out: + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static void __devlink_flash_update_notify(struct devlink *devlink, + enum devlink_command cmd, + const char *status_msg, + const char *component, + unsigned long done, + unsigned long total) +{ + struct sk_buff *msg; + int err; + + WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE && + cmd != DEVLINK_CMD_FLASH_UPDATE_END && + cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + err = devlink_nl_flash_update_fill(msg, devlink, cmd, status_msg, + component, done, total); + if (err) + goto out_free_msg; + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + return; + +out_free_msg: + nlmsg_free(msg); +} + +void devlink_flash_update_begin_notify(struct devlink *devlink) +{ + __devlink_flash_update_notify(devlink, + DEVLINK_CMD_FLASH_UPDATE, + NULL, NULL, 0, 0); +} +EXPORT_SYMBOL_GPL(devlink_flash_update_begin_notify); + +void devlink_flash_update_end_notify(struct devlink *devlink) +{ + __devlink_flash_update_notify(devlink, + DEVLINK_CMD_FLASH_UPDATE_END, + NULL, NULL, 0, 0); +} +EXPORT_SYMBOL_GPL(devlink_flash_update_end_notify); + +void devlink_flash_update_status_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long done, + unsigned long total) +{ + __devlink_flash_update_notify(devlink, + DEVLINK_CMD_FLASH_UPDATE_STATUS, + status_msg, component, done, total); +} +EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify); + static int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info) { -- cgit v1.2.3-59-g8ed1b From da29e4b466e6916a52e0e2f60054f855c324a9c2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 3 Jun 2019 15:16:58 -0700 Subject: net/tls: fully initialize the msg wrapper skb If strparser gets cornered into starting a new message from an sk_buff which already has frags, it will allocate a new skb to become the "wrapper" around the fragments of the message. This new skb does not inherit any metadata fields. In case of TLS offload this may lead to unnecessarily re-encrypting the message, as skb->decrypted is not set for the wrapper skb. Try to be conservative and copy all fields of old skb strparser's user may reasonably need. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Reviewed-by: Simon Horman Signed-off-by: David S. 
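Driver-side usage of the devlink flash-update helpers added above is not part of the patch itself; here is a minimal sketch, assuming a hypothetical foo driver with FOO_CHUNK-sized writes, a foo_write_chunk() helper and a "fw.app" component name — only the devlink_flash_update_*_notify() calls come from the patch:

static int foo_flash(struct devlink *devlink, const struct firmware *fw)
{
	unsigned long done, total = fw->size;
	int err = 0;

	devlink_flash_update_begin_notify(devlink);
	for (done = 0; done < total; done += FOO_CHUNK) {
		unsigned long len = min_t(unsigned long, FOO_CHUNK,
					  total - done);

		err = foo_write_chunk(fw->data + done, len); /* hypothetical */
		if (err)
			break;
		devlink_flash_update_status_notify(devlink, "Flashing",
						   "fw.app", done + len,
						   total);
	}
	devlink_flash_update_end_notify(devlink);
	return err;
}

Each status call becomes a DEVLINK_CMD_FLASH_UPDATE_STATUS multicast on the config group, so userspace can render a progress bar from the done/total attributes.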
Miller --- include/linux/skbuff.h | 1 + net/core/skbuff.c | 25 +++++++++++++++++++++++++ net/strparser/strparser.c | 8 ++------ 3 files changed, 28 insertions(+), 6 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2ee5e63195c0..98ff5ac98caa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1063,6 +1063,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, int max_page_order, int *errcode, gfp_t gfp_mask); +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); /* Layout of fast clones : [skb1][skb2][fclone_ref] */ struct sk_buff_fclones { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4a712a00243a..b50a5e3ac4e4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -913,6 +913,31 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) #undef C } +/** + * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg + * @first: first sk_buff of the msg + */ +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) +{ + struct sk_buff *n; + + n = alloc_skb(0, GFP_ATOMIC); + if (!n) + return NULL; + + n->len = first->len; + n->data_len = first->len; + n->truesize = first->truesize; + + skb_shinfo(n)->frag_list = first; + + __copy_skb_header(n, first); + n->destructor = NULL; + + return n; +} +EXPORT_SYMBOL_GPL(alloc_skb_for_msg); + /** * skb_morph - morph one skb into another * @dst: the skb to receive the contents diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index e137698e8aef..3fe541b746b0 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -160,18 +160,14 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, return 0; } - skb = alloc_skb(0, GFP_ATOMIC); + skb = alloc_skb_for_msg(head); if (!skb) { STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; return 0; } - skb->len = head->len; - skb->data_len = head->len; - skb->truesize = head->truesize; - *_strp_msg(skb) = *_strp_msg(head); + strp->skb_nextp = &head->next; - skb_shinfo(skb)->frag_list = head; strp->skb_head = skb; head = skb; } else { -- cgit v1.2.3-59-g8ed1b From 5481d73f81549e2a05cbbb49867a9a560c5292df Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 3 Jun 2019 20:19:49 -0700 Subject: ipv4: Use accessors for fib_info nexthop data Use helpers to access fib_nh and fib_nhs fields of a fib_info. Drop the fib_dev macro which is an alias for the first nexthop. Replacements: fi->fib_dev --> fib_info_nh(fi, 0)->fib_nh_dev fi->fib_nh --> fib_info_nh(fi, 0) fi->fib_nh[i] --> fib_info_nh(fi, i) fi->fib_nhs --> fib_info_num_path(fi) where fib_info_nh(fi, i) returns fi->fib_nh[nhsel] and fib_info_num_path returns fi->fib_nhs. Move the existing fib_info_nhc to nexthop.h and define the new ones there. A later patch adds a check if a fib_info uses a nexthop object, and defining the helpers in nexthop.h avoid circular header dependencies. After this all remaining open coded references to fi->fib_nhs and fi->fib_nh are in: - fib_create_info and helpers used to lookup an existing fib_info entry, and - the netdev event functions fib_sync_down_dev and fib_sync_up. The latter two will not be reused for nexthops, and the fib_create_info will be updated to handle a nexthop in a fib_info. Signed-off-by: David Ahern Signed-off-by: David S. 
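As a hedged sketch of the conversion this describes — the helpers are the ones defined in the nexthop.h hunk below, while the enclosing function and the pr_debug() output are illustrative only:

static void foo_walk_nexthops(struct fib_info *fi)
{
	int nhsel;

	/* was: for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++)
	 *              nh = &fi->fib_nh[nhsel];
	 */
	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		const struct fib_nh *nh = fib_info_nh(fi, nhsel);

		pr_debug("nh %d dev %s\n", nhsel,
			 nh->fib_nh_dev ? nh->fib_nh_dev->name : "*");
	}
}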
Miller --- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 29 ++++++---- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 19 ++++--- drivers/net/ethernet/rocker/rocker_ofdpa.c | 25 +++++--- include/net/ip_fib.h | 6 -- include/net/nexthop.h | 15 +++++ net/core/filter.c | 3 +- net/ipv4/fib_frontend.c | 11 ++-- net/ipv4/fib_lookup.h | 1 + net/ipv4/fib_rules.c | 8 ++- net/ipv4/fib_semantics.c | 66 ++++++++++++---------- net/ipv4/fib_trie.c | 26 +++++---- net/ipv4/route.c | 3 +- 12 files changed, 132 insertions(+), 80 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 8212bfd05733..2cbfaa8da7fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Mellanox Technologies. */ #include +#include #include "lag.h" #include "lag_mp.h" #include "mlx5_core.h" @@ -110,6 +111,8 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, struct fib_info *fi) { struct lag_mp *mp = &ldev->lag_mp; + struct fib_nh *fib_nh0, *fib_nh1; + unsigned int nhs; /* Handle delete event */ if (event == FIB_EVENT_ENTRY_DEL) { @@ -120,9 +123,11 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, } /* Handle add/replace event */ - if (fi->fib_nhs == 1) { + nhs = fib_info_num_path(fi); + if (nhs == 1) { if (__mlx5_lag_is_active(ldev)) { - struct net_device *nh_dev = fi->fib_nh[0].fib_nh_dev; + struct fib_nh *nh = fib_info_nh(fi, 0); + struct net_device *nh_dev = nh->fib_nh_dev; int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); mlx5_lag_set_port_affinity(ldev, ++i); @@ -130,14 +135,16 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; } - if (fi->fib_nhs != 2) + if (nhs != 2) return; /* Verify next hops are ports of the same hca */ - if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev && - fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) && - !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev && - fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) { + fib_nh0 = fib_info_nh(fi, 0); + fib_nh1 = fib_info_nh(fi, 1); + if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev && + fib_nh1->fib_nh_dev == ldev->pf[1].netdev) && + !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev && + fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) { mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n"); return; } @@ -174,7 +181,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, mlx5_lag_set_port_affinity(ldev, i); } } else if (event == FIB_EVENT_NH_ADD && - fi->fib_nhs == 2) { + fib_info_num_path(fi) == 2) { mlx5_lag_set_port_affinity(ldev, 0); } } @@ -238,6 +245,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, struct mlx5_fib_event_work *fib_work; struct fib_entry_notifier_info *fen_info; struct fib_nh_notifier_info *fnh_info; + struct net_device *fib_dev; struct fib_info *fi; if (info->family != AF_INET) @@ -254,8 +262,9 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, fen_info = container_of(info, struct fib_entry_notifier_info, info); fi = fen_info->fi; - if (fi->fib_dev != ldev->pf[0].netdev && - fi->fib_dev != ldev->pf[1].netdev) { + fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; + if (fib_dev != ldev->pf[0].netdev && + fib_dev != ldev->pf[1].netdev) { return NOTIFY_DONE; } fib_work = mlx5_lag_init_fib_work(ldev, event); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 
index 0ec52be7cc33..4f781358aef1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -3816,23 +3817,25 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, - const struct fib_info *fi) + struct fib_info *fi) { - return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK || - mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL); + const struct fib_nh *nh = fib_info_nh(fi, 0); + + return nh->fib_nh_scope == RT_SCOPE_LINK || + mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); } static struct mlxsw_sp_nexthop_group * mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { + unsigned int nhs = fib_info_num_path(fi); struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_nexthop *nh; struct fib_nh *fib_nh; int i; int err; - nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs), - GFP_KERNEL); + nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); nh_grp->priv = fi; @@ -3840,11 +3843,11 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) nh_grp->neigh_tbl = &arp_tbl; nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi); - nh_grp->count = fi->fib_nhs; + nh_grp->count = nhs; fib_info_hold(fi); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - fib_nh = &fi->fib_nh[i]; + fib_nh = fib_info_nh(fi, i); err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); if (err) goto err_nexthop4_init; @@ -4282,9 +4285,9 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, struct mlxsw_sp_fib_entry *fib_entry) { + struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) }; u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id); - struct net_device *dev = fen_info->fi->fib_dev; struct mlxsw_sp_ipip_entry *ipip_entry; struct fib_info *fi = fen_info->fi; diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 30a49802fb51..47ed9d41047f 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "rocker.h" @@ -2286,8 +2287,8 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst, /* XXX support ECMP */ - nh = fi->fib_nh; - nh_on_port = (fi->fib_dev == ofdpa_port->dev); + nh = fib_info_nh(fi, 0); + nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev); has_gw = !!nh->fib_nh_gw4; if (has_gw && nh_on_port) { @@ -2737,11 +2738,13 @@ static int ofdpa_fib4_add(struct rocker *rocker, { struct ofdpa *ofdpa = rocker->wpriv; struct ofdpa_port *ofdpa_port; + struct fib_nh *nh; int err; if (ofdpa->fib_aborted) return 0; - ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); + nh = fib_info_nh(fen_info->fi, 0); + ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker); if (!ofdpa_port) return 0; err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst), @@ -2749,7 +2752,7 @@ static int ofdpa_fib4_add(struct rocker *rocker, fen_info->tb_id, 0); if (err) return err; - fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD; + nh->fib_nh_flags |= RTNH_F_OFFLOAD; return 0; } @@ -2758,13 +2761,15 @@ static int ofdpa_fib4_del(struct rocker *rocker, { 
struct ofdpa *ofdpa = rocker->wpriv; struct ofdpa_port *ofdpa_port; + struct fib_nh *nh; if (ofdpa->fib_aborted) return 0; - ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); + nh = fib_info_nh(fen_info->fi, 0); + ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker); if (!ofdpa_port) return 0; - fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; + nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, OFDPA_OP_FLAG_REMOVE); @@ -2784,14 +2789,16 @@ static void ofdpa_fib4_abort(struct rocker *rocker) spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) { + struct fib_nh *nh; + if (flow_entry->key.tbl_id != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) continue; - ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev, - rocker); + nh = fib_info_nh(flow_entry->fi, 0); + ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker); if (!ofdpa_port) continue; - flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; + nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, flow_entry); } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 70ba0302c8c9..42b1a806f6f5 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -153,7 +153,6 @@ struct fib_info { bool nh_updated; struct rcu_head rcu; struct fib_nh fib_nh[0]; -#define fib_dev fib_nh[0].fib_nh_dev }; @@ -190,11 +189,6 @@ struct fib_result_nl { int err; }; -static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel) -{ - return &fi->fib_nh[nhsel].nh_common; -} - #ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 #else diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 6e1b8f53624c..e501d77b82c8 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -192,4 +192,19 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh) nhi = rcu_dereference_rtnl(nh->nh_info); return nhi->reject_nh; } + +static inline unsigned int fib_info_num_path(const struct fib_info *fi) +{ + return fi->fib_nhs; +} + +static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel) +{ + return &fi->fib_nh[nhsel].nh_common; +} + +static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel) +{ + return &fi->fib_nh[nhsel]; +} #endif diff --git a/net/core/filter.c b/net/core/filter.c index 55bfc941d17a..2ae72bbfa6d2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include #include @@ -4674,7 +4675,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, if (res.type != RTN_UNICAST) return BPF_FIB_LKUP_RET_NOT_FWDED; - if (res.fi->fib_nhs > 1) + if (fib_info_num_path(res.fi) > 1) fib_select_path(net, &res, &fl4, NULL); if (check_mtu) { diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c7cdb8d0d164..a4691360b395 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -234,7 +235,9 @@ static inline unsigned int __inet_dev_addr_type(struct net *net, if (table) { ret = RTN_UNICAST; if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) { - if (!dev || dev == res.fi->fib_dev) + struct fib_nh *nh = fib_info_nh(res.fi, 0); + + if (!dev || dev == nh->fib_nh_dev) ret = res.type; } } @@ -321,8 +324,8 @@ bool 
fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev) #ifdef CONFIG_IP_ROUTE_MULTIPATH int ret; - for (ret = 0; ret < fi->fib_nhs; ret++) { - struct fib_nh *nh = &fi->fib_nh[ret]; + for (ret = 0; ret < fib_info_num_path(fi); ret++) { + const struct fib_nh *nh = fib_info_nh(fi, ret); if (nh->fib_nh_dev == dev) { dev_match = true; @@ -333,7 +336,7 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev) } } #else - if (fi->fib_nh[0].fib_nh_dev == dev) + if (fib_info_nh(fi, 0)->fib_nh_dev == dev) dev_match = true; #endif diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index 7945f0534db7..a68b5e21ec51 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -5,6 +5,7 @@ #include #include #include +#include struct fib_alias { struct hlist_node fa_list; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index cfec3af54c8d..ab06fd73b343 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -31,6 +31,7 @@ #include #include #include +#include #include struct fib4_rule { @@ -145,8 +146,11 @@ static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg struct fib_result *result = (struct fib_result *) arg->result; struct net_device *dev = NULL; - if (result->fi) - dev = result->fi->fib_dev; + if (result->fi) { + struct fib_nh *nh = fib_info_nh(result->fi, 0); + + dev = nh->fib_nh_dev; + } /* do not accept result if the route does * not meet the required prefix length diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 78648072783e..a37ff07718a8 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -65,13 +66,13 @@ static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE]; #define for_nexthops(fi) { \ int nhsel; const struct fib_nh *nh; \ for (nhsel = 0, nh = (fi)->fib_nh; \ - nhsel < (fi)->fib_nhs; \ + nhsel < fib_info_num_path((fi)); \ nh++, nhsel++) #define change_nexthops(fi) { \ int nhsel; struct fib_nh *nexthop_nh; \ for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ - nhsel < (fi)->fib_nhs; \ + nhsel < fib_info_num_path((fi)); \ nexthop_nh++, nhsel++) #else /* CONFIG_IP_ROUTE_MULTIPATH */ @@ -271,11 +272,13 @@ void fib_release_info(struct fib_info *fi) spin_unlock_bh(&fib_info_lock); } -static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) +static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi) { - const struct fib_nh *onh = ofi->fib_nh; + const struct fib_nh *onh; for_nexthops(fi) { + onh = fib_info_nh(ofi, nhsel); + if (nh->fib_nh_oif != onh->fib_nh_oif || nh->fib_nh_gw_family != onh->fib_nh_gw_family || nh->fib_nh_scope != onh->fib_nh_scope || @@ -296,8 +299,6 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) if (nh->fib_nh_gw_family == AF_INET6 && ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6)) return -1; - - onh++; } endfor_nexthops(fi); return 0; } @@ -326,7 +327,7 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi) return (val ^ (val >> 7) ^ (val >> 12)) & mask; } -static struct fib_info *fib_find_info(const struct fib_info *nfi) +static struct fib_info *fib_find_info(struct fib_info *nfi) { struct hlist_head *head; struct fib_info *fi; @@ -390,13 +391,14 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi) + nla_total_size(4) /* RTA_PRIORITY */ + nla_total_size(4) /* RTA_PREFSRC */ + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */ + unsigned int nhs = 
fib_info_num_path(fi); /* space for nested metrics */ payload += nla_total_size((RTAX_MAX * nla_total_size(4))); - if (fi->fib_nhs) { + if (nhs) { size_t nh_encapsize = 0; - /* Also handles the special case fib_nhs == 1 */ + /* Also handles the special case nhs == 1 */ /* each nexthop is packed in an attribute */ size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); @@ -416,8 +418,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi) } endfor_nexthops(fi); /* all nexthops are packed in a nested attribute */ - payload += nla_total_size((fi->fib_nhs * nhsize) + - nh_encapsize); + payload += nla_total_size((nhs * nhsize) + nh_encapsize); } @@ -584,6 +585,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, { struct net *net = fi->fib_net; struct fib_config fib_cfg; + struct fib_nh *nh; int ret; change_nexthops(fi) { @@ -646,24 +648,25 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, } endfor_nexthops(fi); ret = -EINVAL; - if (cfg->fc_oif && fi->fib_nh->fib_nh_oif != cfg->fc_oif) { + nh = fib_info_nh(fi, 0); + if (cfg->fc_oif && nh->fib_nh_oif != cfg->fc_oif) { NL_SET_ERR_MSG(extack, "Nexthop device index does not match RTA_OIF"); goto errout; } if (cfg->fc_gw_family) { - if (cfg->fc_gw_family != fi->fib_nh->fib_nh_gw_family || + if (cfg->fc_gw_family != nh->fib_nh_gw_family || (cfg->fc_gw_family == AF_INET && - fi->fib_nh->fib_nh_gw4 != cfg->fc_gw4) || + nh->fib_nh_gw4 != cfg->fc_gw4) || (cfg->fc_gw_family == AF_INET6 && - ipv6_addr_cmp(&fi->fib_nh->fib_nh_gw6, &cfg->fc_gw6))) { + ipv6_addr_cmp(&nh->fib_nh_gw6, &cfg->fc_gw6))) { NL_SET_ERR_MSG(extack, "Nexthop gateway does not match RTA_GATEWAY or RTA_VIA"); goto errout; } } #ifdef CONFIG_IP_ROUTE_CLASSID - if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) { + if (cfg->fc_flow && nh->nh_tclassid != cfg->fc_flow) { NL_SET_ERR_MSG(extack, "Nexthop class id does not match RTA_FLOW"); goto errout; @@ -679,7 +682,7 @@ static void fib_rebalance(struct fib_info *fi) int total; int w; - if (fi->fib_nhs < 2) + if (fib_info_num_path(fi) < 2) return; total = 0; @@ -761,27 +764,29 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, return 1; if (cfg->fc_oif || cfg->fc_gw_family) { + struct fib_nh *nh = fib_info_nh(fi, 0); + if (cfg->fc_encap) { if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap, - fi->fib_nh, cfg, extack)) + nh, cfg, extack)) return 1; } #ifdef CONFIG_IP_ROUTE_CLASSID if (cfg->fc_flow && - cfg->fc_flow != fi->fib_nh->nh_tclassid) + cfg->fc_flow != nh->nh_tclassid) return 1; #endif - if ((cfg->fc_oif && cfg->fc_oif != fi->fib_nh->fib_nh_oif) || + if ((cfg->fc_oif && cfg->fc_oif != nh->fib_nh_oif) || (cfg->fc_gw_family && - cfg->fc_gw_family != fi->fib_nh->fib_nh_gw_family)) + cfg->fc_gw_family != nh->fib_nh_gw_family)) return 1; if (cfg->fc_gw_family == AF_INET && - cfg->fc_gw4 != fi->fib_nh->fib_nh_gw4) + cfg->fc_gw4 != nh->fib_nh_gw4) return 1; if (cfg->fc_gw_family == AF_INET6 && - ipv6_addr_cmp(&cfg->fc_gw6, &fi->fib_nh->fib_nh_gw6)) + ipv6_addr_cmp(&cfg->fc_gw6, &nh->fib_nh_gw6)) return 1; return 0; @@ -1366,7 +1371,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, goto err_inval; } nh->fib_nh_scope = RT_SCOPE_NOWHERE; - nh->fib_nh_dev = dev_get_by_index(net, fi->fib_nh->fib_nh_oif); + nh->fib_nh_dev = dev_get_by_index(net, nh->fib_nh_oif); err = -ENODEV; if (!nh->fib_nh_dev) goto failure; @@ -1583,6 +1588,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, struct 
fib_info *fi, unsigned int flags) { + unsigned int nhs = fib_info_num_path(fi); struct nlmsghdr *nlh; struct rtmsg *rtm; @@ -1618,8 +1624,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, if (fi->fib_prefsrc && nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc)) goto nla_put_failure; - if (fi->fib_nhs == 1) { - struct fib_nh *nh = &fi->fib_nh[0]; + if (nhs == 1) { + const struct fib_nh *nh = fib_info_nh(fi, 0); unsigned char flags = 0; if (fib_nexthop_info(skb, &nh->nh_common, &flags, false) < 0) @@ -1838,6 +1844,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) hlist_for_each_entry_rcu(fa, fa_head, fa_list) { struct fib_info *next_fi = fa->fa_info; + struct fib_nh *nh; if (fa->fa_slen != slen) continue; @@ -1859,8 +1866,9 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) if (next_fi->fib_scope != res->scope || fa->fa_type != RTN_UNICAST) continue; - if (!next_fi->fib_nh[0].fib_nh_gw4 || - next_fi->fib_nh[0].fib_nh_scope != RT_SCOPE_LINK) + + nh = fib_info_nh(next_fi, 0); + if (!nh->fib_nh_gw4 || nh->fib_nh_scope != RT_SCOPE_LINK) continue; fib_alias_accessed(fa); @@ -2024,7 +2032,7 @@ void fib_select_path(struct net *net, struct fib_result *res, goto check_saddr; #ifdef CONFIG_IP_ROUTE_MULTIPATH - if (res->fi->fib_nhs > 1) { + if (fib_info_num_path(res->fi) > 1) { int h = fib_multipath_hash(net, fl4, skb, NULL); fib_select_multipath(res, h); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index b53ecef89d59..5c8a4d21b8e0 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1469,7 +1469,7 @@ found: } if (fi->fib_flags & RTNH_F_DEAD) continue; - for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { + for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel); if (nhc->nhc_flags & RTNH_F_DEAD) @@ -2717,14 +2717,18 @@ static void fib_route_seq_stop(struct seq_file *seq, void *v) rcu_read_unlock(); } -static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi) +static unsigned int fib_flag_trans(int type, __be32 mask, struct fib_info *fi) { unsigned int flags = 0; if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT) flags = RTF_REJECT; - if (fi && fi->fib_nh->fib_nh_gw4) - flags |= RTF_GATEWAY; + if (fi) { + const struct fib_nh *nh = fib_info_nh(fi, 0); + + if (nh->fib_nh_gw4) + flags |= RTF_GATEWAY; + } if (mask == htonl(0xFFFFFFFF)) flags |= RTF_HOST; flags |= RTF_UP; @@ -2755,7 +2759,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) prefix = htonl(l->key); hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { - const struct fib_info *fi = fa->fa_info; + struct fib_info *fi = fa->fa_info; __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen); unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi); @@ -2768,26 +2772,28 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) seq_setwidth(seq, 127); - if (fi) + if (fi) { + struct fib_nh *nh = fib_info_nh(fi, 0); + seq_printf(seq, "%s\t%08X\t%08X\t%04X\t%d\t%u\t" "%d\t%08X\t%d\t%u\t%u", - fi->fib_dev ? fi->fib_dev->name : "*", + nh->fib_nh_dev ? nh->fib_nh_dev->name : "*", prefix, - fi->fib_nh->fib_nh_gw4, flags, 0, 0, + nh->fib_nh_gw4, flags, 0, 0, fi->fib_priority, mask, (fi->fib_advmss ? 
fi->fib_advmss + 40 : 0), fi->fib_window, fi->fib_rtt >> 3); - else + } else { seq_printf(seq, "*\t%08X\t%08X\t%04X\t%d\t%u\t" "%d\t%08X\t%d\t%u\t%u", prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0); - + } seq_pad(seq, '\n'); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 11ddc276776e..05a6a8ecb574 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -99,6 +99,7 @@ #include #include #include +#include #include #include #include @@ -1950,7 +1951,7 @@ static int ip_mkroute_input(struct sk_buff *skb, struct flow_keys *hkeys) { #ifdef CONFIG_IP_ROUTE_MULTIPATH - if (res->fi && res->fi->fib_nhs > 1) { + if (res->fi && fib_info_num_path(res->fi) > 1) { int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys); fib_select_multipath(res, h); -- cgit v1.2.3-59-g8ed1b From 4ecabd55c90469629460f035f4bf4c8ae3d2743b Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Jun 2019 13:30:12 -0700 Subject: bpf: allow CGROUP_SKB programs to use bpf_skb_cgroup_id() helper Currently bpf_skb_cgroup_id() is not supported for CGROUP_SKB programs. An attempt to load such a program generates an error like this: libbpf: 0: (b7) r6 = 0 ... 9: (85) call bpf_skb_cgroup_id#79 unknown func bpf_skb_cgroup_id#79 There are no particular reasons for denying it, and we have some use cases where it might be useful. So let's add it to the list of allowed helpers. Signed-off-by: Roman Gushchin Cc: Yonghong Song Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- net/core/filter.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index 55bfc941d17a..f2777dc0b624 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5919,6 +5919,10 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; +#ifdef CONFIG_SOCK_CGROUP_DATA + case BPF_FUNC_skb_cgroup_id: + return &bpf_skb_cgroup_id_proto; +#endif #ifdef CONFIG_INET case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; -- cgit v1.2.3-59-g8ed1b From 7ba7aeabbaba484347cc98fbe9045769ca0d118d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 7 Jun 2019 21:20:34 +0200 Subject: net: Don't disable interrupts in napi_alloc_frag() netdev_alloc_frag() can be used from any context and is used by NAPI and non-NAPI drivers. Non-NAPI drivers use it in interrupt context and NAPI drivers use it during initial allocation (->ndo_open() or ->ndo_change_mtu()). Some NAPI drivers share the same function for the initial allocation and the allocation in their NAPI callback. The interrupts are disabled in order to ensure locked access from every context to `netdev_alloc_cache'. Let netdev_alloc_frag() check if interrupts are disabled. If they are, use `netdev_alloc_cache' otherwise disable BH and invoke __napi_alloc_frag() for the allocation. The IRQ check is cheaper compared to disabling & enabling interrupts and memory allocation with disabled interrupts does not work on -RT. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
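A hedged sketch of the calling pattern this permits — one refill helper shared between process context (->ndo_open()) and a non-NAPI hardirq handler; every foo_* name is hypothetical, and the diff follows:

static int foo_refill_rx(struct foo_ring *ring)
{
	/* Safe in both contexts after this patch: netdev_alloc_frag()
	 * now picks the irq-safe netdev cache itself when interrupts
	 * are disabled, and the BH-protected napi cache otherwise.
	 */
	void *frag = netdev_alloc_frag(FOO_RX_FRAG_SIZE);

	if (!frag)
		return -ENOMEM;
	foo_post_rx_buffer(ring, frag); /* hypothetical */
	return 0;
}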
Miller --- net/core/skbuff.c | 49 +++++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 26 deletions(-) (limited to 'net/core') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 23c9bf8fc322..ede23fa9bc5a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -366,19 +366,21 @@ struct napi_alloc_cache { static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); -static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) +static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) { - struct page_frag_cache *nc; - unsigned long flags; - void *data; + struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); - local_irq_save(flags); - nc = this_cpu_ptr(&netdev_alloc_cache); - data = page_frag_alloc(nc, fragsz, gfp_mask); - local_irq_restore(flags); - return data; + return page_frag_alloc(&nc->page, fragsz, gfp_mask); } +void *napi_alloc_frag(unsigned int fragsz) +{ + fragsz = SKB_DATA_ALIGN(fragsz); + + return __napi_alloc_frag(fragsz, GFP_ATOMIC); +} +EXPORT_SYMBOL(napi_alloc_frag); + /** * netdev_alloc_frag - allocate a page fragment * @fragsz: fragment size @@ -388,26 +390,21 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) */ void *netdev_alloc_frag(unsigned int fragsz) { - fragsz = SKB_DATA_ALIGN(fragsz); - - return __netdev_alloc_frag(fragsz, GFP_ATOMIC); -} -EXPORT_SYMBOL(netdev_alloc_frag); - -static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) -{ - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); - - return page_frag_alloc(&nc->page, fragsz, gfp_mask); -} + struct page_frag_cache *nc; + void *data; -void *napi_alloc_frag(unsigned int fragsz) -{ fragsz = SKB_DATA_ALIGN(fragsz); - - return __napi_alloc_frag(fragsz, GFP_ATOMIC); + if (in_irq() || irqs_disabled()) { + nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc(nc, fragsz, GFP_ATOMIC); + } else { + local_bh_disable(); + data = __napi_alloc_frag(fragsz, GFP_ATOMIC); + local_bh_enable(); + } + return data; } -EXPORT_SYMBOL(napi_alloc_frag); +EXPORT_SYMBOL(netdev_alloc_frag); /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device -- cgit v1.2.3-59-g8ed1b From 92dcabd7a0ea0fd88d414f39092132f848652772 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 7 Jun 2019 21:20:35 +0200 Subject: net: Don't disable interrupts in __netdev_alloc_skb() __netdev_alloc_skb() can be used from any context and is used by NAPI and non-NAPI drivers. Non-NAPI drivers use it in interrupt context and NAPI drivers use it during initial allocation (->ndo_open() or ->ndo_change_mtu()). Some NAPI drivers share the same function for the initial allocation and the allocation in their NAPI callback. The interrupts are disabled in order to ensure locked access from every context to `netdev_alloc_cache'. Let __netdev_alloc_skb() check if interrupts are disabled. If they are, use `netdev_alloc_cache'. Otherwise disable BH and use `napi_alloc_cache.page'. The IRQ check is cheaper compared to disabling & enabling interrupts and memory allocation with disabled interrupts does not work on -RT. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
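The bpf_skb_cgroup_id() change a few patches above can be exercised with a minimal CGROUP_SKB program; a hedged sketch, where the cgroup id constant is purely illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define ALLOWED_CGROUP_ID 0x100000001ULL /* illustrative */

SEC("cgroup_skb/egress")
int cg_skb_filter(struct __sk_buff *skb)
{
	/* Loads without "unknown func bpf_skb_cgroup_id" after the patch. */
	if (bpf_skb_cgroup_id(skb) == ALLOWED_CGROUP_ID)
		return 1; /* allow */
	return 0; /* drop */
}

char _license[] SEC("license") = "GPL";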
Miller --- net/core/skbuff.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'net/core') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ede23fa9bc5a..bab9484f1631 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -423,7 +423,6 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, gfp_t gfp_mask) { struct page_frag_cache *nc; - unsigned long flags; struct sk_buff *skb; bool pfmemalloc; void *data; @@ -444,13 +443,17 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; - local_irq_save(flags); - - nc = this_cpu_ptr(&netdev_alloc_cache); - data = page_frag_alloc(nc, len, gfp_mask); - pfmemalloc = nc->pfmemalloc; - - local_irq_restore(flags); + if (in_irq() || irqs_disabled()) { + nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc(nc, len, gfp_mask); + pfmemalloc = nc->pfmemalloc; + } else { + local_bh_disable(); + nc = this_cpu_ptr(&napi_alloc_cache.page); + data = page_frag_alloc(nc, len, gfp_mask); + pfmemalloc = nc->pfmemalloc; + local_bh_enable(); + } if (unlikely(!data)) return NULL; -- cgit v1.2.3-59-g8ed1b From 6dcdd884e2a4bb57b0ed3654ff28974ae17d2a08 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 7 Jun 2019 21:20:40 +0200 Subject: net: hwbm: Make the hwbm_pool lock a mutex Based on review, `lock' is only acquired in hwbm_pool_add() which is invoked via ->probe(), ->resume() and ->ndo_change_mtu(). Based on this the lock can become a mutex and there is no need to disable interrupts during the procedure. Now that the lock is a mutex, hwbm_pool_add() no longer invokes hwbm_pool_refill() in an atomic context so we can pass GFP_KERNEL to hwbm_pool_refill() and remove the `gfp' argument from hwbm_pool_add(). Cc: Thomas Petazzoni Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
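A sketch of a caller after this change, mirroring the mvneta conversion in the diff below; foo_fill_pool() itself is hypothetical:

static int foo_fill_pool(struct hwbm_pool *hwbm_pool)
{
	int num;

	mutex_init(&hwbm_pool->buf_lock); /* replaces spin_lock_init() */

	/* No gfp_t argument any more: refills use GFP_KERNEL under the
	 * mutex, so this must run in sleepable context (->probe(),
	 * ->resume(), ->ndo_change_mtu()).
	 */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
	if (num != hwbm_pool->size)
		return -ENOMEM;
	return 0;
}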
Miller --- drivers/net/ethernet/marvell/mvneta.c | 2 +- drivers/net/ethernet/marvell/mvneta_bm.c | 4 ++-- include/net/hwbm.h | 6 +++--- net/core/hwbm.c | 15 +++++++-------- 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 94dc0a272644..895bfed26a8a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1119,7 +1119,7 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); /* Fill entire long pool */ - num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); if (num != hwbm_pool->size) { WARN(1, "pool %d: %d of %d allocated\n", bm_pool->id, num, hwbm_pool->size); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index de468e1bdba9..82ee2bcca6fd 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -190,7 +190,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); hwbm_pool->construct = mvneta_bm_construct; hwbm_pool->priv = new_pool; - spin_lock_init(&hwbm_pool->lock); + mutex_init(&hwbm_pool->buf_lock); /* Create new pool */ err = mvneta_bm_pool_create(priv, new_pool); @@ -201,7 +201,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, } /* Allocate buffers for this pool */ - num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); if (num != hwbm_pool->size) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, hwbm_pool->size); diff --git a/include/net/hwbm.h b/include/net/hwbm.h index 89085e2e2da5..81643cf8a1c4 100644 --- a/include/net/hwbm.h +++ b/include/net/hwbm.h @@ -12,18 +12,18 @@ struct hwbm_pool { /* constructor called during alocation */ int (*construct)(struct hwbm_pool *bm_pool, void *buf); /* protect acces to the buffer counter*/ - spinlock_t lock; + struct mutex buf_lock; /* private data */ void *priv; }; #ifdef CONFIG_HWBM void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf); int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp); -int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp); +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num); #else void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {} int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; } -int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num) { return 0; } #endif /* CONFIG_HWBM */ #endif /* _HWBM_H */ diff --git a/net/core/hwbm.c b/net/core/hwbm.c index fd822ca5a245..ac1a66df9adc 100644 --- a/net/core/hwbm.c +++ b/net/core/hwbm.c @@ -43,34 +43,33 @@ int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) } EXPORT_SYMBOL_GPL(hwbm_pool_refill); -int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num) { int err, i; - unsigned long flags; - spin_lock_irqsave(&bm_pool->lock, flags); + mutex_lock(&bm_pool->buf_lock); if (bm_pool->buf_num == bm_pool->size) { pr_warn("pool already filled\n"); - spin_unlock_irqrestore(&bm_pool->lock, flags); + mutex_unlock(&bm_pool->buf_lock); return bm_pool->buf_num; } if (buf_num + 
bm_pool->buf_num > bm_pool->size) { pr_warn("cannot allocate %d buffers for pool\n", buf_num); - spin_unlock_irqrestore(&bm_pool->lock, flags); + mutex_unlock(&bm_pool->buf_lock); return 0; } if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { pr_warn("Adding %d buffers to the %d current buffers will overflow\n", buf_num, bm_pool->buf_num); - spin_unlock_irqrestore(&bm_pool->lock, flags); + mutex_unlock(&bm_pool->buf_lock); return 0; } for (i = 0; i < buf_num; i++) { - err = hwbm_pool_refill(bm_pool, gfp); + err = hwbm_pool_refill(bm_pool, GFP_KERNEL); if (err < 0) break; } @@ -79,7 +78,7 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) bm_pool->buf_num += i; pr_debug("hwpm pool: %d of %d buffers added\n", i, buf_num); - spin_unlock_irqrestore(&bm_pool->lock, flags); + mutex_unlock(&bm_pool->buf_lock); return i; } -- cgit v1.2.3-59-g8ed1b From fada7fdc83c0bf8755956bff707c42b609223301 Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 6 Jun 2019 13:59:40 -0700 Subject: bpf: Allow bpf_map_lookup_elem() on an xskmap Currently, the AF_XDP code uses a separate map in order to determine if an xsk is bound to a queue. Instead of doing this, have bpf_map_lookup_elem() return a xdp_sock. Rearrange some xdp_sock members to eliminate structure holes. Remove selftest - will be added back in later patch. Signed-off-by: Jonathan Lemon Acked-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 8 +++++ include/net/xdp_sock.h | 4 +-- include/uapi/linux/bpf.h | 4 +++ kernel/bpf/verifier.c | 26 ++++++++++++-- kernel/bpf/xskmap.c | 7 ++++ net/core/filter.c | 40 ++++++++++++++++++++++ .../selftests/bpf/verifier/prevent_map_lookup.c | 15 -------- 7 files changed, 85 insertions(+), 19 deletions(-) (limited to 'net/core') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e5a309e6a400..1fe137afa898 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -280,6 +280,7 @@ enum bpf_reg_type { PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ }; /* The information passed from prog-specific *_is_valid_access @@ -727,6 +728,13 @@ void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); void __cpu_map_flush(struct bpf_map *map); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); +bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); +u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); /* Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index d074b6d60f8a..ae0f368a62bb 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -58,11 +58,11 @@ struct xdp_sock { struct xdp_umem *umem; struct list_head flush_node; u16 queue_id; - struct xsk_queue *tx ____cacheline_aligned_in_smp; - struct list_head list; bool zc; /* Protects multiple processes in the control path */ struct mutex mutex; + struct xsk_queue *tx ____cacheline_aligned_in_smp; + struct list_head list; /* Mutual exclusion of NAPI TX thread and sendmsg error paths * in the SKB destructor callback. 
*/ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7c6aef253173..ae0907d8c03a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3083,6 +3083,10 @@ struct bpf_sock_tuple { }; }; +struct bpf_xdp_sock { + __u32 queue_id; +}; + #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5c2cb5bd84ce..8d1786357a09 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -334,7 +334,8 @@ static bool type_is_sk_pointer(enum bpf_reg_type type) { return type == PTR_TO_SOCKET || type == PTR_TO_SOCK_COMMON || - type == PTR_TO_TCP_SOCK; + type == PTR_TO_TCP_SOCK || + type == PTR_TO_XDP_SOCK; } static bool reg_type_may_be_null(enum bpf_reg_type type) @@ -406,6 +407,7 @@ static const char * const reg_type_str[] = { [PTR_TO_TCP_SOCK] = "tcp_sock", [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", [PTR_TO_TP_BUFFER] = "tp_buffer", + [PTR_TO_XDP_SOCK] = "xdp_sock", }; static char slot_type_char[] = { @@ -1363,6 +1365,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: return true; default: return false; @@ -1843,6 +1846,9 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, case PTR_TO_TCP_SOCK: valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); break; + case PTR_TO_XDP_SOCK: + valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); + break; default: valid = false; } @@ -2007,6 +2013,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, case PTR_TO_TCP_SOCK: pointer_desc = "tcp_sock "; break; + case PTR_TO_XDP_SOCK: + pointer_desc = "xdp_sock "; + break; default: break; } @@ -2905,10 +2914,14 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, * appear. 
*/ case BPF_MAP_TYPE_CPUMAP: - case BPF_MAP_TYPE_XSKMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; + case BPF_MAP_TYPE_XSKMAP: + if (func_id != BPF_FUNC_redirect_map && + func_id != BPF_FUNC_map_lookup_elem) + goto error; + break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) @@ -3799,6 +3812,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; @@ -5038,6 +5052,9 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; + } else if (reg->map_ptr->map_type == + BPF_MAP_TYPE_XSKMAP) { + reg->type = PTR_TO_XDP_SOCK; } else { reg->type = PTR_TO_MAP_VALUE; } @@ -6299,6 +6316,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: /* Only valid matches are exact, which memcmp() above * would have accepted */ @@ -6693,6 +6711,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type) case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: + case PTR_TO_XDP_SOCK: return false; default: return true; @@ -7826,6 +7845,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) case PTR_TO_TCP_SOCK: convert_ctx_access = bpf_tcp_sock_convert_ctx_access; break; + case PTR_TO_XDP_SOCK: + convert_ctx_access = bpf_xdp_sock_convert_ctx_access; + break; default: continue; } diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 413d75f4fc72..ef7338cebd18 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -151,6 +151,12 @@ void __xsk_map_flush(struct bpf_map *map) } static void *xsk_map_lookup_elem(struct bpf_map *map, void *key) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return __xsk_map_lookup_elem(map, *(u32 *)key); +} + +static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key) { return ERR_PTR(-EOPNOTSUPP); } @@ -218,6 +224,7 @@ const struct bpf_map_ops xsk_map_ops = { .map_free = xsk_map_free, .map_get_next_key = xsk_map_get_next_key, .map_lookup_elem = xsk_map_lookup_elem, + .map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only, .map_update_elem = xsk_map_update_elem, .map_delete_elem = xsk_map_delete_elem, .map_check_btf = map_check_no_btf, diff --git a/net/core/filter.c b/net/core/filter.c index f2777dc0b624..a5e4ac7fcbe5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5680,6 +5680,46 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) return INET_ECN_set_ce(skb); } +bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id)) + return false; + + if (off % size != 0) + return false; + + switch (off) { + default: + return size == sizeof(__u32); + } +} + +u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, u32 *target_size) +{ + struct bpf_insn *insn = insn_buf; + +#define BPF_XDP_SOCK_GET(FIELD) \ + do { \ + BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) > \ + FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \ + *insn++ = 
BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ + si->dst_reg, si->src_reg, \ + offsetof(struct xdp_sock, FIELD)); \ + } while (0) + + switch (si->off) { + case offsetof(struct bpf_xdp_sock, queue_id): + BPF_XDP_SOCK_GET(queue_id); + break; + } + + return insn - insn_buf; +} + static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = { .func = bpf_skb_ecn_set_ce, .gpl_only = false, diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c index bbdba990fefb..da7a4b37cb98 100644 --- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c +++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c @@ -28,21 +28,6 @@ .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem", .prog_type = BPF_PROG_TYPE_SOCK_OPS, }, -{ - "prevent map lookup in xskmap", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_xskmap = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem", - .prog_type = BPF_PROG_TYPE_XDP, -}, { "prevent map lookup in stack trace", .insns = { -- cgit v1.2.3-59-g8ed1b From e44ef4e4516cce783e95d7221936aa9a4f325ad9 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 16 May 2019 09:49:20 +0300 Subject: devlink: Hang reporter's dump method on a dumpit cb The devlink health reporter provides a dump method on an error. Dump may contain a large amount of data, in this case doit cb isn't sufficient. This is because the user side is blocking and doesn't allow draining of the socket until the socket runs out of buffers. Using dumpit cb is the correct way to go. Please note that thankfully the dump op is not yet implemented in any driver and therefore this change is not breaking userspace. 
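To illustrate the resume machinery this relies on: a dumpit callback is invoked repeatedly by the netlink core, once per skb, with per-dump state kept in cb->args[]. A minimal sketch of the pattern (all foo_* names are hypothetical, not part of this patch):

  static int foo_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
  {
          int idx = cb->args[0];  /* resume where the last pass stopped */

          for (; idx < foo_item_count; idx++)
                  if (foo_nl_fill_one(skb, idx) < 0)
                          break;  /* skb full: send this part first */

          if (idx == cb->args[0])
                  return 0;       /* nothing more to add: dump is done */
          cb->args[0] = idx;      /* remember the resume point */
          return skb->len;        /* non-zero: the core calls us again */
  }

devlink_fmsg_dumpit() below follows the same pattern, with cb->args[0] indexing into the prepared fmsg.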
Fixes: 35455e23e6f3 ("devlink: Add health dump {get,clear} commands") Signed-off-by: Aya Levin Acked-by: Jiri Pirko Signed-off-by: Saeed Mahameed --- net/core/devlink.c | 118 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 98 insertions(+), 20 deletions(-) (limited to 'net/core') diff --git a/net/core/devlink.c b/net/core/devlink.c index fd15a66c1d2f..4baf716e535e 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4518,6 +4518,35 @@ nla_put_failure: return err; } +static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb, + struct netlink_callback *cb, + enum devlink_command cmd) +{ + int index = cb->args[0]; + int tmp_index = index; + void *hdr; + int err; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd); + if (!hdr) { + err = -EMSGSIZE; + goto nla_put_failure; + } + + err = devlink_fmsg_prepare_skb(fmsg, skb, &index); + if ((err && err != -EMSGSIZE) || tmp_index == index) + goto nla_put_failure; + + cb->args[0] = index; + genlmsg_end(skb, hdr); + return skb->len; + +nla_put_failure: + genlmsg_cancel(skb, hdr); + return err; +} + struct devlink_health_reporter { struct list_head list; void *priv; @@ -4750,17 +4779,16 @@ int devlink_health_report(struct devlink_health_reporter *reporter, EXPORT_SYMBOL_GPL(devlink_health_report); static struct devlink_health_reporter * -devlink_health_reporter_get_from_info(struct devlink *devlink, - struct genl_info *info) +devlink_health_reporter_get_from_attrs(struct devlink *devlink, + struct nlattr **attrs) { struct devlink_health_reporter *reporter; char *reporter_name; - if (!info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]) + if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]) return NULL; - reporter_name = - nla_data(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]); + reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]); mutex_lock(&devlink->reporters_lock); reporter = devlink_health_reporter_find_by_name(devlink, reporter_name); if (reporter) @@ -4769,6 +4797,48 @@ devlink_health_reporter_get_from_info(struct devlink *devlink, return reporter; } +static struct devlink_health_reporter * +devlink_health_reporter_get_from_info(struct devlink *devlink, + struct genl_info *info) +{ + return devlink_health_reporter_get_from_attrs(devlink, info->attrs); +} + +static struct devlink_health_reporter * +devlink_health_reporter_get_from_cb(struct netlink_callback *cb) +{ + struct devlink_health_reporter *reporter; + struct devlink *devlink; + struct nlattr **attrs; + int err; + + attrs = kmalloc_array(DEVLINK_ATTR_MAX + 1, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return NULL; + + err = nlmsg_parse_deprecated(cb->nlh, + GENL_HDRLEN + devlink_nl_family.hdrsize, + attrs, DEVLINK_ATTR_MAX, + devlink_nl_family.policy, cb->extack); + if (err) + goto free; + + mutex_lock(&devlink_mutex); + devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs); + if (IS_ERR(devlink)) + goto unlock; + + reporter = devlink_health_reporter_get_from_attrs(devlink, attrs); + mutex_unlock(&devlink_mutex); + kfree(attrs); + return reporter; +unlock: + mutex_unlock(&devlink_mutex); +free: + kfree(attrs); + return NULL; +} + static void devlink_health_reporter_put(struct devlink_health_reporter *reporter) { @@ -5004,32 +5074,40 @@ out: return err; } -static int devlink_nl_cmd_health_reporter_dump_get_doit(struct sk_buff *skb, - struct genl_info *info) +static int +devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb, + struct 
netlink_callback *cb) { - struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; + u64 start = cb->args[0]; int err; - reporter = devlink_health_reporter_get_from_info(devlink, info); + reporter = devlink_health_reporter_get_from_cb(cb); if (!reporter) return -EINVAL; if (!reporter->ops->dump) { - devlink_health_reporter_put(reporter); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto out; } mutex_lock(&reporter->dump_lock); - err = devlink_health_do_dump(reporter, NULL); - if (err) - goto out; - - err = devlink_fmsg_snd(reporter->dump_fmsg, info, - DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET, 0); + if (!start) { + err = devlink_health_do_dump(reporter, NULL); + if (err) + goto unlock; + cb->args[1] = reporter->dump_ts; + } + if (!reporter->dump_fmsg || cb->args[1] != reporter->dump_ts) { + NL_SET_ERR_MSG_MOD(cb->extack, "Dump trampled, please retry"); + err = -EAGAIN; + goto unlock; + } -out: + err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb, + DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET); +unlock: mutex_unlock(&reporter->dump_lock); +out: devlink_health_reporter_put(reporter); return err; } @@ -5366,7 +5444,7 @@ static const struct genl_ops devlink_nl_ops[] = { { .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = devlink_nl_cmd_health_reporter_dump_get_doit, + .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit, .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | DEVLINK_NL_FLAG_NO_LOCK, -- cgit v1.2.3-59-g8ed1b From 99f3a064bc2e4bd5fe50218646c5be342f2ad18c Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Thu, 13 Jun 2019 15:00:01 -0700 Subject: bpf: net: Add SO_DETACH_REUSEPORT_BPF There is SO_ATTACH_REUSEPORT_[CE]BPF but there is no DETACH. This patch adds SO_DETACH_REUSEPORT_BPF sockopt. The same sockopt can be used to undo both SO_ATTACH_REUSEPORT_[CE]BPF. reuseport_detach_prog() is added and it is mostly a mirror of the existing reuseport_attach_prog(). The differences are that it does not call reuseport_alloc() and returns -ENOENT when there is no old prog.
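From userspace, detach is symmetric with attach; a minimal sketch (error handling omitted; prog_fd is assumed to be a loaded SO_REUSEPORT eBPF program):

  #include <sys/socket.h>

  int one = 1, dummy = 0;

  setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
  setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
             &prog_fd, sizeof(prog_fd));
  /* ... later: undo either attach variant. The value is ignored,
   * but sock_setsockopt() rejects optlen < sizeof(int), so pass a
   * dummy int rather than NULL.
   */
  setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
             &dummy, sizeof(dummy));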
Cc: Craig Gallek Signed-off-by: Martin KaFai Lau Reviewed-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- arch/alpha/include/uapi/asm/socket.h | 2 ++ arch/mips/include/uapi/asm/socket.h | 2 ++ arch/parisc/include/uapi/asm/socket.h | 2 ++ arch/sparc/include/uapi/asm/socket.h | 2 ++ include/net/sock_reuseport.h | 2 ++ include/uapi/asm-generic/socket.h | 2 ++ net/core/sock.c | 4 ++++ net/core/sock_reuseport.c | 24 ++++++++++++++++++++++++ 8 files changed, 40 insertions(+) (limited to 'net/core') diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 976e89b116e5..de6c4df61082 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -122,6 +122,8 @@ #define SO_RCVTIMEO_NEW 66 #define SO_SNDTIMEO_NEW 67 +#define SO_DETACH_REUSEPORT_BPF 68 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index d41765cfbc6e..d0a9ed2ca2d6 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -133,6 +133,8 @@ #define SO_RCVTIMEO_NEW 66 #define SO_SNDTIMEO_NEW 67 +#define SO_DETACH_REUSEPORT_BPF 68 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 66c5dd245ac7..10173c32195e 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -114,6 +114,8 @@ #define SO_RCVTIMEO_NEW 0x4040 #define SO_SNDTIMEO_NEW 0x4041 +#define SO_DETACH_REUSEPORT_BPF 0x4042 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 9265a9eece15..8029b681fc7c 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -115,6 +115,8 @@ #define SO_RCVTIMEO_NEW 0x0044 #define SO_SNDTIMEO_NEW 0x0045 +#define SO_DETACH_REUSEPORT_BPF 0x0047 + #if !defined(__KERNEL__) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 8a5f70c7cdf2..d9112de85261 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -35,6 +35,8 @@ extern struct sock *reuseport_select_sock(struct sock *sk, struct sk_buff *skb, int hdr_len); extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); +extern int reuseport_detach_prog(struct sock *sk); + int reuseport_get_id(struct sock_reuseport *reuse); #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 8c1391c89171..77f7c1638eb1 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -117,6 +117,8 @@ #define SO_RCVTIMEO_NEW 66 #define SO_SNDTIMEO_NEW 67 +#define SO_DETACH_REUSEPORT_BPF 68 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/net/core/sock.c b/net/core/sock.c index 75b1c950b49f..06be30737b69 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1045,6 +1045,10 @@ set_rcvbuf: } break; + case SO_DETACH_REUSEPORT_BPF: + ret = reuseport_detach_prog(sk); + break; + case SO_DETACH_FILTER: ret = sk_detach_filter(sk); break; diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index dc4aefdf2a08..9408f9264d05 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -332,3 +332,27 @@ int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog) return 0; } 
EXPORT_SYMBOL(reuseport_attach_prog); + +int reuseport_detach_prog(struct sock *sk) +{ + struct sock_reuseport *reuse; + struct bpf_prog *old_prog; + + if (!rcu_access_pointer(sk->sk_reuseport_cb)) + return sk->sk_reuseport ? -ENOENT : -EINVAL; + + old_prog = NULL; + spin_lock_bh(&reuseport_lock); + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + rcu_swap_protected(reuse->prog, old_prog, + lockdep_is_held(&reuseport_lock)); + spin_unlock_bh(&reuseport_lock); + + if (!old_prog) + return -ENOENT; + + sk_reuseport_prog_free(old_prog); + return 0; +} +EXPORT_SYMBOL(reuseport_detach_prog); -- cgit v1.2.3-59-g8ed1b From fb85c4a730af221339c1dde1a434b73da0dfc3ed Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 12 Jun 2019 10:30:37 -0700 Subject: bpf: export bpf_sock for BPF_PROG_TYPE_CGROUP_SOCK_ADDR prog type And let it use bpf_sk_storage_{get,delete} helpers to access socket storage. Kernel context (struct bpf_sock_addr_kern) already has sk member, so I just expose it to the BPF hooks. Using PTR_TO_SOCKET instead of PTR_TO_SOCK_COMMON should be safe because the hook is called on bind/connect. Cc: Martin Lau Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'net/core') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ae0907d8c03a..8815fc418cde 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3247,6 +3247,7 @@ struct bpf_sock_addr { __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write. * Stored in network byte order. */ + __bpf_md_ptr(struct bpf_sock *, sk); }; /* User bpf_sock_ops struct to access socket values and specify request ops diff --git a/net/core/filter.c b/net/core/filter.c index a5e4ac7fcbe5..37c4a2fd559b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5922,6 +5922,10 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skc_lookup_tcp: return &bpf_sock_addr_skc_lookup_tcp_proto; #endif /* CONFIG_INET */ + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_proto; + case BPF_FUNC_sk_storage_delete: + return &bpf_sk_storage_delete_proto; default: return bpf_base_func_proto(func_id); } @@ -6828,6 +6832,13 @@ static bool sock_addr_is_valid_access(int off, int size, if (size != size_default) return false; break; + case offsetof(struct bpf_sock_addr, sk): + if (type != BPF_READ) + return false; + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_SOCKET; + break; default: if (type == BPF_READ) { if (size != size_default) @@ -7778,6 +7789,11 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, struct bpf_sock_addr_kern, struct in6_addr, t_ctx, s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); break; + case offsetof(struct bpf_sock_addr, sk): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_addr_kern, sk)); + break; } return insn - insn_buf; -- cgit v1.2.3-59-g8ed1b From 1314ef561102e534e14cb1d37f89f5c1df0b2ea7 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 12 Jun 2019 10:30:38 -0700 Subject: bpf: export bpf_sock for BPF_PROG_TYPE_SOCK_OPS prog type And let it use bpf_sk_storage_{get,delete} helpers to access socket storage. Kernel context (struct bpf_sock_ops_kern) already has sk member, so I just expose it to the BPF hooks. 
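As an illustration of what the exposed pointer enables, a hypothetical sockops program keying socket storage off ctx->sk (the map definition, names, and libbpf conveniences are assumptions for this sketch, not part of the patch):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_SK_STORAGE);
          __uint(map_flags, BPF_F_NO_PREALLOC);
          __type(key, int);
          __type(value, __u64);
  } cnt_map SEC(".maps");

  SEC("sockops")
  int count_ops(struct bpf_sock_ops *skops)
  {
          struct bpf_sock *sk = skops->sk;  /* may be NULL, see below */
          __u64 *cnt;

          if (!sk)
                  return 1;
          cnt = bpf_sk_storage_get(&cnt_map, sk, 0,
                                   BPF_SK_STORAGE_GET_F_CREATE);
          if (cnt)
                  (*cnt)++;
          return 1;
  }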
I use PTR_TO_SOCKET_OR_NULL and return NULL in !is_fullsock case. I also export bpf_tcp_sock to make it possible to access tcp socket stats. Cc: Martin Lau Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'net/core') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 8815fc418cde..d0a23476f887 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3299,6 +3299,7 @@ struct bpf_sock_ops { __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; + __bpf_md_ptr(struct bpf_sock *, sk); }; /* Definitions for bpf_sock_ops_cb_flags */ diff --git a/net/core/filter.c b/net/core/filter.c index 37c4a2fd559b..8c18f2781afa 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6147,6 +6147,14 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_local_storage_proto; case BPF_FUNC_perf_event_output: return &bpf_sockopt_event_output_proto; + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_proto; + case BPF_FUNC_sk_storage_delete: + return &bpf_sk_storage_delete_proto; +#ifdef CONFIG_INET + case BPF_FUNC_tcp_sock: + return &bpf_tcp_sock_proto; +#endif /* CONFIG_INET */ default: return bpf_base_func_proto(func_id); } @@ -6882,6 +6890,11 @@ static bool sock_ops_is_valid_access(int off, int size, if (size != sizeof(__u64)) return false; break; + case offsetof(struct bpf_sock_ops, sk): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_SOCKET_OR_NULL; + break; default: if (size != size_default) return false; @@ -8053,6 +8066,19 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, struct sock, type); break; + case offsetof(struct bpf_sock_ops, sk): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + struct bpf_sock_ops_kern, + is_fullsock), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + is_fullsock)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + struct bpf_sock_ops_kern, sk), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, sk)); + break; } return insn - insn_buf; } -- cgit v1.2.3-59-g8ed1b From 98fdbea550378e0153092bce21261df86a8ccc57 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 12 Jun 2019 15:20:11 +0300 Subject: net/mlx5: Declare more strictly devlink encap mode Devlink has UAPI declaration for encap mode, so there is no need to be loose on the data get/set by drivers. Update call sites to use enum devlink_eswitch_encap_mode instead of plain u8. 
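For a driver, the resulting callback pair looks roughly like this (a hypothetical foo driver, sketched only to show the typed interface):

  #include <net/devlink.h>

  static int foo_eswitch_encap_mode_get(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode *p_encap)
  {
          struct foo_priv *priv = devlink_priv(devlink);

          *p_encap = priv->encap; /* field carries the enum type now */
          return 0;
  }

  static int foo_eswitch_encap_mode_set(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode encap,
                                        struct netlink_ext_ack *extack)
  {
          struct foo_priv *priv = devlink_priv(devlink);

          if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
              encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
                  NL_SET_ERR_MSG_MOD(extack, "unsupported encap mode");
                  return -EOPNOTSUPP;
          }
          priv->encap = encap;
          return 0;
  }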
Suggested-by: Parav Pandit Signed-off-by: Leon Romanovsky Acked-by: Jiri Pirko Reviewed-by: Parav Pandit Reviewed-by: Petr Vorel --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8 +++++--- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 ++++-- include/net/devlink.h | 6 ++++-- net/core/devlink.c | 6 ++++-- 4 files changed, 17 insertions(+), 9 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index e03811be771d..8b9f2cf58e91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -176,7 +176,7 @@ struct mlx5_esw_offload { const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES]; u8 inline_mode; u64 num_flows; - u8 encap; + enum devlink_eswitch_encap_mode encap; }; /* E-Switch MC FDB table hash node */ @@ -357,9 +357,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, + enum devlink_eswitch_encap_mode encap, struct netlink_ext_ack *extack); -int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); +int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, + enum devlink_eswitch_encap_mode *encap); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 1638e4cdeb16..17abb98b48af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2160,7 +2160,8 @@ out: return 0; } -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, + enum devlink_eswitch_encap_mode encap, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); @@ -2209,7 +2210,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, return err; } -int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) +int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, + enum devlink_eswitch_encap_mode *encap) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; diff --git a/include/net/devlink.h b/include/net/devlink.h index 1c4adfb4195a..7a34fc586def 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -530,8 +530,10 @@ struct devlink_ops { int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode); int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode, struct netlink_ext_ack *extack); - int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode); - int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode, + int (*eswitch_encap_mode_get)(struct devlink *devlink, + enum devlink_eswitch_encap_mode *p_encap_mode); + int (*eswitch_encap_mode_set)(struct devlink *devlink, + enum devlink_eswitch_encap_mode encap_mode, struct netlink_ext_ack *extack); int (*info_get)(struct devlink *devlink, struct 
devlink_info_req *req, struct netlink_ext_ack *extack); diff --git a/net/core/devlink.c b/net/core/devlink.c index d43bc52b8840..47ae69363b07 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1552,7 +1552,8 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, u32 seq, int flags) { const struct devlink_ops *ops = devlink->ops; - u8 inline_mode, encap_mode; + enum devlink_eswitch_encap_mode encap_mode; + u8 inline_mode; void *hdr; int err = 0; u16 mode; @@ -1628,7 +1629,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, { struct devlink *devlink = info->user_ptr[0]; const struct devlink_ops *ops = devlink->ops; - u8 inline_mode, encap_mode; + enum devlink_eswitch_encap_mode encap_mode; + u8 inline_mode; int err = 0; u16 mode; -- cgit v1.2.3-59-g8ed1b From 56f0f84e69c7a7f229dfa524b13b0ceb6ce9b09e Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Sat, 15 Jun 2019 22:53:48 +0000 Subject: bpf: fix the check that forwarding is enabled in bpf_ipv6_fib_lookup The bpf_ipv6_fib_lookup function should return BPF_FIB_LKUP_RET_FWD_DISABLED when forwarding is disabled for the input device. However instead of checking if forwarding is enabled on the input device, it checked the global net->ipv6.devconf_all->forwarding flag. Change it to behave as expected. Fixes: 87f5fc7e48dd ("bpf: Provide helper to do forwarding lookups in kernel FIB table") Signed-off-by: Anton Protopopov Acked-by: Toke Høiland-Jørgensen Reviewed-by: David Ahern Signed-off-by: Daniel Borkmann --- net/core/filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index f615e42cf4ef..3fdf1b21be36 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4737,7 +4737,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, return -ENODEV; idev = __in6_dev_get_safely(dev); - if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) + if (unlikely(!idev || !idev->cnf.forwarding)) return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { -- cgit v1.2.3-59-g8ed1b From 75345f888f700c4ab2448287e35d48c760b202e6 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Mon, 17 Jun 2019 10:53:41 +0200 Subject: ipoib: show VF broadcast address In the IPoIB case we can't see a broadcast address for a VF, but we can for the PF. Before: 11: ib1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 2044 qdisc pfifo_fast state UP mode DEFAULT group default qlen 256 link/infiniband 80:00:00:66:fe:80:00:00:00:00:00:00:24:8a:07:03:00:a4:3e:7c brd 00:ff:ff:ff:ff:12:40:1b:ff:ff:00:00:00:00:00:00:ff:ff:ff:ff vf 0 MAC 14:80:00:00:66:fe, spoof checking off, link-state disable, trust off, query_rss off ... After: 11: ib1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 2044 qdisc pfifo_fast state UP mode DEFAULT group default qlen 256 link/infiniband 80:00:00:66:fe:80:00:00:00:00:00:00:24:8a:07:03:00:a4:3e:7c brd 00:ff:ff:ff:ff:12:40:1b:ff:ff:00:00:00:00:00:00:ff:ff:ff:ff vf 0 link/infiniband 80:00:00:66:fe:80:00:00:00:00:00:00:24:8a:07:03:00:a4:3e:7c brd 00:ff:ff:ff:ff:12:40:1b:ff:ff:00:00:00:00:00:00:ff:ff:ff:ff, spoof checking off, link-state disable, trust off, query_rss off v1->v2: add the IFLA_VF_BROADCAST constant v2->v3: put IFLA_VF_BROADCAST at the end to avoid KABI breakage and set NLA_REJECT in dev_setlink Signed-off-by: Denis Kirjanov Acked-by: Doug Ledford Signed-off-by: David S.
Miller --- include/uapi/linux/if_link.h | 5 +++++ net/core/rtnetlink.c | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'net/core') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 5b225ff63b48..6f75bda2c2d7 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -694,6 +694,7 @@ enum { IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ + IFLA_VF_BROADCAST, /* VF broadcast */ __IFLA_VF_MAX, }; @@ -704,6 +705,10 @@ struct ifla_vf_mac { __u8 mac[32]; /* MAX_ADDR_LEN */ }; +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index cec60583931f..8ac81630ab5c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -908,6 +908,7 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, size += num_vfs * (nla_total_size(0) + nla_total_size(sizeof(struct ifla_vf_mac)) + + nla_total_size(sizeof(struct ifla_vf_broadcast)) + nla_total_size(sizeof(struct ifla_vf_vlan)) + nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ nla_total_size(MAX_VLAN_LIST_LEN * @@ -1197,6 +1198,7 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct ifla_vf_vlan vf_vlan; struct ifla_vf_rate vf_rate; struct ifla_vf_mac vf_mac; + struct ifla_vf_broadcast vf_broadcast; struct ifla_vf_info ivi; memset(&ivi, 0, sizeof(ivi)); @@ -1231,6 +1233,7 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, vf_trust.vf = ivi.vf; memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); + memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len); vf_vlan.vlan = ivi.vlan; vf_vlan.qos = ivi.qos; vf_vlan_info.vlan = ivi.vlan; @@ -1247,6 +1250,7 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, if (!vf) goto nla_put_vfinfo_failure; if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || + nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) || nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), &vf_rate) || @@ -1753,6 +1757,7 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, + [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, -- cgit v1.2.3-59-g8ed1b From 82828b88f081a0084cd65f90a4a1d3652f5adb66 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 19 Jun 2019 09:41:02 +0300 Subject: flow_dissector: add support for ingress ifindex dissection Add new key meta that contains ingress ifindex value and add a function to dissect this from skb. The key and function is prepared to cover other potential skb metadata values dissection. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 4 ++++ include/net/flow_dissector.h | 9 +++++++++ net/core/flow_dissector.c | 16 ++++++++++++++++ 3 files changed, 29 insertions(+) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 28bdaf978e72..b5d427b149c9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1320,6 +1320,10 @@ skb_flow_dissect_flow_keys_basic(const struct net *net, data, proto, nhoff, hlen, flags); } +void skb_flow_dissect_meta(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + void skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index d7ce647a8ca9..02478e48fae4 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -200,6 +200,14 @@ struct flow_dissector_key_ip { __u8 ttl; }; +/** + * struct flow_dissector_key_meta: + * @ingress_ifindex: ingress ifindex + */ +struct flow_dissector_key_meta { + int ingress_ifindex; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -225,6 +233,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ + FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index c0559af9e5e5..01ad60b5aa75 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -199,6 +199,22 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, } EXPORT_SYMBOL(__skb_flow_get_ports); +void skb_flow_dissect_meta(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container) +{ + struct flow_dissector_key_meta *meta; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META)) + return; + + meta = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_META, + target_container); + meta->ingress_ifindex = skb->skb_iif; +} +EXPORT_SYMBOL(skb_flow_dissect_meta); + static void skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type, struct flow_dissector *flow_dissector, -- cgit v1.2.3-59-g8ed1b From 9558a83aee62be7c3ce9eddd6484a5da16aad4cf Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 19 Jun 2019 09:41:04 +0300 Subject: net: flow_offload: implement support for meta key Implement support for previously added flow dissector meta key. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 6 ++++++ net/core/flow_offload.c | 7 +++++++ 2 files changed, 13 insertions(+) (limited to 'net/core') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 36fdb85c974d..36127c1858a4 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -10,6 +10,10 @@ struct flow_match { void *key; }; +struct flow_match_meta { + struct flow_dissector_key_meta *key, *mask; +}; + struct flow_match_basic { struct flow_dissector_key_basic *key, *mask; }; @@ -64,6 +68,8 @@ struct flow_match_enc_opts { struct flow_rule; +void flow_rule_match_meta(const struct flow_rule *rule, + struct flow_match_meta *out); void flow_rule_match_basic(const struct flow_rule *rule, struct flow_match_basic *out); void flow_rule_match_control(const struct flow_rule *rule, diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 3d93e51b83e0..f52fe0bc4017 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -25,6 +25,13 @@ EXPORT_SYMBOL(flow_rule_alloc); (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ +void flow_rule_match_meta(const struct flow_rule *rule, + struct flow_match_meta *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out); +} +EXPORT_SYMBOL(flow_rule_match_meta); + void flow_rule_match_basic(const struct flow_rule *rule, struct flow_match_basic *out) { -- cgit v1.2.3-59-g8ed1b From a25d50bfe645b3ed6b2cb3773e7025db14a608f3 Mon Sep 17 00:00:00 2001 From: Ilias Apalodimas Date: Tue, 18 Jun 2019 15:05:17 +0200 Subject: net: page_pool: add helper function to unmap dma addresses On a previous patch dma addr was stored in 'struct page'. Use that to unmap DMA addresses used by network drivers Signed-off-by: Ilias Apalodimas Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- include/net/page_pool.h | 1 + net/core/page_pool.c | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'net/core') diff --git a/include/net/page_pool.h b/include/net/page_pool.h index b885d86cb7a1..ad218cef88c5 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -110,6 +110,7 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) struct page_pool *page_pool_create(const struct page_pool_params *params); void page_pool_destroy(struct page_pool *pool); +void page_pool_unmap_page(struct page_pool *pool, struct page *page); /* Never call this directly, use helpers below */ void __page_pool_put_page(struct page_pool *pool, diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 5b2252c6d49b..205af7bd6d09 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -190,6 +190,13 @@ static void __page_pool_clean_page(struct page_pool *pool, page->dma_addr = 0; } +/* unmap the page and clean our state */ +void page_pool_unmap_page(struct page_pool *pool, struct page *page) +{ + __page_pool_clean_page(pool, page); +} +EXPORT_SYMBOL(page_pool_unmap_page); + /* Return a page to the page allocator, cleaning up our state */ static void __page_pool_return_page(struct page_pool *pool, struct page *page) { -- cgit v1.2.3-59-g8ed1b From 516a7593fda6f20a04988e988725a182644f67b4 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:05:22 +0200 Subject: xdp: fix leak of IDA cyclic id if rhashtable_insert_slow fails Fix error handling case, where inserting ID with rhashtable_insert_slow fails in xdp_rxq_info_reg_mem_model, which leads to never releasing the IDA ID, as the lookup in xdp_rxq_info_unreg_mem_model fails and thus ida_simple_remove() is never called. Fix by releasing ID via ida_simple_remove(), and mark xdp_rxq->mem.id with zero, which is already checked in xdp_rxq_info_unreg_mem_model(). Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Ilias Apalodimas Signed-off-by: David S. Miller --- net/core/xdp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net/core') diff --git a/net/core/xdp.c b/net/core/xdp.c index 8aab08b131d9..1d5f2292962c 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -301,6 +301,8 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, /* Insert allocator into ID lookup table */ ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node); if (IS_ERR(ptr)) { + ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id); + xdp_rxq->mem.id = 0; errno = PTR_ERR(ptr); goto err; } -- cgit v1.2.3-59-g8ed1b From 6bf071bf09d4b2ff3ee8783531e2ce814f0870cb Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:05:27 +0200 Subject: xdp: page_pool related fix to cpumap When converting an xdp_frame into an SKB, and sending this into the network stack, then the underlying XDP memory model need to release associated resources, because the network stack don't have callbacks for XDP memory models. The only memory model that needs this is page_pool, when a driver use the DMA-mapping feature. Introduce page_pool_release_page(), which basically does the same as page_pool_unmap_page(). Add xdp_release_frame() as the XDP memory model interface for calling it, if the memory model match MEM_TYPE_PAGE_POOL, to save the function call overhead for others. Have cpumap call xdp_release_frame() before xdp_scrub_frame(). Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- include/net/page_pool.h | 15 ++++++++++++++- include/net/xdp.h | 15 +++++++++++++++ kernel/bpf/cpumap.c | 3 +++ net/core/xdp.c | 15 +++++++++++++++ 4 files changed, 47 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/include/net/page_pool.h b/include/net/page_pool.h index ad218cef88c5..e240fac4c5b9 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -110,7 +110,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) struct page_pool *page_pool_create(const struct page_pool_params *params); void page_pool_destroy(struct page_pool *pool); -void page_pool_unmap_page(struct page_pool *pool, struct page *page); /* Never call this directly, use helpers below */ void __page_pool_put_page(struct page_pool *pool, @@ -133,6 +132,20 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, __page_pool_put_page(pool, page, true); } +/* Disconnects a page (from a page_pool). API users can have a need + * to disconnect a page (from a page_pool), to allow it to be used as + * a regular page (that will eventually be returned to the normal + * page-allocator via put_page). + */ +void page_pool_unmap_page(struct page_pool *pool, struct page *page); +static inline void page_pool_release_page(struct page_pool *pool, + struct page *page) +{ +#ifdef CONFIG_PAGE_POOL + page_pool_unmap_page(pool, page); +#endif +} + static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { return page->dma_addr; diff --git a/include/net/xdp.h b/include/net/xdp.h index 8e0deddef35c..40c6d3398458 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -129,6 +129,21 @@ void xdp_return_frame(struct xdp_frame *xdpf); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); void xdp_return_buff(struct xdp_buff *xdp); +/* When sending xdp_frame into the network stack, then there is no + * return point callback, which is needed to release e.g. DMA-mapping + * resources with page_pool. Thus, have explicit function to release + * frame resources. 
+ */ +void __xdp_release_frame(void *data, struct xdp_mem_info *mem); +static inline void xdp_release_frame(struct xdp_frame *xdpf) +{ + struct xdp_mem_info *mem = &xdpf->mem; + + /* Curr only page_pool needs this */ + if (mem->type == MEM_TYPE_PAGE_POOL) + __xdp_release_frame(xdpf->data, mem); +} + int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index); void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8ee5532cf6a6..8dff08768087 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -208,6 +208,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, * - RX ring dev queue index (skb_record_rx_queue) */ + /* Until page_pool get SKB return path, release DMA here */ + xdp_release_frame(xdpf); + /* Allow SKB to reuse area used by xdp_frame */ xdp_scrub_frame(xdpf); diff --git a/net/core/xdp.c b/net/core/xdp.c index 1d5f2292962c..0fcc32340c4e 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -381,6 +381,21 @@ void xdp_return_buff(struct xdp_buff *xdp) } EXPORT_SYMBOL_GPL(xdp_return_buff); +/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */ +void __xdp_release_frame(void *data, struct xdp_mem_info *mem) +{ + struct xdp_mem_allocator *xa; + struct page *page; + + rcu_read_lock(); + xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); + page = virt_to_head_page(data); + if (xa) + page_pool_release_page(xa->page_pool, page); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(__xdp_release_frame); + int xdp_attachment_query(struct xdp_attachment_info *info, struct netdev_bpf *bpf) { -- cgit v1.2.3-59-g8ed1b From e54cfd7e1745e52eb6c67ee9c77aefb8e4666a88 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:05:37 +0200 Subject: page_pool: introduce page_pool_free and use in mlx5 In case driver fails to register the page_pool with XDP return API (via xdp_rxq_info_reg_mem_model()), then the driver can free the page_pool resources more directly than calling page_pool_destroy(), which does a unnecessarily RCU free procedure. This patch is preparing for removing page_pool_destroy(), from driver invocation. Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 +++--- include/net/page_pool.h | 11 +++++++++++ net/core/page_pool.c | 15 +++++++++++---- 3 files changed, 25 insertions(+), 7 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a8e8350b38aa..46323709ad47 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -545,8 +545,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, } err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); - if (err) + if (err) { + page_pool_free(rq->page_pool); goto err_free; + } for (i = 0; i < wq_sz; i++) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { @@ -611,8 +613,6 @@ err_rq_wq_destroy: if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); - if (rq->page_pool) - page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); return err; diff --git a/include/net/page_pool.h b/include/net/page_pool.h index e240fac4c5b9..754d980700df 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -111,6 +111,17 @@ struct page_pool *page_pool_create(const struct page_pool_params *params); void page_pool_destroy(struct page_pool *pool); +void __page_pool_free(struct page_pool *pool); +static inline void page_pool_free(struct page_pool *pool) +{ + /* When page_pool isn't compiled-in, net/core/xdp.c doesn't + * allow registering MEM_TYPE_PAGE_POOL, but shield linker. + */ +#ifdef CONFIG_PAGE_POOL + __page_pool_free(pool); +#endif +} + /* Never call this directly, use helpers below */ void __page_pool_put_page(struct page_pool *pool, struct page *page, bool allow_direct); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 205af7bd6d09..41391b5dc14c 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -292,17 +292,24 @@ static void __page_pool_empty_ring(struct page_pool *pool) } } +void __page_pool_free(struct page_pool *pool) +{ + WARN(pool->alloc.count, "API usage violation"); + WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty"); + + ptr_ring_cleanup(&pool->ring, NULL); + kfree(pool); +} +EXPORT_SYMBOL(__page_pool_free); + static void __page_pool_destroy_rcu(struct rcu_head *rcu) { struct page_pool *pool; pool = container_of(rcu, struct page_pool, rcu); - WARN(pool->alloc.count, "API usage violation"); - __page_pool_empty_ring(pool); - ptr_ring_cleanup(&pool->ring, NULL); - kfree(pool); + __page_pool_free(pool); } /* Cleanup and release resources */ -- cgit v1.2.3-59-g8ed1b From 99c07c43c4ea0bc101331401a0fabfc51933c6a3 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:05:47 +0200 Subject: xdp: tracking page_pool resources and safe removal This patch is needed before we can allow drivers to use page_pool for DMA-mappings. Today with page_pool and XDP return API, it is possible to remove the page_pool object (from rhashtable), while there are still in-flight packet-pages. This is safely handled via RCU, and failed lookups in __xdp_return() fall back to calling put_page() when the page_pool object is gone. In case the page is still DMA-mapped, this will result in the page not getting correctly DMA-unmapped. To solve this, the page_pool is extended to track in-flight pages, and the XDP disconnect system queries the page_pool and waits, via a workqueue, for all in-flight pages to be returned.
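The counter scheme described next can be demonstrated in isolation. A standalone userspace sketch of the wrap-safe comparison (mirroring the _distance() and page_pool_inflight() helpers added in the diff below):

  #include <stdint.h>
  #include <stdio.h>

  /* Signed two's complement subtraction of two free-running u32
   * counters; valid while the real distance stays below 2^31
   * (serial number arithmetic, RFC 1982).
   */
  static int32_t distance(uint32_t hold, uint32_t release)
  {
          return (int32_t)(hold - release);
  }

  int main(void)
  {
          uint32_t hold = UINT32_MAX - 1;    /* pages handed out */
          uint32_t release = UINT32_MAX - 3; /* pages returned */

          printf("inflight = %d\n", distance(hold, release)); /* 2 */

          hold += 4; /* wraps past zero; accounting still works */
          printf("inflight = %d\n", distance(hold, release)); /* 6 */
          return 0;
  }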
To avoid killing performance when tracking in-flight pages, the implementation uses two (unsigned) counters, placed on different cache-lines, whose difference can be used to deduce the number of in-flight packets. This is done by mapping the unsigned "sequence" counters onto signed two's complement arithmetic operations. This is e.g. used by the kernel's time_after macros, described in kernel commits 1ba3aab3033b and 5a581b367b5, and also explained in RFC 1982. The trick is that these two incrementing counters only need to be read and compared when checking whether it is safe to free the page_pool structure, which will only happen once the driver has disconnected the RX/alloc side; thus, on a non-fast-path. It is chosen that page_pool tracking is also enabled for the non-DMA use-case, as this can be used for statistics later. After this patch, using page_pool requires a more strict resource "release", e.g. via page_pool_release_page(), which was introduced in this patchset; previous patches implement/fix this more strict requirement. Drivers no longer call page_pool_destroy(). Drivers already call xdp_rxq_info_unreg(), which calls xdp_rxq_info_unreg_mem_model(); this will attempt to disconnect the mem id and, if the attempt fails, schedule the disconnect for later via a delayed workqueue. Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Ilias Apalodimas Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 -- include/net/page_pool.h | 41 ++++++++++---- net/core/page_pool.c | 62 +++++++++++++++------ net/core/xdp.c | 65 ++++++++++++++++++++--- 4 files changed, 136 insertions(+), 35 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 46b6a47bd1e3..5e40db8f92e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -643,9 +643,6 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) } xdp_rxq_info_unreg(&rq->xdp_rxq); - if (rq->page_pool) - page_pool_destroy(rq->page_pool); - mlx5_wq_destroy(&rq->wq_ctrl); } diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 754d980700df..f09b3f1994e6 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -16,14 +16,16 @@ * page_pool_alloc_pages() call. Drivers should likely use * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). * - * If page_pool handles DMA mapping (use page->private), then API user - * is responsible for invoking page_pool_put_page() once. In-case of - * elevated refcnt, the DMA state is released, assuming other users of - * the page will eventually call put_page(). + * API keeps track of in-flight pages, in-order to let API user know + * when it is safe to dealloactor page_pool object. Thus, API users + * must make sure to call page_pool_release_page() when a page is + * "leaving" the page_pool. Or call page_pool_put_page() where + * appropiate. For maintaining correct accounting. * - * If no DMA mapping is done, then it can act as shim-layer that - * fall-through to alloc_page. As no state is kept on the page, the - * regular put_page() call is sufficient. + * API user must only call page_pool_put_page() once on a page, as it + * will either recycle the page, or in case of elevated refcnt, it + * will release the DMA mapping and in-flight state accounting. We + * hope to lift this requirement in the future.
*/ #ifndef _NET_PAGE_POOL_H #define _NET_PAGE_POOL_H @@ -66,9 +68,10 @@ struct page_pool_params { }; struct page_pool { - struct rcu_head rcu; struct page_pool_params p; + u32 pages_state_hold_cnt; + /* * Data structure for allocation side * @@ -96,6 +99,8 @@ struct page_pool { * TODO: Implement bulk return pages into this structure. */ struct ptr_ring ring; + + atomic_t pages_state_release_cnt; }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); @@ -109,8 +114,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) struct page_pool *page_pool_create(const struct page_pool_params *params); -void page_pool_destroy(struct page_pool *pool); - void __page_pool_free(struct page_pool *pool); static inline void page_pool_free(struct page_pool *pool) { @@ -143,6 +146,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, __page_pool_put_page(pool, page, true); } +/* API user MUST have disconnected alloc-side (not allowed to call + * page_pool_alloc_pages()) before calling this. The free-side can + * still run concurrently, to handle in-flight packet-pages. + * + * A request to shutdown can fail (with false) if there are still + * in-flight packet-pages. + */ +bool __page_pool_request_shutdown(struct page_pool *pool); +static inline bool page_pool_request_shutdown(struct page_pool *pool) +{ + /* When page_pool isn't compiled-in, net/core/xdp.c doesn't + * allow registering MEM_TYPE_PAGE_POOL, but shield linker. + */ +#ifdef CONFIG_PAGE_POOL + return __page_pool_request_shutdown(pool); +#endif +} + /* Disconnects a page (from a page_pool). API users can have a need * to disconnect a page (from a page_pool), to allow it to be used as * a regular page (that will eventually be returned to the normal diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 41391b5dc14c..8679e24fd665 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -43,6 +43,8 @@ static int page_pool_init(struct page_pool *pool, if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) return -ENOMEM; + atomic_set(&pool->pages_state_release_cnt, 0); + return 0; } @@ -151,6 +153,9 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, page->dma_addr = dma; skip_dma_map: + /* Track how many pages are held 'in-flight' */ + pool->pages_state_hold_cnt++; + /* When page just alloc'ed is should/must have refcnt 1. 
*/ return page; } @@ -173,6 +178,33 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) } EXPORT_SYMBOL(page_pool_alloc_pages); +/* Calculate distance between two u32 values, valid if distance is below 2^(31) + * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution + */ +#define _distance(a, b) (s32)((a) - (b)) + +static s32 page_pool_inflight(struct page_pool *pool) +{ + u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); + u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); + s32 distance; + + distance = _distance(hold_cnt, release_cnt); + + /* TODO: Add tracepoint here */ + return distance; +} + +static bool __page_pool_safe_to_destroy(struct page_pool *pool) +{ + s32 inflight = page_pool_inflight(pool); + + /* The distance should not be able to become negative */ + WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight); + + return (inflight == 0); +} + /* Cleanup page_pool state from page */ static void __page_pool_clean_page(struct page_pool *pool, struct page *page) @@ -180,7 +212,7 @@ static void __page_pool_clean_page(struct page_pool *pool, dma_addr_t dma; if (!(pool->p.flags & PP_FLAG_DMA_MAP)) - return; + goto skip_dma_unmap; dma = page->dma_addr; /* DMA unmap */ @@ -188,11 +220,16 @@ static void __page_pool_clean_page(struct page_pool *pool, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC); page->dma_addr = 0; +skip_dma_unmap: + atomic_inc(&pool->pages_state_release_cnt); } /* unmap the page and clean our state */ void page_pool_unmap_page(struct page_pool *pool, struct page *page) { + /* When page is unmapped, this implies page will not be + * returned to page_pool. + */ __page_pool_clean_page(pool, page); } EXPORT_SYMBOL(page_pool_unmap_page); @@ -201,6 +238,7 @@ EXPORT_SYMBOL(page_pool_unmap_page); static void __page_pool_return_page(struct page_pool *pool, struct page *page) { __page_pool_clean_page(pool, page); + put_page(page); /* An optimization would be to call __free_pages(page, pool->p.order) * knowing page is not part of page-cache (thus avoiding a @@ -296,24 +334,17 @@ void __page_pool_free(struct page_pool *pool) { WARN(pool->alloc.count, "API usage violation"); WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty"); + WARN(!__page_pool_safe_to_destroy(pool), "still in-flight pages"); ptr_ring_cleanup(&pool->ring, NULL); kfree(pool); } EXPORT_SYMBOL(__page_pool_free); -static void __page_pool_destroy_rcu(struct rcu_head *rcu) -{ - struct page_pool *pool; - - pool = container_of(rcu, struct page_pool, rcu); - - __page_pool_empty_ring(pool); - __page_pool_free(pool); -} - -/* Cleanup and release resources */ -void page_pool_destroy(struct page_pool *pool) +/* Request to shutdown: release pages cached by page_pool, and check + * for in-flight pages + */ +bool __page_pool_request_shutdown(struct page_pool *pool) { struct page *page; @@ -331,7 +362,6 @@ void page_pool_destroy(struct page_pool *pool) */ __page_pool_empty_ring(pool); - /* An xdp_mem_allocator can still ref page_pool pointer */ - call_rcu(&pool->rcu, __page_pool_destroy_rcu); + return __page_pool_safe_to_destroy(pool); } -EXPORT_SYMBOL(page_pool_destroy); +EXPORT_SYMBOL(__page_pool_request_shutdown); diff --git a/net/core/xdp.c b/net/core/xdp.c index 0fcc32340c4e..aae665ccee3f 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -38,6 +38,7 @@ struct xdp_mem_allocator { }; struct rhash_head node; struct rcu_head rcu; + struct delayed_work defer_wq; }; static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) @@ 
-79,13 +80,13 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) xa = container_of(rcu, struct xdp_mem_allocator, rcu); + /* Allocator has indicated it is safe to remove before this is called */ + if (xa->mem.type == MEM_TYPE_PAGE_POOL) + page_pool_free(xa->page_pool); + /* Allow this ID to be reused */ ida_simple_remove(&mem_id_pool, xa->mem.id); - /* Notice, driver is expected to free the *allocator, - * e.g. page_pool, and MUST also use RCU free. - */ - /* Poison memory */ xa->mem.id = 0xFFFF; xa->mem.type = 0xF0F0; @@ -94,6 +95,46 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) kfree(xa); } +bool __mem_id_disconnect(int id) +{ + struct xdp_mem_allocator *xa; + bool safe_to_remove = true; + + mutex_lock(&mem_id_lock); + + xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); + if (!xa) { + mutex_unlock(&mem_id_lock); + WARN(1, "Request remove non-existing id(%d), driver bug?", id); + return true; + } + + /* Detects in-flight packet-pages for page_pool */ + if (xa->mem.type == MEM_TYPE_PAGE_POOL) + safe_to_remove = page_pool_request_shutdown(xa->page_pool); + + if (safe_to_remove && + !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) + call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); + + mutex_unlock(&mem_id_lock); + return safe_to_remove; +} + +#define DEFER_TIME (msecs_to_jiffies(1000)) + +static void mem_id_disconnect_defer_retry(struct work_struct *wq) +{ + struct delayed_work *dwq = to_delayed_work(wq); + struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq); + + if (__mem_id_disconnect(xa->mem.id)) + return; + + /* Still not ready to be disconnected, retry later */ + schedule_delayed_work(&xa->defer_wq, DEFER_TIME); +} + void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) { struct xdp_mem_allocator *xa; @@ -112,16 +153,28 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) if (id == 0) return; + if (__mem_id_disconnect(id)) + return; + + /* Could not disconnect, defer new disconnect attempt to later */ mutex_lock(&mem_id_lock); xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); - if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) - call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); + if (!xa) { + mutex_unlock(&mem_id_lock); + return; + } + INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry); mutex_unlock(&mem_id_lock); + schedule_delayed_work(&xa->defer_wq, DEFER_TIME); } EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model); +/* This unregister operation will also clean up and destroy the + * allocator. The page_pool_free() operation is first called when it's + * safe to remove, possibly deferred to a workqueue. + */ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) { /* Simplify driver cleanup code paths, allow unreg "unused" */ -- cgit v1.2.3-59-g8ed1b From d956a048cd3fc1ba154101a1a50fb37950081ff6 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:05:53 +0200 Subject: xdp: force mem allocator removal and periodic warning If bugs exist or are introduced later, e.g. by drivers misusing the API, then we want to warn about the issue, such that developers notice. This patch will generate a bit of noise in the form of a periodic pr_warn every 30 seconds. It is not nice to have this stall warning running forever. Thus, this patch will (after 120 attempts) force disconnect the mem id (from the rhashtable) and free the page_pool object.
This will cause fallback to the put_page() as before, which only potentially leak DMA-mappings, if objects are really stuck for this long. In that unlikely case, a WARN_ONCE should show us the call stack. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- net/core/page_pool.c | 18 +++++++++++++++++- net/core/xdp.c | 37 +++++++++++++++++++++++++++++++------ 2 files changed, 48 insertions(+), 7 deletions(-) (limited to 'net/core') diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 8679e24fd665..42c3b0a5a259 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -330,11 +330,27 @@ static void __page_pool_empty_ring(struct page_pool *pool) } } +static void __warn_in_flight(struct page_pool *pool) +{ + u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); + u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); + s32 distance; + + distance = _distance(hold_cnt, release_cnt); + + /* Drivers should fix this, but only problematic when DMA is used */ + WARN(1, "Still in-flight pages:%d hold:%u released:%u", + distance, hold_cnt, release_cnt); +} + void __page_pool_free(struct page_pool *pool) { WARN(pool->alloc.count, "API usage violation"); WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty"); - WARN(!__page_pool_safe_to_destroy(pool), "still in-flight pages"); + + /* Can happen due to forced shutdown */ + if (!__page_pool_safe_to_destroy(pool)) + __warn_in_flight(pool); ptr_ring_cleanup(&pool->ring, NULL); kfree(pool); diff --git a/net/core/xdp.c b/net/core/xdp.c index aae665ccee3f..622c81dc7ba8 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -39,6 +39,9 @@ struct xdp_mem_allocator { struct rhash_head node; struct rcu_head rcu; struct delayed_work defer_wq; + unsigned long defer_start; + unsigned long defer_warn; + int disconnect_cnt; }; static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) @@ -95,7 +98,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) kfree(xa); } -bool __mem_id_disconnect(int id) +bool __mem_id_disconnect(int id, bool force) { struct xdp_mem_allocator *xa; bool safe_to_remove = true; @@ -108,29 +111,47 @@ bool __mem_id_disconnect(int id) WARN(1, "Request remove non-existing id(%d), driver bug?", id); return true; } + xa->disconnect_cnt++; /* Detects in-flight packet-pages for page_pool */ if (xa->mem.type == MEM_TYPE_PAGE_POOL) safe_to_remove = page_pool_request_shutdown(xa->page_pool); - if (safe_to_remove && + /* TODO: Tracepoint will be added here in next-patch */ + + if ((safe_to_remove || force) && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); mutex_unlock(&mem_id_lock); - return safe_to_remove; + return (safe_to_remove|force); } #define DEFER_TIME (msecs_to_jiffies(1000)) +#define DEFER_WARN_INTERVAL (30 * HZ) +#define DEFER_MAX_RETRIES 120 static void mem_id_disconnect_defer_retry(struct work_struct *wq) { struct delayed_work *dwq = to_delayed_work(wq); struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq); + bool force = false; + + if (xa->disconnect_cnt > DEFER_MAX_RETRIES) + force = true; - if (__mem_id_disconnect(xa->mem.id)) + if (__mem_id_disconnect(xa->mem.id, force)) return; + /* Periodic warning */ + if (time_after_eq(jiffies, xa->defer_warn)) { + int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ; + + pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n", + __func__, xa->mem.id, xa->disconnect_cnt, sec); + xa->defer_warn = jiffies + DEFER_WARN_INTERVAL; + } + /* Still 
not ready to be disconnected, retry later */ schedule_delayed_work(&xa->defer_wq, DEFER_TIME); } @@ -153,7 +174,7 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) if (id == 0) return; - if (__mem_id_disconnect(id)) + if (__mem_id_disconnect(id, false)) return; /* Could not disconnect, defer new disconnect attempt to later */ @@ -164,6 +185,8 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) mutex_unlock(&mem_id_lock); return; } + xa->defer_start = jiffies; + xa->defer_warn = jiffies + DEFER_WARN_INTERVAL; INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry); mutex_unlock(&mem_id_lock); @@ -388,10 +411,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); page = virt_to_head_page(data); - if (xa) { + if (likely(xa)) { napi_direct &= !xdp_return_frame_no_direct(); page_pool_put_page(xa->page_pool, page, napi_direct); } else { + /* Hopefully stack show who to blame for late return */ + WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id); put_page(page); } rcu_read_unlock(); -- cgit v1.2.3-59-g8ed1b From f033b688c1ede5ec78c9a718fa9f0b374049bc31 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:05:58 +0200 Subject: xdp: add tracepoints for XDP mem These tracepoints make it easier to troubleshoot XDP mem id disconnect. The xdp:mem_disconnect tracepoint cannot be replaced via kprobe. It is placed at the last stable place for the pointer to struct xdp_mem_allocator, just before it's scheduled for RCU removal. It also extract info on 'safe_to_remove' and 'force'. Detailed info about in-flight pages is not available at this layer. The next patch will added tracepoints needed at the page_pool layer for this. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- include/net/xdp_priv.h | 23 +++++++++ include/trace/events/xdp.h | 115 +++++++++++++++++++++++++++++++++++++++++++++ net/core/xdp.c | 21 ++------- 3 files changed, 143 insertions(+), 16 deletions(-) create mode 100644 include/net/xdp_priv.h (limited to 'net/core') diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h new file mode 100644 index 000000000000..6a8cba6ea79a --- /dev/null +++ b/include/net/xdp_priv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NET_XDP_PRIV_H__ +#define __LINUX_NET_XDP_PRIV_H__ + +#include + +/* Private to net/core/xdp.c, but used by trace/events/xdp.h */ +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + struct zero_copy_allocator *zc_alloc; + }; + int disconnect_cnt; + unsigned long defer_start; + struct rhash_head node; + struct rcu_head rcu; + struct delayed_work defer_wq; + unsigned long defer_warn; +}; + +#endif /* __LINUX_NET_XDP_PRIV_H__ */ diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index e95cb86b65cf..bb5e380e2ef3 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -269,6 +269,121 @@ TRACE_EVENT(xdp_devmap_xmit, __entry->from_ifindex, __entry->to_ifindex, __entry->err) ); +/* Expect users already include , but not xdp_priv.h */ +#include + +#define __MEM_TYPE_MAP(FN) \ + FN(PAGE_SHARED) \ + FN(PAGE_ORDER0) \ + FN(PAGE_POOL) \ + FN(ZERO_COPY) + +#define __MEM_TYPE_TP_FN(x) \ + TRACE_DEFINE_ENUM(MEM_TYPE_##x); +#define __MEM_TYPE_SYM_FN(x) \ + { MEM_TYPE_##x, #x }, +#define __MEM_TYPE_SYM_TAB \ + __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 } +__MEM_TYPE_MAP(__MEM_TYPE_TP_FN) + +TRACE_EVENT(mem_disconnect, + + TP_PROTO(const struct xdp_mem_allocator *xa, + bool safe_to_remove, bool force), + + TP_ARGS(xa, safe_to_remove, force), + + TP_STRUCT__entry( + __field(const struct xdp_mem_allocator *, xa) + __field(u32, mem_id) + __field(u32, mem_type) + __field(const void *, allocator) + __field(bool, safe_to_remove) + __field(bool, force) + __field(int, disconnect_cnt) + ), + + TP_fast_assign( + __entry->xa = xa; + __entry->mem_id = xa->mem.id; + __entry->mem_type = xa->mem.type; + __entry->allocator = xa->allocator; + __entry->safe_to_remove = safe_to_remove; + __entry->force = force; + __entry->disconnect_cnt = xa->disconnect_cnt; + ), + + TP_printk("mem_id=%d mem_type=%s allocator=%p" + " safe_to_remove=%s force=%s disconnect_cnt=%d", + __entry->mem_id, + __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), + __entry->allocator, + __entry->safe_to_remove ? "true" : "false", + __entry->force ? 
"true" : "false", + __entry->disconnect_cnt + ) +); + +TRACE_EVENT(mem_connect, + + TP_PROTO(const struct xdp_mem_allocator *xa, + const struct xdp_rxq_info *rxq), + + TP_ARGS(xa, rxq), + + TP_STRUCT__entry( + __field(const struct xdp_mem_allocator *, xa) + __field(u32, mem_id) + __field(u32, mem_type) + __field(const void *, allocator) + __field(const struct xdp_rxq_info *, rxq) + __field(int, ifindex) + ), + + TP_fast_assign( + __entry->xa = xa; + __entry->mem_id = xa->mem.id; + __entry->mem_type = xa->mem.type; + __entry->allocator = xa->allocator; + __entry->rxq = rxq; + __entry->ifindex = rxq->dev->ifindex; + ), + + TP_printk("mem_id=%d mem_type=%s allocator=%p" + " ifindex=%d", + __entry->mem_id, + __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), + __entry->allocator, + __entry->ifindex + ) +); + +TRACE_EVENT(mem_return_failed, + + TP_PROTO(const struct xdp_mem_info *mem, + const struct page *page), + + TP_ARGS(mem, page), + + TP_STRUCT__entry( + __field(const struct page *, page) + __field(u32, mem_id) + __field(u32, mem_type) + ), + + TP_fast_assign( + __entry->page = page; + __entry->mem_id = mem->id; + __entry->mem_type = mem->type; + ), + + TP_printk("mem_id=%d mem_type=%s page=%p", + __entry->mem_id, + __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), + __entry->page + ) +); + #endif /* _TRACE_XDP_H */ #include diff --git a/net/core/xdp.c b/net/core/xdp.c index 622c81dc7ba8..b29d7b513a18 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -14,6 +14,8 @@ #include #include +#include /* struct xdp_mem_allocator */ +#include #define REG_STATE_NEW 0x0 #define REG_STATE_REGISTERED 0x1 @@ -29,21 +31,6 @@ static int mem_id_next = MEM_ID_MIN; static bool mem_id_init; /* false */ static struct rhashtable *mem_id_ht; -struct xdp_mem_allocator { - struct xdp_mem_info mem; - union { - void *allocator; - struct page_pool *page_pool; - struct zero_copy_allocator *zc_alloc; - }; - struct rhash_head node; - struct rcu_head rcu; - struct delayed_work defer_wq; - unsigned long defer_start; - unsigned long defer_warn; - int disconnect_cnt; -}; - static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) { const u32 *k = data; @@ -117,7 +104,7 @@ bool __mem_id_disconnect(int id, bool force) if (xa->mem.type == MEM_TYPE_PAGE_POOL) safe_to_remove = page_pool_request_shutdown(xa->page_pool); - /* TODO: Tracepoint will be added here in next-patch */ + trace_mem_disconnect(xa, safe_to_remove, force); if ((safe_to_remove || force) && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) @@ -385,6 +372,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, mutex_unlock(&mem_id_lock); + trace_mem_connect(xdp_alloc, xdp_rxq); return 0; err: mutex_unlock(&mem_id_lock); @@ -417,6 +405,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, } else { /* Hopefully stack show who to blame for late return */ WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id); + trace_mem_return_failed(mem, page); put_page(page); } rcu_read_unlock(); -- cgit v1.2.3-59-g8ed1b From 32c28f7e413981c7dd4a3ad9bbb1151e4b654261 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:06:03 +0200 Subject: page_pool: add tracepoints for page_pool with details need by XDP The xdp tracepoints for mem id disconnect don't carry information about, why it was not safe_to_remove. The tracepoint page_pool:page_pool_inflight in this patch can be used for extract this info for further debugging. 
This patchset also adds tracepoint for the pages_state_* release/hold transitions, including a pointer to the page. This can be used for stats about in-flight pages, or used to debug page leakage via keeping track of page pointer and combining this with kprobe for __put_page(). Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- include/trace/events/page_pool.h | 87 ++++++++++++++++++++++++++++++++++++++++ net/core/net-traces.c | 4 ++ net/core/page_pool.c | 9 ++++- 3 files changed, 99 insertions(+), 1 deletion(-) create mode 100644 include/trace/events/page_pool.h (limited to 'net/core') diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h new file mode 100644 index 000000000000..47b5ee880aa9 --- /dev/null +++ b/include/trace/events/page_pool.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM page_pool + +#if !defined(_TRACE_PAGE_POOL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGE_POOL_H + +#include +#include + +#include + +TRACE_EVENT(page_pool_inflight, + + TP_PROTO(const struct page_pool *pool, + s32 inflight, u32 hold, u32 release), + + TP_ARGS(pool, inflight, hold, release), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(s32, inflight) + __field(u32, hold) + __field(u32, release) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->inflight = inflight; + __entry->hold = hold; + __entry->release = release; + ), + + TP_printk("page_pool=%p inflight=%d hold=%u release=%u", + __entry->pool, __entry->inflight, __entry->hold, __entry->release) +); + +TRACE_EVENT(page_pool_state_release, + + TP_PROTO(const struct page_pool *pool, + const struct page *page, u32 release), + + TP_ARGS(pool, page, release), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(const struct page *, page) + __field(u32, release) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->page = page; + __entry->release = release; + ), + + TP_printk("page_pool=%p page=%p release=%u", + __entry->pool, __entry->page, __entry->release) +); + +TRACE_EVENT(page_pool_state_hold, + + TP_PROTO(const struct page_pool *pool, + const struct page *page, u32 hold), + + TP_ARGS(pool, page, hold), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(const struct page *, page) + __field(u32, hold) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->page = page; + __entry->hold = hold; + ), + + TP_printk("page_pool=%p page=%p hold=%u", + __entry->pool, __entry->page, __entry->hold) +); + +#endif /* _TRACE_PAGE_POOL_H */ + +/* This part must be outside protection */ +#include diff --git a/net/core/net-traces.c b/net/core/net-traces.c index 470b179d599e..283ddb2dbc7d 100644 --- a/net/core/net-traces.c +++ b/net/core/net-traces.c @@ -43,6 +43,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete); EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update); #endif +#if IS_ENABLED(CONFIG_PAGE_POOL) +#include +#endif + #include EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update); EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update_done); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 42c3b0a5a259..f55ab055d543 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -4,6 +4,7 @@ * Author: Jesper Dangaard Brouer * Copyright (C) 2016 Red Hat, Inc. 
*/ + #include #include #include @@ -14,6 +15,8 @@ #include #include /* for __put_page() */ +#include + static int page_pool_init(struct page_pool *pool, const struct page_pool_params *params) { @@ -156,6 +159,8 @@ skip_dma_map: /* Track how many pages are held 'in-flight' */ pool->pages_state_hold_cnt++; + trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); + /* When page just alloc'ed is should/must have refcnt 1. */ return page; } @@ -191,7 +196,7 @@ static s32 page_pool_inflight(struct page_pool *pool) distance = _distance(hold_cnt, release_cnt); - /* TODO: Add tracepoint here */ + trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt); return distance; } @@ -222,6 +227,8 @@ static void __page_pool_clean_page(struct page_pool *pool, page->dma_addr = 0; skip_dma_unmap: atomic_inc(&pool->pages_state_release_cnt); + trace_page_pool_state_release(pool, page, + atomic_read(&pool->pages_state_release_cnt)); } /* unmap the page and clean our state */ -- cgit v1.2.3-59-g8ed1b From f71fec47c2df704c7081f946d7e46fe036a4208b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 18 Jun 2019 15:06:08 +0200 Subject: page_pool: make sure struct device is stable For DMA mapping use-case the page_pool keeps a pointer to the struct device, which is used in DMA map/unmap calls. For our in-flight handling, we also need to make sure that the struct device have not disappeared. This is assured via using get_device/put_device API. Signed-off-by: Jesper Dangaard Brouer Reported-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- net/core/page_pool.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'net/core') diff --git a/net/core/page_pool.c b/net/core/page_pool.c index f55ab055d543..b366f59885c1 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -48,6 +49,9 @@ static int page_pool_init(struct page_pool *pool, atomic_set(&pool->pages_state_release_cnt, 0); + if (pool->p.flags & PP_FLAG_DMA_MAP) + get_device(pool->p.dev); + return 0; } @@ -360,6 +364,10 @@ void __page_pool_free(struct page_pool *pool) __warn_in_flight(pool); ptr_ring_cleanup(&pool->ring, NULL); + + if (pool->p.flags & PP_FLAG_DMA_MAP) + put_device(pool->p.dev); + kfree(pool); } EXPORT_SYMBOL(__page_pool_free); -- cgit v1.2.3-59-g8ed1b From d7d99872c144a2c2f5d9c9d83627fa833836cba5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 18 Jun 2019 11:08:59 -0700 Subject: netns: add pre_exit method to struct pernet_operations Current struct pernet_operations exit() handlers are highly discouraged to call synchronize_rcu(). There are cases where we need them, and exit_batch() does not help the common case where a single netns is dismantled. This patch leverages the existing synchronize_rcu() call in cleanup_net() Calling optional ->pre_exit() method before ->exit() or ->exit_batch() allows to benefit from a single synchronize_rcu() call. Note that the synchronize_rcu() calls added in this patch are only in error paths or slow paths. Tested: $ time for i in {1..1000}; do unshare -n /bin/false;done real 0m2.612s user 0m0.171s sys 0m2.216s Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/net_namespace.h | 5 +++++ net/core/net_namespace.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) (limited to 'net/core') diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index abb4f92456e1..ad9243afac67 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -355,8 +355,13 @@ struct pernet_operations { * synchronize_rcu() related to these pernet_operations, * instead of separate synchronize_rcu() for every net. * Please, avoid synchronize_rcu() at all, where it's possible. + * + * Note that a combination of pre_exit() and exit() can + * be used, since a synchronize_rcu() is guaranteed between + * the calls. */ int (*init)(struct net *net); + void (*pre_exit)(struct net *net); void (*exit)(struct net *net); void (*exit_batch)(struct list_head *net_exit_list); unsigned int *id; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 15f68842ac6b..89dc99a28978 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -145,6 +145,17 @@ static void ops_free(const struct pernet_operations *ops, struct net *net) } } +static void ops_pre_exit_list(const struct pernet_operations *ops, + struct list_head *net_exit_list) +{ + struct net *net; + + if (ops->pre_exit) { + list_for_each_entry(net, net_exit_list, exit_list) + ops->pre_exit(net); + } +} + static void ops_exit_list(const struct pernet_operations *ops, struct list_head *net_exit_list) { @@ -328,6 +339,12 @@ out_undo: * for the pernet modules whose init functions did not fail. */ list_add(&net->exit_list, &net_exit_list); + saved_ops = ops; + list_for_each_entry_continue_reverse(ops, &pernet_list, list) + ops_pre_exit_list(ops, &net_exit_list); + + synchronize_rcu(); + saved_ops = ops; list_for_each_entry_continue_reverse(ops, &pernet_list, list) ops_exit_list(ops, &net_exit_list); @@ -541,10 +558,15 @@ static void cleanup_net(struct work_struct *work) list_add_tail(&net->exit_list, &net_exit_list); } + /* Run all of the network namespace pre_exit methods */ + list_for_each_entry_reverse(ops, &pernet_list, list) + ops_pre_exit_list(ops, &net_exit_list); + /* * Another CPU might be rcu-iterating the list, wait for it. * This needs to be before calling the exit() notifiers, so * the rcu_barrier() below isn't sufficient alone. + * Also the pre_exit() and exit() methods need this barrier. 
*/ synchronize_rcu(); @@ -1101,6 +1123,8 @@ static int __register_pernet_operations(struct list_head *list, out_undo: /* If I have an error cleanup all namespaces I initialized */ list_del(&ops->list); + ops_pre_exit_list(ops, &net_exit_list); + synchronize_rcu(); ops_exit_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list); return error; @@ -1115,6 +1139,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) /* See comment in __register_pernet_operations() */ for_each_net(net) list_add_tail(&net->exit_list, &net_exit_list); + ops_pre_exit_list(ops, &net_exit_list); + synchronize_rcu(); ops_exit_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list); } @@ -1139,6 +1165,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) } else { LIST_HEAD(net_exit_list); list_add(&init_net.exit_list, &net_exit_list); + ops_pre_exit_list(ops, &net_exit_list); + synchronize_rcu(); ops_exit_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list); } -- cgit v1.2.3-59-g8ed1b From b272a0ad730103e84fb735fd0a8cc050cdf7f77c Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 20 Jun 2019 19:24:40 +0800 Subject: netns: restore ops before calling ops_exit_list ops has been iterated to first element when call pre_exit, and it needs to restore from save_ops, not save ops to save_ops Fixes: d7d99872c144 ("netns: add pre_exit method to struct pernet_operations") Signed-off-by: Li RongQing Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/net_namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 89dc99a28978..198ce503ae73 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -345,7 +345,7 @@ out_undo: synchronize_rcu(); - saved_ops = ops; + ops = saved_ops; list_for_each_entry_continue_reverse(ops, &pernet_list, list) ops_exit_list(ops, &net_exit_list); -- cgit v1.2.3-59-g8ed1b From 572a6928f9e3689ad2c2f94814e6215104eec1b7 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 25 Jun 2019 10:31:37 +0800 Subject: xdp: Make __mem_id_disconnect static Fix sparse warning: net/core/xdp.c:88:6: warning: symbol '__mem_id_disconnect' was not declared. Should it be static? Reported-by: Hulk Robot Signed-off-by: YueHaibing Acked-by: Jesper Dangaard Brouer Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- net/core/xdp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/xdp.c b/net/core/xdp.c index b29d7b513a18..829377cc83db 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -85,7 +85,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) kfree(xa); } -bool __mem_id_disconnect(int id, bool force) +static bool __mem_id_disconnect(int id, bool force) { struct xdp_mem_allocator *xa; bool safe_to_remove = true; -- cgit v1.2.3-59-g8ed1b From c22a133a83ce64949dc35156eaefeda5a1e12bc3 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 21 Jun 2019 16:27:16 -0700 Subject: rtnetlink: skip metrics loop for dst_default_metrics dst_default_metrics has all of the metrics initialized to 0, so nothing will be added to the skb in rtnetlink_put_metrics. Avoid the loop if metrics is from dst_default_metrics. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net/core') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8ac81630ab5c..1ee6460f8275 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -751,6 +751,10 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) struct nlattr *mx; int i, valid = 0; + /* nothing is dumped for dst_default_metrics, so just skip the loop */ + if (metrics == dst_default_metrics.metrics) + return 0; + mx = nla_nest_start_noflag(skb, RTA_METRICS); if (mx == NULL) return -ENOBUFS; -- cgit v1.2.3-59-g8ed1b From 5b9469a285ebc85af29736a6d9fa995aea0dbf0b Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 27 Jun 2019 10:52:26 +0200 Subject: net: ethtool: Allow parsing ETHER_FLOW types when using flow_rule When parsing an ethtool_rx_flow_spec, users can specify an ethernet flow which could contain matches based on the ethernet header, such as the MAC address, the VLAN tag or the ethertype. ETHER_FLOW uses the src and dst ethernet addresses, along with the ethertype as keys. Matches based on the vlan tag are also possible, but they are specified using the special FLOW_EXT flag. Signed-off-by: Maxime Chevallier Acked-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- net/core/ethtool.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'net/core') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 4d1011b2e24f..6288e69e94fc 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -2883,6 +2883,30 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) match->mask.basic.n_proto = htons(0xffff); switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { + case ETHER_FLOW: { + const struct ethhdr *ether_spec, *ether_m_spec; + + ether_spec = &fs->h_u.ether_spec; + ether_m_spec = &fs->m_u.ether_spec; + + if (!is_zero_ether_addr(ether_m_spec->h_source)) { + ether_addr_copy(match->key.eth_addrs.src, + ether_spec->h_source); + ether_addr_copy(match->mask.eth_addrs.src, + ether_m_spec->h_source); + } + if (!is_zero_ether_addr(ether_m_spec->h_dest)) { + ether_addr_copy(match->key.eth_addrs.dst, + ether_spec->h_dest); + ether_addr_copy(match->mask.eth_addrs.dst, + ether_m_spec->h_dest); + } + if (ether_m_spec->h_proto) { + match->key.basic.n_proto = ether_spec->h_proto; + match->mask.basic.n_proto = ether_m_spec->h_proto; + } + } + break; case TCP_V4_FLOW: case UDP_V4_FLOW: { const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; -- cgit v1.2.3-59-g8ed1b From 0d01da6afc5402f60325c5da31b22f7d56689b49 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 27 Jun 2019 13:38:47 -0700 Subject: bpf: implement getsockopt and setsockopt hooks Implement new BPF_PROG_TYPE_CGROUP_SOCKOPT program type and BPF_CGROUP_{G,S}ETSOCKOPT cgroup hooks. BPF_CGROUP_SETSOCKOPT can modify user setsockopt arguments before passing them down to the kernel or bypass kernel completely. BPF_CGROUP_GETSOCKOPT can can inspect/modify getsockopt arguments that kernel returns. Both hooks reuse existing PTR_TO_PACKET{,_END} infrastructure. The buffer memory is pre-allocated (because I don't think there is a precedent for working with __user memory from bpf). This might be slow to do for each {s,g}etsockopt call, that's why I've added __cgroup_bpf_prog_array_is_empty that exits early if there is nothing attached to a cgroup. 
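By way of illustration (not part of this patch series), a minimal program for the new setsockopt hook might look as follows; the SEC() section name, the bpf_helpers.h header, and building with clang for the BPF target follow the kernel BPF selftests' conventions and are assumptions here:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical minimal BPF_CGROUP_SETSOCKOPT program; a sketch only,
 * not taken from this patch series.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h" /* assumed: SEC() macro, as in selftests/bpf */

SEC("cgroup/setsockopt")
int allow_all_setsockopt(struct bpf_sockopt *ctx)
{
	/* A real program could inspect or rewrite ctx->level, ctx->optname
	 * and the [ctx->optval, ctx->optval_end) buffer here, or set
	 * ctx->optlen = -1 to bypass the kernel handler entirely.
	 */
	return 1; /* continue; returning 0 makes the syscall fail with EPERM */
}

char _license[] SEC("license") = "GPL";

Such a program would be attached with BPF_PROG_ATTACH using the new BPF_CGROUP_SETSOCKOPT attach type; the return-code handling the comment refers to is spelled out below.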
Note, however, that there is a race between __cgroup_bpf_prog_array_is_empty and BPF_PROG_RUN_ARRAY where cgroup program layout might have changed; this should not be a problem because in general there is a race between multiple calls to {s,g}etsocktop and user adding/removing bpf progs from a cgroup. The return code of the BPF program is handled as follows: * 0: EPERM * 1: success, continue with next BPF program in the cgroup chain v9: * allow overwriting setsockopt arguments (Alexei Starovoitov): * use set_fs (same as kernel_setsockopt) * buffer is always kzalloc'd (no small on-stack buffer) v8: * use s32 for optlen (Andrii Nakryiko) v7: * return only 0 or 1 (Alexei Starovoitov) * always run all progs (Alexei Starovoitov) * use optval=0 as kernel bypass in setsockopt (Alexei Starovoitov) (decided to use optval=-1 instead, optval=0 might be a valid input) * call getsockopt hook after kernel handlers (Alexei Starovoitov) v6: * rework cgroup chaining; stop as soon as bpf program returns 0 or 2; see patch with the documentation for the details * drop Andrii's and Martin's Acked-by (not sure they are comfortable with the new state of things) v5: * skip copy_to_user() and put_user() when ret == 0 (Martin Lau) v4: * don't export bpf_sk_fullsock helper (Martin Lau) * size != sizeof(__u64) for uapi pointers (Martin Lau) * offsetof instead of bpf_ctx_range when checking ctx access (Martin Lau) v3: * typos in BPF_PROG_CGROUP_SOCKOPT_RUN_ARRAY comments (Andrii Nakryiko) * reverse christmas tree in BPF_PROG_CGROUP_SOCKOPT_RUN_ARRAY (Andrii Nakryiko) * use __bpf_md_ptr instead of __u32 for optval{,_end} (Martin Lau) * use BPF_FIELD_SIZEOF() for consistency (Martin Lau) * new CG_SOCKOPT_ACCESS macro to wrap repeated parts v2: * moved bpf_sockopt_kern fields around to remove a hole (Martin Lau) * aligned bpf_sockopt_kern->buf to 8 bytes (Martin Lau) * bpf_prog_array_is_empty instead of bpf_prog_array_length (Martin Lau) * added [0,2] return code check to verifier (Martin Lau) * dropped unused buf[64] from the stack (Martin Lau) * use PTR_TO_SOCKET for bpf_sockopt->sk (Martin Lau) * dropped bpf_target_off from ctx rewrites (Martin Lau) * use return code for kernel bypass (Martin Lau & Andrii Nakryiko) Cc: Andrii Nakryiko Cc: Martin Lau Signed-off-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- include/linux/bpf-cgroup.h | 45 ++++++ include/linux/bpf.h | 2 + include/linux/bpf_types.h | 1 + include/linux/filter.h | 10 ++ include/uapi/linux/bpf.h | 14 ++ kernel/bpf/cgroup.c | 333 +++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/core.c | 9 ++ kernel/bpf/syscall.c | 19 +++ kernel/bpf/verifier.c | 8 ++ net/core/filter.c | 2 +- net/socket.c | 30 ++++ 11 files changed, 472 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index bd79ae32909a..169fd25f6bc2 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -124,6 +124,14 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, loff_t *ppos, void **new_buf, enum bpf_attach_type type); +int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, + int *optname, char __user *optval, + int *optlen, char **kernel_optval); +int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + int optname, char __user *optval, + int __user *optlen, int max_optlen, + int retval); + static inline enum bpf_cgroup_storage_type cgroup_storage_type( struct bpf_map *map) { @@ -286,6 +294,38 @@ int bpf_percpu_cgroup_storage_update(struct 
bpf_map *map, void *key, __ret; \ }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ + optname, optval, \ + optlen, \ + kernel_optval); \ + __ret; \ +}) + +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + get_user(__ret, optlen); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ + max_optlen, retval) \ +({ \ + int __ret = retval; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ + optname, optval, \ + optlen, max_optlen, \ + retval); \ + __ret; \ +}) + int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog); int cgroup_bpf_prog_detach(const union bpf_attr *attr, @@ -357,6 +397,11 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ + optlen, max_optlen, retval) ({ retval; }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) ({ 0; }) #define for_each_cgroup_storage_type(stype) for (; false; ) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a62e7889b0b6..18f4cc2c6acd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -518,6 +518,7 @@ struct bpf_prog_array { struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); void bpf_prog_array_free(struct bpf_prog_array *progs); int bpf_prog_array_length(struct bpf_prog_array *progs); +bool bpf_prog_array_is_empty(struct bpf_prog_array *array); int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, __u32 __user *prog_ids, u32 cnt); @@ -1051,6 +1052,7 @@ extern const struct bpf_func_proto bpf_spin_unlock_proto; extern const struct bpf_func_proto bpf_get_local_storage_proto; extern const struct bpf_func_proto bpf_strtol_proto; extern const struct bpf_func_proto bpf_strtoul_proto; +extern const struct bpf_func_proto bpf_tcp_sock_proto; /* Shared helpers among cBPF and eBPF. 
*/ void bpf_user_rnd_init_once(void); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 5a9975678d6f..eec5aeeeaf92 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -30,6 +30,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt) #endif #ifdef CONFIG_BPF_LIRC_MODE2 BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) diff --git a/include/linux/filter.h b/include/linux/filter.h index 43b45d6db36d..340f7d648974 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1199,4 +1199,14 @@ struct bpf_sysctl_kern { u64 tmp_reg; }; +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + s32 retval; +}; + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b077507efa3f..a396b516a2b2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -170,6 +170,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, + BPF_PROG_TYPE_CGROUP_SOCKOPT, }; enum bpf_attach_type { @@ -194,6 +195,8 @@ enum bpf_attach_type { BPF_CGROUP_SYSCTL, BPF_CGROUP_UDP4_RECVMSG, BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_GETSOCKOPT, + BPF_CGROUP_SETSOCKOPT, __MAX_BPF_ATTACH_TYPE }; @@ -3541,4 +3544,15 @@ struct bpf_sysctl { */ }; +struct bpf_sockopt { + __bpf_md_ptr(struct bpf_sock *, sk); + __bpf_md_ptr(void *, optval); + __bpf_md_ptr(void *, optval_end); + + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 077ed3a19848..76fa0076f20d 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "../cgroup/cgroup-internal.h" @@ -938,6 +939,188 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl); +static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp, + enum bpf_attach_type attach_type) +{ + struct bpf_prog_array *prog_array; + bool empty; + + rcu_read_lock(); + prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]); + empty = bpf_prog_array_is_empty(prog_array); + rcu_read_unlock(); + + return empty; +} + +static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen) +{ + if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0) + return -EINVAL; + + ctx->optval = kzalloc(max_optlen, GFP_USER); + if (!ctx->optval) + return -ENOMEM; + + ctx->optval_end = ctx->optval + max_optlen; + ctx->optlen = max_optlen; + + return 0; +} + +static void sockopt_free_buf(struct bpf_sockopt_kern *ctx) +{ + kfree(ctx->optval); +} + +int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, + int *optname, char __user *optval, + int *optlen, char **kernel_optval) +{ + struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); + struct bpf_sockopt_kern ctx = { + .sk = sk, + .level = *level, + .optname = *optname, + }; + int ret; + + /* Opportunistic check to see whether we have any BPF program + * attached to the hook so we don't waste time allocating + * memory and locking the socket. 
+ */ + if (!cgroup_bpf_enabled || + __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT)) + return 0; + + ret = sockopt_alloc_buf(&ctx, *optlen); + if (ret) + return ret; + + if (copy_from_user(ctx.optval, optval, *optlen) != 0) { + ret = -EFAULT; + goto out; + } + + lock_sock(sk); + ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT], + &ctx, BPF_PROG_RUN); + release_sock(sk); + + if (!ret) { + ret = -EPERM; + goto out; + } + + if (ctx.optlen == -1) { + /* optlen set to -1, bypass kernel */ + ret = 1; + } else if (ctx.optlen > *optlen || ctx.optlen < -1) { + /* optlen is out of bounds */ + ret = -EFAULT; + } else { + /* optlen within bounds, run kernel handler */ + ret = 0; + + /* export any potential modifications */ + *level = ctx.level; + *optname = ctx.optname; + *optlen = ctx.optlen; + *kernel_optval = ctx.optval; + } + +out: + if (ret) + sockopt_free_buf(&ctx); + return ret; +} +EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt); + +int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + int optname, char __user *optval, + int __user *optlen, int max_optlen, + int retval) +{ + struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); + struct bpf_sockopt_kern ctx = { + .sk = sk, + .level = level, + .optname = optname, + .retval = retval, + }; + int ret; + + /* Opportunistic check to see whether we have any BPF program + * attached to the hook so we don't waste time allocating + * memory and locking the socket. + */ + if (!cgroup_bpf_enabled || + __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT)) + return retval; + + ret = sockopt_alloc_buf(&ctx, max_optlen); + if (ret) + return ret; + + if (!retval) { + /* If kernel getsockopt finished successfully, + * copy whatever was returned to the user back + * into our temporary buffer. Set optlen to the + * one that kernel returned as well to let + * BPF programs inspect the value. + */ + + if (get_user(ctx.optlen, optlen)) { + ret = -EFAULT; + goto out; + } + + if (ctx.optlen > max_optlen) + ctx.optlen = max_optlen; + + if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) { + ret = -EFAULT; + goto out; + } + } + + lock_sock(sk); + ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT], + &ctx, BPF_PROG_RUN); + release_sock(sk); + + if (!ret) { + ret = -EPERM; + goto out; + } + + if (ctx.optlen > max_optlen) { + ret = -EFAULT; + goto out; + } + + /* BPF programs only allowed to set retval to 0, not some + * arbitrary value. 
+ */ + if (ctx.retval != 0 && ctx.retval != retval) { + ret = -EFAULT; + goto out; + } + + if (copy_to_user(optval, ctx.optval, ctx.optlen) || + put_user(ctx.optlen, optlen)) { + ret = -EFAULT; + goto out; + } + + ret = ctx.retval; + +out: + sockopt_free_buf(&ctx); + return ret; +} +EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt); + static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp, size_t *lenp) { @@ -1198,3 +1381,153 @@ const struct bpf_verifier_ops cg_sysctl_verifier_ops = { const struct bpf_prog_ops cg_sysctl_prog_ops = { }; + +static const struct bpf_func_proto * +cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_proto; + case BPF_FUNC_sk_storage_delete: + return &bpf_sk_storage_delete_proto; +#ifdef CONFIG_INET + case BPF_FUNC_tcp_sock: + return &bpf_tcp_sock_proto; +#endif + default: + return cgroup_base_func_proto(func_id, prog); + } +} + +static bool cg_sockopt_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + const int size_default = sizeof(__u32); + + if (off < 0 || off >= sizeof(struct bpf_sockopt)) + return false; + + if (off % size != 0) + return false; + + if (type == BPF_WRITE) { + switch (off) { + case offsetof(struct bpf_sockopt, retval): + if (size != size_default) + return false; + return prog->expected_attach_type == + BPF_CGROUP_GETSOCKOPT; + case offsetof(struct bpf_sockopt, optname): + /* fallthrough */ + case offsetof(struct bpf_sockopt, level): + if (size != size_default) + return false; + return prog->expected_attach_type == + BPF_CGROUP_SETSOCKOPT; + case offsetof(struct bpf_sockopt, optlen): + return size == size_default; + default: + return false; + } + } + + switch (off) { + case offsetof(struct bpf_sockopt, sk): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_SOCKET; + break; + case offsetof(struct bpf_sockopt, optval): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_PACKET; + break; + case offsetof(struct bpf_sockopt, optval_end): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_PACKET_END; + break; + case offsetof(struct bpf_sockopt, retval): + if (size != size_default) + return false; + return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT; + default: + if (size != size_default) + return false; + break; + } + return true; +} + +#define CG_SOCKOPT_ACCESS_FIELD(T, F) \ + T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \ + si->dst_reg, si->src_reg, \ + offsetof(struct bpf_sockopt_kern, F)) + +static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + struct bpf_insn *insn = insn_buf; + + switch (si->off) { + case offsetof(struct bpf_sockopt, sk): + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk); + break; + case offsetof(struct bpf_sockopt, level): + if (type == BPF_WRITE) + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level); + else + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level); + break; + case offsetof(struct bpf_sockopt, optname): + if (type == BPF_WRITE) + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname); + else + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname); + break; + case offsetof(struct bpf_sockopt, optlen): + if (type == BPF_WRITE) + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen); + else + *insn++ = 
CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen); + break; + case offsetof(struct bpf_sockopt, retval): + if (type == BPF_WRITE) + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval); + else + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval); + break; + case offsetof(struct bpf_sockopt, optval): + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval); + break; + case offsetof(struct bpf_sockopt, optval_end): + *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end); + break; + } + + return insn - insn_buf; +} + +static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf, + bool direct_write, + const struct bpf_prog *prog) +{ + /* Nothing to do for sockopt argument. The data is kzalloc'ated. + */ + return 0; +} + +const struct bpf_verifier_ops cg_sockopt_verifier_ops = { + .get_func_proto = cg_sockopt_func_proto, + .is_valid_access = cg_sockopt_is_valid_access, + .convert_ctx_access = cg_sockopt_convert_ctx_access, + .gen_prologue = cg_sockopt_get_prologue, +}; + +const struct bpf_prog_ops cg_sockopt_prog_ops = { +}; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 561ed07d3007..e2c1b43728da 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1809,6 +1809,15 @@ int bpf_prog_array_length(struct bpf_prog_array *array) return cnt; } +bool bpf_prog_array_is_empty(struct bpf_prog_array *array) +{ + struct bpf_prog_array_item *item; + + for (item = array->items; item->prog; item++) + if (item->prog != &dummy_bpf_prog.prog) + return false; + return true; +} static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, u32 *prog_ids, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 7713cf39795a..b0f545e07425 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1590,6 +1590,14 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, default: return -EINVAL; } + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + switch (expected_attach_type) { + case BPF_CGROUP_SETSOCKOPT: + case BPF_CGROUP_GETSOCKOPT: + return 0; + default: + return -EINVAL; + } default: return 0; } @@ -1840,6 +1848,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, switch (prog->type) { case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: return attach_type == prog->expected_attach_type ? 
0 : -EINVAL; case BPF_PROG_TYPE_CGROUP_SKB: return prog->enforce_expected_attach_type && @@ -1912,6 +1921,10 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_CGROUP_SYSCTL: ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; break; + case BPF_CGROUP_GETSOCKOPT: + case BPF_CGROUP_SETSOCKOPT: + ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT; + break; default: return -EINVAL; } @@ -1995,6 +2008,10 @@ static int bpf_prog_detach(const union bpf_attr *attr) case BPF_CGROUP_SYSCTL: ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; break; + case BPF_CGROUP_GETSOCKOPT: + case BPF_CGROUP_SETSOCKOPT: + ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT; + break; default: return -EINVAL; } @@ -2031,6 +2048,8 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_SOCK_OPS: case BPF_CGROUP_DEVICE: case BPF_CGROUP_SYSCTL: + case BPF_CGROUP_GETSOCKOPT: + case BPF_CGROUP_SETSOCKOPT: break; case BPF_LIRC_MODE2: return lirc_prog_query(attr, uattr); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0e079b2298f8..6b5623d320f9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2215,6 +2215,13 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, env->seen_direct_write = true; return true; + + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + if (t == BPF_WRITE) + env->seen_direct_write = true; + + return true; + default: return false; } @@ -6066,6 +6073,7 @@ static int check_return_code(struct bpf_verifier_env *env) case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_CGROUP_SYSCTL: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: break; default: return 0; diff --git a/net/core/filter.c b/net/core/filter.c index 2014d76e0d2a..dc8534be12fc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5651,7 +5651,7 @@ BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) return (unsigned long)NULL; } -static const struct bpf_func_proto bpf_tcp_sock_proto = { +const struct bpf_func_proto bpf_tcp_sock_proto = { .func = bpf_tcp_sock, .gpl_only = false, .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL, diff --git a/net/socket.c b/net/socket.c index 963df5dbdd54..0ddfbfb761d9 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2051,6 +2051,8 @@ SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size, static int __sys_setsockopt(int fd, int level, int optname, char __user *optval, int optlen) { + mm_segment_t oldfs = get_fs(); + char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2063,6 +2065,22 @@ static int __sys_setsockopt(int fd, int level, int optname, if (err) goto out_put; + err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, + &optname, optval, &optlen, + &kernel_optval); + + if (err < 0) { + goto out_put; + } else if (err > 0) { + err = 0; + goto out_put; + } + + if (kernel_optval) { + set_fs(KERNEL_DS); + optval = (char __user __force *)kernel_optval; + } + if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, optval, @@ -2071,6 +2089,11 @@ static int __sys_setsockopt(int fd, int level, int optname, err = sock->ops->setsockopt(sock, level, optname, optval, optlen); + + if (kernel_optval) { + set_fs(oldfs); + kfree(kernel_optval); + } out_put: fput_light(sock->file, fput_needed); } @@ -2093,6 +2116,7 @@ static int __sys_getsockopt(int fd, int level, int optname, { int err, fput_needed; struct socket *sock; + int max_optlen; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { @@ -2100,6 +2124,8 @@ static int __sys_getsockopt(int fd, int level, int optname, if (err) goto out_put; + max_optlen = 
BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); + if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, @@ -2108,6 +2134,10 @@ static int __sys_getsockopt(int fd, int level, int optname, err = sock->ops->getsockopt(sock, level, optname, optval, optlen); + + err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, + optval, optlen, + max_optlen, err); out_put: fput_light(sock->file, fput_needed); } -- cgit v1.2.3-59-g8ed1b From 720f22fed81bc6fd1765db7014651b6718887bea Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 24 Jun 2019 23:13:35 +0100 Subject: net: sched: refactor reinsert action The TC_ACT_REINSERT return type was added as an in-kernel only option to allow a packet ingress or egress redirect. This is used to avoid unnecessary skb clones in situations where they are not required. If a TC hook returns this code then the packet is 'reinserted' and no skb consume is carried out as no clone took place. This return type is only used in act_mirred. Rather than have the reinsert called from the main datapath, call it directly in act_mirred. Instead of returning TC_ACT_REINSERT, change the type to the new TC_ACT_CONSUMED which tells the caller that the packet has been stolen by another process and that no consume call is required. Moving all redirect calls to the act_mirred code is in preparation for tracking recursion created by act_mirred. Signed-off-by: John Hurley Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 2 +- include/net/sch_generic.h | 2 +- net/core/dev.c | 4 +--- net/sched/act_mirred.c | 3 ++- 4 files changed, 5 insertions(+), 6 deletions(-) (limited to 'net/core') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 720f2b32fc2f..1a7596ba0dbe 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -10,7 +10,7 @@ #include /* TC action not accessible from user space */ -#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1) +#define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1) /* Basic packet classifier frontend definitions. 
*/ diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 21f434f3ac9e..855167bbc372 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -279,7 +279,7 @@ struct tcf_result { }; const struct tcf_proto *goto_tp; - /* used by the TC_ACT_REINSERT action */ + /* used in the skb_tc_reinsert function */ struct { bool ingress; struct gnet_stats_queue *qstats; diff --git a/net/core/dev.c b/net/core/dev.c index d6edd218babd..58529318b3a9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4689,9 +4689,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, __skb_push(skb, skb->mac_len); skb_do_redirect(skb); return NULL; - case TC_ACT_REINSERT: - /* this does not scrub the packet, and updates stats on error */ - skb_tc_reinsert(skb, &cl_res); + case TC_ACT_CONSUMED: return NULL; default: break; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 58e7573dded4..8c1d73661cc4 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -277,7 +277,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, if (use_reinsert) { res->ingress = want_ingress; res->qstats = this_cpu_ptr(m->common.cpu_qstats); - return TC_ACT_REINSERT; + skb_tc_reinsert(skb, res); + return TC_ACT_CONSUMED; } } -- cgit v1.2.3-59-g8ed1b From d5df2830ca9922d03a33940ea424c9a5f39f1162 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Fri, 28 Jun 2019 11:12:34 +0200 Subject: devmap/cpumap: Use flush list instead of bitmap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The socket map uses a linked list instead of a bitmap to keep track of which entries to flush. Do the same for devmap and cpumap, as this means we don't have to care about the map index when enqueueing things into the map (and so we can cache the map lookup). Signed-off-by: Toke Høiland-Jørgensen Acked-by: Jonathan Lemon Acked-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- kernel/bpf/cpumap.c | 105 +++++++++++++++++++++++---------------------------- kernel/bpf/devmap.c | 107 +++++++++++++++++++++++----------------------------- net/core/filter.c | 2 - 3 files changed, 95 insertions(+), 119 deletions(-) (limited to 'net/core') diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8dff08768087..ef49e17ae47c 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -32,14 +32,19 @@ /* General idea: XDP packets getting XDP redirected to another CPU, * will maximum be stored/queued for one driver ->poll() call. It is - * guaranteed that setting flush bit and flush operation happen on + * guaranteed that queueing the frame and the flush operation happen on * same CPU. Thus, cpu_map_flush operation can deduct via this_cpu_ptr() * which queue in bpf_cpu_map_entry contains packets. 
*/ #define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */ +struct bpf_cpu_map_entry; +struct bpf_cpu_map; + struct xdp_bulk_queue { void *q[CPU_MAP_BULK_SIZE]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; unsigned int count; }; @@ -52,6 +57,8 @@ struct bpf_cpu_map_entry { /* XDP can run multiple RX-ring queues, need __percpu enqueue store */ struct xdp_bulk_queue __percpu *bulkq; + struct bpf_cpu_map *cmap; + /* Queue with potential multi-producers, and single-consumer kthread */ struct ptr_ring *queue; struct task_struct *kthread; @@ -65,23 +72,17 @@ struct bpf_cpu_map { struct bpf_map map; /* Below members specific for map type */ struct bpf_cpu_map_entry **cpu_map; - unsigned long __percpu *flush_needed; + struct list_head __percpu *flush_list; }; -static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, - struct xdp_bulk_queue *bq, bool in_napi_ctx); - -static u64 cpu_map_bitmap_size(const union bpf_attr *attr) -{ - return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); -} +static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx); static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) { struct bpf_cpu_map *cmap; int err = -ENOMEM; + int ret, cpu; u64 cost; - int ret; if (!capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); @@ -105,7 +106,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *); - cost += cpu_map_bitmap_size(attr) * num_possible_cpus(); + cost += sizeof(struct list_head) * num_possible_cpus(); /* Notice returns -EPERM on if map size is larger than memlock limit */ ret = bpf_map_charge_init(&cmap->map.memory, cost); @@ -114,12 +115,13 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) goto free_cmap; } - /* A per cpu bitfield with a bit per possible CPU in map */ - cmap->flush_needed = __alloc_percpu(cpu_map_bitmap_size(attr), - __alignof__(unsigned long)); - if (!cmap->flush_needed) + cmap->flush_list = alloc_percpu(struct list_head); + if (!cmap->flush_list) goto free_charge; + for_each_possible_cpu(cpu) + INIT_LIST_HEAD(per_cpu_ptr(cmap->flush_list, cpu)); + /* Alloc array for possible remote "destination" CPUs */ cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *), @@ -129,7 +131,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) return &cmap->map; free_percpu: - free_percpu(cmap->flush_needed); + free_percpu(cmap->flush_list); free_charge: bpf_map_charge_finish(&cmap->map.memory); free_cmap: @@ -334,7 +336,8 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, { gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; struct bpf_cpu_map_entry *rcpu; - int numa, err; + struct xdp_bulk_queue *bq; + int numa, err, i; /* Have map->numa_node, but choose node of redirect target CPU */ numa = cpu_to_node(cpu); @@ -349,6 +352,11 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, if (!rcpu->bulkq) goto free_rcu; + for_each_possible_cpu(i) { + bq = per_cpu_ptr(rcpu->bulkq, i); + bq->obj = rcpu; + } + /* Alloc queue */ rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa); if (!rcpu->queue) @@ -405,7 +413,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu) struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu); /* No concurrent bq_enqueue can run at this point */ - bq_flush_to_queue(rcpu, bq, false); + bq_flush_to_queue(bq, false); } free_percpu(rcpu->bulkq); /* Cannot 
kthread_stop() here, last put free rcpu resources */ @@ -488,6 +496,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id); if (!rcpu) return -ENOMEM; + rcpu->cmap = cmap; } rcu_read_lock(); __cpu_map_entry_replace(cmap, key_cpu, rcpu); @@ -514,14 +523,14 @@ static void cpu_map_free(struct bpf_map *map) synchronize_rcu(); /* To ensure all pending flush operations have completed wait for flush - * bitmap to indicate all flush_needed bits to be zero on _all_ cpus. - * Because the above synchronize_rcu() ensures the map is disconnected - * from the program we can assume no new bits will be set. + * list be empty on _all_ cpus. Because the above synchronize_rcu() + * ensures the map is disconnected from the program we can assume no new + * items will be added to the list. */ for_each_online_cpu(cpu) { - unsigned long *bitmap = per_cpu_ptr(cmap->flush_needed, cpu); + struct list_head *flush_list = per_cpu_ptr(cmap->flush_list, cpu); - while (!bitmap_empty(bitmap, cmap->map.max_entries)) + while (!list_empty(flush_list)) cond_resched(); } @@ -538,7 +547,7 @@ static void cpu_map_free(struct bpf_map *map) /* bq flush and cleanup happens after RCU graze-period */ __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */ } - free_percpu(cmap->flush_needed); + free_percpu(cmap->flush_list); bpf_map_area_free(cmap->cpu_map); kfree(cmap); } @@ -590,9 +599,9 @@ const struct bpf_map_ops cpu_map_ops = { .map_check_btf = map_check_no_btf, }; -static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, - struct xdp_bulk_queue *bq, bool in_napi_ctx) +static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx) { + struct bpf_cpu_map_entry *rcpu = bq->obj; unsigned int processed = 0, drops = 0; const int to_cpu = rcpu->cpu; struct ptr_ring *q; @@ -621,6 +630,8 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, bq->count = 0; spin_unlock(&q->producer_lock); + __list_del_clearprev(&bq->flush_node); + /* Feedback loop via tracepoints */ trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu); return 0; @@ -631,10 +642,11 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, */ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) { + struct list_head *flush_list = this_cpu_ptr(rcpu->cmap->flush_list); struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) - bq_flush_to_queue(rcpu, bq, true); + bq_flush_to_queue(bq, true); /* Notice, xdp_buff/page MUST be queued here, long enough for * driver to code invoking us to finished, due to driver @@ -646,6 +658,10 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) * operation, when completing napi->poll call. */ bq->q[bq->count++] = xdpf; + + if (!bq->flush_node.prev) + list_add(&bq->flush_node, flush_list); + return 0; } @@ -665,41 +681,16 @@ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, return 0; } -void __cpu_map_insert_ctx(struct bpf_map *map, u32 bit) -{ - struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); - unsigned long *bitmap = this_cpu_ptr(cmap->flush_needed); - - __set_bit(bit, bitmap); -} - void __cpu_map_flush(struct bpf_map *map) { struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); - unsigned long *bitmap = this_cpu_ptr(cmap->flush_needed); - u32 bit; - - /* The napi->poll softirq makes sure __cpu_map_insert_ctx() - * and __cpu_map_flush() happen on same CPU. 
Thus, the percpu - * bitmap indicate which percpu bulkq have packets. - */ - for_each_set_bit(bit, bitmap, map->max_entries) { - struct bpf_cpu_map_entry *rcpu = READ_ONCE(cmap->cpu_map[bit]); - struct xdp_bulk_queue *bq; - - /* This is possible if entry is removed by user space - * between xdp redirect and flush op. - */ - if (unlikely(!rcpu)) - continue; - - __clear_bit(bit, bitmap); + struct list_head *flush_list = this_cpu_ptr(cmap->flush_list); + struct xdp_bulk_queue *bq, *tmp; - /* Flush all frames in bulkq to real queue */ - bq = this_cpu_ptr(rcpu->bulkq); - bq_flush_to_queue(rcpu, bq, true); + list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { + bq_flush_to_queue(bq, true); /* If already running, costs spin_lock_irqsave + smb_mb */ - wake_up_process(rcpu->kthread); + wake_up_process(bq->obj->kthread); } } diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 40e86a7e0ef0..a4dddc867cbf 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -17,9 +17,8 @@ * datapath always has a valid copy. However, the datapath does a "flush" * operation that pushes any pending packets in the driver outside the RCU * critical section. Each bpf_dtab_netdev tracks these pending operations using - * an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be destroyed - * until all bits are cleared indicating outstanding flush operations have - * completed. + * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until + * this list is empty, indicating outstanding flush operations have completed. * * BPF syscalls may race with BPF program calls on any of the update, delete * or lookup operations. As noted above the xchg() operation also keep the @@ -48,9 +47,13 @@ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) #define DEV_MAP_BULK_SIZE 16 +struct bpf_dtab_netdev; + struct xdp_bulk_queue { struct xdp_frame *q[DEV_MAP_BULK_SIZE]; + struct list_head flush_node; struct net_device *dev_rx; + struct bpf_dtab_netdev *obj; unsigned int count; }; @@ -65,23 +68,18 @@ struct bpf_dtab_netdev { struct bpf_dtab { struct bpf_map map; struct bpf_dtab_netdev **netdev_map; - unsigned long __percpu *flush_needed; + struct list_head __percpu *flush_list; struct list_head list; }; static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); -static u64 dev_map_bitmap_size(const union bpf_attr *attr) -{ - return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long); -} - static struct bpf_map *dev_map_alloc(union bpf_attr *attr) { struct bpf_dtab *dtab; + int err, cpu; u64 cost; - int err; if (!capable(CAP_NET_ADMIN)) return ERR_PTR(-EPERM); @@ -99,7 +97,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); - cost += dev_map_bitmap_size(attr) * num_possible_cpus(); + cost += sizeof(struct list_head) * num_possible_cpus(); /* if map size is larger than memlock limit, reject it */ err = bpf_map_charge_init(&dtab->map.memory, cost); @@ -108,28 +106,30 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) err = -ENOMEM; - /* A per cpu bitfield with a bit per possible net device */ - dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr), - __alignof__(unsigned long), - GFP_KERNEL | __GFP_NOWARN); - if (!dtab->flush_needed) + dtab->flush_list = alloc_percpu(struct list_head); + if (!dtab->flush_list) goto free_charge; + for_each_possible_cpu(cpu) + INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu)); + dtab->netdev_map = 
bpf_map_area_alloc(dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *), dtab->map.numa_node); if (!dtab->netdev_map) - goto free_charge; + goto free_percpu; spin_lock(&dev_map_lock); list_add_tail_rcu(&dtab->list, &dev_map_list); spin_unlock(&dev_map_lock); return &dtab->map; + +free_percpu: + free_percpu(dtab->flush_list); free_charge: bpf_map_charge_finish(&dtab->map.memory); free_dtab: - free_percpu(dtab->flush_needed); kfree(dtab); return ERR_PTR(err); } @@ -158,14 +158,14 @@ static void dev_map_free(struct bpf_map *map) rcu_barrier(); /* To ensure all pending flush operations have completed wait for flush - * bitmap to indicate all flush_needed bits to be zero on _all_ cpus. + * list to empty on _all_ cpus. * Because the above synchronize_rcu() ensures the map is disconnected - * from the program we can assume no new bits will be set. + * from the program we can assume no new items will be added. */ for_each_online_cpu(cpu) { - unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu); + struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu); - while (!bitmap_empty(bitmap, dtab->map.max_entries)) + while (!list_empty(flush_list)) cond_resched(); } @@ -181,7 +181,7 @@ static void dev_map_free(struct bpf_map *map) kfree(dev); } - free_percpu(dtab->flush_needed); + free_percpu(dtab->flush_list); bpf_map_area_free(dtab->netdev_map); kfree(dtab); } @@ -203,18 +203,10 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } -void __dev_map_insert_ctx(struct bpf_map *map, u32 bit) -{ - struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); - unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); - - __set_bit(bit, bitmap); -} - -static int bq_xmit_all(struct bpf_dtab_netdev *obj, - struct xdp_bulk_queue *bq, u32 flags, +static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags, bool in_napi_ctx) { + struct bpf_dtab_netdev *obj = bq->obj; struct net_device *dev = obj->dev; int sent = 0, drops = 0, err = 0; int i; @@ -241,6 +233,7 @@ out: trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit, sent, drops, bq->dev_rx, dev, err); bq->dev_rx = NULL; + __list_del_clearprev(&bq->flush_node); return 0; error: /* If ndo_xdp_xmit fails with an errno, no frames have been @@ -263,31 +256,18 @@ error: * from the driver before returning from its napi->poll() routine. The poll() * routine is called either from busy_poll context or net_rx_action signaled * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the - * net device can be torn down. On devmap tear down we ensure the ctx bitmap - * is zeroed before completing to ensure all flush operations have completed. + * net device can be torn down. On devmap tear down we ensure the flush list + * is empty before completing to ensure all flush operations have completed. */ void __dev_map_flush(struct bpf_map *map) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); - unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); - u32 bit; + struct list_head *flush_list = this_cpu_ptr(dtab->flush_list); + struct xdp_bulk_queue *bq, *tmp; rcu_read_lock(); - for_each_set_bit(bit, bitmap, map->max_entries) { - struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]); - struct xdp_bulk_queue *bq; - - /* This is possible if the dev entry is removed by user space - * between xdp redirect and flush op. 
- */ - if (unlikely(!dev)) - continue; - - bq = this_cpu_ptr(dev->bulkq); - bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true); - - __clear_bit(bit, bitmap); - } + list_for_each_entry_safe(bq, tmp, flush_list, flush_node) + bq_xmit_all(bq, XDP_XMIT_FLUSH, true); rcu_read_unlock(); } @@ -314,10 +294,11 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf, struct net_device *dev_rx) { + struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list); struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq); if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) - bq_xmit_all(obj, bq, 0, true); + bq_xmit_all(bq, 0, true); /* Ingress dev_rx will be the same for all xdp_frame's in * bulk_queue, because bq stored per-CPU and must be flushed @@ -327,6 +308,10 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf, bq->dev_rx = dev_rx; bq->q[bq->count++] = xdpf; + + if (!bq->flush_node.prev) + list_add(&bq->flush_node, flush_list); + return 0; } @@ -377,17 +362,12 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev) { if (dev->dev->netdev_ops->ndo_xdp_xmit) { struct xdp_bulk_queue *bq; - unsigned long *bitmap; - int cpu; rcu_read_lock(); for_each_online_cpu(cpu) { - bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu); - __clear_bit(dev->bit, bitmap); - bq = per_cpu_ptr(dev->bulkq, cpu); - bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false); + bq_xmit_all(bq, XDP_XMIT_FLUSH, false); } rcu_read_unlock(); } @@ -434,8 +414,10 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, struct net *net = current->nsproxy->net_ns; gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; struct bpf_dtab_netdev *dev, *old_dev; - u32 i = *(u32 *)key; u32 ifindex = *(u32 *)value; + struct xdp_bulk_queue *bq; + u32 i = *(u32 *)key; + int cpu; if (unlikely(map_flags > BPF_EXIST)) return -EINVAL; @@ -458,6 +440,11 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, return -ENOMEM; } + for_each_possible_cpu(cpu) { + bq = per_cpu_ptr(dev->bulkq, cpu); + bq->obj = dev; + } + dev->dev = dev_get_by_index(net, ifindex); if (!dev->dev) { free_percpu(dev->bulkq); diff --git a/net/core/filter.c b/net/core/filter.c index dc8534be12fc..1e5fd37e9ab5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3523,7 +3523,6 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, err = dev_map_enqueue(dst, xdp, dev_rx); if (unlikely(err)) return err; - __dev_map_insert_ctx(map, index); break; } case BPF_MAP_TYPE_CPUMAP: { @@ -3532,7 +3531,6 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, err = cpu_map_enqueue(rcpu, xdp, dev_rx); if (unlikely(err)) return err; - __cpu_map_insert_ctx(map, index); break; } case BPF_MAP_TYPE_XSKMAP: { -- cgit v1.2.3-59-g8ed1b From 4b55cf290dc6bd3a9e5da26d1ad60e77aa88c8cf Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Fri, 28 Jun 2019 11:12:34 +0200 Subject: devmap: Rename ifindex member in bpf_redirect_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bpf_redirect_info struct has an 'ifindex' member which was named back when the redirects could only target egress interfaces. Now that we can also redirect to sockets and CPUs, this is a bit misleading, so rename the member to tgt_index. Reorder the struct members so we can have 'tgt_index' and 'tgt_value' next to each other in a subsequent patch. 
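To illustrate the mismatch the rename fixes, here is a minimal XDP sketch (illustrative only, not part of this patch; the map layout and the index values are made up) in which the value handed to the kernel is an egress ifindex in one call and a cpumap slot in the other, yet both land in the same bpf_redirect_info member:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 8);
} cpu_map SEC(".maps");

SEC("xdp")
int redirect_example(struct xdp_md *ctx)
{
	if (ctx->ingress_ifindex == 2)
		return bpf_redirect(3, 0);		/* 3 is an ifindex here... */

	return bpf_redirect_map(&cpu_map, 3, 0);	/* ...and a cpumap slot here */
}

char _license[] SEC("license") = "GPL";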
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 2 +- net/core/filter.c | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 340f7d648974..92bd192f7786 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -578,8 +578,8 @@ struct bpf_skb_data_end { }; struct bpf_redirect_info { - u32 ifindex; u32 flags; + u32 tgt_index; struct bpf_map *map; struct bpf_map *map_to_flush; u32 kern_flags; diff --git a/net/core/filter.c b/net/core/filter.c index 1e5fd37e9ab5..b4a062379bb9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2158,8 +2158,8 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) if (unlikely(flags & ~(BPF_F_INGRESS))) return TC_ACT_SHOT; - ri->ifindex = ifindex; ri->flags = flags; + ri->tgt_index = ifindex; return TC_ACT_REDIRECT; } @@ -2169,8 +2169,8 @@ int skb_do_redirect(struct sk_buff *skb) struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct net_device *dev; - dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); - ri->ifindex = 0; + dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index); + ri->tgt_index = 0; if (unlikely(!dev)) { kfree_skb(skb); return -EINVAL; @@ -3488,11 +3488,11 @@ xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri) { struct net_device *fwd; - u32 index = ri->ifindex; + u32 index = ri->tgt_index; int err; fwd = dev_get_by_index_rcu(dev_net(dev), index); - ri->ifindex = 0; + ri->tgt_index = 0; if (unlikely(!fwd)) { err = -EINVAL; goto err; @@ -3604,11 +3604,11 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_map *map, struct bpf_redirect_info *ri) { - u32 index = ri->ifindex; + u32 index = ri->tgt_index; void *fwd = NULL; int err; - ri->ifindex = 0; + ri->tgt_index = 0; WRITE_ONCE(ri->map, NULL); fwd = __xdp_map_lookup_elem(map, index); @@ -3651,11 +3651,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, struct bpf_map *map) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - u32 index = ri->ifindex; + u32 index = ri->tgt_index; void *fwd = NULL; int err = 0; - ri->ifindex = 0; + ri->tgt_index = 0; WRITE_ONCE(ri->map, NULL); fwd = __xdp_map_lookup_elem(map, index); @@ -3695,14 +3695,14 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = READ_ONCE(ri->map); - u32 index = ri->ifindex; + u32 index = ri->tgt_index; struct net_device *fwd; int err = 0; if (map) return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, map); - ri->ifindex = 0; + ri->tgt_index = 0; fwd = dev_get_by_index_rcu(dev_net(dev), index); if (unlikely(!fwd)) { err = -EINVAL; @@ -3730,8 +3730,8 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) if (unlikely(flags)) return XDP_ABORTED; - ri->ifindex = ifindex; ri->flags = flags; + ri->tgt_index = ifindex; WRITE_ONCE(ri->map, NULL); return XDP_REDIRECT; @@ -3753,8 +3753,8 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, if (unlikely(flags)) return XDP_ABORTED; - ri->ifindex = ifindex; ri->flags = flags; + ri->tgt_index = ifindex; WRITE_ONCE(ri->map, map); return XDP_REDIRECT; -- cgit v1.2.3-59-g8ed1b From 43e74c0267a35d6f5127218054b2d80c7fe801f5 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Fri, 
28 Jun 2019 11:12:34 +0200 Subject: bpf_xdp_redirect_map: Perform map lookup in eBPF helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bpf_redirect_map() helper used by XDP programs doesn't return any indication of whether it can successfully redirect to the map index it was given. Instead, BPF programs have to track this themselves, leading to programs using duplicate maps to track which entries are populated in the devmap. This patch fixes this by moving the map lookup into the bpf_redirect_map() helper, which makes it possible to return failure to the eBPF program. The lower bits of the flags argument are used as the return code, which means that existing users who pass a '0' flag argument will get XDP_ABORTED. With this, a BPF program can check the return code from the helper call and react by, for instance, substituting a different redirect. This works for any type of map used for redirect. Signed-off-by: Toke Høiland-Jørgensen Acked-by: Jonathan Lemon Acked-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 1 + include/trace/events/xdp.h | 5 ++--- include/uapi/linux/bpf.h | 7 +++++-- net/core/filter.c | 32 ++++++++++++++++++-------------- 4 files changed, 26 insertions(+), 19 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 92bd192f7786..1fe53e78c7e3 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -580,6 +580,7 @@ struct bpf_skb_data_end { struct bpf_redirect_info { u32 flags; u32 tgt_index; + void *tgt_value; struct bpf_map *map; struct bpf_map *map_to_flush; u32 kern_flags; diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 81e708c4b513..68899fdc985b 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -175,9 +175,8 @@ struct _bpf_dtab_netdev { #endif /* __DEVMAP_OBJ_TYPE */ #define devmap_ifindex(fwd, map) \ - (!fwd ? 0 : \ - ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \ - ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)) + ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \ + ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0) #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \ trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a396b516a2b2..cffea1826a1f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1571,8 +1571,11 @@ union bpf_attr { * but this is only implemented for native XDP (with driver * support) as of this writing). * - * All values for *flags* are reserved for future usage, and must - * be left at zero. + * The lower two bits of *flags* are used as the return code if + * the map lookup fails. This is so that the return value can be + * one of the XDP program return codes up to XDP_TX, as chosen by + * the caller. Any higher bits in the *flags* argument must be + * unset. * * When used to redirect packets to net devices, this helper * provides a high performance increase over **bpf_redirect**\ (). 
diff --git a/net/core/filter.c b/net/core/filter.c index b4a062379bb9..4836264f82ee 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3605,17 +3605,13 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_redirect_info *ri) { u32 index = ri->tgt_index; - void *fwd = NULL; + void *fwd = ri->tgt_value; int err; ri->tgt_index = 0; + ri->tgt_value = NULL; WRITE_ONCE(ri->map, NULL); - fwd = __xdp_map_lookup_elem(map, index); - if (unlikely(!fwd)) { - err = -EINVAL; - goto err; - } if (ri->map_to_flush && unlikely(ri->map_to_flush != map)) xdp_do_flush_map(); @@ -3652,18 +3648,13 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); u32 index = ri->tgt_index; - void *fwd = NULL; + void *fwd = ri->tgt_value; int err = 0; ri->tgt_index = 0; + ri->tgt_value = NULL; WRITE_ONCE(ri->map, NULL); - fwd = __xdp_map_lookup_elem(map, index); - if (unlikely(!fwd)) { - err = -EINVAL; - goto err; - } - if (map->map_type == BPF_MAP_TYPE_DEVMAP) { struct bpf_dtab_netdev *dst = fwd; @@ -3732,6 +3723,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) ri->flags = flags; ri->tgt_index = ifindex; + ri->tgt_value = NULL; WRITE_ONCE(ri->map, NULL); return XDP_REDIRECT; @@ -3750,9 +3742,21 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - if (unlikely(flags)) + /* Lower bits of the flags are used as return code on lookup failure */ + if (unlikely(flags > XDP_TX)) return XDP_ABORTED; + ri->tgt_value = __xdp_map_lookup_elem(map, ifindex); + if (unlikely(!ri->tgt_value)) { + /* If the lookup fails we want to clear out the state in the + * redirect_info struct completely, so that if an eBPF program + * performs multiple lookups, the last one always takes + * precedence. + */ + WRITE_ONCE(ri->map, NULL); + return flags; + } + ri->flags = flags; ri->tgt_index = ifindex; WRITE_ONCE(ri->map, map); -- cgit v1.2.3-59-g8ed1b From 27ba4059e06b3bbd38a7d944fd5a78cdf47534f4 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 28 Jun 2019 09:13:19 +0800 Subject: net: link_watch: prevent starvation when processing linkwatch wq When a user has configured a large number of virtual netdevs, such as 4K vlans, a carrier on/off operation on the real netdev will also cause its virtual netdevs' link state to be processed in linkwatch. Currently, the processing is done in a work queue, which may cause an rtnl locking starvation problem and a worker starvation problem for other work queues, such as the irqfd_inject wq. This patch releases the cpu when the link watch worker has processed a fixed number of netdev link watch events, and schedules the work queue again when there are still link watch events remaining. Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
Miller --- net/core/link_watch.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 04fdc9535772..f153e0601838 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -163,9 +163,16 @@ static void linkwatch_do_dev(struct net_device *dev) static void __linkwatch_run_queue(int urgent_only) { +#define MAX_DO_DEV_PER_LOOP 100 + + int do_dev = MAX_DO_DEV_PER_LOOP; struct net_device *dev; LIST_HEAD(wrk); + /* Give urgent case more budget */ + if (urgent_only) + do_dev += MAX_DO_DEV_PER_LOOP; + /* * Limit the number of linkwatch events to one * per second so that a runaway driver does not @@ -184,7 +191,7 @@ static void __linkwatch_run_queue(int urgent_only) spin_lock_irq(&lweventlist_lock); list_splice_init(&lweventlist, &wrk); - while (!list_empty(&wrk)) { + while (!list_empty(&wrk) && do_dev > 0) { dev = list_first_entry(&wrk, struct net_device, link_watch_list); list_del_init(&dev->link_watch_list); @@ -195,9 +202,13 @@ static void __linkwatch_run_queue(int urgent_only) } spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); + do_dev--; spin_lock_irq(&lweventlist_lock); } + /* Add the remaining work back to lweventlist */ + list_splice_init(&wrk, &lweventlist); + if (!list_empty(&lweventlist)) linkwatch_schedule_work(0); spin_unlock_irq(&lweventlist_lock); -- cgit v1.2.3-59-g8ed1b From 8d7017fd621d02ff0d47d19484350c2356828483 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Mon, 1 Jul 2019 14:38:57 -0700 Subject: blackhole_netdev: use blackhole_netdev to invalidate dst entries Use blackhole_netdev instead of 'lo' device with lower MTU when marking dst "dead". Signed-off-by: Mahesh Bandewar Tested-by: Michael Chan Signed-off-by: David S. 
Miller --- net/core/dst.c | 2 +- net/ipv4/route.c | 3 +-- net/ipv6/route.c | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) (limited to 'net/core') diff --git a/net/core/dst.c b/net/core/dst.c index e46366228eaf..1325316d9eab 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -160,7 +160,7 @@ void dst_dev_put(struct dst_entry *dst) dst->ops->ifdown(dst, dev, true); dst->input = dst_discard; dst->output = dst_discard_out; - dst->dev = dev_net(dst->dev)->loopback_dev; + dst->dev = blackhole_netdev; dev_hold(dst->dev); dev_put(dev); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index bbd55c7f6b2e..dc1f510a7c81 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1532,7 +1532,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst) void rt_flush_dev(struct net_device *dev) { - struct net *net = dev_net(dev); struct rtable *rt; int cpu; @@ -1543,7 +1542,7 @@ void rt_flush_dev(struct net_device *dev) list_for_each_entry(rt, &ul->head, rt_uncached) { if (rt->dst.dev != dev) continue; - rt->dst.dev = net->loopback_dev; + rt->dst.dev = blackhole_netdev; dev_hold(rt->dst.dev); dev_put(dev); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7556275b1cef..39361f57351a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -176,7 +176,7 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) } if (rt_dev == dev) { - rt->dst.dev = loopback_dev; + rt->dst.dev = blackhole_netdev; dev_hold(rt->dst.dev); dev_put(rt_dev); } -- cgit v1.2.3-59-g8ed1b From 2377b81de52750997726d6d43b4114e5842c4bf9 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 2 Jul 2019 09:13:57 -0700 Subject: bpf: split shared bpf_tcp_sock and bpf_sock_ops implementation We've added bpf_tcp_sock member to bpf_sock_ops and don't expect any new tcp_sock fields in bpf_sock_ops. Let's remove CONVERT_COMMON_TCP_SOCK_FIELDS so bpf_tcp_sock can be independently extended. 
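The program-visible behaviour is unchanged; for reference, a minimal sketch (illustrative, not part of this patch) of the kind of field access that both converters serve:

#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int tcp_snoop(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct bpf_tcp_sock *tp;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);
	if (!sk || sk->protocol != IPPROTO_TCP)
		return 1;
	tp = bpf_tcp_sock(sk);
	if (!tp)
		return 1;
	/* This load is rewritten by bpf_tcp_sock_convert_ctx_access() into
	 * a direct read from struct tcp_sock; the same field read off a
	 * struct bpf_sock_ops context goes through
	 * sock_ops_convert_ctx_access() instead.
	 */
	return tp->snd_cwnd > 0;
}

char _license[] SEC("license") = "GPL";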
Cc: Eric Dumazet Cc: Priyaranjan Jha Cc: Yuchung Cheng Cc: Soheil Hassas Yeganeh Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- net/core/filter.c | 180 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 126 insertions(+), 54 deletions(-) (limited to 'net/core') diff --git a/net/core/filter.c b/net/core/filter.c index 4836264f82ee..ad908526545d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5194,54 +5194,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { }; #endif /* CONFIG_IPV6_SEG6_BPF */ -#define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT) \ -do { \ - switch (si->off) { \ - case offsetof(md_type, snd_cwnd): \ - CONVERT(snd_cwnd); break; \ - case offsetof(md_type, srtt_us): \ - CONVERT(srtt_us); break; \ - case offsetof(md_type, snd_ssthresh): \ - CONVERT(snd_ssthresh); break; \ - case offsetof(md_type, rcv_nxt): \ - CONVERT(rcv_nxt); break; \ - case offsetof(md_type, snd_nxt): \ - CONVERT(snd_nxt); break; \ - case offsetof(md_type, snd_una): \ - CONVERT(snd_una); break; \ - case offsetof(md_type, mss_cache): \ - CONVERT(mss_cache); break; \ - case offsetof(md_type, ecn_flags): \ - CONVERT(ecn_flags); break; \ - case offsetof(md_type, rate_delivered): \ - CONVERT(rate_delivered); break; \ - case offsetof(md_type, rate_interval_us): \ - CONVERT(rate_interval_us); break; \ - case offsetof(md_type, packets_out): \ - CONVERT(packets_out); break; \ - case offsetof(md_type, retrans_out): \ - CONVERT(retrans_out); break; \ - case offsetof(md_type, total_retrans): \ - CONVERT(total_retrans); break; \ - case offsetof(md_type, segs_in): \ - CONVERT(segs_in); break; \ - case offsetof(md_type, data_segs_in): \ - CONVERT(data_segs_in); break; \ - case offsetof(md_type, segs_out): \ - CONVERT(segs_out); break; \ - case offsetof(md_type, data_segs_out): \ - CONVERT(data_segs_out); break; \ - case offsetof(md_type, lost_out): \ - CONVERT(lost_out); break; \ - case offsetof(md_type, sacked_out): \ - CONVERT(sacked_out); break; \ - case offsetof(md_type, bytes_received): \ - CONVERT(bytes_received); break; \ - case offsetof(md_type, bytes_acked): \ - CONVERT(bytes_acked); break; \ - } \ -} while (0) - #ifdef CONFIG_INET static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, int dif, int sdif, u8 family, u8 proto) @@ -5623,9 +5575,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, offsetof(struct tcp_sock, FIELD)); \ } while (0) - CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock, - BPF_TCP_SOCK_GET_COMMON); - if (insn > insn_buf) return insn - insn_buf; @@ -5640,6 +5589,69 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, offsetof(struct tcp_sock, rtt_min) + offsetof(struct minmax_sample, v)); break; + case offsetof(struct bpf_tcp_sock, snd_cwnd): + BPF_TCP_SOCK_GET_COMMON(snd_cwnd); + break; + case offsetof(struct bpf_tcp_sock, srtt_us): + BPF_TCP_SOCK_GET_COMMON(srtt_us); + break; + case offsetof(struct bpf_tcp_sock, snd_ssthresh): + BPF_TCP_SOCK_GET_COMMON(snd_ssthresh); + break; + case offsetof(struct bpf_tcp_sock, rcv_nxt): + BPF_TCP_SOCK_GET_COMMON(rcv_nxt); + break; + case offsetof(struct bpf_tcp_sock, snd_nxt): + BPF_TCP_SOCK_GET_COMMON(snd_nxt); + break; + case offsetof(struct bpf_tcp_sock, snd_una): + BPF_TCP_SOCK_GET_COMMON(snd_una); + break; + case offsetof(struct bpf_tcp_sock, mss_cache): + BPF_TCP_SOCK_GET_COMMON(mss_cache); + break; + case offsetof(struct bpf_tcp_sock, ecn_flags): + 
BPF_TCP_SOCK_GET_COMMON(ecn_flags); + break; + case offsetof(struct bpf_tcp_sock, rate_delivered): + BPF_TCP_SOCK_GET_COMMON(rate_delivered); + break; + case offsetof(struct bpf_tcp_sock, rate_interval_us): + BPF_TCP_SOCK_GET_COMMON(rate_interval_us); + break; + case offsetof(struct bpf_tcp_sock, packets_out): + BPF_TCP_SOCK_GET_COMMON(packets_out); + break; + case offsetof(struct bpf_tcp_sock, retrans_out): + BPF_TCP_SOCK_GET_COMMON(retrans_out); + break; + case offsetof(struct bpf_tcp_sock, total_retrans): + BPF_TCP_SOCK_GET_COMMON(total_retrans); + break; + case offsetof(struct bpf_tcp_sock, segs_in): + BPF_TCP_SOCK_GET_COMMON(segs_in); + break; + case offsetof(struct bpf_tcp_sock, data_segs_in): + BPF_TCP_SOCK_GET_COMMON(data_segs_in); + break; + case offsetof(struct bpf_tcp_sock, segs_out): + BPF_TCP_SOCK_GET_COMMON(segs_out); + break; + case offsetof(struct bpf_tcp_sock, data_segs_out): + BPF_TCP_SOCK_GET_COMMON(data_segs_out); + break; + case offsetof(struct bpf_tcp_sock, lost_out): + BPF_TCP_SOCK_GET_COMMON(lost_out); + break; + case offsetof(struct bpf_tcp_sock, sacked_out): + BPF_TCP_SOCK_GET_COMMON(sacked_out); + break; + case offsetof(struct bpf_tcp_sock, bytes_received): + BPF_TCP_SOCK_GET_COMMON(bytes_received); + break; + case offsetof(struct bpf_tcp_sock, bytes_acked): + BPF_TCP_SOCK_GET_COMMON(bytes_acked); + break; } return insn - insn_buf; @@ -7913,9 +7925,6 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ } while (0) - CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops, - SOCK_OPS_GET_TCP_SOCK_FIELD); - if (insn > insn_buf) return insn - insn_buf; @@ -8085,6 +8094,69 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, struct sock, type); break; + case offsetof(struct bpf_sock_ops, snd_cwnd): + SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd); + break; + case offsetof(struct bpf_sock_ops, srtt_us): + SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us); + break; + case offsetof(struct bpf_sock_ops, snd_ssthresh): + SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh); + break; + case offsetof(struct bpf_sock_ops, rcv_nxt): + SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt); + break; + case offsetof(struct bpf_sock_ops, snd_nxt): + SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt); + break; + case offsetof(struct bpf_sock_ops, snd_una): + SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una); + break; + case offsetof(struct bpf_sock_ops, mss_cache): + SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache); + break; + case offsetof(struct bpf_sock_ops, ecn_flags): + SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags); + break; + case offsetof(struct bpf_sock_ops, rate_delivered): + SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered); + break; + case offsetof(struct bpf_sock_ops, rate_interval_us): + SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us); + break; + case offsetof(struct bpf_sock_ops, packets_out): + SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out); + break; + case offsetof(struct bpf_sock_ops, retrans_out): + SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out); + break; + case offsetof(struct bpf_sock_ops, total_retrans): + SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans); + break; + case offsetof(struct bpf_sock_ops, segs_in): + SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in); + break; + case offsetof(struct bpf_sock_ops, data_segs_in): + SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in); + break; + case offsetof(struct bpf_sock_ops, segs_out): + SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out); + break; + case offsetof(struct bpf_sock_ops, data_segs_out): + SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out); 
+ break; + case offsetof(struct bpf_sock_ops, lost_out): + SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out); + break; + case offsetof(struct bpf_sock_ops, sacked_out): + SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out); + break; + case offsetof(struct bpf_sock_ops, bytes_received): + SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received); + break; + case offsetof(struct bpf_sock_ops, bytes_acked): + SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); + break; case offsetof(struct bpf_sock_ops, sk): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, -- cgit v1.2.3-59-g8ed1b From 0357746d1e40a8226f68a42c8d7222a12d7c451f Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 2 Jul 2019 09:13:58 -0700 Subject: bpf: add dsack_dups/delivered{, _ce} to bpf_tcp_sock Add more fields to bpf_tcp_sock that might be useful for debugging congestion control issues. Cc: Eric Dumazet Cc: Priyaranjan Jha Cc: Yuchung Cheng Cc: Soheil Hassas Yeganeh Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 5 +++++ net/core/filter.c | 11 ++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9cdd0aaeba06..bfb0b1a76684 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3073,6 +3073,11 @@ struct bpf_tcp_sock { * sum(delta(snd_una)), or how many bytes * were acked. */ + __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups + * total number of DSACK blocks received + */ + __u32 delivered; /* Total data packets delivered incl. rexmits */ + __u32 delivered_ce; /* Like the above but only ECE marked packets */ }; struct bpf_sock_tuple { diff --git a/net/core/filter.c b/net/core/filter.c index ad908526545d..3da4b6c38b46 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5544,7 +5544,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { - if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked)) + if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, delivered_ce)) return false; if (off % size != 0) @@ -5652,6 +5652,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, case offsetof(struct bpf_tcp_sock, bytes_acked): BPF_TCP_SOCK_GET_COMMON(bytes_acked); break; + case offsetof(struct bpf_tcp_sock, dsack_dups): + BPF_TCP_SOCK_GET_COMMON(dsack_dups); + break; + case offsetof(struct bpf_tcp_sock, delivered): + BPF_TCP_SOCK_GET_COMMON(delivered); + break; + case offsetof(struct bpf_tcp_sock, delivered_ce): + BPF_TCP_SOCK_GET_COMMON(delivered_ce); + break; } return insn - insn_buf; -- cgit v1.2.3-59-g8ed1b From c2cb5e82a720c05b707701c75dfeb356fe184787 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 2 Jul 2019 09:13:59 -0700 Subject: bpf: add icsk_retransmits to bpf_tcp_sock Add some inet_connection_sock fields to bpf_tcp_sock that might be useful for debugging congestion control issues. 
Cc: Eric Dumazet Cc: Priyaranjan Jha Cc: Yuchung Cheng Cc: Soheil Hassas Yeganeh Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'net/core') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bfb0b1a76684..ead27aebf491 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3078,6 +3078,7 @@ struct bpf_tcp_sock { */ __u32 delivered; /* Total data packets delivered incl. rexmits */ __u32 delivered_ce; /* Like the above but only ECE marked packets */ + __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ }; struct bpf_sock_tuple { diff --git a/net/core/filter.c b/net/core/filter.c index 3da4b6c38b46..089aaea0ccc6 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5544,7 +5544,8 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { - if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, delivered_ce)) + if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, + icsk_retransmits)) return false; if (off % size != 0) @@ -5575,6 +5576,20 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, offsetof(struct tcp_sock, FIELD)); \ } while (0) +#define BPF_INET_SOCK_GET_COMMON(FIELD) \ + do { \ + BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock, \ + FIELD) > \ + FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct inet_connection_sock, \ + FIELD), \ + si->dst_reg, si->src_reg, \ + offsetof( \ + struct inet_connection_sock, \ + FIELD)); \ + } while (0) + if (insn > insn_buf) return insn - insn_buf; @@ -5661,6 +5676,9 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, case offsetof(struct bpf_tcp_sock, delivered_ce): BPF_TCP_SOCK_GET_COMMON(delivered_ce); break; + case offsetof(struct bpf_tcp_sock, icsk_retransmits): + BPF_INET_SOCK_GET_COMMON(icsk_retransmits); + break; } return insn - insn_buf; -- cgit v1.2.3-59-g8ed1b From 600c70bad6594cb124c641ed05355ca134650ea4 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 1 Jul 2019 10:38:39 -0700 Subject: bpf: allow wide (u64) aligned stores for some fields of bpf_sock_addr Since commit cd17d7770578 ("bpf/tools: sync bpf.h") clang decided that it can do a single u64 store into user_ip6[2] instead of two separate u32 ones: # 17: (18) r2 = 0x100000000000000 # ; ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2); # 19: (7b) *(u64 *)(r1 +16) = r2 # invalid bpf_context access off=16 size=8 From the compiler point of view it does look like a correct thing to do, so let's support it on the kernel side. Credit to Andrii Nakryiko for a proper implementation of bpf_ctx_wide_store_ok. 
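The triggering pattern, as a minimal cgroup/connect6 sketch (illustrative; the stored address halves are placeholders):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("cgroup/connect6")
int rewrite_dst6(struct bpf_sock_addr *ctx)
{
	/* With -O2, clang may fuse these two 4-byte stores into a single
	 * 8-byte store at offset 16; bpf_ctx_wide_store_ok() accepts it
	 * because it is u64-sized, 8-byte aligned and stays inside
	 * user_ip6[].
	 */
	ctx->user_ip6[2] = bpf_htonl(0);
	ctx->user_ip6[3] = bpf_htonl(1);
	return 1;
}

char _license[] SEC("license") = "GPL";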
Cc: Andrii Nakryiko Cc: Yonghong Song Fixes: cd17d7770578 ("bpf/tools: sync bpf.h") Reported-by: kernel test robot Acked-by: Yonghong Song Acked-by: Andrii Nakryiko Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 6 ++++++ include/uapi/linux/bpf.h | 6 +++--- net/core/filter.c | 22 ++++++++++++++-------- 3 files changed, 23 insertions(+), 11 deletions(-) (limited to 'net/core') diff --git a/include/linux/filter.h b/include/linux/filter.h index 1fe53e78c7e3..6d944369ca87 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -747,6 +747,12 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) return size <= size_default && (size & (size - 1)) == 0; } +#define bpf_ctx_wide_store_ok(off, size, type, field) \ + (size == sizeof(__u64) && \ + off >= offsetof(type, field) && \ + off + sizeof(__u64) <= offsetofend(type, field) && \ + off % sizeof(__u64) == 0) + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ead27aebf491..c318385aba51 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3247,7 +3247,7 @@ struct bpf_sock_addr { __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ - __u32 user_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write. + __u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write. * Stored in network byte order. */ __u32 user_port; /* Allows 4-byte read and write. @@ -3256,10 +3256,10 @@ struct bpf_sock_addr { __u32 family; /* Allows 4-byte read, but no write */ __u32 type; /* Allows 4-byte read, but no write */ __u32 protocol; /* Allows 4-byte read, but no write */ - __u32 msg_src_ip4; /* Allows 1,2,4-byte read an 4-byte write. + __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ - __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write. + __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write. * Stored in network byte order. */ __bpf_md_ptr(struct bpf_sock *, sk); diff --git a/net/core/filter.c b/net/core/filter.c index 089aaea0ccc6..4481e950f020 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6890,6 +6890,16 @@ static bool sock_addr_is_valid_access(int off, int size, if (!bpf_ctx_narrow_access_ok(off, size, size_default)) return false; } else { + if (bpf_ctx_wide_store_ok(off, size, + struct bpf_sock_addr, + user_ip6)) + return true; + + if (bpf_ctx_wide_store_ok(off, size, + struct bpf_sock_addr, + msg_src_ip6)) + return true; + if (size != size_default) return false; } @@ -7730,9 +7740,6 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantic similar to * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation. * - * It doesn't support SIZE argument though since narrow stores are not - * supported for now. - * * In addition it uses Temporary Field TF (member of struct S) as the 3rd * "register" since two registers available in convert_ctx_access are not * enough: we can't override neither SRC, since it contains value to store, nor @@ -7740,7 +7747,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, * instructions. But we need a temporary place to save pointer to nested * structure whose field we want to store to. 
*/ -#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF) \ +#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \ do { \ int tmp_reg = BPF_REG_9; \ if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ @@ -7751,8 +7758,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, offsetof(S, TF)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ si->dst_reg, offsetof(S, F)); \ - *insn++ = BPF_STX_MEM( \ - BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \ + *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ target_size) \ + OFF); \ @@ -7764,8 +7770,8 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, TF) \ do { \ if (type == BPF_WRITE) { \ - SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \ - TF); \ + SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \ + OFF, TF); \ } else { \ SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \ S, NS, F, NF, SIZE, OFF); \ -- cgit v1.2.3-59-g8ed1b From 1da4bbeffe41ba318812d7590955faee8636668b Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 9 Jul 2019 00:34:28 +0300 Subject: net: core: page_pool: add user refcnt and reintroduce page_pool_destroy Jesper recently removed page_pool_destroy() (from driver invocation) and moved shutdown and free of page_pool into xdp_rxq_info_unreg(), in order to handle in-flight packets/pages. This created an asymmetry in driver create/destroy pairs. This patch reintroduces page_pool_destroy() and adds a page_pool user refcnt. This serves to simplify drivers' error handling, as drivers now always call page_pool_destroy() and don't need to track whether xdp_rxq_info_reg_mem_model() was unsuccessful. This could be used for special cases where a single RX-queue (with a single page_pool) provides packets for two net_devices, and thus needs to register the same page_pool twice with two xdp_rxq_info structures. This patch is primarily to ease API usage for drivers. The recently merged netsec driver actually has a bug in this area, which is solved by this API change. This patch is a modified version of Ivan Khoronzhuk's original patch. Link: https://lore.kernel.org/netdev/20190625175948.24771-2-ivan.khoronzhuk@linaro.org/ Fixes: 5c67bf0ec4d0 ("net: netsec: Use page_pool API") Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Ilias Apalodimas Acked-by: Jesper Dangaard Brouer Reviewed-by: Saeed Mahameed Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/socionext/netsec.c | 8 ++------ include/net/page_pool.h | 25 +++++++++++++++++++++++ net/core/page_pool.c | 8 ++++++++ net/core/xdp.c | 3 +++ 5 files changed, 40 insertions(+), 8 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 83194d56434d..10efd69de7ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -577,8 +577,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, } err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); - if (err) - page_pool_free(rq->page_pool); } if (err) goto err_free; @@ -646,6 +644,7 @@ err_rq_wq_destroy: if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); + page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); return err; @@ -680,6 +679,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) } xdp_rxq_info_unreg(&rq->xdp_rxq); + page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); } diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 460777449cd9..d7307ab90d74 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1212,15 +1212,11 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) } } - /* Rx is currently using page_pool - * since the pool is created during netsec_setup_rx_dring(), we need to - * free the pool manually if the registration failed - */ + /* Rx is currently using page_pool */ if (id == NETSEC_RING_RX) { if (xdp_rxq_info_is_reg(&dring->xdp_rxq)) xdp_rxq_info_unreg(&dring->xdp_rxq); - else - page_pool_free(dring->page_pool); + page_pool_destroy(dring->page_pool); } memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); diff --git a/include/net/page_pool.h b/include/net/page_pool.h index ee9c871d2043..2cbcdbdec254 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -101,6 +101,12 @@ struct page_pool { struct ptr_ring ring; atomic_t pages_state_release_cnt; + + /* A page_pool is strictly tied to a single RX-queue being + * protected by NAPI, due to above pp_alloc_cache. This + * refcnt serves purpose is to simplify drivers error handling. 
+ */ + refcount_t user_cnt; }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); @@ -134,6 +140,15 @@ static inline void page_pool_free(struct page_pool *pool) #endif } +/* Drivers use this instead of page_pool_free */ +static inline void page_pool_destroy(struct page_pool *pool) +{ + if (!pool) + return; + + page_pool_free(pool); +} + /* Never call this directly, use helpers below */ void __page_pool_put_page(struct page_pool *pool, struct page *page, bool allow_direct); @@ -201,4 +216,14 @@ static inline bool is_page_pool_compiled_in(void) #endif } +static inline void page_pool_get(struct page_pool *pool) +{ + refcount_inc(&pool->user_cnt); +} + +static inline bool page_pool_put(struct page_pool *pool) +{ + return refcount_dec_and_test(&pool->user_cnt); +} + #endif /* _NET_PAGE_POOL_H */ diff --git a/net/core/page_pool.c b/net/core/page_pool.c index b366f59885c1..3272dc7a8c81 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -49,6 +49,9 @@ static int page_pool_init(struct page_pool *pool, atomic_set(&pool->pages_state_release_cnt, 0); + /* Driver calling page_pool_create() also call page_pool_destroy() */ + refcount_set(&pool->user_cnt, 1); + if (pool->p.flags & PP_FLAG_DMA_MAP) get_device(pool->p.dev); @@ -70,6 +73,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params) kfree(pool); return ERR_PTR(err); } + return pool; } EXPORT_SYMBOL(page_pool_create); @@ -356,6 +360,10 @@ static void __warn_in_flight(struct page_pool *pool) void __page_pool_free(struct page_pool *pool) { + /* Only last user actually free/release resources */ + if (!page_pool_put(pool)) + return; + WARN(pool->alloc.count, "API usage violation"); WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty"); diff --git a/net/core/xdp.c b/net/core/xdp.c index 829377cc83db..d7bf62ffbb5e 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -370,6 +370,9 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, goto err; } + if (type == MEM_TYPE_PAGE_POOL) + page_pool_get(xdp_alloc->page_pool); + mutex_unlock(&mem_id_lock); trace_mem_connect(xdp_alloc, xdp_rxq); -- cgit v1.2.3-59-g8ed1b From 333f7909a8573145811c4ab7d8c9092301707721 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 5 Jul 2019 20:14:16 +0100 Subject: coallocate socket_wq with socket itself socket->wq is assign-once, set when we are initializing both the struct socket it's in and the struct socket_wq it points to. As a matter of fact, the only reason for separate allocation was the ability to RCU-delay freeing of socket_wq. RCU-delaying the freeing of socket itself gets rid of that need, so we can just fold struct socket_wq into the end of struct socket and simplify life both for sock_alloc_inode() (one allocation instead of two) and for tun/tap oddballs, where we used to embed struct socket and struct socket_wq into the same structure (now - embedding just the struct socket). Note that reference to struct socket_wq in struct sock does remain a reference - that's unchanged. Signed-off-by: Al Viro Signed-off-by: David S. 
Miller --- drivers/net/tap.c | 5 ++--- drivers/net/tun.c | 8 +++----- include/linux/if_tap.h | 1 - include/linux/net.h | 4 ++-- include/net/sock.h | 4 ++-- net/core/sock.c | 2 +- net/socket.c | 19 +++++-------------- 7 files changed, 15 insertions(+), 28 deletions(-) (limited to 'net/core') diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 8e01390c738e..dd614c2cd994 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -520,8 +520,7 @@ static int tap_open(struct inode *inode, struct file *file) goto err; } - RCU_INIT_POINTER(q->sock.wq, &q->wq); - init_waitqueue_head(&q->wq.wait); + init_waitqueue_head(&q->sock.wq.wait); q->sock.type = SOCK_RAW; q->sock.state = SS_CONNECTED; q->sock.file = file; @@ -579,7 +578,7 @@ static __poll_t tap_poll(struct file *file, poll_table *wait) goto out; mask = 0; - poll_wait(file, &q->wq.wait, wait); + poll_wait(file, &q->sock.wq.wait, wait); if (!ptr_ring_empty(&q->ring)) mask |= EPOLLIN | EPOLLRDNORM; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d7c55e0fa8f4..3d443597bd04 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -160,7 +160,6 @@ struct tun_pcpu_stats { struct tun_file { struct sock sk; struct socket socket; - struct socket_wq wq; struct tun_struct __rcu *tun; struct fasync_struct *fasync; /* only used for fasnyc */ @@ -2165,7 +2164,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) goto out; } - add_wait_queue(&tfile->wq.wait, &wait); + add_wait_queue(&tfile->socket.wq.wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); @@ -2185,7 +2184,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) } __set_current_state(TASK_RUNNING); - remove_wait_queue(&tfile->wq.wait, &wait); + remove_wait_queue(&tfile->socket.wq.wait, &wait); out: *err = error; @@ -3415,8 +3414,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->flags = 0; tfile->ifindex = 0; - init_waitqueue_head(&tfile->wq.wait); - RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); + init_waitqueue_head(&tfile->socket.wq.wait); tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 8e66866c11be..915a187cfabd 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -62,7 +62,6 @@ struct tap_dev { struct tap_queue { struct sock sk; struct socket sock; - struct socket_wq wq; int vnet_hdr_sz; struct tap_dev __rcu *tap; struct file *file; diff --git a/include/linux/net.h b/include/linux/net.h index f7d672cf25b5..9cafb5f353a9 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -116,11 +116,11 @@ struct socket { unsigned long flags; - struct socket_wq *wq; - struct file *file; struct sock *sk; const struct proto_ops *ops; + + struct socket_wq wq; }; struct vm_area_struct; diff --git a/include/net/sock.h b/include/net/sock.h index 6cbc16136357..228db3998e46 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1822,7 +1822,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) { WARN_ON(parent->sk); write_lock_bh(&sk->sk_callback_lock); - rcu_assign_pointer(sk->sk_wq, parent->wq); + rcu_assign_pointer(sk->sk_wq, &parent->wq); parent->sk = sk; sk_set_socket(sk, parent); sk->sk_uid = SOCK_INODE(parent)->i_uid; @@ -2100,7 +2100,7 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock, poll_table *p) { if (!poll_does_not_wait(p)) { - poll_wait(filp, &sock->wq->wait, p); + poll_wait(filp, &sock->wq.wait, p); /* We need to be sure we are in sync with the 
* socket flags modification. * diff --git a/net/core/sock.c b/net/core/sock.c index 0eb21384079d..3e073ca6138f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2847,7 +2847,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) if (sock) { sk->sk_type = sock->type; - RCU_INIT_POINTER(sk->sk_wq, sock->wq); + RCU_INIT_POINTER(sk->sk_wq, &sock->wq); sock->sk = sk; sk->sk_uid = SOCK_INODE(sock)->i_uid; } else { diff --git a/net/socket.c b/net/socket.c index 541719a2443d..16449d6daeca 100644 --- a/net/socket.c +++ b/net/socket.c @@ -234,20 +234,13 @@ static struct kmem_cache *sock_inode_cachep __ro_after_init; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; - struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; - wq = kmalloc(sizeof(*wq), GFP_KERNEL); - if (!wq) { - kmem_cache_free(sock_inode_cachep, ei); - return NULL; - } - init_waitqueue_head(&wq->wait); - wq->fasync_list = NULL; - wq->flags = 0; - ei->socket.wq = wq; + init_waitqueue_head(&ei->socket.wq.wait); + ei->socket.wq.fasync_list = NULL; + ei->socket.wq.flags = 0; ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; @@ -263,7 +256,6 @@ static void sock_free_inode(struct inode *inode) struct socket_alloc *ei; ei = container_of(inode, struct socket_alloc, vfs_inode); - kfree(ei->socket.wq); kmem_cache_free(sock_inode_cachep, ei); } @@ -599,7 +591,7 @@ static void __sock_release(struct socket *sock, struct inode *inode) module_put(owner); } - if (sock->wq->fasync_list) + if (sock->wq.fasync_list) pr_err("%s: fasync list not empty!\n", __func__); if (!sock->file) { @@ -1288,13 +1280,12 @@ static int sock_fasync(int fd, struct file *filp, int on) { struct socket *sock = filp->private_data; struct sock *sk = sock->sk; - struct socket_wq *wq; + struct socket_wq *wq = &sock->wq; if (sk == NULL) return -EINVAL; lock_sock(sk); - wq = sock->wq; fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) -- cgit v1.2.3-59-g8ed1b
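For the tun/tap-style embedders the simplification looks roughly like this; struct myqueue and its two handlers are hypothetical, modeled on the tap_open()/tap_poll() changes above, and only sketch the pattern:

#include <linux/net.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct myqueue {
	struct sock sk;		/* must come first, tun/tap style */
	struct socket sock;	/* socket_wq now embedded at sock.wq */
};

/* No separate struct socket_wq to allocate or RCU-publish any more;
 * open just initializes the embedded waitqueue in place ...
 */
static void myqueue_open_init(struct myqueue *q, struct file *file)
{
	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
}

/* ... and poll sleeps on it directly. */
static __poll_t myqueue_poll(struct file *file, poll_table *wait)
{
	struct myqueue *q = file->private_data;

	poll_wait(file, &q->sock.wq.wait, wait);
	return EPOLLOUT | EPOLLWRNORM;	/* always writable in this sketch */
}

From 6413139dfc641aaaa30580b59696a5f7ea274194 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sun, 7 Jul 2019 05:51:55 -0400 Subject: skbuff: increase verbosity when dumping skb data skb_warn_bad_offload and netdev_rx_csum_fault trigger on hard-to-debug issues. Dump more state and the header. Optionally dump the entire packet and linear segment. This is required to debug checksum bugs that may include bytes past skb_tail_pointer(). Both call sites call this function inside a net_ratelimit() block. Limit the full packet log further to a hard limit of can_dump_full (5). Based on an earlier patch by Cong Wang, see link below. Changes v1 -> v2 - dump frag_list only on full_pkt Link: https://patchwork.ozlabs.org/patch/1000841/ Signed-off-by: Willem de Bruijn Signed-off-by: David S.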
Miller --- include/linux/skbuff.h | 1 + net/core/dev.c | 16 ++------ net/core/skbuff.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 104 insertions(+), 12 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ece49d5f8ef..1fdfdbb34e8e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1024,6 +1024,7 @@ static inline bool skb_unref(struct sk_buff *skb) void skb_release_head_state(struct sk_buff *skb); void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); void consume_skb(struct sk_buff *skb); void __consume_stateless_skb(struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index 58529318b3a9..fc676b2610e3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2900,12 +2900,10 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) else name = netdev_name(dev); } - WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " - "gso_type=%d ip_summed=%d\n", + skb_dump(KERN_WARNING, skb, false); + WARN(1, "%s: caps=(%pNF, %pNF)\n", name, dev ? &dev->features : &null_features, - skb->sk ? &skb->sk->sk_route_caps : &null_features, - skb->len, skb->data_len, skb_shinfo(skb)->gso_size, - skb_shinfo(skb)->gso_type, skb->ip_summed); + skb->sk ? &skb->sk->sk_route_caps : &null_features); } /* @@ -3124,13 +3122,7 @@ void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) { if (net_ratelimit()) { pr_err("%s: hw csum failure\n", dev ? dev->name : ""); - if (dev) - pr_err("dev features: %pNF\n", &dev->features); - pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n", - skb->len, skb->data_len, skb->pkt_type, - skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type, - skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum, - skb->csum_complete_sw, skb->csum_valid, skb->csum_level); + skb_dump(KERN_ERR, skb, true); dump_stack(); } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5323441a12cc..cdb0ccdaac0b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -707,6 +707,105 @@ void kfree_skb_list(struct sk_buff *segs) } EXPORT_SYMBOL(kfree_skb_list); +/* Dump skb information and contents. + * + * Must only be called from net_ratelimit()-ed paths. + * + * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise. 
+ */ +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) +{ + static atomic_t can_dump_full = ATOMIC_INIT(5); + struct skb_shared_info *sh = skb_shinfo(skb); + struct net_device *dev = skb->dev; + struct sock *sk = skb->sk; + struct sk_buff *list_skb; + bool has_mac, has_trans; + int headroom, tailroom; + int i, len, seg_len; + + if (full_pkt) + full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0; + + if (full_pkt) + len = skb->len; + else + len = min_t(int, skb->len, MAX_HEADER + 128); + + headroom = skb_headroom(skb); + tailroom = skb_tailroom(skb); + + has_mac = skb_mac_header_was_set(skb); + has_trans = skb_transport_header_was_set(skb); + + printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" + "mac=(%d,%d) net=(%d,%d) trans=%d\n" + "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" + "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" + "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", + level, skb->len, headroom, skb_headlen(skb), tailroom, + has_mac ? skb->mac_header : -1, + has_mac ? skb_mac_header_len(skb) : -1, + skb->network_header, + has_trans ? skb_network_header_len(skb) : -1, + has_trans ? skb->transport_header : -1, + sh->tx_flags, sh->nr_frags, + sh->gso_size, sh->gso_type, sh->gso_segs, + skb->csum, skb->ip_summed, skb->csum_complete_sw, + skb->csum_valid, skb->csum_level, + skb->hash, skb->sw_hash, skb->l4_hash, + ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); + + if (dev) + printk("%sdev name=%s feat=0x%pNF\n", + level, dev->name, &dev->features); + if (sk) + printk("%ssk family=%hu type=%hu proto=%hu\n", + level, sk->sk_family, sk->sk_type, sk->sk_protocol); + + if (full_pkt && headroom) + print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->head, headroom, false); + + seg_len = min_t(int, skb_headlen(skb), len); + if (seg_len) + print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, seg_len, false); + len -= seg_len; + + if (full_pkt && tailroom) + print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, + 16, 1, skb_tail_pointer(skb), tailroom, false); + + for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 p_off, p_len, copied; + struct page *p; + u8 *vaddr; + + skb_frag_foreach_page(frag, frag->page_offset, + skb_frag_size(frag), p, p_off, p_len, + copied) { + seg_len = min_t(int, p_len, len); + vaddr = kmap_atomic(p); + print_hex_dump(level, "skb frag: ", + DUMP_PREFIX_OFFSET, + 16, 1, vaddr + p_off, seg_len, false); + kunmap_atomic(vaddr); + len -= seg_len; + if (!len) + break; + } + } + + if (full_pkt && skb_has_frag_list(skb)) { + printk("skb fraglist:\n"); + skb_walk_frags(skb, list_skb) + skb_dump(level, list_skb, true); + } +} +EXPORT_SYMBOL(skb_dump); + /** * skb_tx_error - report an sk_buff xmit error * @skb: buffer that triggered an error -- cgit v1.2.3-59-g8ed1b From 8822e270d697010e6a4fd42a319dbefc33db91e1 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Sun, 7 Jul 2019 15:01:54 +0100 Subject: net: core: move push MPLS functionality from OvS to core helper Open vSwitch provides code to push an MPLS header to a packet. In preparation for supporting this in TC, move the push code to an skb helper that can be reused. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Willem de Bruijn Acked-by: Cong Wang Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 1 + net/core/skbuff.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++ net/openvswitch/actions.c | 31 +++-------------------- 3 files changed, 69 insertions(+), 27 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1fdfdbb34e8e..1dc55000710c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3447,6 +3447,7 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index cdb0ccdaac0b..495fd743a935 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include @@ -5425,6 +5426,69 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) } EXPORT_SYMBOL(skb_vlan_push); +/* Update the ethertype of hdr and the skb csum value if required. */ +static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, + __be16 ethertype) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __be16 diff[] = { ~hdr->h_proto, ethertype }; + + skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); + } + + hdr->h_proto = ethertype; +} + +/** + * skb_mpls_push() - push a new MPLS header after the mac header + * + * @skb: buffer + * @mpls_lse: MPLS label stack entry to push + * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) + * + * Expects skb->data at mac header. + * + * Returns 0 on success, -errno otherwise. + */ +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto) +{ + struct mpls_shim_hdr *lse; + int err; + + if (unlikely(!eth_p_mpls(mpls_proto))) + return -EINVAL; + + /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */ + if (skb->encapsulation) + return -EINVAL; + + err = skb_cow_head(skb, MPLS_HLEN); + if (unlikely(err)) + return err; + + if (!skb->inner_protocol) { + skb_set_inner_network_header(skb, skb->mac_len); + skb_set_inner_protocol(skb, skb->protocol); + } + + skb_push(skb, MPLS_HLEN); + memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), + skb->mac_len); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb->mac_len); + + lse = mpls_hdr(skb); + lse->label_stack_entry = mpls_lse; + skb_postpush_rcsum(skb, lse, MPLS_HLEN); + + if (skb->dev && skb->dev->type == ARPHRD_ETHER) + skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); + skb->protocol = mpls_proto; + + return 0; +} +EXPORT_SYMBOL_GPL(skb_mpls_push); + /** * alloc_skb_with_frags - allocate skb with page frags * diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index bd131469e4ca..a9a6c9cbf946 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -175,34 +175,11 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, const struct ovs_action_push_mpls *mpls) { - struct mpls_shim_hdr *new_mpls_lse; - - /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. 
*/ - if (skb->encapsulation) - return -ENOTSUPP; - - if (skb_cow_head(skb, MPLS_HLEN) < 0) - return -ENOMEM; - - if (!skb->inner_protocol) { - skb_set_inner_network_header(skb, skb->mac_len); - skb_set_inner_protocol(skb, skb->protocol); - } - - skb_push(skb, MPLS_HLEN); - memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), - skb->mac_len); - skb_reset_mac_header(skb); - skb_set_network_header(skb, skb->mac_len); - - new_mpls_lse = mpls_hdr(skb); - new_mpls_lse->label_stack_entry = mpls->mpls_lse; - - skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); + int err; - if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) - update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype); - skb->protocol = mpls->mpls_ethertype; + err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype); + if (err) + return err; invalidate_flow_key(key); return 0; -- cgit v1.2.3-59-g8ed1b From ed246cee09b9865145a2e1e34f63ec0e31dd83a5 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Sun, 7 Jul 2019 15:01:55 +0100 Subject: net: core: move pop MPLS functionality from OvS to core helper Open vSwitch provides code to pop an MPLS header to a packet. In preparation for supporting this in TC, move the pop code to an skb helper that can be reused. Remove the, now unused, update_ethertype static function from OvS. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Willem de Bruijn Acked-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + net/core/skbuff.c | 42 ++++++++++++++++++++++++++++++++++++++++++ net/openvswitch/actions.c | 37 ++----------------------------------- 3 files changed, 45 insertions(+), 35 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1dc55000710c..08d1c8e70540 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3448,6 +3448,7 @@ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto); +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 495fd743a935..8c00be4d8919 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5489,6 +5489,48 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto) } EXPORT_SYMBOL_GPL(skb_mpls_push); +/** + * skb_mpls_pop() - pop the outermost MPLS header + * + * @skb: buffer + * @next_proto: ethertype of header after popped MPLS header + * + * Expects skb->data at mac header. + * + * Returns 0 on success, -errno otherwise. + */ +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto) +{ + int err; + + if (unlikely(!eth_p_mpls(skb->protocol))) + return -EINVAL; + + err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); + if (unlikely(err)) + return err; + + skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); + memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), + skb->mac_len); + + __skb_pull(skb, MPLS_HLEN); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb->mac_len); + + if (skb->dev && skb->dev->type == ARPHRD_ETHER) { + struct ethhdr *hdr; + + /* use mpls_hdr() to get ethertype to account for VLANs. 
*/ + hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); + skb_mod_eth_type(skb, hdr, next_proto); + } + skb->protocol = next_proto; + + return 0; +} +EXPORT_SYMBOL_GPL(skb_mpls_pop); + /** * alloc_skb_with_frags - allocate skb with page frags * diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index a9a6c9cbf946..62715bb8d611 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -160,18 +160,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr, int len); -static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, - __be16 ethertype) -{ - if (skb->ip_summed == CHECKSUM_COMPLETE) { - __be16 diff[] = { ~(hdr->h_proto), ethertype }; - - skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); - } - - hdr->h_proto = ethertype; -} - static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, const struct ovs_action_push_mpls *mpls) { @@ -190,31 +178,10 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, { int err; - err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); - if (unlikely(err)) + err = skb_mpls_pop(skb, ethertype); + if (err) return err; - skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); - - memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), - skb->mac_len); - - __skb_pull(skb, MPLS_HLEN); - skb_reset_mac_header(skb); - skb_set_network_header(skb, skb->mac_len); - - if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) { - struct ethhdr *hdr; - - /* mpls_hdr() is used to locate the ethertype field correctly in the - * presence of VLAN tags. - */ - hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); - update_ethertype(skb, hdr, ethertype); - } - if (eth_p_mpls(skb->protocol)) - skb->protocol = ethertype; - invalidate_flow_key(key); return 0; } -- cgit v1.2.3-59-g8ed1b From d27cf5c59a12f66425df29cd81f61aa73ef14ac1 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Sun, 7 Jul 2019 15:01:56 +0100 Subject: net: core: add MPLS update core helper and use in OvS Open vSwitch allows the updating of an existing MPLS header on a packet. In preparation for supporting similar functionality in TC, move this to a common skb helper function. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Willem de Bruijn Acked-by: Cong Wang Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 1 + net/core/skbuff.c | 33 +++++++++++++++++++++++++++++++++ net/openvswitch/actions.c | 13 +++---------- 3 files changed, 37 insertions(+), 10 deletions(-) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 08d1c8e70540..9f7e01f2be83 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3449,6 +3449,7 @@ int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto); int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto); +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8c00be4d8919..93443a01ab39 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5531,6 +5531,39 @@ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto) } EXPORT_SYMBOL_GPL(skb_mpls_pop); +/** + * skb_mpls_update_lse() - modify outermost MPLS header and update csum + * + * @skb: buffer + * @mpls_lse: new MPLS label stack entry to update to + * + * Expects skb->data at mac header. + * + * Returns 0 on success, -errno otherwise. + */ +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) +{ + int err; + + if (unlikely(!eth_p_mpls(skb->protocol))) + return -EINVAL; + + err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); + if (unlikely(err)) + return err; + + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; + + skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); + } + + mpls_hdr(skb)->label_stack_entry = mpls_lse; + + return 0; +} +EXPORT_SYMBOL_GPL(skb_mpls_update_lse); + /** * alloc_skb_with_frags - allocate skb with page frags * diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 62715bb8d611..3572e11b6f21 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -193,19 +193,12 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, __be32 lse; int err; - err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); - if (unlikely(err)) - return err; - stack = mpls_hdr(skb); lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask); - if (skb->ip_summed == CHECKSUM_COMPLETE) { - __be32 diff[] = { ~(stack->label_stack_entry), lse }; - - skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); - } + err = skb_mpls_update_lse(skb, lse); + if (err) + return err; - stack->label_stack_entry = lse; flow_key->mpls.top_lse = lse; return 0; } -- cgit v1.2.3-59-g8ed1b From 2a2ea50870baa3fb4de0872c5b60828138654ca7 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Sun, 7 Jul 2019 15:01:57 +0100 Subject: net: sched: add mpls manipulation actions to TC Currently, TC offers the ability to match on the MPLS fields of a packet through the use of the flow_dissector_key_mpls struct. However, as yet, TC actions do not allow the modification or manipulation of such fields. Add a new module that registers TC action ops to allow manipulation of MPLS. This includes the ability to push and pop headers as well as modify the contents of new or existing headers. A further action to decrement the TTL field of an MPLS header is also provided with a new helper added to support this. 
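Kernel-side, the helpers added by the previous three patches and the new TTL helper chain together as in the following sketch; mangle_one() is a hypothetical caller (as with the helpers themselves, skb->data must sit at the mac header, and a nonzero return means the skb should be dropped):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Push one unicast MPLS LSE and then decrement its TTL: a condensed
 * version of what the new action does for TCA_MPLS_ACT_PUSH followed
 * by TCA_MPLS_ACT_DEC_TTL.
 */
static int mangle_one(struct sk_buff *skb, __be32 lse)
{
	int err;

	err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC));
	if (err)
		return err;

	return skb_mpls_dec_ttl(skb);
}
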
Examples of the usage of the new action with flower rules to push and pop MPLS labels are: tc filter add dev eth0 protocol ip parent ffff: flower \ action mpls push protocol mpls_uc label 123 \ action mirred egress redirect dev eth1 tc filter add dev eth0 protocol mpls_uc parent ffff: flower \ action mpls pop protocol ipv4 \ action mirred egress redirect dev eth1 Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Willem de Bruijn Acked-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + include/net/tc_act/tc_mpls.h | 30 +++ include/uapi/linux/pkt_cls.h | 3 +- include/uapi/linux/tc_act/tc_mpls.h | 33 +++ net/core/skbuff.c | 30 +++ net/sched/Kconfig | 11 + net/sched/Makefile | 1 + net/sched/act_mpls.c | 406 ++++++++++++++++++++++++++++++++++++ 8 files changed, 514 insertions(+), 1 deletion(-) create mode 100644 include/net/tc_act/tc_mpls.h create mode 100644 include/uapi/linux/tc_act/tc_mpls.h create mode 100644 net/sched/act_mpls.c (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9f7e01f2be83..9d7a2c28ea35 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3450,6 +3450,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto); int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto); int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); +int skb_mpls_dec_ttl(struct sk_buff *skb); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h new file mode 100644 index 000000000000..4bc3d9250ef0 --- /dev/null +++ b/include/net/tc_act/tc_mpls.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#ifndef __NET_TC_MPLS_H +#define __NET_TC_MPLS_H + +#include +#include + +struct tcf_mpls_params { + int tcfm_action; + u32 tcfm_label; + u8 tcfm_tc; + u8 tcfm_ttl; + u8 tcfm_bos; + __be16 tcfm_proto; + struct rcu_head rcu; +}; + +#define ACT_MPLS_TC_NOT_SET 0xff +#define ACT_MPLS_BOS_NOT_SET 0xff +#define ACT_MPLS_LABEL_NOT_SET 0xffffffff + +struct tcf_mpls { + struct tc_action common; + struct tcf_mpls_params __rcu *mpls_p; +}; +#define to_mpls(a) ((struct tcf_mpls *)a) + +#endif /* __NET_TC_MPLS_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 8cc6b6777b3c..e22ef4a940bc 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -104,8 +104,9 @@ enum tca_id { TCA_ID_SIMP = TCA_ACT_SIMP, TCA_ID_IFE = TCA_ACT_IFE, TCA_ID_SAMPLE = TCA_ACT_SAMPLE, - /* other actions go here */ TCA_ID_CTINFO, + TCA_ID_MPLS, + /* other actions go here */ __TCA_ID_MAX = 255 }; diff --git a/include/uapi/linux/tc_act/tc_mpls.h b/include/uapi/linux/tc_act/tc_mpls.h new file mode 100644 index 000000000000..9360e95273c7 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_mpls.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#ifndef __LINUX_TC_MPLS_H +#define __LINUX_TC_MPLS_H + +#include + +#define TCA_MPLS_ACT_POP 1 +#define TCA_MPLS_ACT_PUSH 2 +#define TCA_MPLS_ACT_MODIFY 3 +#define TCA_MPLS_ACT_DEC_TTL 4 + +struct tc_mpls { + tc_gen; /* generic TC action fields. */ + int m_action; /* action of type TCA_MPLS_ACT_*. 
*/ +}; + +enum { + TCA_MPLS_UNSPEC, + TCA_MPLS_TM, /* struct tcf_t; time values associated with action. */ + TCA_MPLS_PARMS, /* struct tc_mpls; action type and general TC fields. */ + TCA_MPLS_PAD, + TCA_MPLS_PROTO, /* be16; eth_type of pushed or next (for pop) header. */ + TCA_MPLS_LABEL, /* u32; MPLS label. Lower 20 bits are used. */ + TCA_MPLS_TC, /* u8; MPLS TC field. Lower 3 bits are used. */ + TCA_MPLS_TTL, /* u8; MPLS TTL field. Must not be 0. */ + TCA_MPLS_BOS, /* u8; MPLS BOS field. Either 1 or 0. */ + __TCA_MPLS_MAX, +}; +#define TCA_MPLS_MAX (__TCA_MPLS_MAX - 1) + +#endif diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 93443a01ab39..6f1e31f674a3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #include @@ -5564,6 +5565,35 @@ int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) } EXPORT_SYMBOL_GPL(skb_mpls_update_lse); +/** + * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header + * + * @skb: buffer + * + * Expects skb->data at mac header. + * + * Returns 0 on success, -errno otherwise. + */ +int skb_mpls_dec_ttl(struct sk_buff *skb) +{ + u32 lse; + u8 ttl; + + if (unlikely(!eth_p_mpls(skb->protocol))) + return -EINVAL; + + lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); + ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; + if (!--ttl) + return -EINVAL; + + lse &= ~MPLS_LS_TTL_MASK; + lse |= ttl << MPLS_LS_TTL_SHIFT; + + return skb_mpls_update_lse(skb, cpu_to_be32(lse)); +} +EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); + /** * alloc_skb_with_frags - allocate skb with page frags * diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 360fdd3eaa77..731f5fbc2a3c 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -842,6 +842,17 @@ config NET_ACT_CSUM To compile this code as a module, choose M here: the module will be called act_csum. +config NET_ACT_MPLS + tristate "MPLS manipulation" + depends on NET_CLS_ACT + help + Say Y here to push or pop MPLS headers. + + If unsure, say N. + + To compile this code as a module, choose M here: the + module will be called act_mpls. + config NET_ACT_VLAN tristate "Vlan manipulation" depends on NET_CLS_ACT diff --git a/net/sched/Makefile b/net/sched/Makefile index d54bfcbd7981..c26603606c22 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o +obj-$(CONFIG_NET_ACT_MPLS) += act_mpls.o obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o obj-$(CONFIG_NET_ACT_BPF) += act_bpf.o obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c new file mode 100644 index 000000000000..ca2597ce4ac9 --- /dev/null +++ b/net/sched/act_mpls.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned int mpls_net_id; +static struct tc_action_ops act_mpls_ops; + +#define ACT_MPLS_TTL_DEFAULT 255 + +static __be32 tcf_mpls_get_lse(struct mpls_shim_hdr *lse, + struct tcf_mpls_params *p, bool set_bos) +{ + u32 new_lse = 0; + + if (lse) + new_lse = be32_to_cpu(lse->label_stack_entry); + + if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET) { + new_lse &= ~MPLS_LS_LABEL_MASK; + new_lse |= p->tcfm_label << MPLS_LS_LABEL_SHIFT; + } + if (p->tcfm_ttl) { + new_lse &= ~MPLS_LS_TTL_MASK; + new_lse |= p->tcfm_ttl << MPLS_LS_TTL_SHIFT; + } + if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET) { + new_lse &= ~MPLS_LS_TC_MASK; + new_lse |= p->tcfm_tc << MPLS_LS_TC_SHIFT; + } + if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET) { + new_lse &= ~MPLS_LS_S_MASK; + new_lse |= p->tcfm_bos << MPLS_LS_S_SHIFT; + } else if (set_bos) { + new_lse |= 1 << MPLS_LS_S_SHIFT; + } + + return cpu_to_be32(new_lse); +} + +static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) +{ + struct tcf_mpls *m = to_mpls(a); + struct tcf_mpls_params *p; + __be32 new_lse; + int ret; + + tcf_lastuse_update(&m->tcf_tm); + bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); + + /* Ensure 'data' points at mac_header prior calling mpls manipulating + * functions. + */ + if (skb_at_tc_ingress(skb)) + skb_push_rcsum(skb, skb->mac_len); + + ret = READ_ONCE(m->tcf_action); + + p = rcu_dereference_bh(m->mpls_p); + + switch (p->tcfm_action) { + case TCA_MPLS_ACT_POP: + if (skb_mpls_pop(skb, p->tcfm_proto)) + goto drop; + break; + case TCA_MPLS_ACT_PUSH: + new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); + if (skb_mpls_push(skb, new_lse, p->tcfm_proto)) + goto drop; + break; + case TCA_MPLS_ACT_MODIFY: + new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false); + if (skb_mpls_update_lse(skb, new_lse)) + goto drop; + break; + case TCA_MPLS_ACT_DEC_TTL: + if (skb_mpls_dec_ttl(skb)) + goto drop; + break; + } + + if (skb_at_tc_ingress(skb)) + skb_pull_rcsum(skb, skb->mac_len); + + return ret; + +drop: + qstats_drop_inc(this_cpu_ptr(m->common.cpu_qstats)); + return TC_ACT_SHOT; +} + +static int valid_label(const struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + const u32 *label = nla_data(attr); + + if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) { + NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range"); + return -EINVAL; + } + + return 0; +} + +static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = { + [TCA_MPLS_UNSPEC] = { .strict_start_type = TCA_MPLS_UNSPEC + 1 }, + [TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)), + [TCA_MPLS_PROTO] = { .type = NLA_U16 }, + [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label), + [TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7), + [TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1), + [TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1), +}; + +static int tcf_mpls_init(struct net *net, struct nlattr *nla, + struct nlattr *est, struct tc_action **a, + int ovr, int bind, bool rtnl_held, + struct tcf_proto *tp, struct netlink_ext_ack *extack) +{ + struct tc_action_net *tn = net_generic(net, mpls_net_id); + struct nlattr *tb[TCA_MPLS_MAX + 1]; + struct tcf_chain *goto_ch = NULL; + struct tcf_mpls_params *p; + struct tc_mpls *parm; + bool exists = false; + struct tcf_mpls *m; + int ret = 0, err; + u8 mpls_ttl = 0; + + if (!nla) { + NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes"); + return 
-EINVAL; + } + + err = nla_parse_nested(tb, TCA_MPLS_MAX, nla, mpls_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_MPLS_PARMS]) { + NL_SET_ERR_MSG_MOD(extack, "No MPLS params"); + return -EINVAL; + } + parm = nla_data(tb[TCA_MPLS_PARMS]); + + /* Verify parameters against action type. */ + switch (parm->m_action) { + case TCA_MPLS_ACT_POP: + if (!tb[TCA_MPLS_PROTO]) { + NL_SET_ERR_MSG_MOD(extack, "Protocol must be set for MPLS pop"); + return -EINVAL; + } + if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) { + NL_SET_ERR_MSG_MOD(extack, "Invalid protocol type for MPLS pop"); + return -EINVAL; + } + if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || + tb[TCA_MPLS_BOS]) { + NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC or BOS cannot be used with MPLS pop"); + return -EINVAL; + } + break; + case TCA_MPLS_ACT_DEC_TTL: + if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] || + tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) { + NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC, BOS or protocol cannot be used with MPLS dec_ttl"); + return -EINVAL; + } + break; + case TCA_MPLS_ACT_PUSH: + if (!tb[TCA_MPLS_LABEL]) { + NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push"); + return -EINVAL; + } + if (tb[TCA_MPLS_PROTO] && + !eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) { + NL_SET_ERR_MSG_MOD(extack, "Protocol must be an MPLS type for MPLS push"); + return -EPROTONOSUPPORT; + } + /* Push needs a TTL - if not specified, set a default value. */ + if (!tb[TCA_MPLS_TTL]) { +#if IS_ENABLED(CONFIG_MPLS) + mpls_ttl = net->mpls.default_ttl ? + net->mpls.default_ttl : ACT_MPLS_TTL_DEFAULT; +#else + mpls_ttl = ACT_MPLS_TTL_DEFAULT; +#endif + } + break; + case TCA_MPLS_ACT_MODIFY: + if (tb[TCA_MPLS_PROTO]) { + NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be used with MPLS modify"); + return -EINVAL; + } + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unknown MPLS action"); + return -EINVAL; + } + + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; + if (exists && bind) + return 0; + + if (!exists) { + ret = tcf_idr_create(tn, parm->index, est, a, + &act_mpls_ops, bind, true); + if (ret) { + tcf_idr_cleanup(tn, parm->index); + return ret; + } + + ret = ACT_P_CREATED; + } else if (!ovr) { + tcf_idr_release(*a, bind); + return -EEXIST; + } + + err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); + if (err < 0) + goto release_idr; + + m = to_mpls(*a); + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto put_chain; + } + + p->tcfm_action = parm->m_action; + p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) : + ACT_MPLS_LABEL_NOT_SET; + p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) : + ACT_MPLS_TC_NOT_SET; + p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) : + mpls_ttl; + p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) : + ACT_MPLS_BOS_NOT_SET; + p->tcfm_proto = tb[TCA_MPLS_PROTO] ? 
nla_get_be16(tb[TCA_MPLS_PROTO]) : + htons(ETH_P_MPLS_UC); + + spin_lock_bh(&m->tcf_lock); + goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); + rcu_swap_protected(m->mpls_p, p, lockdep_is_held(&m->tcf_lock)); + spin_unlock_bh(&m->tcf_lock); + + if (goto_ch) + tcf_chain_put_by_act(goto_ch); + if (p) + kfree_rcu(p, rcu); + + if (ret == ACT_P_CREATED) + tcf_idr_insert(tn, *a); + return ret; +put_chain: + if (goto_ch) + tcf_chain_put_by_act(goto_ch); +release_idr: + tcf_idr_release(*a, bind); + return err; +} + +static void tcf_mpls_cleanup(struct tc_action *a) +{ + struct tcf_mpls *m = to_mpls(a); + struct tcf_mpls_params *p; + + p = rcu_dereference_protected(m->mpls_p, 1); + if (p) + kfree_rcu(p, rcu); +} + +static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) +{ + unsigned char *b = skb_tail_pointer(skb); + struct tcf_mpls *m = to_mpls(a); + struct tcf_mpls_params *p; + struct tc_mpls opt = { + .index = m->tcf_index, + .refcnt = refcount_read(&m->tcf_refcnt) - ref, + .bindcnt = atomic_read(&m->tcf_bindcnt) - bind, + }; + struct tcf_t t; + + spin_lock_bh(&m->tcf_lock); + opt.action = m->tcf_action; + p = rcu_dereference_protected(m->mpls_p, lockdep_is_held(&m->tcf_lock)); + opt.m_action = p->tcfm_action; + + if (nla_put(skb, TCA_MPLS_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + + if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET && + nla_put_u32(skb, TCA_MPLS_LABEL, p->tcfm_label)) + goto nla_put_failure; + + if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET && + nla_put_u8(skb, TCA_MPLS_TC, p->tcfm_tc)) + goto nla_put_failure; + + if (p->tcfm_ttl && nla_put_u8(skb, TCA_MPLS_TTL, p->tcfm_ttl)) + goto nla_put_failure; + + if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET && + nla_put_u8(skb, TCA_MPLS_BOS, p->tcfm_bos)) + goto nla_put_failure; + + if (nla_put_be16(skb, TCA_MPLS_PROTO, p->tcfm_proto)) + goto nla_put_failure; + + tcf_tm_dump(&t, &m->tcf_tm); + + if (nla_put_64bit(skb, TCA_MPLS_TM, sizeof(t), &t, TCA_MPLS_PAD)) + goto nla_put_failure; + + spin_unlock_bh(&m->tcf_lock); + + return skb->len; + +nla_put_failure: + spin_unlock_bh(&m->tcf_lock); + nlmsg_trim(skb, b); + return -EMSGSIZE; +} + +static int tcf_mpls_walker(struct net *net, struct sk_buff *skb, + struct netlink_callback *cb, int type, + const struct tc_action_ops *ops, + struct netlink_ext_ack *extack) +{ + struct tc_action_net *tn = net_generic(net, mpls_net_id); + + return tcf_generic_walker(tn, skb, cb, type, ops, extack); +} + +static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index) +{ + struct tc_action_net *tn = net_generic(net, mpls_net_id); + + return tcf_idr_search(tn, a, index); +} + +static struct tc_action_ops act_mpls_ops = { + .kind = "mpls", + .id = TCA_ID_MPLS, + .owner = THIS_MODULE, + .act = tcf_mpls_act, + .dump = tcf_mpls_dump, + .init = tcf_mpls_init, + .cleanup = tcf_mpls_cleanup, + .walk = tcf_mpls_walker, + .lookup = tcf_mpls_search, + .size = sizeof(struct tcf_mpls), +}; + +static __net_init int mpls_init_net(struct net *net) +{ + struct tc_action_net *tn = net_generic(net, mpls_net_id); + + return tc_action_net_init(tn, &act_mpls_ops); +} + +static void __net_exit mpls_exit_net(struct list_head *net_list) +{ + tc_action_net_exit(net_list, mpls_net_id); +} + +static struct pernet_operations mpls_net_ops = { + .init = mpls_init_net, + .exit_batch = mpls_exit_net, + .id = &mpls_net_id, + .size = sizeof(struct tc_action_net), +}; + +static int __init mpls_init_module(void) +{ + return tcf_register_action(&act_mpls_ops, &mpls_net_ops); +} + +static void 
__exit mpls_cleanup_module(void) { + tcf_unregister_action(&act_mpls_ops, &mpls_net_ops); +} + +module_init(mpls_init_module); +module_exit(mpls_cleanup_module); + +MODULE_AUTHOR("Netronome Systems "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPLS manipulation actions"); -- cgit v1.2.3-59-g8ed1b From 378ef01b5f75e6c485b8f16b4f6a7842a312aa07 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 8 Jul 2019 23:17:35 -0500 Subject: devlink: Refactor physical port attributes To support additional devlink port flavours and to support a few common and a few different port attributes, move the physical port attributes to a separate structure. Acked-by: Jiri Pirko Signed-off-by: Parav Pandit Signed-off-by: David S. Miller --- include/net/devlink.h | 13 ++++++++++-- net/core/devlink.c | 58 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 49 insertions(+), 22 deletions(-) (limited to 'net/core') diff --git a/include/net/devlink.h b/include/net/devlink.h index 6625ea068d5e..4538c80fe293 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -38,14 +38,23 @@ struct devlink { char priv[0] __aligned(NETDEV_ALIGN); }; +struct devlink_port_phys_attrs { + u32 port_number; /* Same value as "split group". + * A physical port which is visible to the user + * for a given port flavour. + */ + u32 split_subport_number; +}; + struct devlink_port_attrs { u8 set:1, split:1, switch_port:1; enum devlink_port_flavour flavour; - u32 port_number; /* same value as "split group" */ - u32 split_subport_number; struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + }; }; struct devlink_port { diff --git a/net/core/devlink.c b/net/core/devlink.c index 89c533778135..eacaf37b5108 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -515,14 +515,16 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; - if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, attrs->port_number)) + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, + attrs->phys.port_number)) return -EMSGSIZE; if (!attrs->split) return 0; - if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP, attrs->port_number)) + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP, + attrs->phys.port_number)) return -EMSGSIZE; if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, - attrs->split_subport_number)) + attrs->phys.split_subport_number)) return -EMSGSIZE; return 0; } @@ -5738,6 +5740,29 @@ void devlink_port_type_clear(struct devlink_port *devlink_port) } EXPORT_SYMBOL_GPL(devlink_port_type_clear); +static int __devlink_port_attrs_set(struct devlink_port *devlink_port, + enum devlink_port_flavour flavour, + const unsigned char *switch_id, + unsigned char switch_id_len) +{ + struct devlink_port_attrs *attrs = &devlink_port->attrs; + + if (WARN_ON(devlink_port->registered)) + return -EEXIST; + attrs->set = true; + attrs->flavour = flavour; + if (switch_id) { + attrs->switch_port = true; + if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN)) + switch_id_len = MAX_PHYS_ITEM_ID_LEN; + memcpy(attrs->switch_id.id, switch_id, switch_id_len); + attrs->switch_id.id_len = switch_id_len; + } else { + attrs->switch_port = false; + } + return 0; +} + /** * devlink_port_attrs_set - Set port attributes * @@ -5760,23 +5785,15 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, unsigned char switch_id_len) { struct devlink_port_attrs *attrs = &devlink_port->attrs; + int ret; - if (WARN_ON(devlink_port->registered))
+ ret = __devlink_port_attrs_set(devlink_port, flavour, + switch_id, switch_id_len); + if (ret) return; - attrs->set = true; - attrs->flavour = flavour; - attrs->port_number = port_number; attrs->split = split; - attrs->split_subport_number = split_subport_number; - if (switch_id) { - attrs->switch_port = true; - if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN)) - switch_id_len = MAX_PHYS_ITEM_ID_LEN; - memcpy(attrs->switch_id.id, switch_id, switch_id_len); - attrs->switch_id.id_len = switch_id_len; - } else { - attrs->switch_port = false; - } + attrs->phys.port_number = port_number; + attrs->phys.split_subport_number = split_subport_number; } EXPORT_SYMBOL_GPL(devlink_port_attrs_set); @@ -5792,10 +5809,11 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, switch (attrs->flavour) { case DEVLINK_PORT_FLAVOUR_PHYSICAL: if (!attrs->split) - n = snprintf(name, len, "p%u", attrs->port_number); + n = snprintf(name, len, "p%u", attrs->phys.port_number); else - n = snprintf(name, len, "p%us%u", attrs->port_number, - attrs->split_subport_number); + n = snprintf(name, len, "p%us%u", + attrs->phys.port_number, + attrs->phys.split_subport_number); break; case DEVLINK_PORT_FLAVOUR_CPU: case DEVLINK_PORT_FLAVOUR_DSA: -- cgit v1.2.3-59-g8ed1b From a2c6b87dd08cdfc2d065d4ae369e4e747b521a7f Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 8 Jul 2019 23:17:36 -0500 Subject: devlink: Return physical port fields only for applicable port flavours Physical port number and split group fields are applicable only to physical port flavours such as PHYSICAL, CPU and DSA. Hence, limit returning those values in the netlink response to such port flavours. Acked-by: Jiri Pirko Signed-off-by: Parav Pandit Signed-off-by: David S. Miller --- net/core/devlink.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net/core') diff --git a/net/core/devlink.c b/net/core/devlink.c index eacaf37b5108..a9c4e5d8a99c 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -515,6 +515,10 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; + if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PHYSICAL && + devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU && + devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA) + return 0; if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, attrs->phys.port_number)) return -EMSGSIZE; -- cgit v1.2.3-59-g8ed1b From 98fd2d6563fe4a799934a2a74d632601cd089beb Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 8 Jul 2019 23:17:37 -0500 Subject: devlink: Introduce PCI PF port flavour and port attribute In an eswitch, a PCI PF may have a port which is normally represented by a representor netdevice. To have better visibility of the eswitch port, its association with the PF, and its representor netdevice, introduce a PCI PF port flavour and port attribute. When the devlink port flavour is PCI PF, fill up the PCI PF attributes of the port. Extend port name creation using the PCI PF number on a best-effort basis, so that vendor drivers can skip defining their own scheme. $ devlink port show pci/0000:05:00.0/0: type eth netdev eth0 flavour pcipf pfnum 0 Acked-by: Jiri Pirko Signed-off-by: Parav Pandit Signed-off-by: David S.
Miller --- include/net/devlink.h | 8 ++++++++ include/uapi/linux/devlink.h | 5 +++++ net/core/devlink.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+) (limited to 'net/core') diff --git a/include/net/devlink.h b/include/net/devlink.h index 4538c80fe293..97cef896e4d0 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -46,6 +46,10 @@ struct devlink_port_phys_attrs { u32 split_subport_number; }; +struct devlink_port_pci_pf_attrs { + u16 pf; /* Associated PCI PF for this port. */ +}; + struct devlink_port_attrs { u8 set:1, split:1, @@ -54,6 +58,7 @@ struct devlink_port_attrs { struct netdev_phys_item_id switch_id; union { struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; }; }; @@ -599,6 +604,9 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, u32 split_subport_number, const unsigned char *switch_id, unsigned char switch_id_len); +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, + const unsigned char *switch_id, + unsigned char switch_id_len, u16 pf); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 5287b42c181f..f7323884c3fe 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -169,6 +169,10 @@ enum devlink_port_flavour { DEVLINK_PORT_FLAVOUR_DSA, /* Distributed switch architecture * interconnect port. */ + DEVLINK_PORT_FLAVOUR_PCI_PF, /* Represents eswitch port for + * the PCI PF. It is an internal + * port that faces the PCI PF. + */ }; enum devlink_param_cmode { @@ -337,6 +341,7 @@ enum devlink_attr { DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE, /* u64 */ DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, /* u64 */ + DEVLINK_ATTR_PORT_PCI_PF_NUMBER, /* u16 */ /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index a9c4e5d8a99c..d362652a5cc7 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -515,6 +515,11 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; + if (devlink_port->attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_PF) { + if (nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, + attrs->pci_pf.pf)) + return -EMSGSIZE; + } if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PHYSICAL && devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU && devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA) @@ -5801,6 +5806,32 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, } EXPORT_SYMBOL_GPL(devlink_port_attrs_set); +/** + * devlink_port_attrs_pci_pf_set - Set PCI PF port attributes + * + * @devlink_port: devlink port + * @pf: associated PF for the devlink port instance + * @switch_id: if the port is part of switch, this is buffer with ID, + * otherwise this is NULL + * @switch_id_len: length of the switch_id buffer + */ +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, + const unsigned char *switch_id, + unsigned char switch_id_len, u16 pf) +{ + struct devlink_port_attrs *attrs = &devlink_port->attrs; + int ret; + + ret = __devlink_port_attrs_set(devlink_port, + DEVLINK_PORT_FLAVOUR_PCI_PF, + switch_id, switch_id_len); + if (ret) + return; + + attrs->pci_pf.pf = pf; +} +EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set); + static int 
__devlink_port_phys_port_name_get(struct devlink_port *devlink_port, char *name, size_t len) { @@ -5826,6 +5857,9 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, */ WARN_ON(1); return -EINVAL; + case DEVLINK_PORT_FLAVOUR_PCI_PF: + n = snprintf(name, len, "pf%u", attrs->pci_pf.pf); + break; } if (n >= len) -- cgit v1.2.3-59-g8ed1b From e41b6bf3cdd474dc9c587cb55906b0256835bf6d Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 8 Jul 2019 23:17:38 -0500 Subject: devlink: Introduce PCI VF port flavour and port attribute In an eswitch, a PCI VF may have a port which is normally represented by a representor netdevice. To have better visibility of the eswitch port, its association with the VF, and its representor netdevice, introduce a PCI VF port flavour. When the devlink port flavour is PCI VF, fill up the PCI VF attributes of the port. Extend port name creation using the PCI PF and VF number scheme on a best-effort basis, so that vendor drivers can skip defining their own scheme. $ devlink port show pci/0000:05:00.0/0: type eth netdev eth0 flavour pcipf pfnum 0 pci/0000:05:00.0/1: type eth netdev eth1 flavour pcivf pfnum 0 vfnum 0 pci/0000:05:00.0/2: type eth netdev eth2 flavour pcivf pfnum 0 vfnum 1 Acked-by: Jiri Pirko Signed-off-by: Parav Pandit Signed-off-by: David S. Miller --- include/net/devlink.h | 10 ++++++++ include/uapi/linux/devlink.h | 6 ++++++ net/core/devlink.c | 38 ++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+) (limited to 'net/core') diff --git a/include/net/devlink.h b/include/net/devlink.h index 97cef896e4d0..bc36f942a7d5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -50,6 +50,11 @@ struct devlink_port_pci_pf_attrs { u16 pf; /* Associated PCI PF for this port. */ }; +struct devlink_port_pci_vf_attrs { + u16 pf; /* Associated PCI PF for this port. */ + u16 vf; /* Associated PCI VF of the PCI PF for this port. */ +}; + struct devlink_port_attrs { u8 set:1, split:1, @@ -59,6 +64,7 @@ struct devlink_port_attrs { union { struct devlink_port_phys_attrs phys; struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; }; }; @@ -607,6 +613,10 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, const unsigned char *switch_id, unsigned char switch_id_len, u16 pf); +void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, + const unsigned char *switch_id, + unsigned char switch_id_len, + u16 pf, u16 vf); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index f7323884c3fe..ffc993256527 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -173,6 +173,10 @@ enum devlink_port_flavour { * the PCI PF. It is an internal * port that faces the PCI PF. */ + DEVLINK_PORT_FLAVOUR_PCI_VF, /* Represents eswitch port + * for the PCI VF. It is an internal + * port that faces the PCI VF.
+ */ }; enum devlink_param_cmode { @@ -342,6 +346,8 @@ enum devlink_attr { DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, /* u64 */ DEVLINK_ATTR_PORT_PCI_PF_NUMBER, /* u16 */ + DEVLINK_ATTR_PORT_PCI_VF_NUMBER, /* u16 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index d362652a5cc7..4f40aeace902 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -519,6 +519,12 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, if (nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf)) return -EMSGSIZE; + } else if (devlink_port->attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_VF) { + if (nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, + attrs->pci_vf.pf) || + nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, + attrs->pci_vf.vf)) + return -EMSGSIZE; } if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PHYSICAL && devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU && @@ -5832,6 +5838,34 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, } EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set); +/** + * devlink_port_attrs_pci_vf_set - Set PCI VF port attributes + * + * @devlink_port: devlink port + * @pf: associated PF for the devlink port instance + * @vf: associated VF of a PF for the devlink port instance + * @switch_id: if the port is part of switch, this is buffer with ID, + * otherwise this is NULL + * @switch_id_len: length of the switch_id buffer + */ +void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, + const unsigned char *switch_id, + unsigned char switch_id_len, + u16 pf, u16 vf) +{ + struct devlink_port_attrs *attrs = &devlink_port->attrs; + int ret; + + ret = __devlink_port_attrs_set(devlink_port, + DEVLINK_PORT_FLAVOUR_PCI_VF, + switch_id, switch_id_len); + if (ret) + return; + attrs->pci_vf.pf = pf; + attrs->pci_vf.vf = vf; +} +EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set); + static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, char *name, size_t len) { @@ -5860,6 +5894,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, case DEVLINK_PORT_FLAVOUR_PCI_PF: n = snprintf(name, len, "pf%u", attrs->pci_pf.pf); break; + case DEVLINK_PORT_FLAVOUR_PCI_VF: + n = snprintf(name, len, "pf%uvf%u", + attrs->pci_vf.pf, attrs->pci_vf.vf); + break; } if (n >= len) -- cgit v1.2.3-59-g8ed1b
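Taken together with the PF flavour above, a vendor driver is expected to set these attributes before registering the port, roughly as below; myesw_register_vf_port() and its argument list are hypothetical, only the two devlink calls come from this series:

#include <net/devlink.h>

/* Register an eswitch port for VF 'vf' of PF 'pf'. Because the PCI VF
 * attributes are set before devlink_port_register(), devlink can derive
 * the "pfXvfY" phys_port_name itself and the driver needs no naming
 * scheme of its own.
 */
static int myesw_register_vf_port(struct devlink *dl,
				  struct devlink_port *dl_port,
				  const unsigned char *switch_id,
				  unsigned char switch_id_len,
				  u16 pf, u16 vf, unsigned int port_index)
{
	devlink_port_attrs_pci_vf_set(dl_port, switch_id, switch_id_len,
				      pf, vf);
	return devlink_port_register(dl, dl_port, port_index);
}

From 75a56758d6390ea6db523ad26ce378f34b907b0c Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 9 Jul 2019 10:30:49 +0300 Subject: net/flow_dissector: add connection tracking dissection Retrieves the connection tracking zone, mark, labels, and state from an skb. Signed-off-by: Paul Blakey Signed-off-by: Marcelo Ricardo Leitner Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/skbuff.h | 10 ++++++++ include/net/flow_dissector.h | 15 +++++++++++++ net/core/flow_dissector.c | 44 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+) (limited to 'net/core') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9d7a2c28ea35..d8af86d995d6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1325,6 +1325,16 @@ void skb_flow_dissect_meta(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); +/* Gets the skb's connection tracking info; ctinfo_map should be + * a map of mapsize entries translating enum ip_conntrack_info states + * to user states.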
 void skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
 				  struct flow_dissector *flow_dissector,
--
cgit v1.2.3-59-g8ed1b


From 4e95bc268b915c3a19ec8b9110f61e4ea41a1ed0 Mon Sep 17 00:00:00 2001
From: Pablo Neira Ayuso
Date: Tue, 9 Jul 2019 22:55:39 +0200
Subject: net: flow_offload: add flow_block_cb_setup_simple()

Most drivers do the same thing to set up the flow block callbacks; this
patch adds a helper function for that. This preparation patch reduces the
number of changes needed to adapt the existing drivers to the flow block
callback API.

The new helper function takes a per-driver flow block list, which is set
to NULL until this driver list is used.

This patch also introduces the flow_block_command and
flow_block_binder_type enumerations, which are renamed to use FLOW_BLOCK_*
in follow up patches.
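With the helper in place, a driver's block setup typically collapses to a
single call from its ndo_setup_tc() callback. As a minimal sketch (the
example_* names are hypothetical, not taken from this patch; the driver
conversions below follow exactly this shape):

	static int example_setup_tc(struct net_device *dev,
				    enum tc_setup_type type, void *type_data)
	{
		struct example_priv *priv = netdev_priv(dev);

		switch (type) {
		case TC_SETUP_BLOCK:
			/* NULL driver block list, ingress-only binding */
			return flow_block_cb_setup_simple(type_data, NULL,
							  example_block_cb,
							  priv, priv, true);
		default:
			return -EOPNOTSUPP;
		}
	}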
There are three definitions (aliases) in order to reduce the number of updates in this patch, which go away once drivers are fully adapted to use this flow block API. Signed-off-by: Pablo Neira Ayuso Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 26 ++++------------- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 28 ++++-------------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 26 ++++------------- drivers/net/ethernet/intel/i40e/i40e_main.c | 26 ++++------------- drivers/net/ethernet/intel/iavf/iavf_main.c | 35 ++++------------------- drivers/net/ethernet/intel/igb/igb_main.c | 24 +++------------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 27 ++++------------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 27 ++++------------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 26 ++++------------- drivers/net/ethernet/netronome/nfp/abm/cls.c | 17 ++--------- drivers/net/ethernet/netronome/nfp/bpf/main.c | 29 ++++--------------- drivers/net/ethernet/qlogic/qede/qede_main.c | 23 ++------------- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22 ++------------ drivers/net/netdevsim/netdev.c | 26 ++++------------- include/net/flow_offload.h | 27 +++++++++++++++++ include/net/pkt_cls.h | 20 ++----------- net/core/flow_offload.c | 25 ++++++++++++++++ 17 files changed, 117 insertions(+), 317 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2b5b0ab8961a..06819590f6d0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -9907,32 +9907,16 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int bnxt_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct bnxt *bp = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct bnxt *bp = netdev_priv(dev); + switch (type) { case TC_SETUP_BLOCK: - return bnxt_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + bnxt_setup_tc_block_cb, + bp, bp, true); case TC_SETUP_QDISC_MQPRIO: { struct tc_mqprio_qopt *mqprio = type_data; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index f760921389a3..89398ff011d4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -161,34 +161,16 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, } } -static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct bnxt_vf_rep *vf_rep = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, - bnxt_vf_rep_setup_tc_block_cb, - vf_rep, vf_rep, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, - bnxt_vf_rep_setup_tc_block_cb, vf_rep); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int 
bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + switch (type) { case TC_SETUP_BLOCK: - return bnxt_vf_rep_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + bnxt_vf_rep_setup_tc_block_cb, + vf_rep, vf_rep, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b08efc48d42f..9a486282a32e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3190,32 +3190,16 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int cxgb_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct port_info *pi = netdev2pinfo(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, - pi, dev, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct port_info *pi = netdev2pinfo(dev); + switch (type) { case TC_SETUP_BLOCK: - return cxgb_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + cxgb_setup_tc_block_cb, + pi, dev, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5361c08328f7..52f0f14d4207 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8177,34 +8177,18 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int i40e_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, - np, np, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { + struct i40e_netdev_priv *np = netdev_priv(netdev); + switch (type) { case TC_SETUP_QDISC_MQPRIO: return i40e_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return i40e_setup_tc_block(netdev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + i40e_setup_tc_block_cb, + np, np, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 881561b36083..fd0e2bcc75e5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3113,35 +3113,6 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -/** - * iavf_setup_tc_block - register callbacks for tc - * @netdev: network interface device structure - * @f: tc offload data - * - * This function registers block callbacks for tc - * offloads - **/ -static int iavf_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct iavf_adapter *adapter = 
netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} - /** * iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure @@ -3156,11 +3127,15 @@ static int iavf_setup_tc_block(struct net_device *dev, static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { + struct iavf_adapter *adapter = netdev_priv(netdev); + switch (type) { case TC_SETUP_QDISC_MQPRIO: return __iavf_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return iavf_setup_tc_block(netdev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + iavf_setup_tc_block_cb, + adapter, adapter, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f66dae72fe37..836f9e1a136c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2783,25 +2783,6 @@ static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int igb_setup_tc_block(struct igb_adapter *adapter, - struct tc_block_offload *f) -{ - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int igb_offload_txtime(struct igb_adapter *adapter, struct tc_etf_qopt_offload *qopt) { @@ -2834,7 +2815,10 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: - return igb_setup_tc_block(adapter, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + igb_setup_tc_block_cb, + adapter, adapter, true); + case TC_SETUP_QDISC_ETF: return igb_offload_txtime(adapter, type_data); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b613e72c8ee4..b098f5be9c0d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9607,27 +9607,6 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int ixgbe_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int ixgbe_setup_tc_mqprio(struct net_device *dev, struct tc_mqprio_qopt *mqprio) { @@ -9638,9 +9617,13 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev, static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct ixgbe_adapter *adapter = netdev_priv(dev); + switch 
(type) { case TC_SETUP_BLOCK: - return ixgbe_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + ixgbe_setup_tc_block_cb, + adapter, adapter, true); case TC_SETUP_QDISC_MQPRIO: return ixgbe_setup_tc_mqprio(dev, type_data); default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 10efd69de7ef..8e5ebdb7c459 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3457,36 +3457,19 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return -EOPNOTSUPP; } } - -static int mlx5e_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, - priv, priv, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, - priv); - return 0; - default: - return -EOPNOTSUPP; - } -} #endif static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct mlx5e_priv *priv = netdev_priv(dev); + switch (type) { #ifdef CONFIG_MLX5_ESWITCH case TC_SETUP_BLOCK: - return mlx5e_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + mlx5e_setup_tc_block_cb, + priv, priv, true); #endif case TC_SETUP_QDISC_MQPRIO: return mlx5e_setup_tc_mqprio(dev, type_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 6810b9fa0705..a211cdb5eb8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1153,32 +1153,16 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, } } -static int mlx5e_rep_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb, - priv, priv, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct mlx5e_priv *priv = netdev_priv(dev); + switch (type) { case TC_SETUP_BLOCK: - return mlx5e_rep_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + mlx5e_rep_setup_tc_cb, + priv, priv, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c index ff3913085665..29fb45734962 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/cls.c +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c @@ -265,19 +265,6 @@ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type, int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr, struct tc_block_offload *f) { - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, - nfp_abm_setup_tc_block_cb, - repr, repr, f->extack); - case TC_BLOCK_UNBIND: - 
tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb, - repr); - return 0; - default: - return -EOPNOTSUPP; - } + return flow_block_cb_setup_simple(f, NULL, nfp_abm_setup_tc_block_cb, + repr, repr, true); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 9c136da25221..0c93c84a188a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -160,35 +160,16 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, return 0; } -static int nfp_bpf_setup_tc_block(struct net_device *netdev, - struct tc_block_offload *f) -{ - struct nfp_net *nn = netdev_priv(netdev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, - nfp_bpf_setup_tc_block_cb, - nn, nn, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, - nfp_bpf_setup_tc_block_cb, - nn); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) { + struct nfp_net *nn = netdev_priv(netdev); + switch (type) { case TC_SETUP_BLOCK: - return nfp_bpf_setup_tc_block(netdev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + nfp_bpf_setup_tc_block_cb, + nn, nn, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d4a29660751d..cba97ed3dd56 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -579,25 +579,6 @@ static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int qede_setup_tc_block(struct qede_dev *edev, - struct tc_block_offload *f) -{ - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, - qede_setup_tc_block_cb, - edev, edev, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, void *type_data) @@ -607,7 +588,9 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: - return qede_setup_tc_block(edev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + qede_setup_tc_block_cb, + edev, edev, true); case TC_SETUP_QDISC_MQPRIO: mqprio = type_data; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e4b06dc484b7..93ef80c16f07 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3769,24 +3769,6 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return ret; } -static int stmmac_setup_tc_block(struct stmmac_priv *priv, - struct tc_block_offload *f) -{ - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, - priv, priv, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int 
stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { @@ -3794,7 +3776,9 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: - return stmmac_setup_tc_block(priv, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + stmmac_setup_tc_block_cb, + priv, priv, true); case TC_SETUP_QDISC_CBS: return stmmac_tc_setup_cbs(priv, priv, type_data); default: diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index e5c8aa08e1cd..920dc79e9dc9 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -78,26 +78,6 @@ nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv); } -static int -nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f) -{ - struct netdevsim *ns = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb, - ns, ns, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct netdevsim *ns = netdev_priv(dev); @@ -226,9 +206,13 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) static int nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct netdevsim *ns = netdev_priv(dev); + switch (type) { case TC_SETUP_BLOCK: - return nsim_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, NULL, + nsim_setup_tc_block_cb, + ns, ns, true); default: return -EOPNOTSUPP; } diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index a09e256d2b27..2430e4907fe9 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -3,6 +3,7 @@ #include #include +#include struct flow_match { struct flow_dissector *dissector; @@ -237,4 +238,30 @@ static inline void flow_stats_update(struct flow_stats *flow_stats, flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused); } +enum flow_block_command { + TC_BLOCK_BIND, + TC_BLOCK_UNBIND, +}; + +enum flow_block_binder_type { + TCF_BLOCK_BINDER_TYPE_UNSPEC, + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS, +}; + +struct tcf_block; +struct netlink_ext_ack; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + struct tcf_block *block; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; +}; + +int flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, bool ingress_only); + #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 1a7596ba0dbe..b6c306fa9541 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -26,11 +26,9 @@ struct tcf_walker { int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); -enum tcf_block_binder_type { - TCF_BLOCK_BINDER_TYPE_UNSPEC, - TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS, - TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS, -}; +#define tc_block_offload flow_block_offload +#define tc_block_command flow_block_command +#define tcf_block_binder_type 
flow_block_binder_type struct tcf_block_ext_info { enum tcf_block_binder_type binder_type; @@ -610,18 +608,6 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, void *type_data, bool err_stop); unsigned int tcf_exts_num_actions(struct tcf_exts *exts); -enum tc_block_command { - TC_BLOCK_BIND, - TC_BLOCK_UNBIND, -}; - -struct tc_block_offload { - enum tc_block_command command; - enum tcf_block_binder_type binder_type; - struct tcf_block *block; - struct netlink_ext_ack *extack; -}; - struct tc_cls_common_offload { u32 chain_index; __be16 protocol; diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index f52fe0bc4017..e31c0fdb6b01 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -2,6 +2,7 @@ #include #include #include +#include struct flow_rule *flow_rule_alloc(unsigned int num_actions) { @@ -164,3 +165,27 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule, FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_opts); + +int flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_block_list, + tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->driver_block_list = driver_block_list; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(flow_block_cb_setup_simple); -- cgit v1.2.3-59-g8ed1b From 9c0e189ec988f306331036bc3f71085582b24fdc Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 9 Jul 2019 22:55:40 +0200 Subject: net: flow_offload: rename TC_BLOCK_{UN}BIND to FLOW_BLOCK_{UN}BIND Rename from TC_BLOCK_{UN}BIND to FLOW_BLOCK_{UN}BIND and remove temporary tc_block_command alias. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++-- drivers/net/ethernet/mscc/ocelot_tc.c | 4 ++-- .../net/ethernet/netronome/nfp/flower/offload.c | 8 ++++---- include/net/flow_offload.h | 4 ++-- include/net/pkt_cls.h | 1 - net/core/flow_offload.c | 4 ++-- net/dsa/slave.c | 4 ++-- net/sched/cls_api.c | 22 +++++++++++----------- 9 files changed, 27 insertions(+), 28 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a211cdb5eb8b..853aff64ef4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -705,7 +705,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, return -EOPNOTSUPP; switch (f->command) { - case TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); if (indr_priv) return -EEXIST; @@ -728,7 +728,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, } return err; - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); if (!indr_priv) return -ENOENT; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ce285fbeebd3..9cf61a9d8291 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1679,7 +1679,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, } switch (f->command) { - case TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, mlxsw_sp_port, f->extack); if (err) @@ -1692,7 +1692,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, return err; } return 0; - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, f->block, ingress); tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c index 72084306240d..c84942ef8e7b 100644 --- a/drivers/net/ethernet/mscc/ocelot_tc.c +++ b/drivers/net/ethernet/mscc/ocelot_tc.c @@ -147,14 +147,14 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, } switch (f->command) { - case TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: ret = tcf_block_cb_register(f->block, cb, port, port, f->extack); if (ret) return ret; return ocelot_setup_tc_block_flower_bind(port, f); - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: ocelot_setup_tc_block_flower_unbind(port, f); tcf_block_cb_unregister(f->block, cb, port); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 6dbe947269c3..7c94f5142076 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1315,11 +1315,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, repr_priv->block_shared = tcf_block_shared(f->block); switch (f->command) { - case TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_flower_setup_tc_block_cb, repr, repr, f->extack); - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_flower_setup_tc_block_cb, repr); @@ -1395,7 +1395,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, return -EOPNOTSUPP; switch (f->command) { - case 
TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); if (!cb_priv) return -ENOMEM; @@ -1413,7 +1413,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, } return err; - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); if (!cb_priv) return -ENOENT; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 2430e4907fe9..7c9f7a2ac7ce 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -239,8 +239,8 @@ static inline void flow_stats_update(struct flow_stats *flow_stats, } enum flow_block_command { - TC_BLOCK_BIND, - TC_BLOCK_UNBIND, + FLOW_BLOCK_BIND, + FLOW_BLOCK_UNBIND, }; enum flow_block_binder_type { diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index b6c306fa9541..1a96f469164f 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -27,7 +27,6 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #define tc_block_offload flow_block_offload -#define tc_block_command flow_block_command #define tcf_block_binder_type flow_block_binder_type struct tcf_block_ext_info { diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index e31c0fdb6b01..593e73f7593a 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -178,10 +178,10 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f, f->driver_block_list = driver_block_list; switch (f->command) { - case TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, f->extack); - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cb, cb_ident); return 0; default: diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 99673f6b07f6..58a71ee0747a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -955,9 +955,9 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, return -EOPNOTSUPP; switch (f->command) { - case TC_BLOCK_BIND: + case FLOW_BLOCK_BIND: return tcf_block_cb_register(f->block, cb, dev, dev, f->extack); - case TC_BLOCK_UNBIND: + case FLOW_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cb, dev); return 0; default: diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 4a7331ce830d..ed6f35cc11ea 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -675,7 +675,7 @@ static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb) static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev, struct tc_indr_block_cb *indr_block_cb, - enum tc_block_command command) + enum flow_block_command command) { struct tc_block_offload bo = { .command = command, @@ -706,7 +706,7 @@ int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, if (err) goto err_dev_put; - tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND); + tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_BIND); return 0; err_dev_put: @@ -743,7 +743,7 @@ void __tc_indr_block_cb_unregister(struct net_device *dev, return; /* Send unbind message if required to free any block cbs. 
*/ - tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND); + tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_UNBIND); tc_indr_block_cb_del(indr_block_cb); tc_indr_block_dev_put(indr_dev); } @@ -760,7 +760,7 @@ EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister); static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev, struct tcf_block_ext_info *ei, - enum tc_block_command command, + enum flow_block_command command, struct netlink_ext_ack *extack) { struct tc_indr_block_cb *indr_block_cb; @@ -776,7 +776,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev, if (!indr_dev) return; - indr_dev->block = command == TC_BLOCK_BIND ? block : NULL; + indr_dev->block = command == FLOW_BLOCK_BIND ? block : NULL; list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list) indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, @@ -791,7 +791,7 @@ static bool tcf_block_offload_in_use(struct tcf_block *block) static int tcf_block_offload_cmd(struct tcf_block *block, struct net_device *dev, struct tcf_block_ext_info *ei, - enum tc_block_command command, + enum flow_block_command command, struct netlink_ext_ack *extack) { struct tc_block_offload bo = {}; @@ -821,20 +821,20 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, return -EOPNOTSUPP; } - err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack); + err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto no_offload_dev_inc; if (err) return err; - tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack); + tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack); return 0; no_offload_dev_inc: if (tcf_block_offload_in_use(block)) return -EOPNOTSUPP; block->nooffloaddevcnt++; - tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack); + tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack); return 0; } @@ -844,11 +844,11 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, struct net_device *dev = q->dev_queue->dev; int err; - tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL); + tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); if (!dev->netdev_ops->ndo_setup_tc) goto no_offload_dev_dec; - err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL); + err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); if (err == -EOPNOTSUPP) goto no_offload_dev_dec; return; -- cgit v1.2.3-59-g8ed1b From 32f8c4093ac353a5f1b36cfed0ce0138faf8e15f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 9 Jul 2019 22:55:41 +0200 Subject: net: flow_offload: rename TCF_BLOCK_BINDER_TYPE_* to FLOW_BLOCK_BINDER_TYPE_* Rename from TCF_BLOCK_BINDER_TYPE_* to FLOW_BLOCK_BINDER_TYPE_* and remove temporary tcf_block_binder_type alias. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++-- drivers/net/ethernet/mscc/ocelot_flower.c | 2 +- drivers/net/ethernet/mscc/ocelot_tc.c | 4 ++-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 6 +++--- include/net/flow_offload.h | 6 +++--- include/net/pkt_cls.h | 3 +-- net/core/flow_offload.c | 2 +- net/dsa/slave.c | 4 ++-- net/sched/cls_api.c | 14 +++++++------- net/sched/sch_ingress.c | 6 +++--- 11 files changed, 26 insertions(+), 27 deletions(-) (limited to 'net/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 853aff64ef4b..f2ad1ca7ed2a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -701,7 +701,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, struct mlx5e_rep_indr_block_priv *indr_priv; int err = 0; - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; switch (f->command) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9cf61a9d8291..a178d082f061 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1668,10 +1668,10 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress; int err; - if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; ingress = true; - } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; ingress = false; } else { diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 8778dee5a471..b682f08a93b4 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -306,7 +306,7 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port, struct tcf_block_cb *block_cb; int ret; - if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) return -EOPNOTSUPP; block_cb = tcf_block_cb_lookup(f->block, diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c index c84942ef8e7b..58a0b5f8850c 100644 --- a/drivers/net/ethernet/mscc/ocelot_tc.c +++ b/drivers/net/ethernet/mscc/ocelot_tc.c @@ -137,10 +137,10 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n", f->command, f->binder_type); - if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { cb = ocelot_setup_tc_block_cb_ig; port->tc.block_shared = tcf_block_shared(f->block); - } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { cb = ocelot_setup_tc_block_cb_eg; } else { return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 7c94f5142076..46041e509150 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1308,7 +1308,7 @@ 
static int nfp_flower_setup_tc_block(struct net_device *netdev, struct nfp_repr *repr = netdev_priv(netdev); struct nfp_flower_repr_priv *repr_priv; - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; repr_priv = repr->app_priv; @@ -1389,8 +1389,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; int err; - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 7c9f7a2ac7ce..f12b905ad95e 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -244,9 +244,9 @@ enum flow_block_command { }; enum flow_block_binder_type { - TCF_BLOCK_BINDER_TYPE_UNSPEC, - TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS, - TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS, + FLOW_BLOCK_BINDER_TYPE_UNSPEC, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, }; struct tcf_block; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 1a96f469164f..e4499526fde8 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -27,10 +27,9 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #define tc_block_offload flow_block_offload -#define tcf_block_binder_type flow_block_binder_type struct tcf_block_ext_info { - enum tcf_block_binder_type binder_type; + enum flow_block_binder_type binder_type; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; u32 block_index; diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 593e73f7593a..6d8187e8effc 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -172,7 +172,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f, bool ingress_only) { if (ingress_only && - f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; f->driver_block_list = driver_block_list; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 58a71ee0747a..9b5e202c255e 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -947,9 +947,9 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, { tc_setup_cb_t *cb; - if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) cb = dsa_slave_setup_tc_block_cb_ig; - else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) cb = dsa_slave_setup_tc_block_cb_eg; else return -EOPNOTSUPP; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index ed6f35cc11ea..49b89c89a8b9 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -679,7 +679,7 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev, { struct tc_block_offload bo = { .command = command, - .binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + .binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, .block = indr_dev->block, }; @@ -1341,17 +1341,17 @@ static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, struct tcf_block_owner_item { struct list_head list; struct Qdisc *q; - enum 
tcf_block_binder_type binder_type; + enum flow_block_binder_type binder_type; }; static void tcf_block_owner_netif_keep_dst(struct tcf_block *block, struct Qdisc *q, - enum tcf_block_binder_type binder_type) + enum flow_block_binder_type binder_type) { if (block->keep_dst && - binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) netif_keep_dst(qdisc_dev(q)); } @@ -1368,7 +1368,7 @@ EXPORT_SYMBOL(tcf_block_netif_keep_dst); static int tcf_block_owner_add(struct tcf_block *block, struct Qdisc *q, - enum tcf_block_binder_type binder_type) + enum flow_block_binder_type binder_type) { struct tcf_block_owner_item *item; @@ -1383,7 +1383,7 @@ static int tcf_block_owner_add(struct tcf_block *block, static void tcf_block_owner_del(struct tcf_block *block, struct Qdisc *q, - enum tcf_block_binder_type binder_type) + enum flow_block_binder_type binder_type) { struct tcf_block_owner_item *item; diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 599730f804d7..bf56aa519797 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -83,7 +83,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt, mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); - q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; + q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->block_info.chain_head_change = clsact_chain_head_change; q->block_info.chain_head_change_priv = &q->miniqp; @@ -217,7 +217,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); - q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; + q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->ingress_block_info.chain_head_change = clsact_chain_head_change; q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress; @@ -228,7 +228,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress); - q->egress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS; + q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS; q->egress_block_info.chain_head_change = clsact_chain_head_change; q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; -- cgit v1.2.3-59-g8ed1b From d63db30c8537ba45208c156d71125db73d0fe522 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 9 Jul 2019 22:55:42 +0200 Subject: net: flow_offload: add flow_block_cb_alloc() and flow_block_cb_free() Add a new helper function to allocate flow_block_cb objects. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 14 ++++++++++++++ net/core/flow_offload.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'net/core') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index f12b905ad95e..45d74cb542cd 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -260,6 +260,20 @@ struct flow_block_offload { struct netlink_ext_ack *extack; }; +struct flow_block_cb { + struct list_head list; + tc_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *cb_priv); + unsigned int refcnt; +}; + +struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + void (*release)(void *cb_priv)); +void flow_block_cb_free(struct flow_block_cb *block_cb); + int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_list, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, bool ingress_only); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 6d8187e8effc..d08148cb6953 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -166,6 +166,34 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule, } EXPORT_SYMBOL(flow_rule_match_enc_opts); +struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + void (*release)(void *cb_priv)) +{ + struct flow_block_cb *block_cb; + + block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); + if (!block_cb) + return ERR_PTR(-ENOMEM); + + block_cb->cb = cb; + block_cb->cb_ident = cb_ident; + block_cb->cb_priv = cb_priv; + block_cb->release = release; + + return block_cb; +} +EXPORT_SYMBOL(flow_block_cb_alloc); + +void flow_block_cb_free(struct flow_block_cb *block_cb) +{ + if (block_cb->release) + block_cb->release(block_cb->cb_priv); + + kfree(block_cb); +} +EXPORT_SYMBOL(flow_block_cb_free); + int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, -- cgit v1.2.3-59-g8ed1b From da3eeb904ff432ec22cf7b4db17a47647428873a Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 9 Jul 2019 22:55:43 +0200 Subject: net: flow_offload: add list handling functions This patch adds the list handling functions for the flow block API: * flow_block_cb_lookup() allows drivers to look up for existing flow blocks. * flow_block_cb_add() adds a flow block to the per driver list to be registered by the core. * flow_block_cb_remove() to remove a flow block from the list of existing flow blocks per driver and to request the core to unregister this. The flow block API also annotates the netns this flow block belongs to. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 19 +++++++++++++++++++ net/core/flow_offload.c | 17 +++++++++++++++++ net/sched/cls_api.c | 3 +++ 3 files changed, 39 insertions(+) (limited to 'net/core') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 45d74cb542cd..563d7dc7afc1 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -256,12 +256,16 @@ struct flow_block_offload { enum flow_block_command command; enum flow_block_binder_type binder_type; struct tcf_block *block; + struct net *net; + struct list_head cb_list; struct list_head *driver_block_list; struct netlink_ext_ack *extack; }; struct flow_block_cb { + struct list_head driver_list; struct list_head list; + struct net *net; tc_setup_cb_t *cb; void *cb_ident; void *cb_priv; @@ -274,6 +278,21 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, void (*release)(void *cb_priv)); void flow_block_cb_free(struct flow_block_cb *block_cb); +struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload, + tc_setup_cb_t *cb, void *cb_ident); + +static inline void flow_block_cb_add(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_add_tail(&block_cb->list, &offload->cb_list); +} + +static inline void flow_block_cb_remove(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_move(&block_cb->list, &offload->cb_list); +} + int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_list, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, bool ingress_only); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index d08148cb6953..c81a7e0c5e04 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -176,6 +176,7 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb, if (!block_cb) return ERR_PTR(-ENOMEM); + block_cb->net = net; block_cb->cb = cb; block_cb->cb_ident = cb_ident; block_cb->cb_priv = cb_priv; @@ -194,6 +195,22 @@ void flow_block_cb_free(struct flow_block_cb *block_cb) } EXPORT_SYMBOL(flow_block_cb_free); +struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f, + tc_setup_cb_t *cb, void *cb_ident) +{ + struct flow_block_cb *block_cb; + + list_for_each_entry(block_cb, f->driver_block_list, driver_list) { + if (block_cb->net == f->net && + block_cb->cb == cb && + block_cb->cb_ident == cb_ident) + return block_cb; + } + + return NULL; +} +EXPORT_SYMBOL(flow_block_cb_lookup); + int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 49b89c89a8b9..ccbd51bed88c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -680,6 +680,7 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev, struct tc_block_offload bo = { .command = command, .binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + .net = dev_net(indr_dev->dev), .block = indr_dev->block, }; @@ -768,6 +769,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev, struct tc_block_offload bo = { .command = command, .binder_type = ei->binder_type, + .net = dev_net(dev), .block = block, .extack = extack, }; @@ -796,6 +798,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block, { struct tc_block_offload bo = {}; + bo.net = dev_net(dev); bo.command = command; bo.binder_type = ei->binder_type; bo.block = block; -- cgit v1.2.3-59-g8ed1b From 
67bd0d5ea7974d9dc9c502c7b4096e16a80a553d Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 9 Jul 2019 22:55:44 +0200 Subject: net: flow_offload: add flow_block_cb_{priv, incref, decref}() This patch completes the flow block API to introduce: * flow_block_cb_priv() to access callback private data. * flow_block_cb_incref() to bump reference counter on this flow block. * flow_block_cb_decref() to decrement the reference counter. These functions are taken from the existing tcf_block_cb_priv(), tcf_block_cb_incref() and tcf_block_cb_decref(). Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- include/net/flow_offload.h | 4 ++++ net/core/flow_offload.c | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'net/core') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 563d7dc7afc1..3fb9cc4da63e 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -281,6 +281,10 @@ void flow_block_cb_free(struct flow_block_cb *block_cb); struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload, tc_setup_cb_t *cb, void *cb_ident); +void *flow_block_cb_priv(struct flow_block_cb *block_cb); +void flow_block_cb_incref(struct flow_block_cb *block_cb); +unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb); + static inline void flow_block_cb_add(struct flow_block_cb *block_cb, struct flow_block_offload *offload) { diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index c81a7e0c5e04..a36a9dc1c6df 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -211,6 +211,24 @@ struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f, } EXPORT_SYMBOL(flow_block_cb_lookup); +void *flow_block_cb_priv(struct flow_block_cb *block_cb) +{ + return block_cb->cb_priv; +} +EXPORT_SYMBOL(flow_block_cb_priv); + +void flow_block_cb_incref(struct flow_block_cb *block_cb) +{ + block_cb->refcnt++; +} +EXPORT_SYMBOL(flow_block_cb_incref); + +unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb) +{ + return --block_cb->refcnt; +} +EXPORT_SYMBOL(flow_block_cb_decref); + int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, -- cgit v1.2.3-59-g8ed1b From 955bcb6ea0df0d9ace89ac475405f1295ced5962 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 9 Jul 2019 22:55:46 +0200 Subject: drivers: net: use flow block API This patch updates flow_block_cb_setup_simple() to use the flow block API. Several drivers are also adjusted to use it. This patch introduces the per-driver list of flow blocks to account for blocks that are already in use. Remove tc_block_offload alias. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. 
Miller 
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 5 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 +-
 drivers/net/ethernet/intel/i40e/i40e_main.c | 5 +-
 drivers/net/ethernet/intel/iavf/iavf_main.c | 5 +-
 drivers/net/ethernet/intel/igb/igb_main.c | 5 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 42 +++++++---
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 92 ++++++++++++++--------
 drivers/net/ethernet/mscc/ocelot_ace.h | 4 +-
 drivers/net/ethernet/mscc/ocelot_flower.c | 46 ++++++-----
 drivers/net/ethernet/mscc/ocelot_tc.c | 34 +++++---
 drivers/net/ethernet/netronome/nfp/abm/cls.c | 7 +-
 drivers/net/ethernet/netronome/nfp/abm/main.h | 2 +-
 drivers/net/ethernet/netronome/nfp/bpf/main.c | 5 +-
 .../net/ethernet/netronome/nfp/flower/offload.c | 68 +++++++++++-----
 drivers/net/ethernet/qlogic/qede/qede_main.c | 5 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +-
 drivers/net/netdevsim/netdev.c | 5 +-
 include/net/flow_offload.h | 3 +-
 include/net/pkt_cls.h | 2 -
 net/core/flow_offload.c | 20 ++++-
 net/dsa/slave.c | 22 +++++-
 net/sched/cls_api.c | 14 ++--
 25 files changed, 286 insertions(+), 130 deletions(-)
(limited to 'net/core')

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 06819590f6d0..3f632028eff0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9907,6 +9907,8 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
+static LIST_HEAD(bnxt_block_cb_list);
+
 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			 void *type_data)
 {
@@ -9914,7 +9916,8 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &bnxt_block_cb_list,
 						  bnxt_setup_tc_block_cb,
 						  bp, bp, true);
 	case TC_SETUP_QDISC_MQPRIO: {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 89398ff011d4..f9bf7d7250ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -161,6 +161,8 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
 	}
 }
 
+static LIST_HEAD(bnxt_vf_block_cb_list);
+
 static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 				void *type_data)
 {
@@ -168,7 +170,8 @@ static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &bnxt_vf_block_cb_list,
 						  bnxt_vf_rep_setup_tc_block_cb,
 						  vf_rep, vf_rep, true);
 	default:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9a486282a32e..fdc8ca4f8891 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3190,6 +3190,8 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
+static LIST_HEAD(cxgb_block_cb_list);
+
 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			 void *type_data)
 {
@@ -3197,7 +3199,8 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &cxgb_block_cb_list,
 						  cxgb_setup_tc_block_cb,
 						  pi, dev, true);
 	default:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 52f0f14d4207..7be1080680f5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8177,6 +8177,8 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
+static LIST_HEAD(i40e_block_cb_list);
+
 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 			   void *type_data)
 {
@@ -8186,7 +8188,8 @@ static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 	case TC_SETUP_QDISC_MQPRIO:
 		return i40e_setup_tc(netdev, type_data);
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &i40e_block_cb_list,
 						  i40e_setup_tc_block_cb,
 						  np, np, true);
 	default:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index fd0e2bcc75e5..05eca6f2e890 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3113,6 +3113,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
+static LIST_HEAD(iavf_block_cb_list);
+
 /**
  * iavf_setup_tc - configure multiple traffic classes
  * @netdev: network interface device structure
@@ -3133,7 +3135,8 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 	case TC_SETUP_QDISC_MQPRIO:
 		return __iavf_setup_tc(netdev, type_data);
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &iavf_block_cb_list,
 						  iavf_setup_tc_block_cb,
 						  adapter, adapter, true);
 	default:
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 836f9e1a136c..00e8186e2c59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2806,6 +2806,8 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
 	return 0;
 }
 
+static LIST_HEAD(igb_block_cb_list);
+
 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			void *type_data)
 {
@@ -2815,7 +2817,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	case TC_SETUP_QDISC_CBS:
 		return igb_offload_cbs(adapter, type_data);
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &igb_block_cb_list,
 						  igb_setup_tc_block_cb,
 						  adapter, adapter, true);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b098f5be9c0d..cbaf712d6529 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9614,6 +9614,8 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev,
 	return ixgbe_setup_tc(dev, mqprio->num_tc);
 }
 
+static LIST_HEAD(ixgbe_block_cb_list);
+
 static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			    void *type_data)
 {
@@ -9621,7 +9623,8 @@ static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &ixgbe_block_cb_list,
 						  ixgbe_setup_tc_block_cb,
 						  adapter, adapter, true);
 	case TC_SETUP_QDISC_MQPRIO:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8e5ebdb7c459..4c138789c547 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3459,6 +3459,8 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 }
 #endif
 
+static LIST_HEAD(mlx5e_block_cb_list);
+
 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			  void *type_data)
 {
@@ -3467,7 +3469,8 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	switch (type) {
 #ifdef CONFIG_MLX5_ESWITCH
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &mlx5e_block_cb_list,
 						  mlx5e_setup_tc_block_cb,
 						  priv, priv, true);
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f2ad1ca7ed2a..7ca6b6472017 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -693,17 +693,29 @@ static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
 	}
 }
 
+static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
+{
+	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
+
+	list_del(&indr_priv->list);
+	kfree(indr_priv);
+}
+
+static LIST_HEAD(mlx5e_block_cb_list);
+
 static int
 mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
 			      struct mlx5e_rep_priv *rpriv,
-			      struct tc_block_offload *f)
+			      struct flow_block_offload *f)
 {
 	struct mlx5e_rep_indr_block_priv *indr_priv;
-	int err = 0;
+	struct flow_block_cb *block_cb;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
 
+	f->driver_block_list = &mlx5e_block_cb_list;
+
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
 		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
@@ -719,26 +731,32 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
 		list_add(&indr_priv->list,
 			 &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-		err = tcf_block_cb_register(f->block,
-					    mlx5e_rep_indr_setup_block_cb,
-					    indr_priv, indr_priv, f->extack);
-		if (err) {
+		block_cb = flow_block_cb_alloc(f->net,
+					       mlx5e_rep_indr_setup_block_cb,
+					       indr_priv, indr_priv,
+					       mlx5e_rep_indr_tc_block_unbind);
+		if (IS_ERR(block_cb)) {
 			list_del(&indr_priv->list);
 			kfree(indr_priv);
+			return PTR_ERR(block_cb);
 		}
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
 
-		return err;
+		return 0;
 	case FLOW_BLOCK_UNBIND:
 		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
 		if (!indr_priv)
 			return -ENOENT;
 
-		tcf_block_cb_unregister(f->block,
-					mlx5e_rep_indr_setup_block_cb,
-					indr_priv);
-		list_del(&indr_priv->list);
-		kfree(indr_priv);
+		block_cb = flow_block_cb_lookup(f,
+						mlx5e_rep_indr_setup_block_cb,
+						indr_priv);
+		if (!block_cb)
+			return -ENOENT;
 
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a178d082f061..65bea6be84d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1585,33 +1585,45 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
 	}
 }
 
+static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
+{
+	struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+	mlxsw_sp_acl_block_destroy(acl_block);
+}
+
+static LIST_HEAD(mlxsw_sp_block_cb_list);
+
 static int
 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
-				    struct tcf_block *block, bool ingress,
-				    struct netlink_ext_ack *extack)
+				    struct flow_block_offload *f, bool ingress)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_acl_block *acl_block;
-	struct tcf_block_cb *block_cb;
+	struct flow_block_cb *block_cb;
+	bool register_block = false;
 	int err;
 
-	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
-				       mlxsw_sp);
+	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+					mlxsw_sp);
 	if (!block_cb) {
-		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
 		if (!acl_block)
 			return -ENOMEM;
-		block_cb = __tcf_block_cb_register(block,
-						   mlxsw_sp_setup_tc_block_cb_flower,
-						   mlxsw_sp, acl_block, extack);
+		block_cb = flow_block_cb_alloc(f->net,
+					       mlxsw_sp_setup_tc_block_cb_flower,
+					       mlxsw_sp, acl_block,
+					       mlxsw_sp_tc_block_flower_release);
 		if (IS_ERR(block_cb)) {
+			mlxsw_sp_acl_block_destroy(acl_block);
 			err = PTR_ERR(block_cb);
 			goto err_cb_register;
 		}
+		register_block = true;
 	} else {
-		acl_block = tcf_block_cb_priv(block_cb);
+		acl_block = flow_block_cb_priv(block_cb);
 	}
-	tcf_block_cb_incref(block_cb);
+	flow_block_cb_incref(block_cb);
 	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
 				      mlxsw_sp_port, ingress);
 	if (err)
@@ -1622,28 +1634,31 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
 	else
 		mlxsw_sp_port->eg_acl_block = acl_block;
 
+	if (register_block) {
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
+	}
+
 	return 0;
 
 err_block_bind:
-	if (!tcf_block_cb_decref(block_cb)) {
-		__tcf_block_cb_unregister(block, block_cb);
+	if (!flow_block_cb_decref(block_cb))
+		flow_block_cb_free(block_cb);
 err_cb_register:
-		mlxsw_sp_acl_block_destroy(acl_block);
-	}
 	return err;
 }
 
 static void
 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
-				      struct tcf_block *block, bool ingress)
+				      struct flow_block_offload *f, bool ingress)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_acl_block *acl_block;
-	struct tcf_block_cb *block_cb;
+	struct flow_block_cb *block_cb;
 	int err;
 
-	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
-				       mlxsw_sp);
+	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+					mlxsw_sp);
 	if (!block_cb)
 		return;
 
@@ -1652,18 +1667,19 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
 	else
 		mlxsw_sp_port->eg_acl_block = NULL;
 
-	acl_block = tcf_block_cb_priv(block_cb);
+	acl_block = flow_block_cb_priv(block_cb);
 	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
 					mlxsw_sp_port, ingress);
-	if (!err && !tcf_block_cb_decref(block_cb)) {
-		__tcf_block_cb_unregister(block, block_cb);
-		mlxsw_sp_acl_block_destroy(acl_block);
+	if (!err && !flow_block_cb_decref(block_cb)) {
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 	}
 }
 
 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
-				   struct tc_block_offload *f)
+				   struct flow_block_offload *f)
 {
+	struct flow_block_cb *block_cb;
 	tc_setup_cb_t *cb;
 	bool ingress;
 	int err;
@@ -1678,24 +1694,32 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -EOPNOTSUPP;
 	}
 
+	f->driver_block_list = &mlxsw_sp_block_cb_list;
+
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
-		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
-					    mlxsw_sp_port, f->extack);
-		if (err)
-			return err;
-		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
-							  f->block, ingress,
-							  f->extack);
+		block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
+					       mlxsw_sp_port, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
+							  ingress);
 		if (err) {
-			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+			flow_block_cb_free(block_cb);
 			return err;
 		}
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
 		return 0;
 	case FLOW_BLOCK_UNBIND:
 		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
-						      f->block, ingress);
-		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+						      f, ingress);
+		block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
+		if (!block_cb)
+			return -ENOENT;
+
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index d621683643e1..e98944c87259 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -225,8 +225,8 @@ int ocelot_ace_init(struct ocelot *ocelot);
 void ocelot_ace_deinit(void);
 
 int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
-				      struct tc_block_offload *f);
+				      struct flow_block_offload *f);
 void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
-					 struct tc_block_offload *f);
+					 struct flow_block_offload *f);
 
 #endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index b682f08a93b4..5b92c2a03f3d 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -299,36 +299,45 @@ static void ocelot_port_block_destroy(struct ocelot_port_block *block)
 	kfree(block);
 }
 
+static void ocelot_tc_block_unbind(void *cb_priv)
+{
+	struct ocelot_port_block *port_block = cb_priv;
+
+	ocelot_port_block_destroy(port_block);
+}
+
 int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
-				      struct tc_block_offload *f)
+				      struct flow_block_offload *f)
 {
 	struct ocelot_port_block *port_block;
-	struct tcf_block_cb *block_cb;
+	struct flow_block_cb *block_cb;
 	int ret;
 
 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
 		return -EOPNOTSUPP;
 
-	block_cb = tcf_block_cb_lookup(f->block,
-				       ocelot_setup_tc_block_cb_flower, port);
+	block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
+					port);
 	if (!block_cb) {
 		port_block = ocelot_port_block_create(port);
 		if (!port_block)
 			return -ENOMEM;
 
-		block_cb =
-			__tcf_block_cb_register(f->block,
-						ocelot_setup_tc_block_cb_flower,
-						port, port_block, f->extack);
+		block_cb = flow_block_cb_alloc(f->net,
+					       ocelot_setup_tc_block_cb_flower,
+					       port, port_block,
+					       ocelot_tc_block_unbind);
 		if (IS_ERR(block_cb)) {
 			ret = PTR_ERR(block_cb);
 			goto err_cb_register;
 		}
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, f->driver_block_list);
 	} else {
-		port_block = tcf_block_cb_priv(block_cb);
+		port_block = flow_block_cb_priv(block_cb);
 	}
-	tcf_block_cb_incref(block_cb);
+	flow_block_cb_incref(block_cb);
 	return 0;
 
 err_cb_register:
@@ -338,20 +347,17 @@ err_cb_register:
 }
 
 void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
-					 struct tc_block_offload *f)
+					 struct flow_block_offload *f)
 {
-	struct ocelot_port_block *port_block;
-	struct tcf_block_cb *block_cb;
+	struct flow_block_cb *block_cb;
 
-	block_cb = tcf_block_cb_lookup(f->block,
-				       ocelot_setup_tc_block_cb_flower, port);
+	block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
+					port);
 	if (!block_cb)
 		return;
 
-	port_block = tcf_block_cb_priv(block_cb);
-	if (!tcf_block_cb_decref(block_cb)) {
-		tcf_block_cb_unregister(f->block,
-					ocelot_setup_tc_block_cb_flower, port);
-		ocelot_port_block_destroy(port_block);
+	if (!flow_block_cb_decref(block_cb)) {
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 	}
 }
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 58a0b5f8850c..935a774cb291 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -128,35 +128,51 @@ static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
 				       cb_priv, false);
 }
 
+static LIST_HEAD(ocelot_block_cb_list);
+
 static int ocelot_setup_tc_block(struct ocelot_port *port,
-				 struct tc_block_offload *f)
+				 struct flow_block_offload *f)
 {
+	struct flow_block_cb *block_cb;
 	tc_setup_cb_t *cb;
-	int ret;
+	int err;
 
 	netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
 		   f->command, f->binder_type);
 
 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
 		cb = ocelot_setup_tc_block_cb_ig;
-		port->tc.block_shared = tcf_block_shared(f->block);
+		port->tc.block_shared = f->block_shared;
 	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
 		cb = ocelot_setup_tc_block_cb_eg;
 	} else {
 		return -EOPNOTSUPP;
 	}
 
+	f->driver_block_list = &ocelot_block_cb_list;
+
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
-		ret = tcf_block_cb_register(f->block, cb, port,
-					    port, f->extack);
-		if (ret)
-			return ret;
+		block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
 
-		return ocelot_setup_tc_block_flower_bind(port, f);
+		err = ocelot_setup_tc_block_flower_bind(port, f);
+		if (err < 0) {
+			flow_block_cb_free(block_cb);
+			return err;
+		}
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, f->driver_block_list);
+		return 0;
 	case FLOW_BLOCK_UNBIND:
+		block_cb = flow_block_cb_lookup(f, cb, port);
+		if (!block_cb)
+			return -ENOENT;
+
 		ocelot_setup_tc_block_flower_unbind(port, f);
-		tcf_block_cb_unregister(f->block, cb, port);
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 29fb45734962..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -262,9 +262,12 @@ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
 	}
 }
 
+static LIST_HEAD(nfp_abm_block_cb_list);
+
 int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
-			    struct tc_block_offload *f)
+			    struct flow_block_offload *f)
 {
-	return flow_block_cb_setup_simple(f, NULL, nfp_abm_setup_tc_block_cb,
+	return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
+					  nfp_abm_setup_tc_block_cb,
 					  repr, repr, true);
 }
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 49749c60885e..48746c9c6224 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -247,7 +247,7 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
 int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
 			  struct tc_gred_qopt_offload *opt);
 int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
-			    struct tc_block_offload *opt);
+			    struct flow_block_offload *opt);
 int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
 int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 0c93c84a188a..1c9fb11470df 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -160,6 +160,8 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 	return 0;
 }
 
+static LIST_HEAD(nfp_bpf_block_cb_list);
+
 static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
 			    enum tc_setup_type type, void *type_data)
 {
@@ -167,7 +169,8 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &nfp_bpf_block_cb_list,
 						  nfp_bpf_setup_tc_block_cb,
 						  nn, nn, true);
 	default:
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 46041e509150..ddd6b509f27e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1302,27 +1302,41 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
 	}
 }
 
+static LIST_HEAD(nfp_block_cb_list);
+
 static int nfp_flower_setup_tc_block(struct net_device *netdev,
-				     struct tc_block_offload *f)
+				     struct flow_block_offload *f)
 {
 	struct nfp_repr *repr = netdev_priv(netdev);
 	struct nfp_flower_repr_priv *repr_priv;
+	struct flow_block_cb *block_cb;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
 
 	repr_priv = repr->app_priv;
-	repr_priv->block_shared = tcf_block_shared(f->block);
+	repr_priv->block_shared = f->block_shared;
+	f->driver_block_list = &nfp_block_cb_list;
 
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
-		return tcf_block_cb_register(f->block,
-					     nfp_flower_setup_tc_block_cb,
-					     repr, repr, f->extack);
+		block_cb = flow_block_cb_alloc(f->net,
+					       nfp_flower_setup_tc_block_cb,
+					       repr, repr, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+		return 0;
 	case FLOW_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block,
-					nfp_flower_setup_tc_block_cb,
-					repr);
+		block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
+						repr);
+		if (!block_cb)
+			return -ENOENT;
+
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
@@ -1381,13 +1395,21 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
 	}
 }
 
+static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+{
+	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+
+	list_del(&priv->list);
+	kfree(priv);
+}
+
 static int
 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
-			       struct tc_block_offload *f)
+			       struct flow_block_offload *f)
 {
 	struct nfp_flower_indr_block_cb_priv *cb_priv;
 	struct nfp_flower_priv *priv = app->priv;
-	int err;
+	struct flow_block_cb *block_cb;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
 	    !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
@@ -1404,26 +1426,32 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 		cb_priv->app = app;
 		list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-		err = tcf_block_cb_register(f->block,
-					    nfp_flower_setup_indr_block_cb,
-					    cb_priv, cb_priv, f->extack);
-		if (err) {
+		block_cb = flow_block_cb_alloc(f->net,
+					       nfp_flower_setup_indr_block_cb,
+					       cb_priv, cb_priv,
+					       nfp_flower_setup_indr_tc_release);
+		if (IS_ERR(block_cb)) {
 			list_del(&cb_priv->list);
 			kfree(cb_priv);
+			return PTR_ERR(block_cb);
 		}
 
-		return err;
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+		return 0;
 	case FLOW_BLOCK_UNBIND:
 		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
 		if (!cb_priv)
 			return -ENOENT;
 
-		tcf_block_cb_unregister(f->block,
-					nfp_flower_setup_indr_block_cb,
-					cb_priv);
-		list_del(&cb_priv->list);
-		kfree(cb_priv);
+		block_cb = flow_block_cb_lookup(f,
+						nfp_flower_setup_indr_block_cb,
+						cb_priv);
+		if (!block_cb)
+			return -ENOENT;
 
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index cba97ed3dd56..1be593a6e20d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -579,6 +579,8 @@ static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
+static LIST_HEAD(qede_block_cb_list);
+
 static int
 qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
 		      void *type_data)
@@ -588,7 +590,8 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &qede_block_cb_list,
 						  qede_setup_tc_block_cb,
 						  edev, edev, true);
 	case TC_SETUP_QDISC_MQPRIO:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 93ef80c16f07..c7c9e5f162e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3769,6 +3769,8 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	return ret;
 }
 
+static LIST_HEAD(stmmac_block_cb_list);
+
 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 			   void *type_data)
 {
@@ -3776,7 +3778,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &stmmac_block_cb_list,
 						  stmmac_setup_tc_block_cb,
 						  priv, priv, true);
 	case TC_SETUP_QDISC_CBS:
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 920dc79e9dc9..0740940f41b1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -203,6 +203,8 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
 	return 0;
 }
 
+static LIST_HEAD(nsim_block_cb_list);
+
 static int
 nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
 {
@@ -210,7 +212,8 @@ nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &nsim_block_cb_list,
 						  nsim_setup_tc_block_cb,
 						  ns, ns, true);
 	default:
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 3fb9cc4da63e..377ba0004370 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -249,13 +249,12 @@ enum flow_block_binder_type {
 	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
 };
 
-struct tcf_block;
 struct netlink_ext_ack;
 
 struct flow_block_offload {
 	enum flow_block_command command;
 	enum flow_block_binder_type binder_type;
-	struct tcf_block *block;
+	bool block_shared;
 	struct net *net;
 	struct list_head cb_list;
 	struct list_head *driver_block_list;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e4499526fde8..9cf606b88526 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -26,8 +26,6 @@ struct tcf_walker {
 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
 
-#define tc_block_offload flow_block_offload
-
 struct tcf_block_ext_info {
 	enum flow_block_binder_type binder_type;
 	tcf_chain_head_change_t *chain_head_change;
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index a36a9dc1c6df..a1b36b47dd89 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -2,7 +2,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <net/flow_offload.h>
-#include <net/pkt_cls.h>
 
 struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 {
@@ -234,6 +233,8 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 			       tc_setup_cb_t *cb, void *cb_ident, void *cb_priv,
 			       bool ingress_only)
 {
+	struct flow_block_cb *block_cb;
+
 	if (ingress_only &&
 	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
@@ -242,10 +243,21 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv,
-					     f->extack);
+		block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
+					       cb_priv, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, driver_block_list);
+		return 0;
 	case FLOW_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, cb, cb_ident);
+		block_cb = flow_block_cb_lookup(f, cb, cb_ident);
+		if (!block_cb)
+			return -ENOENT;
+
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 9b5e202c255e..90c32fd680db 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -942,9 +942,12 @@ static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
 	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
 }
 
+static LIST_HEAD(dsa_slave_block_cb_list);
+
 static int dsa_slave_setup_tc_block(struct net_device *dev,
-				    struct tc_block_offload *f)
+				    struct flow_block_offload *f)
 {
+	struct flow_block_cb *block_cb;
 	tc_setup_cb_t *cb;
 
 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -954,11 +957,24 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 	else
 		return -EOPNOTSUPP;
 
+	f->driver_block_list = &dsa_slave_block_cb_list;
+
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
+		block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
+		return 0;
 	case FLOW_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, cb, dev);
+		block_cb = flow_block_cb_lookup(f, cb, dev);
+		if (!block_cb)
+			return -ENOENT;
+
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f6602d0000e8..3589ccff5570 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -680,11 +680,11 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
 				  struct tc_indr_block_cb *indr_block_cb,
 				  enum flow_block_command command)
 {
-	struct tc_block_offload bo = {
+	struct flow_block_offload bo = {
 		.command = command,
 		.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
 		.net = dev_net(indr_dev->dev),
-		.block = indr_dev->block,
+		.block_shared = tcf_block_shared(indr_dev->block),
 	};
 	INIT_LIST_HEAD(&bo.cb_list);
@@ -771,11 +771,11 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
 {
 	struct tc_indr_block_cb *indr_block_cb;
 	struct tc_indr_block_dev *indr_dev;
-	struct tc_block_offload bo = {
+	struct flow_block_offload bo = {
 		.command = command,
 		.binder_type = ei->binder_type,
 		.net = dev_net(dev),
-		.block = block,
+		.block_shared = tcf_block_shared(block),
 		.extack = extack,
 	};
 	INIT_LIST_HEAD(&bo.cb_list);
@@ -804,13 +804,13 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
 				 enum flow_block_command command,
 				 struct netlink_ext_ack *extack)
 {
-	struct tc_block_offload bo = {};
+	struct flow_block_offload bo = {};
 	int err;
 
 	bo.net = dev_net(dev);
 	bo.command = command;
 	bo.binder_type = ei->binder_type;
-	bo.block = block;
+	bo.block_shared = tcf_block_shared(block);
 	bo.extack = extack;
 	INIT_LIST_HEAD(&bo.cb_list);
 
@@ -3245,7 +3245,7 @@ EXPORT_SYMBOL(tcf_exts_dump_stats);
 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
 		     void *type_data, bool err_stop)
 {
-	struct tcf_block_cb *block_cb;
+	struct flow_block_cb *block_cb;
 	int ok_count = 0;
 	int err;
 
-- 
cgit v1.2.3-59-g8ed1b
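[Editorial aside, not part of any commit.] The conversion above applies one
recurring skeleton to every driver: allocate a flow_block_cb (optionally with
a release callback), hand it to the core with flow_block_cb_add(), and track
it on a per-driver list so the same object can be found again at
FLOW_BLOCK_UNBIND time via flow_block_cb_lookup(). The sketch below condenses
that pattern into a standalone example; the foo_* names are invented, and it
assumes only the flow_block_cb API exactly as introduced above.

	#include <linux/err.h>
	#include <linux/list.h>
	#include <linux/netdevice.h>
	#include <net/flow_offload.h>
	#include <net/pkt_cls.h>

	static LIST_HEAD(foo_block_cb_list);

	/* Classifier callback; per-type dispatch (e.g. TC_SETUP_CLSFLOWER)
	 * would go here in a real driver.
	 */
	static int foo_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
	{
		return -EOPNOTSUPP;
	}

	static int foo_setup_tc_block(struct net_device *dev,
				      struct flow_block_offload *f)
	{
		struct flow_block_cb *block_cb;

		if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
			return -EOPNOTSUPP;

		f->driver_block_list = &foo_block_cb_list;

		switch (f->command) {
		case FLOW_BLOCK_BIND:
			/* The last argument is an optional release callback
			 * invoked when the block_cb is freed.
			 */
			block_cb = flow_block_cb_alloc(f->net,
						       foo_setup_tc_block_cb,
						       dev, dev, NULL);
			if (IS_ERR(block_cb))
				return PTR_ERR(block_cb);

			/* Register with the core, then track driver-side. */
			flow_block_cb_add(block_cb, f);
			list_add_tail(&block_cb->driver_list,
				      &foo_block_cb_list);
			return 0;
		case FLOW_BLOCK_UNBIND:
			block_cb = flow_block_cb_lookup(f,
							foo_setup_tc_block_cb,
							dev);
			if (!block_cb)
				return -ENOENT;

			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

The per-driver list (foo_block_cb_list here) is what makes the follow-up
patch below possible: with every active binding on a driver-owned list, the
core can check for duplicate bindings generically.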
From 0d4fd02e7199fbf57c0d175dd1890c82cd4a6f4f Mon Sep 17 00:00:00 2001
From: Pablo Neira Ayuso 
Date: Tue, 9 Jul 2019 22:55:48 +0200
Subject: net: flow_offload: add flow_block_cb_is_busy() and use it

This patch adds a function to check if a flow block callback is already
in use. Call this new function from flow_block_cb_setup_simple() and
from drivers.

Signed-off-by: Pablo Neira Ayuso 
Signed-off-by: David S. Miller 
---
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 ++++
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++++
 drivers/net/ethernet/mscc/ocelot_tc.c | 3 +++
 drivers/net/ethernet/netronome/nfp/flower/offload.c | 4 ++++
 include/net/flow_offload.h | 3 +++
 net/core/flow_offload.c | 18 ++++++++++++++++++
 net/dsa/slave.c | 3 +++
 7 files changed, 39 insertions(+)
(limited to 'net/core')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7ca6b6472017..62cb5408424c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -722,6 +722,10 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
 		if (indr_priv)
 			return -EEXIST;
 
+		if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
+					  indr_priv, &mlx5e_block_cb_list))
+			return -EBUSY;
+
 		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
 		if (!indr_priv)
 			return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 65bea6be84d6..35adc174f277 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1698,6 +1698,10 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
+					  &mlxsw_sp_block_cb_list))
+			return -EBUSY;
+
 		block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
 					       mlxsw_sp_port, NULL);
 		if (IS_ERR(block_cb))
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 935a774cb291..9e6464ffae5d 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -153,6 +153,9 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
+			return -EBUSY;
+
 		block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index ddd6b509f27e..1b38cfeb646c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1320,6 +1320,10 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
+					  &nfp_block_cb_list))
+			return -EBUSY;
+
 		block_cb = flow_block_cb_alloc(f->net,
 					       nfp_flower_setup_tc_block_cb,
 					       repr, repr, NULL);
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 377ba0004370..42a36a346003 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -296,6 +296,9 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
 	list_move(&block_cb->list, &offload->cb_list);
 }
 
+bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
+			   struct list_head *driver_block_list);
+
 int flow_block_cb_setup_simple(struct flow_block_offload *f,
 			       struct list_head *driver_list, tc_setup_cb_t *cb,
 			       void *cb_ident, void *cb_priv, bool ingress_only);
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index a1b36b47dd89..76f8db3841d7 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -228,6 +228,21 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
 }
 EXPORT_SYMBOL(flow_block_cb_decref);
 
+bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
+			   struct list_head *driver_block_list)
+{
+	struct flow_block_cb *block_cb;
+
+	list_for_each_entry(block_cb, driver_block_list, driver_list) {
+		if (block_cb->cb == cb &&
+		    block_cb->cb_ident == cb_ident)
+			return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(flow_block_cb_is_busy);
+
 int flow_block_cb_setup_simple(struct flow_block_offload *f,
 			       struct list_head *driver_block_list,
 			       tc_setup_cb_t *cb, void *cb_ident, void *cb_priv,
@@ -243,6 +258,9 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
+			return -EBUSY;
+
 		block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
 					       cb_priv, NULL);
 		if (IS_ERR(block_cb))
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 90c32fd680db..9bcb598fc840 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -961,6 +961,9 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
+			return -EBUSY;
+
 		block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
-- 
cgit v1.2.3-59-g8ed1b
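[Editorial aside, not part of any commit.] With flow_block_cb_is_busy() in
place, flow_block_cb_setup_simple() now covers the whole bind/unbind sequence
for simple drivers: the -EBUSY duplicate-bind guard, allocation,
flow_block_cb_add() and the driver_list bookkeeping. A driver that needs no
special handling can reduce its ndo_setup_tc() to a single call, as in this
sketch (hypothetical foo_* names again, reusing foo_setup_tc_block_cb and
foo_block_cb_list from the earlier sketch):

	static int foo_setup_tc(struct net_device *dev,
				enum tc_setup_type type, void *type_data)
	{
		switch (type) {
		case TC_SETUP_BLOCK:
			/* ingress_only=true rejects egress binds with
			 * -EOPNOTSUPP; a second bind of the same cb/cb_ident
			 * pair now fails with -EBUSY via
			 * flow_block_cb_is_busy().
			 */
			return flow_block_cb_setup_simple(type_data,
							  &foo_block_cb_list,
							  foo_setup_tc_block_cb,
							  dev, dev, true);
		default:
			return -EOPNOTSUPP;
		}
	}

This mirrors what bnxt, cxgb4, i40e, iavf, igb, ixgbe, mlx5e, nfp-bpf, qede,
stmmac and netdevsim end up with after the two patches above.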