author     Edward Cree <ecree@solarflare.com>        2018-07-02 16:13:40 +0100
committer  David S. Miller <davem@davemloft.net>     2018-07-04 14:06:19 +0900
commit     4ce0017a373afaaa9ef17614d8fa4f6fde261d18 (patch)
tree       a5d8c2c03598cafdf3afb2aa997d7e177511bfc1
parent     net: core: Another step of skb receive list processing (diff)
net: core: another layer of lists, around PF_MEMALLOC skb handling
First example of a layer splitting the list (rather than merely taking individual packets off it). Involves a new list.h function, list_cut_before(), like list_cut_position() but cutting on the other side of the given entry.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   include/linux/list.h   30
-rw-r--r--   net/core/dev.c         44
2 files changed, 66 insertions(+), 8 deletions(-)
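To make the commit message's comparison concrete: given a list A, B, C, D with @entry pointing at C, list_cut_position() leaves the new list holding [A, B, C] and @head holding [D], whereas list_cut_before() leaves the new list holding [A, B] and @head holding [C, D]. A minimal sketch, assuming kernel context with this patch applied; struct item and demo_cuts() are hypothetical names used only for illustration:

#include <linux/list.h>

struct item {
	char name;              /* 'A', 'B', 'C', 'D' */
	struct list_head list;
};

static void demo_cuts(struct item *a, struct item *b, struct item *c,
		      struct item *d)
{
	LIST_HEAD(head);
	LIST_HEAD(front);

	list_add_tail(&a->list, &head);
	list_add_tail(&b->list, &head);
	list_add_tail(&c->list, &head);
	list_add_tail(&d->list, &head);         /* head: A B C D */

	list_cut_before(&front, &head, &c->list);
	/* front: A B        head: C D */

	/* list_cut_position(&front, &head, &c->list) on the same
	 * starting list would instead have given:
	 * front: A B C      head: D
	 */
}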
diff --git a/include/linux/list.h b/include/linux/list.h
index 4b129df4d46b..de04cc5ed536 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
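The net/core/dev.c hunk below uses this helper in a split-on-boundary pattern: walk the list, and each time a per-entry property flips, cut everything before the current entry into a sublist and hand it off as one homogeneous run. A stripped-down sketch of that shape, assuming the same kernel context; struct item, its flag field, process_run() and process_in_runs() are hypothetical stand-ins, not kernel APIs:

#include <linux/list.h>

struct item {
	bool flag;              /* the property that defines a run */
	struct list_head list;
};

static void process_run(struct list_head *run, bool flag)
{
	/* ... consume a sublist whose entries all share @flag ... */
}

static void process_in_runs(struct list_head *head)
{
	struct item *it, *next;
	bool cur = false;       /* property of the current run */

	list_for_each_entry_safe(it, next, head, list) {
		if (it->flag != cur) {
			struct list_head run;

			/* Entries before @it all had the previous value. */
			list_cut_before(&run, head, &it->list);
			process_run(&run, cur);
			cur = !cur;
		}
	}
	/* Whatever remains on @head shares the final value of cur. */
	process_run(head, cur);
}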
diff --git a/net/core/dev.c b/net/core/dev.c
index 1e87361df2ab..9aadef976e8c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4784,6 +4784,14 @@ int netif_receive_skb_core(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_receive_skb_core);
+static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+{
+ struct sk_buff *skb, *next;
+
+ list_for_each_entry_safe(skb, next, head, list)
+ __netif_receive_skb_core(skb, pfmemalloc);
+}
+
static int __netif_receive_skb(struct sk_buff *skb)
{
int ret;
@@ -4809,6 +4817,34 @@ static int __netif_receive_skb(struct sk_buff *skb)
return ret;
}
+static void __netif_receive_skb_list(struct list_head *head)
+{
+ unsigned long noreclaim_flag = 0;
+ struct sk_buff *skb, *next;
+ bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
+
+ list_for_each_entry_safe(skb, next, head, list) {
+ if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
+ struct list_head sublist;
+
+ /* Handle the previous sublist */
+ list_cut_before(&sublist, head, &skb->list);
+ __netif_receive_skb_list_core(&sublist, pfmemalloc);
+ pfmemalloc = !pfmemalloc;
+ /* See comments in __netif_receive_skb */
+ if (pfmemalloc)
+ noreclaim_flag = memalloc_noreclaim_save();
+ else
+ memalloc_noreclaim_restore(noreclaim_flag);
+ }
+ }
+ /* Handle the remaining sublist */
+ __netif_receive_skb_list_core(head, pfmemalloc);
+ /* Restore pflags */
+ if (pfmemalloc)
+ memalloc_noreclaim_restore(noreclaim_flag);
+}
+
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
@@ -4843,14 +4879,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
return ret;
}
-static void __netif_receive_skb_list(struct list_head *head)
-{
- struct sk_buff *skb, *next;
-
- list_for_each_entry_safe(skb, next, head, list)
- __netif_receive_skb(skb);
-}
-
static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;