author     Jesper Dangaard Brouer <brouer@redhat.com>   2019-06-18 15:05:53 +0200
committer  David S. Miller <davem@davemloft.net>        2019-06-19 11:23:13 -0400
commit     d956a048cd3fc1ba154101a1a50fb37950081ff6 (patch)
tree       79e45a34946dc77756fc1a508c78f84193ba1b86 /net/core/xdp.c
parent     xdp: tracking page_pool resources and safe removal (diff)
xdp: force mem allocator removal and periodic warning
If bugs exist, or are introduced later, e.g. by drivers misusing the API, then we want to warn about the issue so that developers notice. This patch generates a bit of noise in the form of a periodic pr_warn every 30 seconds.

It is not nice to have this stall warning running forever. Thus, after 120 attempts this patch force-disconnects the mem id (removing it from the rhashtable) and frees the page_pool object. This causes a fallback to put_page() as before, which can leak DMA mappings, but only if objects really are stuck for this long. In that unlikely case, a WARN_ONCE should show us the call stack.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
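Taken together, the new constants mean a stuck pool is force-disconnected after roughly two minutes (about 120 one-second retries via DEFER_TIME), with a stall warning every 30 seconds along the way. The retry/force logic is small enough to model in plain C. Below is a minimal userspace sketch, not kernel code: fake_shutdown() is an invented stand-in for page_pool_request_shutdown() that pretends pages stay in-flight forever, the worst case this patch defends against.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEFER_MAX_RETRIES 120   /* force disconnect past this many tries */
    #define DEFER_WARN_SEC    30    /* DEFER_WARN_INTERVAL, in seconds */

    /* stand-in for page_pool_request_shutdown(): never safe to remove */
    static bool fake_shutdown(void) { return false; }

    int main(void)
    {
        int disconnect_cnt = 0, next_warn = DEFER_WARN_SEC;

        for (int sec = 0; ; sec++) { /* one retry per second (DEFER_TIME) */
            /* checked before the call, as in mem_id_disconnect_defer_retry() */
            bool force = disconnect_cnt > DEFER_MAX_RETRIES;

            disconnect_cnt++;               /* xa->disconnect_cnt++ */
            if (fake_shutdown() || force) { /* (safe_to_remove || force) */
                printf("disconnected: %d attempts, forced=%d\n",
                       disconnect_cnt, force);
                return 0;
            }
            if (sec >= next_warn) {         /* periodic stall warning */
                printf("stalled: shutdown %d attempts %d sec\n",
                       disconnect_cnt, sec);
                next_warn += DEFER_WARN_SEC;
            }
        }
    }

Run as-is, it prints four stall warnings and then disconnects on the 122nd attempt, i.e. via the forced path.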
Diffstat (limited to 'net/core/xdp.c')
-rw-r--r--  net/core/xdp.c  37  +++++++++++++++++++++++++++++++------
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/net/core/xdp.c b/net/core/xdp.c
index aae665ccee3f..622c81dc7ba8 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -39,6 +39,9 @@ struct xdp_mem_allocator {
         struct rhash_head node;
         struct rcu_head rcu;
         struct delayed_work defer_wq;
+        unsigned long defer_start;
+        unsigned long defer_warn;
+        int disconnect_cnt;
 };
 
 static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
@@ -95,7 +98,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
         kfree(xa);
 }
 
-bool __mem_id_disconnect(int id)
+bool __mem_id_disconnect(int id, bool force)
 {
         struct xdp_mem_allocator *xa;
         bool safe_to_remove = true;
@@ -108,29 +111,47 @@ bool __mem_id_disconnect(int id)
                 WARN(1, "Request remove non-existing id(%d), driver bug?", id);
                 return true;
         }
+        xa->disconnect_cnt++;
 
         /* Detects in-flight packet-pages for page_pool */
         if (xa->mem.type == MEM_TYPE_PAGE_POOL)
                 safe_to_remove = page_pool_request_shutdown(xa->page_pool);
 
-        if (safe_to_remove &&
+        /* TODO: Tracepoint will be added here in next-patch */
+
+        if ((safe_to_remove || force) &&
             !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
                 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
 
         mutex_unlock(&mem_id_lock);
-        return safe_to_remove;
+        return (safe_to_remove|force);
 }
 
 #define DEFER_TIME (msecs_to_jiffies(1000))
+#define DEFER_WARN_INTERVAL (30 * HZ)
+#define DEFER_MAX_RETRIES 120
 
 static void mem_id_disconnect_defer_retry(struct work_struct *wq)
 {
         struct delayed_work *dwq = to_delayed_work(wq);
         struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
+        bool force = false;
+
+        if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
+                force = true;
 
-        if (__mem_id_disconnect(xa->mem.id))
+        if (__mem_id_disconnect(xa->mem.id, force))
                 return;
 
+        /* Periodic warning */
+        if (time_after_eq(jiffies, xa->defer_warn)) {
+                int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;
+
+                pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
+                        __func__, xa->mem.id, xa->disconnect_cnt, sec);
+                xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
+        }
+
         /* Still not ready to be disconnected, retry later */
         schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
 }
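One detail worth noting in the hunk above: elapsed time is computed as (s32)((u32)jiffies - (u32)xa->defer_start) / HZ rather than as a plain subtraction, so the reported seconds stay correct even if the jiffies counter wraps between defer_start and now. A standalone userspace sketch of the same cast idiom (the HZ value and function names here are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 100 /* illustrative tick rate */

    static int elapsed_sec(uint32_t now, uint32_t start)
    {
        /* unsigned subtraction wraps modulo 2^32, so the delta is right
         * even when 'now' has wrapped past zero after 'start' */
        return (int32_t)(now - start) / HZ;
    }

    int main(void)
    {
        uint32_t start = 0xFFFFFF00u;     /* just before the 32-bit wrap */
        uint32_t now   = start + 90 * HZ; /* 90 seconds later, wrapped */

        printf("elapsed: %d sec\n", elapsed_sec(now, start)); /* -> 90 */
        return 0;
    }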
@@ -153,7 +174,7 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
         if (id == 0)
                 return;
 
-        if (__mem_id_disconnect(id))
+        if (__mem_id_disconnect(id, false))
                 return;
 
         /* Could not disconnect, defer new disconnect attempt to later */
@@ -164,6 +185,8 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
                 mutex_unlock(&mem_id_lock);
                 return;
         }
+        xa->defer_start = jiffies;
+        xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
         INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
         mutex_unlock(&mem_id_lock);
@@ -388,10 +411,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                 page = virt_to_head_page(data);
-                if (xa) {
+                if (likely(xa)) {
                         napi_direct &= !xdp_return_frame_no_direct();
                         page_pool_put_page(xa->page_pool, page, napi_direct);
                 } else {
+                        /* Hopefully stack show who to blame for late return */
+                        WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
                         put_page(page);
                 }
                 rcu_read_unlock();
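A side note on the if (likely(xa)) change in the last hunk: likely() is the kernel's branch-prediction hint from <linux/compiler.h>, built on __builtin_expect(), which asks the compiler to lay out the lookup-hit path (pool still registered) as the fall-through case. A small userspace sketch of the same pattern; the macro definitions below are local stand-ins for the kernel's:

    #include <stdio.h>

    /* userspace stand-ins for the kernel's hints in <linux/compiler.h> */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
        void *xa = &xa; /* stand-in for a successful rhashtable lookup */

        if (likely(xa)) /* hot path: page_pool still registered */
            puts("page_pool_put_page() fast path");
        else            /* cold path: pool already force-disconnected */
            puts("WARN_ONCE + put_page() fallback");
        return 0;
    }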