aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/busy_poll.h
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2017-03-24 10:08:18 -0700
committerDavid S. Miller <davem@davemloft.net>2017-03-24 20:49:31 -0700
commit37056719bba500d0d2b8216fdf641e5507ec9a0e (patch)
treecb29ca52bc837c095ee470e8caf6410af54f9b03 /include/net/busy_poll.h
parentnet: Change return type of sk_busy_loop from bool to void (diff)
downloadlinux-dev-37056719bba500d0d2b8216fdf641e5507ec9a0e.tar.xz
linux-dev-37056719bba500d0d2b8216fdf641e5507ec9a0e.zip
net: Track start of busy loop instead of when it should end
This patch flips the logic we were using to determine if the busy polling has timed out. The main motivation for this is that we will need to support two different possible timeout values in the future, and by recording the start time rather than when we would want to end, we can focus on making the end_time specific to the task, be it epoll or socket based polling.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/busy_poll.h')
-rw-r--r--include/net/busy_poll.h68
1 file changed, 38 insertions(+), 30 deletions(-)
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c55760f4820f..72c82f2ea536 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -46,62 +46,70 @@ static inline bool net_busy_loop_on(void)
return sysctl_net_busy_poll;
}
-static inline u64 busy_loop_us_clock(void)
+static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return local_clock() >> 10;
+ return sk->sk_ll_usec && !signal_pending(current);
}
-static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
-{
- return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
-}
+void sk_busy_loop(struct sock *sk, int nonblock);
-/* in poll/select we use the global sysctl_net_ll_poll value */
-static inline unsigned long busy_loop_end_time(void)
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline unsigned long net_busy_loop_on(void)
{
- return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
+ return 0;
}
-static inline bool sk_can_busy_loop(const struct sock *sk)
+static inline bool sk_can_busy_loop(struct sock *sk)
{
- return sk->sk_ll_usec && !signal_pending(current);
+ return false;
}
-static inline bool busy_loop_timeout(unsigned long end_time)
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
- unsigned long now = busy_loop_us_clock();
-
- return time_after(now, end_time);
}
-void sk_busy_loop(struct sock *sk, int nonblock);
+#endif /* CONFIG_NET_RX_BUSY_POLL */
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline unsigned long net_busy_loop_on(void)
+static inline unsigned long busy_loop_current_time(void)
{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return (unsigned long)(local_clock() >> 10);
+#else
return 0;
+#endif
}
-static inline unsigned long busy_loop_end_time(void)
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline bool busy_loop_timeout(unsigned long start_time)
{
- return 0;
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
- return false;
-}
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
-static inline bool busy_loop_timeout(unsigned long end_time)
-{
+ return time_after(now, end_time);
+ }
+#endif
return true;
}
-static inline void sk_busy_loop(struct sock *sk, int nonblock)
+static inline bool sk_busy_loop_timeout(struct sock *sk,
+ unsigned long start_time)
{
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
+
+ return time_after(now, end_time);
+ }
+#endif
+ return true;
+}
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,