author     Jérôme Pouiller <jerome.pouiller@silabs.com>      2020-07-01 17:06:55 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2020-07-03 10:33:06 +0200
commit     2a30cb163449b91eb65b7ad971198550c6a16f97 (patch)
tree       27d938ff00f8cf008adf8c39e748cc4f3b6c3138 /drivers/staging/wfx/queue.c
parent     staging: qlge: qlge_ethtool.c: Proper indentation. (diff)
download   linux-dev-2a30cb163449b91eb65b7ad971198550c6a16f97.tar.xz
           linux-dev-2a30cb163449b91eb65b7ad971198550c6a16f97.zip
staging: wfx: associate tx_queues to vifs
The device handles 4 queues (one per AC) for each virtual interface (and a
maximum of 4 virtual interfaces). Until now, the driver unified the queues of
all the interfaces and handled only 4 queues for the whole device. This
architecture did not allow the traffic to be balanced between the vifs.

So, this patch relocates the queues into the vifs and changes the API
accordingly.

Signed-off-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
Link: https://lore.kernel.org/r/20200701150707.222985-2-Jerome.Pouiller@silabs.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
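In data-structure terms, the patch moves the per-AC queue array from struct
wfx_dev into struct wfx_vif, while the tx_pending list and the tx_dequeue wait
queue stay per-device. The following is a minimal sketch of the resulting
layout, not the driver's real definitions: only the members visible in this
diff are shown, and the actual structs carry many more fields.

    #include <linux/atomic.h>
    #include <linux/skbuff.h>
    #include <linux/wait.h>
    #include <net/mac80211.h>	/* IEEE80211_NUM_ACS == 4 */

    struct wfx_queue {
    	struct sk_buff_head normal;	/* regular frames for one AC */
    	struct sk_buff_head cab;	/* "content after (DTIM) beacon" frames */
    	atomic_t pending_frames;	/* frames currently owned by the device */
    };

    struct wfx_dev {
    	struct sk_buff_head tx_pending;	/* still per-device */
    	wait_queue_head_t tx_dequeue;	/* still per-device */
    	/* before this patch: struct wfx_queue tx_queue[IEEE80211_NUM_ACS]; */
    };

    struct wfx_vif {
    	struct wfx_dev *wdev;
    	int id;
    	/* after this patch: one set of AC queues per interface */
    	struct wfx_queue tx_queue[IEEE80211_NUM_ACS];
    };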
Diffstat (limited to 'drivers/staging/wfx/queue.c')
-rw-r--r--	drivers/staging/wfx/queue.c	143
1 file changed, 63 insertions(+), 80 deletions(-)
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 93ea2b72febd..7ec36598d9a8 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -57,84 +57,57 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
wfx_tx_flush(wdev);
}
-void wfx_tx_queues_init(struct wfx_dev *wdev)
+void wfx_tx_queues_init(struct wfx_vif *wvif)
{
int i;
- skb_queue_head_init(&wdev->tx_pending);
- init_waitqueue_head(&wdev->tx_dequeue);
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- skb_queue_head_init(&wdev->tx_queue[i].normal);
- skb_queue_head_init(&wdev->tx_queue[i].cab);
+ skb_queue_head_init(&wvif->tx_queue[i].normal);
+ skb_queue_head_init(&wvif->tx_queue[i].cab);
}
}
-void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
+void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
int i;
- WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
- WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
- WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
+ WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
+ WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].normal));
+ WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].cab));
}
}
-static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
- struct sk_buff_head *skb_queue, int vif_id)
+bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
{
- struct hif_msg *hif_msg;
- struct sk_buff *skb;
-
- spin_lock_bh(&skb_queue->lock);
- skb_queue_walk(skb_queue, skb) {
- hif_msg = (struct hif_msg *)skb->data;
- if (vif_id < 0 || hif_msg->interface == vif_id) {
- spin_unlock_bh(&skb_queue->lock);
- return false;
- }
- }
- spin_unlock_bh(&skb_queue->lock);
- return true;
+ return skb_queue_empty(&queue->normal) && skb_queue_empty(&queue->cab);
}
-bool wfx_tx_queue_empty(struct wfx_dev *wdev,
- struct wfx_queue *queue, int vif_id)
-{
- return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
- __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
-}
-
-static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
- struct sk_buff_head *skb_queue, int vif_id,
+static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
+ struct sk_buff_head *skb_queue,
struct sk_buff_head *dropped)
{
struct sk_buff *skb, *tmp;
- struct hif_msg *hif_msg;
spin_lock_bh(&skb_queue->lock);
skb_queue_walk_safe(skb_queue, skb, tmp) {
- hif_msg = (struct hif_msg *)skb->data;
- if (vif_id < 0 || hif_msg->interface == vif_id) {
- __skb_unlink(skb, skb_queue);
- skb_queue_head(dropped, skb);
- }
+ __skb_unlink(skb, skb_queue);
+ skb_queue_head(dropped, skb);
}
spin_unlock_bh(&skb_queue->lock);
}
-void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
- int vif_id, struct sk_buff_head *dropped)
+void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
+ struct sk_buff_head *dropped)
{
- __wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
- __wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
- wake_up(&wdev->tx_dequeue);
+ __wfx_tx_queue_drop(wvif, &queue->cab, dropped);
+ __wfx_tx_queue_drop(wvif, &queue->normal, dropped);
+ wake_up(&wvif->wdev->tx_dequeue);
}
-void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
+void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
{
- struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
@@ -146,39 +119,45 @@ void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
struct wfx_queue *queue;
+ struct wfx_vif *wvif;
+ struct hif_msg *hif;
struct sk_buff *skb;
WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
__func__);
while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
- queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
- WARN_ON(skb_get_queue_mapping(skb) > 3);
- WARN_ON(!atomic_read(&queue->pending_frames));
- atomic_dec(&queue->pending_frames);
+ hif = (struct hif_msg *)skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (wvif) {
+ queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ }
skb_queue_head(dropped, skb);
}
}
-struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+struct sk_buff *wfx_pending_get(struct wfx_vif *wvif, u32 packet_id)
{
struct wfx_queue *queue;
struct hif_req_tx *req;
struct sk_buff *skb;
- spin_lock_bh(&wdev->tx_pending.lock);
- skb_queue_walk(&wdev->tx_pending, skb) {
+ spin_lock_bh(&wvif->wdev->tx_pending.lock);
+ skb_queue_walk(&wvif->wdev->tx_pending, skb) {
req = wfx_skb_txreq(skb);
if (req->packet_id == packet_id) {
- spin_unlock_bh(&wdev->tx_pending.lock);
- queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ spin_unlock_bh(&wvif->wdev->tx_pending.lock);
+ queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
WARN_ON(skb_get_queue_mapping(skb) > 3);
WARN_ON(!atomic_read(&queue->pending_frames));
atomic_dec(&queue->pending_frames);
- skb_unlink(skb, &wdev->tx_pending);
+ skb_unlink(skb, &wvif->wdev->tx_pending);
return skb;
}
}
- spin_unlock_bh(&wdev->tx_pending.lock);
+ spin_unlock_bh(&wvif->wdev->tx_pending.lock);
WARN(1, "cannot find packet in pending queue");
return NULL;
}
@@ -221,7 +200,6 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
- struct wfx_dev *wdev = wvif->wdev;
int i;
if (wvif->vif->type != NL80211_IFTYPE_AP)
@@ -229,33 +207,39 @@ bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
for (i = 0; i < IEEE80211_NUM_ACS; ++i)
// Note: since only AP can have mcast frames in queue and only
// one vif can be AP, all queued frames have the same interface id
- if (!skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
+ if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
return true;
return false;
}
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
- struct wfx_queue *sorted_queues[IEEE80211_NUM_ACS];
+ struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
+ int i, j, num_queues = 0;
struct wfx_vif *wvif;
struct hif_msg *hif;
struct sk_buff *skb;
- int i, j;
-
- // bubble sort
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- sorted_queues[i] = &wdev->tx_queue[i];
- for (j = i; j > 0; j--)
- if (atomic_read(&sorted_queues[j]->pending_frames) <
- atomic_read(&sorted_queues[j - 1]->pending_frames))
- swap(sorted_queues[j - 1], sorted_queues[j]);
+
+ // sort the queues
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ WARN_ON(num_queues >= ARRAY_SIZE(queues));
+ queues[num_queues] = &wvif->tx_queue[i];
+ for (j = num_queues; j > 0; j--)
+ if (atomic_read(&queues[j]->pending_frames) <
+ atomic_read(&queues[j - 1]->pending_frames))
+ swap(queues[j - 1], queues[j]);
+ num_queues++;
+ }
}
+
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
if (!wvif->after_dtim_tx_allowed)
continue;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- skb = skb_dequeue(&sorted_queues[i]->cab);
+ for (i = 0; i < num_queues; i++) {
+ skb = skb_dequeue(&queues[i]->cab);
if (!skb)
continue;
// Note: since only AP can have mcast frames in queue
@@ -263,21 +247,20 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
// same interface id
hif = (struct hif_msg *)skb->data;
WARN_ON(hif->interface != wvif->id);
- WARN_ON(sorted_queues[i] !=
- &wdev->tx_queue[skb_get_queue_mapping(skb)]);
- atomic_inc(&sorted_queues[i]->pending_frames);
+ WARN_ON(queues[i] !=
+ &wvif->tx_queue[skb_get_queue_mapping(skb)]);
+ atomic_inc(&queues[i]->pending_frames);
return skb;
}
// No more multicast to send
wvif->after_dtim_tx_allowed = false;
schedule_work(&wvif->update_tim_work);
}
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- skb = skb_dequeue(&sorted_queues[i]->normal);
+
+ for (i = 0; i < num_queues; i++) {
+ skb = skb_dequeue(&queues[i]->normal);
if (skb) {
- WARN_ON(sorted_queues[i] !=
- &wdev->tx_queue[skb_get_queue_mapping(skb)]);
- atomic_inc(&sorted_queues[i]->pending_frames);
+ atomic_inc(&queues[i]->pending_frames);
return skb;
}
}
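
For reference, the scheduling policy implemented by wfx_tx_queues_get_skb()
above reads as: collect every AC queue of every vif, insertion-sort them by
their count of in-flight frames (least loaded first), serve "content after
beacon" traffic for vifs allowed to transmit after DTIM, then fall back to the
normal queues in the same order. Below is a small user-space sketch of just
the sort step, with made-up pending counts; the 4x4 bounds mirror
IEEE80211_NUM_ACS and the 4-vif maximum stated in the commit message.

    #include <stdio.h>

    #define NUM_ACS  4	/* IEEE80211_NUM_ACS */
    #define MAX_VIFS 4	/* upper bound from the commit message */

    struct queue { int pending_frames; };

    int main(void)
    {
    	/* made-up pending counts for a device with two active vifs */
    	struct queue q[MAX_VIFS][NUM_ACS] = {
    		{ {3}, {0}, {7}, {1} },
    		{ {2}, {5}, {0}, {4} },
    	};
    	struct queue *sorted[NUM_ACS * MAX_VIFS];
    	int num = 0, v, i, j;

    	/* same insertion sort as the patch: least-loaded queue first */
    	for (v = 0; v < 2; v++) {
    		for (i = 0; i < NUM_ACS; i++) {
    			sorted[num] = &q[v][i];
    			for (j = num; j > 0; j--)
    				if (sorted[j]->pending_frames <
    				    sorted[j - 1]->pending_frames) {
    					struct queue *tmp = sorted[j - 1];
    					sorted[j - 1] = sorted[j];
    					sorted[j] = tmp;
    				}
    			num++;
    		}
    	}

    	for (i = 0; i < num; i++)
    		printf("%d ", sorted[i]->pending_frames);
    	printf("\n");	/* prints: 0 0 1 2 3 4 5 7 */
    	return 0;
    }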