Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
 drivers/net/wireless/rt2x00/rt2x00queue.c | 239 ++++++++++++++++++++++++++---
 1 file changed, 218 insertions(+), 21 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a3d79c7a21c6..ca82b3a91697 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -199,15 +199,18 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- unsigned int l2pad = L2PAD_SIZE(header_length);
+ /*
+ * L2 padding is only present if the skb contains more than just the
+ * IEEE 802.11 header.
+ */
+ unsigned int l2pad = (skb->len > header_length) ?
+ L2PAD_SIZE(header_length) : 0;
if (!l2pad)
return;
- memmove(skb->data + header_length, skb->data + header_length + l2pad,
- skb->len - header_length - l2pad);
-
- skb_trim(skb, skb->len - l2pad);
+ memmove(skb->data + l2pad, skb->data, header_length);
+ skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
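/*
 * For context, a sketch of the layout this helper handles, assuming
 * the rt2x00queue.h definition of L2PAD_SIZE (it pads the 802.11
 * header so the payload starts on a 4-byte boundary):
 *
 *   before: | header | l2pad | payload ... |
 *   after:           | header | payload ... |
 *
 * Moving the short header forward and pulling the pad is cheaper than
 * the old approach of shifting the whole payload back and trimming
 * the tail, and the new l2pad computation skips frames that carry no
 * payload at all.
 */
#define L2PAD_SIZE(__hdrlen)	(-(__hdrlen) & 3)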
@@ -468,7 +471,7 @@ static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
*/
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
- queue->rt2x00dev->ops->lib->kick_tx_queue(queue);
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -582,7 +585,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
rt2x00queue_free_skb(intf->beacon);
if (!enable_beacon) {
- rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
+ rt2x00queue_stop_queue(intf->beacon->queue);
mutex_unlock(&intf->beacon_skb_mutex);
return 0;
}
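/*
 * The renamed callbacks used above assume driver operations along
 * these lines; an abbreviated sketch, the authoritative struct lives
 * in rt2x00.h:
 */
struct rt2x00lib_ops {
	/* ... */
	void (*start_queue)(struct data_queue *queue);
	void (*kick_queue)(struct data_queue *queue);
	void (*stop_queue)(struct data_queue *queue);
	void (*flush_queue)(struct data_queue *queue);
	/* ... */
};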
@@ -735,6 +738,210 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to disable the queue
+ * inside mac80211.
+ */
+ ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to enable the queue
+ * inside mac80211.
+ */
+ ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ case QID_RX:
+ /*
+ * For RX we need to kick the queue now in order to
+ * receive frames.
+ */
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
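/*
 * Usage illustration (hypothetical call site, not part of this patch):
 * a driver that must quiesce TX around a channel switch can bracket
 * the critical section with pause/unpause, which only toggles the
 * mac80211 queue state and never touches the hardware:
 */
static void example_switch_channel(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_pause_queue(queue);

	/* ... reprogram the channel registers ... */

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_unpause_queue(queue);
}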
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
+ set_bit(QUEUE_PAUSED, &queue->flags);
+
+ queue->rt2x00dev->ops->lib->start_queue(queue);
+
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
+ rt2x00queue_pause_queue(queue);
+
+ queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
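/*
 * Informal state sketch for the two flag bits used above:
 *
 *   stopped --rt2x00queue_start_queue()--> started+paused --> running
 *   running --rt2x00queue_pause_queue()--> started+paused
 *   any     --rt2x00queue_stop_queue()---> stopped
 *
 * QUEUE_STARTED mirrors the hardware state and only changes under
 * status_lock; QUEUE_PAUSED mirrors the mac80211-visible state and is
 * toggled atomically, so pause/unpause stay callable from contexts
 * that cannot take the mutex.  Note how start sets QUEUE_PAUSED before
 * invoking the driver's start_queue, so ieee80211_wake_queue() fires
 * only once the hardware queue is actually running.
 */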
+
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
+{
+ unsigned int i;
+ bool started;
+ bool tx_queue =
+ (queue->qid == QID_AC_VO) ||
+ (queue->qid == QID_AC_VI) ||
+ (queue->qid == QID_AC_BE) ||
+ (queue->qid == QID_AC_BK);
+
+ mutex_lock(&queue->status_lock);
+
+ /*
+ * If the queue has been started, we must stop it temporarily
+ * to prevent any new frames from being queued on the device. If
+ * we are not dropping the pending frames, the queue must
+ * only be stopped in the software and not the hardware,
+ * otherwise the queue will never become empty on its own.
+ */
+ started = test_bit(QUEUE_STARTED, &queue->flags);
+ if (started) {
+ /*
+ * Pause the queue
+ */
+ rt2x00queue_pause_queue(queue);
+
+ /*
+ * If we are not supposed to drop any pending
+ * frames, we must kick (force a start of) the
+ * queue to make sure the hardware will start
+ * transmitting.
+ */
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ }
+
+ /*
+ * Check if the driver supports flushing; we can only guarantee
+ * full support for flushing if the driver is able
+ * to cancel all pending frames (drop = true).
+ */
+ if (drop && queue->rt2x00dev->ops->lib->flush_queue)
+ queue->rt2x00dev->ops->lib->flush_queue(queue);
+
+ /*
+ * When we don't want to drop any frames, or when
+ * the driver doesn't fully flush the queue correctly,
+ * we must wait for the queue to become empty.
+ */
+ for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
+ msleep(10);
+
+ /*
+ * The queue flush has failed...
+ */
+ if (unlikely(!rt2x00queue_empty(queue)))
+ WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
+
+ /*
+ * Restore the queue to the previous status
+ */
+ if (started)
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
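/*
 * A hedged sketch of how this maps onto mac80211's .flush callback
 * (the real hook belongs in rt2x00mac.c and may differ in detail):
 */
void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);
}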
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_start_queue will call ieee80211_wake_queue
+ * for each queue after it has been properly initialized.
+ */
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_start_queue(queue);
+
+ rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
+
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_stop_queue will call ieee80211_stop_queue
+ * as well, but since we are completely shutting everything
+ * down now, it is much safer to stop all TX queues at once
+ * and use rt2x00queue_stop_queue for the cleanup.
+ */
+ ieee80211_stop_queues(rt2x00dev->hw);
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_stop_queue(queue);
+
+ rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+
+ rt2x00queue_flush_queue(rt2x00dev->rx, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
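/*
 * These aggregate helpers are meant for the radio on/off paths; a
 * sketch of the expected enable sequence (hypothetical function,
 * assuming the usual rt2x00lib set_device_state flow):
 */
static int example_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	int status;

	status = rt2x00dev->ops->lib->set_device_state(rt2x00dev,
						       STATE_RADIO_ON);
	if (status)
		return status;

	/* Queues are started (and unpaused) only once the radio is up. */
	rt2x00queue_start_queues(rt2x00dev);

	return 0;
}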
+
static void rt2x00queue_reset(struct data_queue *queue)
{
unsigned long irqflags;
@@ -753,14 +960,6 @@ static void rt2x00queue_reset(struct data_queue *queue)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- txall_queue_for_each(rt2x00dev, queue)
- rt2x00dev->ops->lib->kill_tx_queue(queue);
-}
-
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
@@ -769,11 +968,8 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
queue_for_each(rt2x00dev, queue) {
rt2x00queue_reset(queue);
- for (i = 0; i < queue->limit; i++) {
+ for (i = 0; i < queue->limit; i++)
rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
- if (queue->qid == QID_RX)
- rt2x00queue_index_inc(queue, Q_INDEX);
- }
}
}
@@ -902,6 +1098,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue, enum data_queue_qid qid)
{
+ mutex_init(&queue->status_lock);
spin_lock_init(&queue->index_lock);
queue->rt2x00dev = rt2x00dev;
@@ -944,7 +1141,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
/*
* Initialize queue parameters.
* RX: qid = QID_RX
- * TX: qid = QID_AC_BE + index
+ * TX: qid = QID_AC_VO + index
* TX: cw_min: 2^5 = 32.
* TX: cw_max: 2^10 = 1024.
* BCN: qid = QID_BEACON
@@ -952,7 +1149,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
*/
rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
- qid = QID_AC_BE;
+ qid = QID_AC_VO;
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_init(rt2x00dev, queue, qid++);
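/*
 * The QID_AC_BE -> QID_AC_VO switch assumes the qid enumeration now
 * starts with the four access categories in mac80211 order, so that
 * mac80211 queue index i maps directly onto QID_AC_VO + i.  Sketch of
 * the assumed ordering (the authoritative enum is in rt2x00queue.h):
 */
enum data_queue_qid {
	QID_AC_VO = 0,	/* mac80211 queue 0: voice */
	QID_AC_VI = 1,	/* mac80211 queue 1: video */
	QID_AC_BE = 2,	/* mac80211 queue 2: best effort */
	QID_AC_BK = 3,	/* mac80211 queue 3: background */
	/* ... QID_RX, QID_BEACON, etc. ... */
};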