path: root/drivers/ieee1394/ieee1394_transactions.c
author    Stefan Richter <stefanr@s5r6.in-berlin.de>    2006-07-02 14:17:00 +0200
committer Stefan Richter <stefanr@s5r6.in-berlin.de>    2006-09-17 19:17:13 +0200
commit    9951903e616662e9a5dad5fbd296690e2ebbbc65 (patch)
tree      3d0de40aa9100ccebea03a339dc080f7ad80333e /drivers/ieee1394/ieee1394_transactions.c
parent    ieee1394: merge from Linus (diff)
ieee1394: shrink tlabel pools, remove tpool semaphores
This patch reduces the size of struct hpsb_host and also removes semaphores from ieee1394_transactions.c. On i386, struct hpsb_host shrinks from 10656 bytes to 6688 bytes.

This is accomplished by
 - using a single wait_queue for hpsb_get_tlabel instead of many instances of semaphores,
 - using a single lock to serialize access to all tlabel pools (the protected code regions are small, i.e. lock contention is very low),
 - omitting the sysfs attribute tlabels_allocations.

Drawback: In the rare case that a process needs to sleep because all transaction labels for the node are temporarily exhausted, it is also woken up if a tlabel for a different node becomes free, checks for an available tlabel, and is put to sleep again. The check is not costly and the situation occurs extremely rarely. (Tlabels are typically only exhausted if there was no context switch to the khpsbpkt thread which recycles tlabels.) Therefore the benefit of the reduced tpool size outweighs this drawback.

The sysfs attributes tlabels_free and tlabels_mask are no longer compiled unless CONFIG_IEEE1394_VERBOSEDEBUG is set.

The by far biggest member of struct hpsb_host, the struct csr_control csr (5272 bytes on i386), is now placed at the end of struct hpsb_host.

Note: hpsb_get_tlabel calls the macro wait_event_interruptible with a condition argument which has a side effect (allocation of a tlabel and manipulation of the packet). This side effect happens only if the condition is true. The patch relies on wait_event_interruptible not evaluating the condition again after it became true.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
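For readers unfamiliar with this kernel idiom, the following is a minimal, self-contained sketch of the allocate/sleep/wake pattern the commit message describes. It is not taken from the patch; the names NSLOTS, slot_map, try_claim_slot(), claim_slot() and release_slot() are invented for illustration and merely stand in for the tlabel bitmap, hpsb_get_tlabel_atomic(), hpsb_get_tlabel() and hpsb_free_tlabel().

    /* Sketch only: a single lock and a single wait queue guarding a
     * bitmap of slots, analogous to the tlabel pools in this patch. */
    #include <linux/bitops.h>
    #include <linux/hardirq.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    #define NSLOTS 64

    static DEFINE_SPINLOCK(slot_lock);
    static DECLARE_WAIT_QUEUE_HEAD(slot_wq);
    static unsigned long slot_map[BITS_TO_LONGS(NSLOTS)];

    /* Returns 0 and marks a slot as used (the side effect), or -EAGAIN
     * if all slots are currently taken.  Never sleeps. */
    static int try_claim_slot(int *out)
    {
            unsigned long flags;
            int i;

            spin_lock_irqsave(&slot_lock, flags);
            i = find_first_zero_bit(slot_map, NSLOTS);
            if (i >= NSLOTS) {
                    spin_unlock_irqrestore(&slot_lock, flags);
                    return -EAGAIN;
            }
            __set_bit(i, slot_map);
            spin_unlock_irqrestore(&slot_lock, flags);
            *out = i;
            return 0;
    }

    /* Sleeping allocator.  The condition passed to
     * wait_event_interruptible() has a side effect (claiming the slot);
     * this is safe only because the side effect occurs exactly when the
     * condition becomes true and the macro does not re-evaluate the
     * condition after that, which is the property the patch relies on. */
    static int claim_slot(int *out)
    {
            if (irqs_disabled() || in_atomic())
                    return try_claim_slot(out);

            return wait_event_interruptible(slot_wq, !try_claim_slot(out));
    }

    static void release_slot(int i)
    {
            unsigned long flags;

            spin_lock_irqsave(&slot_lock, flags);
            __clear_bit(i, slot_map);
            spin_unlock_irqrestore(&slot_lock, flags);

            /* Wake every sleeper; each re-checks for a free slot.  This
             * mirrors the shared wait queue, including the drawback noted
             * above when the freed slot belongs to a different node. */
            wake_up_interruptible(&slot_wq);
    }

A caller pairs claim_slot() with release_slot() around each transaction, just as hpsb_get_tlabel() and hpsb_free_tlabel() are paired once the response (or send completion for broadcasts) has arrived.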
Diffstat (limited to 'drivers/ieee1394/ieee1394_transactions.c')
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.c  |  108
1 file changed, 63 insertions(+), 45 deletions(-)
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 751960037e27..0833fc9f50c4 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -9,10 +9,9 @@
* directory of the kernel sources for details.
*/
-#include <linux/sched.h>
#include <linux/bitops.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <asm/bug.h>
#include <asm/errno.h>
@@ -21,8 +20,6 @@
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
-#include "highlevel.h"
-#include "nodemgr.h"
#include "ieee1394_transactions.h"
#define PREP_ASYNC_HEAD_ADDRESS(tc) \
@@ -32,6 +29,13 @@
packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
packet->header[2] = addr & 0xffffffff
+#ifndef HPSB_DEBUG_TLABELS
+static
+#endif
+spinlock_t hpsb_tlabel_lock = SPIN_LOCK_UNLOCKED;
+
+static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);
+
static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
@@ -115,9 +119,41 @@ static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
packet->tcode = TCODE_ISO_DATA;
}
+/* same as hpsb_get_tlabel, except that it returns immediately */
+static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet)
+{
+ unsigned long flags, *tp;
+ u8 *next;
+ int tlabel, n = NODEID_TO_NODE(packet->node_id);
+
+ /* Broadcast transactions are complete once the request has been sent.
+ * Use the same transaction label for all broadcast transactions. */
+ if (unlikely(n == ALL_NODES)) {
+ packet->tlabel = 0;
+ return 0;
+ }
+ tp = packet->host->tl_pool[n].map;
+ next = &packet->host->next_tl[n];
+
+ spin_lock_irqsave(&hpsb_tlabel_lock, flags);
+ tlabel = find_next_zero_bit(tp, 64, *next);
+ if (tlabel > 63)
+ tlabel = find_first_zero_bit(tp, 64);
+ if (tlabel > 63) {
+ spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
+ return -EAGAIN;
+ }
+ __set_bit(tlabel, tp);
+ *next = (tlabel + 1) & 63;
+ spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
+
+ packet->tlabel = tlabel;
+ return 0;
+}
+
/**
* hpsb_get_tlabel - allocate a transaction label
- * @packet: the packet who's tlabel/tpool we set
+ * @packet: the packet whose tlabel and tl_pool we set
*
* Every asynchronous transaction on the 1394 bus needs a transaction
* label to match the response to the request. This label has to be
@@ -131,42 +167,25 @@ static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
* Return value: Zero on success, otherwise non-zero. A non-zero return
* generally means there are no available tlabels. If this is called out
* of interrupt or atomic context, then it will sleep until can return a
- * tlabel.
+ * tlabel or a signal is received.
*/
int hpsb_get_tlabel(struct hpsb_packet *packet)
{
- unsigned long flags;
- struct hpsb_tlabel_pool *tp;
- int n = NODEID_TO_NODE(packet->node_id);
-
- if (unlikely(n == ALL_NODES))
- return 0;
- tp = &packet->host->tpool[n];
-
- if (irqs_disabled() || in_atomic()) {
- if (down_trylock(&tp->count))
- return 1;
- } else {
- down(&tp->count);
- }
-
- spin_lock_irqsave(&tp->lock, flags);
-
- packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
- if (packet->tlabel > 63)
- packet->tlabel = find_first_zero_bit(tp->pool, 64);
- tp->next = (packet->tlabel + 1) % 64;
- /* Should _never_ happen */
- BUG_ON(test_and_set_bit(packet->tlabel, tp->pool));
- tp->allocations++;
- spin_unlock_irqrestore(&tp->lock, flags);
-
- return 0;
+ if (irqs_disabled() || in_atomic())
+ return hpsb_get_tlabel_atomic(packet);
+
+ /* NB: The macro wait_event_interruptible() is called with a condition
+ * argument with side effect. This is only possible because the side
+ * effect does not occur until the condition became true, and
+ * wait_event_interruptible() won't evaluate the condition again after
+ * that. */
+ return wait_event_interruptible(tlabel_wq,
+ !hpsb_get_tlabel_atomic(packet));
}
/**
* hpsb_free_tlabel - free an allocated transaction label
- * @packet: packet whos tlabel/tpool needs to be cleared
+ * @packet: packet whose tlabel and tl_pool needs to be cleared
*
* Frees the transaction label allocated with hpsb_get_tlabel(). The
* tlabel has to be freed after the transaction is complete (i.e. response
@@ -177,21 +196,20 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
*/
void hpsb_free_tlabel(struct hpsb_packet *packet)
{
- unsigned long flags;
- struct hpsb_tlabel_pool *tp;
- int n = NODEID_TO_NODE(packet->node_id);
+ unsigned long flags, *tp;
+ int tlabel, n = NODEID_TO_NODE(packet->node_id);
if (unlikely(n == ALL_NODES))
return;
- tp = &packet->host->tpool[n];
-
- BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
+ tp = packet->host->tl_pool[n].map;
+ tlabel = packet->tlabel;
+ BUG_ON(tlabel > 63 || tlabel < 0);
- spin_lock_irqsave(&tp->lock, flags);
- BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
- spin_unlock_irqrestore(&tp->lock, flags);
+ spin_lock_irqsave(&hpsb_tlabel_lock, flags);
+ BUG_ON(!__test_and_clear_bit(tlabel, tp));
+ spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
- up(&tp->count);
+ wake_up_interruptible(&tlabel_wq);
}
int hpsb_packet_success(struct hpsb_packet *packet)