 drivers/infiniband/sw/siw/siw_main.c  |  8 ++++----
 drivers/infiniband/sw/siw/siw_qp_tx.c | 10 +++++-----
 2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 3f5f3d27ebe5..fd2552a9091d 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -126,7 +126,7 @@ static int siw_dev_qualified(struct net_device *netdev)
 	return 0;
 }
 
-static DEFINE_PER_CPU(atomic_t, use_cnt = ATOMIC_INIT(0));
+static DEFINE_PER_CPU(atomic_t, siw_use_cnt);
 
 static struct {
 	struct cpumask **tx_valid_cpus;
@@ -215,7 +215,7 @@ int siw_get_tx_cpu(struct siw_device *sdev)
 		if (!siw_tx_thread[cpu])
 			continue;
 
-		usage = atomic_read(&per_cpu(use_cnt, cpu));
+		usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
 		if (usage <= min_use) {
 			tx_cpu = cpu;
 			min_use = usage;
@@ -226,7 +226,7 @@ int siw_get_tx_cpu(struct siw_device *sdev)
 
 out:
 	if (tx_cpu >= 0)
-		atomic_inc(&per_cpu(use_cnt, tx_cpu));
+		atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
 	else
 		pr_warn("siw: no tx cpu found\n");
@@ -235,7 +235,7 @@ out:
 
 void siw_put_tx_cpu(int cpu)
 {
-	atomic_dec(&per_cpu(use_cnt, cpu));
+	atomic_dec(&per_cpu(siw_use_cnt, cpu));
 }
 
 static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
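
The siw_main.c hunks above touch the driver's per-CPU QP usage counter: siw_get_tx_cpu() reads each CPU's counter with per_cpu(), picks the least loaded CPU and increments its counter, and siw_put_tx_cpu() decrements it again. The stand-alone module below is a minimal sketch of that pattern only, not siw code; the demo_* names are hypothetical and error handling is reduced to the essentials.

/* Sketch only: module-private per-CPU usage counter, as in the hunks above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(atomic_t, demo_use_cnt);	/* static per-CPU data starts zeroed */

/* Pick the online CPU with the fewest users and charge it. */
static int demo_get_cpu(void)
{
	int cpu, best = -1, min_use = INT_MAX;

	for_each_online_cpu(cpu) {
		int usage = atomic_read(&per_cpu(demo_use_cnt, cpu));

		if (usage < min_use) {
			best = cpu;
			min_use = usage;
		}
	}
	if (best >= 0)
		atomic_inc(&per_cpu(demo_use_cnt, best));
	return best;
}

/* Release a CPU charged by demo_get_cpu(). */
static void demo_put_cpu(int cpu)
{
	atomic_dec(&per_cpu(demo_use_cnt, cpu));
}

static int __init demo_init(void)
{
	int cpu = demo_get_cpu();

	pr_info("demo: least loaded cpu is %d\n", cpu);
	if (cpu >= 0)
		demo_put_cpu(cpu);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The rename to siw_use_cnt keeps the module's per-CPU symbol prefixed with the driver name, presumably so it cannot collide with an identically named per-CPU variable defined elsewhere in the kernel.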
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 5e926fac51db..1c9fa8fa96e5 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -1183,12 +1183,12 @@ struct tx_task_t {
 	wait_queue_head_t waiting;
 };
 
-static DEFINE_PER_CPU(struct tx_task_t, tx_task_g);
+static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);
 
 void siw_stop_tx_thread(int nr_cpu)
 {
 	kthread_stop(siw_tx_thread[nr_cpu]);
-	wake_up(&per_cpu(tx_task_g, nr_cpu).waiting);
+	wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
 }
 
 int siw_run_sq(void *data)
@@ -1196,7 +1196,7 @@ int siw_run_sq(void *data)
 	const int nr_cpu = (unsigned int)(long)data;
 	struct llist_node *active;
 	struct siw_qp *qp;
-	struct tx_task_t *tx_task = &per_cpu(tx_task_g, nr_cpu);
+	struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
 
 	init_llist_head(&tx_task->active);
 	init_waitqueue_head(&tx_task->waiting);
@@ -1261,9 +1261,9 @@ int siw_sq_start(struct siw_qp *qp)
 	}
 	siw_qp_get(qp);
 
-	llist_add(&qp->tx_list, &per_cpu(tx_task_g, qp->tx_cpu).active);
+	llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);
 
-	wake_up(&per_cpu(tx_task_g, qp->tx_cpu).waiting);
+	wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);
 
 	return 0;
 }
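
The siw_qp_tx.c hunks rename the per-CPU TX task descriptor used by the send-queue workers: siw_run_sq() is a per-CPU kthread that drains a lock-less llist of queued QPs and sleeps on a per-CPU waitqueue, siw_sq_start() adds a QP to the target CPU's list and wakes that thread, and siw_stop_tx_thread() stops it. The stand-alone module below is a condensed sketch of that llist + waitqueue + kthread pattern under simplified assumptions (one worker bound to CPU 0, integer payloads instead of QPs); all demo_* names are hypothetical and not part of siw.

/* Sketch only: per-CPU work list drained by a dedicated kernel thread. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/llist.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/err.h>

struct demo_item {
	struct llist_node node;		/* link in the per-CPU active list */
	int payload;
};

struct demo_task {
	struct llist_head active;	/* lock-less list of pending items */
	wait_queue_head_t waiting;	/* worker sleeps here when idle */
};

static DEFINE_PER_CPU(struct demo_task, demo_task_g);
static struct task_struct *demo_thread;	/* one worker (CPU 0) for brevity */

static int demo_worker(void *data)
{
	int cpu = (long)data;
	struct demo_task *t = &per_cpu(demo_task_g, cpu);

	while (!kthread_should_stop()) {
		struct llist_node *batch;
		struct demo_item *it, *tmp;

		wait_event_interruptible(t->waiting,
					 !llist_empty(&t->active) ||
					 kthread_should_stop());

		/* Detach the whole pending list atomically, then process it. */
		batch = llist_del_all(&t->active);
		llist_for_each_entry_safe(it, tmp, batch, node) {
			pr_info("demo: cpu %d handled %d\n", cpu, it->payload);
			kfree(it);
		}
	}
	return 0;
}

/* Producer: queue one item for a CPU's worker and wake that worker. */
static void demo_queue(int cpu, int payload)
{
	struct demo_item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return;
	it->payload = payload;
	llist_add(&it->node, &per_cpu(demo_task_g, cpu).active);
	wake_up(&per_cpu(demo_task_g, cpu).waiting);
}

static int __init demo_init(void)
{
	struct demo_task *t = &per_cpu(demo_task_g, 0);

	/* Initialize before the first producer can run; siw does this inside
	 * the worker itself, before any QP can be queued. */
	init_llist_head(&t->active);
	init_waitqueue_head(&t->waiting);

	demo_thread = kthread_run(demo_worker, (void *)0L, "demo_tx/0");
	if (IS_ERR(demo_thread))
		return PTR_ERR(demo_thread);

	demo_queue(0, 42);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_thread);	/* wakes the sleeping worker and joins it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");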