author		Yury Norov <yury.norov@gmail.com>	2025-01-28 11:46:32 -0500
committer	Yury Norov <yury.norov@gmail.com>	2025-02-24 16:37:22 -0500
commit		2a402aa64c10251093273656891bf788cd95677f
tree		68bc8bfedbbf2e6fd0d65d3dfe9dd199ba32f841
parent		virtio_net: simplify virtnet_set_affinity()
ibmvnic: simplify ibmvnic_set_queue_affinity()
A loop based on cpumask_next_wrap() open-codes the dedicated macro
for_each_online_cpu_wrap(). Use the macro instead: it improves
readability and simplifies maintenance, and it also allows dropping
the cpumask handling code in the caller.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
Tested-by: Nick Child <nnac123@linux.ibm.com>
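As a quick illustration of the macro's semantics, below is a minimal
userspace model (not the kernel API) of the wrap-around walk that
for_each_online_cpu_wrap() performs: visit each online CPU exactly once,
starting at a given CPU and wrapping past the highest one back to 0. The
mask value, NR_CPUS, and the bit test are illustrative stand-ins.

/* Userspace sketch of for_each_online_cpu_wrap(i, start) semantics:
 * one full wrapped lap over the online CPUs, beginning at @start.
 */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	const unsigned long online = 0xb6;	/* CPUs 1, 2, 4, 5, 7 "online" */
	int start = 5;

	for (int i = start, n = 0; n < NR_CPUS; i = (i + 1) % NR_CPUS, n++) {
		if (!(online & (1UL << i)))
			continue;		/* skip offline CPUs */
		printf("cpu %d\n", i);		/* prints 5, 7, 1, 2, 4 */
	}
	return 0;
}

With this mask and start = 5, the visiting order is 5, 7, 1, 2, 4. That
ordering is what lets the patched loop hand the first unvisited CPU to
the next queue simply by recording i when stride runs out.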
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948..bef18ff69065 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
 		(*stragglers)--;
 	}
 	/* atomic write is safer than writing bit by bit directly */
-	for (i = 0; i < stride; i++) {
-		cpumask_set_cpu(*cpu, mask);
-		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
-					 nr_cpu_ids, false);
+	for_each_online_cpu_wrap(i, *cpu) {
+		if (!stride--) {
+			/* For the next queue we start from the first
+			 * unused CPU in this queue
+			 */
+			*cpu = i;
+			break;
+		}
+		cpumask_set_cpu(i, mask);
 	}
+
 	/* set queue affinity mask */
 	cpumask_copy(queue->affinity_mask, mask);
 	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
 	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
 	int total_queues, stride, stragglers, i;
-	unsigned int num_cpu, cpu;
+	unsigned int num_cpu, cpu = 0;
 	bool is_rx_queue;
 	int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 	stride = max_t(int, num_cpu / total_queues, 1);
 	/* number of leftover cpu's */
 	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
-	/* next available cpu to assign irq to */
-	cpu = cpumask_next(-1, cpu_online_mask);
 	for (i = 0; i < total_queues; i++) {
 		is_rx_queue = false;
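For context, here is a standalone sketch (not the driver code) of the
distribution scheme the patched ibmvnic_set_affinity() implements, under
the simplifying assumptions that CPUs 0..num_cpu-1 are all online and
that the queue/CPU counts are illustrative: each queue consumes stride
CPUs, one extra while stragglers remain, and the next queue resumes at
the first CPU the previous one left unused.

/* Sketch of the per-queue CPU distribution. The wrapped walk over
 * 0..num_cpu-1 stands in for for_each_online_cpu_wrap(i, cpu), and
 * `cpu` carries over between queues the way `*cpu = i; break;` does
 * in the kernel code above.
 */
#include <stdio.h>

int main(void)
{
	int num_cpu = 6, total_queues = 4;	/* illustrative counts */
	int stride = num_cpu / total_queues ? num_cpu / total_queues : 1;
	int stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	int cpu = 0;				/* mirrors the new `cpu = 0` */

	for (int q = 0; q < total_queues; q++) {
		int take = stride + (stragglers > 0);

		if (stragglers)
			stragglers--;
		printf("queue %d:", q);
		for (int n = 0; n < take; n++) {
			printf(" cpu%d", cpu);
			cpu = (cpu + 1) % num_cpu;	/* wrapped walk */
		}
		printf("\n");
	}
	return 0;
}

Run as written, this prints "queue 0: cpu0 cpu1", "queue 1: cpu2 cpu3",
"queue 2: cpu4", "queue 3: cpu5": six CPUs spread over four queues, with
the two straggler CPUs absorbed by the first two queues. This is the
behavior the `*cpu = i; break;` bookkeeping in the first hunk preserves
across calls.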