author:    Karsten Graul <kgraul@linux.ibm.com>    2020-04-29 17:10:40 +0200
committer: David S. Miller <davem@davemloft.net>   2020-04-29 12:26:32 -0700
commit 387707fdf48697c667dd5e9715ac4feb41602d15
tree   c6613774062587509d31e09dca852dd3ae6e51a5 /net/smc/smc_ib.c
parent net/smc: introduce link_idx for link group array
net/smc: convert static link ID to dynamic references
As a preparation for the support of multiple links, remove the usage of a static link id (SMC_SINGLE_LINK) and allow dynamic link ids.

Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Reviewed-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
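In practice the conversion works because struct smc_link already carries both the IB device and, since the parent commit, its own index in the link group array. A minimal sketch of the fields involved and the resulting calling convention (the field comments and the caller line are illustrative, not taken from this diff):

	/* sketch: the smc_link fields that replace (smcibdev, SMC_SINGLE_LINK) */
	struct smc_link {
		struct smc_ib_device *smcibdev;	/* IB device used by this link */
		u8 link_idx;			/* index in the link group's link array */
		/* ... */
	};

	/* callers that used to pass the device now pass the link itself: */
	smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);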
Diffstat (limited to 'net/smc/smc_ib.c')
 net/smc/smc_ib.c | 58 ++++++++++++++++++++++++++---------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 440f9e319a38..c090678a3e5a 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -389,15 +389,15 @@ void smc_ib_put_memory_region(struct ib_mr *mr)
ib_dereg_mr(mr);
}
-static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
+static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
unsigned int offset = 0;
int sg_num;
/* map the largest prefix of a dma mapped SG list */
- sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
- buf_slot->sgt[SMC_SINGLE_LINK].sgl,
- buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+ sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
+ buf_slot->sgt[link_idx].sgl,
+ buf_slot->sgt[link_idx].orig_nents,
&offset, PAGE_SIZE);
return sg_num;
@@ -405,29 +405,29 @@ static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
- struct smc_buf_desc *buf_slot)
+ struct smc_buf_desc *buf_slot, u8 link_idx)
{
- if (buf_slot->mr_rx[SMC_SINGLE_LINK])
+ if (buf_slot->mr_rx[link_idx])
return 0; /* already done */
- buf_slot->mr_rx[SMC_SINGLE_LINK] =
+ buf_slot->mr_rx[link_idx] =
ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
- if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
+ if (IS_ERR(buf_slot->mr_rx[link_idx])) {
int rc;
- rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
- buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
+ rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
+ buf_slot->mr_rx[link_idx] = NULL;
return rc;
}
- if (smc_ib_map_mr_sg(buf_slot) != 1)
+ if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
return -EINVAL;
return 0;
}
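For orientation, a hedged example of how a per-link registration might look on the caller side after this change (the surrounding helper and the access flags follow the usual SMC RMB registration pattern and are assumptions, not part of this hunk):

	/* register the receive buffer's memory region for one specific link */
	rc = smc_ib_get_memory_region(lnk->roce_pd,
				      IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE,
				      buf_desc, lnk->link_idx);
	if (rc)
		return rc;	/* alloc error, or -EINVAL if the SG list did not map to one MR entry */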
/* synchronize buffer usage for cpu access */
-void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
+void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction)
{
@@ -435,11 +435,11 @@ void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
unsigned int i;
/* for now there is just one DMA address */
- for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
- buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
+ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
+ buf_slot->sgt[lnk->link_idx].nents, i) {
if (!sg_dma_len(sg))
break;
- ib_dma_sync_single_for_cpu(smcibdev->ibdev,
+ ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
sg_dma_address(sg),
sg_dma_len(sg),
data_direction);
@@ -447,7 +447,7 @@ void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
}
/* synchronize buffer usage for device access */
-void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
+void smc_ib_sync_sg_for_device(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction)
{
@@ -455,11 +455,11 @@ void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
unsigned int i;
/* for now there is just one DMA address */
- for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
- buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
+ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
+ buf_slot->sgt[lnk->link_idx].nents, i) {
if (!sg_dma_len(sg))
break;
- ib_dma_sync_single_for_device(smcibdev->ibdev,
+ ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
sg_dma_address(sg),
sg_dma_len(sg),
data_direction);
@@ -467,15 +467,15 @@ void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
}
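Both sync helpers now derive the IB device from the link, so callers need no separate device plumbing. A rough usage sketch (the DMA direction shown is the typical one for SMC receive buffers, an assumption, not stated in this hunk):

	/* give the buffer back to the device before it may DMA into it again */
	smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);

	/* make device-written data visible before the CPU reads it */
	smc_ib_sync_sg_for_cpu(lnk, buf_desc, DMA_FROM_DEVICE);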
/* Map a new TX or RX buffer SG-table to DMA */
-int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
+int smc_ib_buf_map_sg(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction)
{
int mapped_nents;
- mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
- buf_slot->sgt[SMC_SINGLE_LINK].sgl,
- buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+ mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
+ buf_slot->sgt[lnk->link_idx].sgl,
+ buf_slot->sgt[lnk->link_idx].orig_nents,
data_direction);
if (!mapped_nents)
return -ENOMEM;
@@ -483,18 +483,18 @@ int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
return mapped_nents;
}
-void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
+void smc_ib_buf_unmap_sg(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction)
{
- if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
+ if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
return; /* already unmapped */
- ib_dma_unmap_sg(smcibdev->ibdev,
- buf_slot->sgt[SMC_SINGLE_LINK].sgl,
- buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+ ib_dma_unmap_sg(lnk->smcibdev->ibdev,
+ buf_slot->sgt[lnk->link_idx].sgl,
+ buf_slot->sgt[lnk->link_idx].orig_nents,
data_direction);
- buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
+ buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}
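Note that the unmap path treats sgl->dma_address == 0 as an "already unmapped" marker and resets it to zero at the end, so repeated calls are harmless. An illustrative per-link map/unmap lifecycle (not from this commit):

	/* map the buffer's SG table once per link; returns mapped entries or -ENOMEM */
	rc = smc_ib_buf_map_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	if (rc < 0)
		return rc;
	/* ... RDMA traffic on this link ... */
	smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);	/* safe to call twice */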
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)