author	Håkon Bugge <haakon.bugge@oracle.com>	2021-05-25 19:49:09 +0200
committer	Jason Gunthorpe <jgg@nvidia.com>	2021-05-28 20:26:16 -0300
commit	d58c23c9254894d438ce5c516745cf694eac86b7 (patch)
tree	bcc6f5c8cacae8846893daea9ffd4e8809c9f82d	/drivers/infiniband/core/cache.c
parent	RDMA/hns: Refactor capability configuration flow of VF (diff)
IB/core: Only update PKEY and GID caches on respective events
Both the PKEY and GID tables in an HCA can hold on the order of hundreds of
entries. Reading them is expensive, partly because the API for retrieving
them only returns a single entry at a time. Further, on certain
implementations, e.g., CX-3, the VFs are paravirtualized in this respect
and have to rely on the PF driver to perform the read. This again demands
VF-to-PF communication.

IB Core's cache is refreshed on all events. Hence, filter the refresh of
the PKEY and GID caches based on the event received being
IB_EVENT_PKEY_CHANGE and IB_EVENT_GID_CHANGE respectively.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Link: https://lore.kernel.org/r/1621964949-28484-1-git-send-email-haakon.bugge@oracle.com
Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
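The core of the change is the mapping from the received event to a pair of
per-table update flags, visible in the ib_cache_event_task() hunk below. A
minimal sketch of that dispatch, assuming the five-argument
ib_cache_update() signature this patch introduces (the wrapper name
cache_update_for_event is hypothetical; ib_cache_update(), struct ib_event
and the IB_EVENT_* constants are the kernel's own):

static int cache_update_for_event(struct ib_event *event,
				  bool enforce_security)
{
	/* Only the table named by the event is re-read; an unrelated
	 * event no longer triggers the expensive one-entry-at-a-time
	 * PKEY/GID table walk (or, on CX-3 VFs, the VF-to-PF round
	 * trips it implies).
	 */
	bool update_gids  = (event->event == IB_EVENT_GID_CHANGE);
	bool update_pkeys = (event->event == IB_EVENT_PKEY_CHANGE);

	return ib_cache_update(event->device, event->element.port_num,
			       update_gids, update_pkeys,
			       enforce_security);
}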
Diffstat (limited to 'drivers/infiniband/core/cache.c')
-rw-r--r--	drivers/infiniband/core/cache.c	23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3b0991fedd81..d32045986109 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1465,10 +1465,12 @@ err:
}
static int
-ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
+ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
+ bool update_pkeys, bool enforce_security)
{
struct ib_port_attr *tprops = NULL;
- struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
+ struct ib_pkey_cache *pkey_cache = NULL;
+ struct ib_pkey_cache *old_pkey_cache = NULL;
int i;
int ret;
@@ -1485,14 +1487,16 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
goto err;
}
- if (!rdma_protocol_roce(device, port)) {
+ if (!rdma_protocol_roce(device, port) && update_gids) {
ret = config_non_roce_gid_cache(device, port,
tprops->gid_tbl_len);
if (ret)
goto err;
}
- if (tprops->pkey_tbl_len) {
+ update_pkeys &= !!tprops->pkey_tbl_len;
+
+ if (update_pkeys) {
pkey_cache = kmalloc(struct_size(pkey_cache, table,
tprops->pkey_tbl_len),
GFP_KERNEL);
@@ -1517,9 +1521,10 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
write_lock_irq(&device->cache_lock);
- old_pkey_cache = device->port_data[port].cache.pkey;
-
- device->port_data[port].cache.pkey = pkey_cache;
+ if (update_pkeys) {
+ old_pkey_cache = device->port_data[port].cache.pkey;
+ device->port_data[port].cache.pkey = pkey_cache;
+ }
device->port_data[port].cache.lmc = tprops->lmc;
device->port_data[port].cache.port_state = tprops->state;
@@ -1551,6 +1556,8 @@ static void ib_cache_event_task(struct work_struct *_work)
* the cache.
*/
ret = ib_cache_update(work->event.device, work->event.element.port_num,
+ work->event.event == IB_EVENT_GID_CHANGE,
+ work->event.event == IB_EVENT_PKEY_CHANGE,
work->enforce_security);
/* GID event is notified already for individual GID entries by
@@ -1624,7 +1631,7 @@ int ib_cache_setup_one(struct ib_device *device)
return err;
rdma_for_each_port (device, p) {
- err = ib_cache_update(device, p, true);
+ err = ib_cache_update(device, p, true, true, true);
if (err)
return err;
}
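One idiom worth noting in the hunks above is
update_pkeys &= !!tprops->pkey_tbl_len;. The double negation collapses a
non-zero table length to exactly 1 before the bitwise AND, so the flag
stays a clean boolean and a port with an empty PKEY table skips the
refresh even when the event asked for one. A stand-alone illustration of
the idiom (plain C outside the kernel; variable values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool update_pkeys = true; /* caller requested a PKEY refresh */
	int pkey_tbl_len = 128;   /* length reported in port attributes */

	/* !! normalizes any non-zero length to 1, so the bitwise AND
	 * cannot leave stray bits in the flag; a zero-length table
	 * clears it.
	 */
	update_pkeys &= !!pkey_tbl_len;

	printf("update_pkeys = %d\n", update_pkeys); /* prints 1 */
	return 0;
}

Note also that the setup path in ib_cache_setup_one() passes true for both
flags: the initial population must read both tables regardless of events.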