author     Leon Romanovsky <leonro@mellanox.com>  2019-02-03 14:55:51 +0200
committer  Jason Gunthorpe <jgg@mellanox.com>     2019-02-08 16:51:04 -0700
commit     21a428a019c9a6d133e745b529b9bf18c1187e70
tree       78f13d8cfff04995d1381ac7d19ceb8aab23f6ab /drivers/infiniband/sw
parent     RDMA/core: Share driver structure size with core
RDMA: Handle PD allocations by IB/core
The PD allocations in IB/core allow us to simplify drivers and their error flows in their .alloc_pd() paths. The changes in .alloc_pd() go hand in hand with the relevant update in .dealloc_pd().

We use this opportunity to convert .dealloc_pd() so that it cannot fail, as was suggested a long time ago; such failures do not happen, as we have never seen the WARN_ON print.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
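A minimal before/after sketch of the driver-side conversion this series applies (a hypothetical driver "foo"; the pattern matches the rdmavt and rxe changes below, the names are illustrative):

#include <rdma/ib_verbs.h>

struct foo_pd {
	struct ib_pd	ibpd;	/* must be first: the core allocates this */
	bool		user;
};

/* Before: each driver allocated its own PD and returned it (or an
 * ERR_PTR) to the core.
 */
static struct ib_pd *foo_alloc_pd_old(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct foo_pd *pd = kzalloc(sizeof(*pd), GFP_KERNEL);

	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->user = !!udata;
	return &pd->ibpd;
}

/* After: the core allocates sizeof(struct foo_pd) itself (via
 * INIT_RDMA_OBJ_SIZE, see below), so the callback only initializes
 * the embedded object and returns an errno.
 */
static int foo_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			struct ib_udata *udata)
{
	struct foo_pd *pd = container_of(ibpd, struct foo_pd, ibpd);

	pd->user = !!udata;
	return 0;
}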
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rdmavt/pd.c     | 29
-rw-r--r--  drivers/infiniband/sw/rdmavt/pd.h     |  7
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c     |  1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c  | 60
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.h  |  4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 16
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h |  2
7 files changed, 73 insertions(+), 46 deletions(-)
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
index dcc1870b8d23..6033054b22fa 100644
--- a/drivers/infiniband/sw/rdmavt/pd.c
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -50,7 +50,7 @@
/**
* rvt_alloc_pd - allocate a protection domain
- * @ibdev: ib device
+ * @ibpd: PD
* @context: optional user context
* @udata: optional user data
*
@@ -58,19 +58,14 @@
*
* Return: 0 on success
*/
-struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibpd->device;
struct rvt_dev_info *dev = ib_to_rvt(ibdev);
- struct rvt_pd *pd;
- struct ib_pd *ret;
+ struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
+ int ret = 0;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
/*
* While we could continue allocating protection domains, being
* constrained only by system resources. The IBTA spec defines that
@@ -81,8 +76,7 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
spin_lock(&dev->n_pds_lock);
if (dev->n_pds_allocated == dev->dparms.props.max_pd) {
spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto bail;
}
@@ -92,8 +86,6 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
/* ib_alloc_pd() will initialize pd->ibpd. */
pd->user = !!udata;
- ret = &pd->ibpd;
-
bail:
return ret;
}
@@ -104,16 +96,11 @@ bail:
*
* Return: always 0
*/
-int rvt_dealloc_pd(struct ib_pd *ibpd)
+void rvt_dealloc_pd(struct ib_pd *ibpd)
{
- struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--;
spin_unlock(&dev->n_pds_lock);
-
- kfree(pd);
-
- return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
index 1892ca4a9746..7a887e4a45e7 100644
--- a/drivers/infiniband/sw/rdmavt/pd.h
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -50,9 +50,8 @@
#include <rdma/rdma_vt.h>
-struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int rvt_dealloc_pd(struct ib_pd *ibpd);
+int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+void rvt_dealloc_pd(struct ib_pd *ibpd);
#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index b3f0c5578925..a19832c73d5a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -436,6 +436,7 @@ static const struct ib_device_ops rvt_dev_ops = {
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
.unmap_fmr = rvt_unmap_fmr,
+ INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
};
static noinline int check_support(struct rvt_dev_info *rdi, int verb)
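The INIT_RDMA_OBJ_SIZE() entry added above is what tells the core how much to allocate. In essence (a paraphrase of the macro introduced by the parent commit, not a verbatim copy), it stores the driver wrapper's size in the ib_device_ops table and fails the build unless the ib_pd is embedded at offset zero:

/* Paraphrased sketch of the mechanism: */
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)		\
	.size_##ib_struct =						\
		(sizeof(struct drv_struct) +				\
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)))

/* For rdmavt, .size_ib_pd thus becomes sizeof(struct rvt_pd), so the
 * core's ib_alloc_pd() can allocate that many bytes itself instead of
 * calling into the driver for memory.
 */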
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b5c91df22047..cd3f14629ba8 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -46,6 +46,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
+ .flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
@@ -119,8 +120,10 @@ static void rxe_cache_clean(size_t cnt)
for (i = 0; i < cnt; i++) {
type = &rxe_type_info[i];
- kmem_cache_destroy(type->cache);
- type->cache = NULL;
+ if (!(type->flags & RXE_POOL_NO_ALLOC)) {
+ kmem_cache_destroy(type->cache);
+ type->cache = NULL;
+ }
}
}
@@ -134,14 +137,17 @@ int rxe_cache_init(void)
for (i = 0; i < RXE_NUM_TYPES; i++) {
type = &rxe_type_info[i];
size = ALIGN(type->size, RXE_POOL_ALIGN);
- type->cache = kmem_cache_create(type->name, size,
- RXE_POOL_ALIGN,
- RXE_POOL_CACHE_FLAGS, NULL);
- if (!type->cache) {
- pr_err("Unable to init kmem cache for %s\n",
- type->name);
- err = -ENOMEM;
- goto err1;
+ if (!(type->flags & RXE_POOL_NO_ALLOC)) {
+ type->cache =
+ kmem_cache_create(type->name, size,
+ RXE_POOL_ALIGN,
+ RXE_POOL_CACHE_FLAGS, NULL);
+ if (!type->cache) {
+ pr_err("Unable to init kmem cache for %s\n",
+ type->name);
+ err = -ENOMEM;
+ goto err1;
+ }
}
}
@@ -415,6 +421,37 @@ out_put_pool:
return NULL;
}
+int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
+ unsigned long flags;
+
+ might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+
+ read_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != RXE_POOL_STATE_VALID) {
+ read_unlock_irqrestore(&pool->pool_lock, flags);
+ return -EINVAL;
+ }
+ kref_get(&pool->ref_cnt);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
+
+ kref_get(&pool->rxe->ref_cnt);
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto out_put_pool;
+
+ elem->pool = pool;
+ kref_init(&elem->ref_cnt);
+
+ return 0;
+
+out_put_pool:
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+ return -EINVAL;
+}
+
void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_entry *elem =
@@ -424,7 +461,8 @@ void rxe_elem_release(struct kref *kref)
if (pool->cleanup)
pool->cleanup(elem);
- kmem_cache_free(pool_cache(pool), elem);
+ if (!(pool->flags & RXE_POOL_NO_ALLOC))
+ kmem_cache_free(pool_cache(pool), elem);
atomic_dec(&pool->num_elem);
rxe_dev_put(pool->rxe);
rxe_pool_put(pool);
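rxe_add_to_pool() is the counterpart of rxe_alloc() for memory that already exists. A sketch of the resulting lifecycle for a RXE_POOL_NO_ALLOC object such as the PD (pool and helper names are from this diff; the wrapper functions are illustrative, error handling trimmed):

static int example_pd_setup(struct rxe_dev *rxe, struct rxe_pd *pd)
{
	/* pd arrives already embedded in core-allocated memory; only
	 * the pool bookkeeping and refcounts are set up here.
	 */
	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void example_pd_teardown(struct rxe_pd *pd)
{
	/* On the last reference rxe_elem_release() runs; because the
	 * pool is flagged RXE_POOL_NO_ALLOC it skips kmem_cache_free()
	 * and leaves freeing the memory to ib_core.
	 */
	rxe_drop_ref(pd);
}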
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 72968c29e01f..2f2cff1cbe43 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -41,6 +41,7 @@ enum rxe_pool_flags {
RXE_POOL_ATOMIC = BIT(0),
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
+ RXE_POOL_NO_ALLOC = BIT(4),
};
enum rxe_elem_type {
@@ -131,6 +132,9 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
/* allocate an object from pool */
void *rxe_alloc(struct rxe_pool *pool);
+/* connect already allocated object to pool */
+int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+
/* assign an index to an indexed object and insert object into
* pool's rb tree
*/
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index cc5a05124ece..051c3930e808 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -191,23 +191,20 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
return 0;
}
-static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_pd *pd;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
- pd = rxe_alloc(&rxe->pd_pool);
- return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
+ return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}
-static int rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_pd(struct ib_pd *ibpd)
{
struct rxe_pd *pd = to_rpd(ibpd);
rxe_drop_ref(pd);
- return 0;
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
@@ -1183,6 +1180,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.reg_user_mr = rxe_reg_user_mr,
.req_notify_cq = rxe_req_notify_cq,
.resize_cq = rxe_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
};
int rxe_register_device(struct rxe_dev *rxe)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 74e04801d34d..70839d3f55d9 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -66,8 +66,8 @@ struct rxe_ucontext {
};
struct rxe_pd {
+ struct ib_pd ibpd;
struct rxe_pool_entry pelem;
- struct ib_pd ibpd;
};
struct rxe_ah {
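The field reorder above is not cosmetic: the core now allocates a single buffer of sizeof(struct rxe_pd) bytes and hands it to the driver as a struct ib_pd *, so ibpd must sit at offset zero (INIT_RDMA_OBJ_SIZE enforces this at build time). The existing to_rpd() helper used in rxe_alloc_pd() then reduces to a container_of(), roughly:

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	/* With ibpd first, this container_of() is effectively a cast of
	 * the pointer the core allocated.
	 */
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}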