Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r-- | drivers/infiniband/hw/mlx5/mr.c | 2019 |
1 file changed, 1024 insertions, 995 deletions
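The patch below reworks the mlx5 MR cache into an mkey cache backed by an xarray, with a low/high water-mark refill scheme (see queue_adjust_cache_locked() in the diff). As a minimal userspace C sketch of just that hysteresis decision: field names are borrowed from the patch, the queue_adjust() helper and the returned strings are illustrative only, and the xarray storage, locking and workqueue plumbing are deliberately omitted, so this models the decision logic rather than the real driver.

/*
 * Simplified model of the refill/shrink hysteresis: once stored drops
 * below limit the entry is refilled up to 2 * limit; once it grows past
 * 2 * limit it is shrunk, lazily if creations are still in flight.
 */
#include <stdbool.h>
#include <stdio.h>

struct cache_ent {
	unsigned long stored;   /* mkeys ready to hand out */
	unsigned long reserved; /* stored + creations still in flight */
	unsigned long limit;    /* low water mark */
	bool fill_to_high_water;
};

/* Returns a description of the action instead of queueing delayed work. */
static const char *queue_adjust(struct cache_ent *ent)
{
	if (ent->stored < ent->limit) {
		/* Below the low mark: start refilling towards 2 * limit. */
		ent->fill_to_high_water = true;
		return "schedule refill now";
	}
	if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit)
		return "keep refilling until the high mark";
	if (ent->stored == 2 * ent->limit) {
		ent->fill_to_high_water = false;
		return "at the high mark, stop";
	}
	if (ent->stored > 2 * ent->limit) {
		/* Above the high mark: shrink, delayed if MRs are pending. */
		ent->fill_to_high_water = false;
		return ent->stored != ent->reserved ?
			"schedule delayed shrink" : "schedule shrink now";
	}
	return "inside the hysteresis band, do nothing";
}

int main(void)
{
	struct cache_ent ent = { .stored = 3, .reserved = 3, .limit = 8 };

	printf("%s\n", queue_adjust(&ent));  /* below limit -> refill */
	ent.stored = ent.reserved = 20;
	printf("%s\n", queue_adjust(&ent));  /* above 2*limit -> shrink */
	return 0;
}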
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 6fa0a83c19de..410cc5fd2523 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2020, Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -36,10 +37,12 @@ #include <linux/debugfs.h> #include <linux/export.h> #include <linux/delay.h> -#include <rdma/ib_umem.h> +#include <linux/dma-buf.h> +#include <linux/dma-resv.h> #include <rdma/ib_umem_odp.h> -#include <rdma/ib_verbs.h> +#include "dm.h" #include "mlx5_ib.h" +#include "umr.h" enum { MAX_PENDING_REG_MR = 8, @@ -47,166 +50,362 @@ enum { #define MLX5_UMR_ALIGN 2048 -static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); -static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); -static int mr_cache_max_order(struct mlx5_ib_dev *dev); +static void +create_mkey_callback(int status, struct mlx5_async_work *context); +static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, + u64 iova, int access_flags, + unsigned int page_size, bool populate); -static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) +static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, + struct ib_pd *pd) { - return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); + struct mlx5_ib_dev *dev = to_mdev(pd->device); + + MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); + MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); + MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); + MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); + MLX5_SET(mkc, mkc, lr, 1); + + if ((acc & IB_ACCESS_RELAXED_ORDERING) && + pcie_relaxed_ordering_enabled(dev->mdev->pdev)) { + if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) + MLX5_SET(mkc, mkc, relaxed_ordering_write, 1); + if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) + MLX5_SET(mkc, mkc, relaxed_ordering_read, 1); + } + + MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET64(mkc, mkc, start_addr, start_addr); } +static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in) +{ + u8 key = atomic_inc_return(&dev->mkey_var); + void *mkc; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, mkey_7_0, key); + *mkey = key; +} + +static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in, int inlen) +{ + int ret; + + assign_mkey_variant(dev, &mkey->key, in); + ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); + if (!ret) + init_waitqueue_head(&mkey->wait); + + return ret; +} + +static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create) +{ + struct mlx5_ib_dev *dev = async_create->ent->dev; + size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out); + + MLX5_SET(create_mkey_in, async_create->in, opcode, + MLX5_CMD_OP_CREATE_MKEY); + assign_mkey_variant(dev, &async_create->mkey, async_create->in); + return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen, + async_create->out, outlen, create_mkey_callback, + &async_create->cb_work); +} + +static int mkey_cache_max_order(struct mlx5_ib_dev *dev); +static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent); + static int destroy_mkey(struct 
mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); - return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } -static int order2idx(struct mlx5_ib_dev *dev, int order) +static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out) { - struct mlx5_mr_cache *cache = &dev->cache; + if (status == -ENXIO) /* core driver is not available */ + return; - if (order < cache->ent[0].order) - return 0; - else - return order - cache->ent[0].order; + mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); + if (status != -EREMOTEIO) /* driver specific failure */ + return; + + /* Failed in FW, print cmd out failure details */ + mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); } -static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) + +static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings, + void *to_store) { - return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= - length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); + XA_STATE(xas, &ent->mkeys, 0); + void *curr; + + xa_lock_irq(&ent->mkeys); + if (limit_pendings && + (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) { + xa_unlock_irq(&ent->mkeys); + return -EAGAIN; + } + while (1) { + /* + * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version + * doesn't transparently unlock. Instead we set the xas index to + * the current value of reserved every iteration. + */ + xas_set(&xas, ent->reserved); + curr = xas_load(&xas); + if (!curr) { + if (to_store && ent->stored == ent->reserved) + xas_store(&xas, to_store); + else + xas_store(&xas, XA_ZERO_ENTRY); + if (xas_valid(&xas)) { + ent->reserved++; + if (to_store) { + if (ent->stored != ent->reserved) + __xa_store(&ent->mkeys, + ent->stored, + to_store, + GFP_KERNEL); + ent->stored++; + queue_adjust_cache_locked(ent); + WRITE_ONCE(ent->dev->cache.last_add, + jiffies); + } + } + } + xa_unlock_irq(&ent->mkeys); + + /* + * Notice xas_nomem() must always be called as it cleans + * up any cached allocation. 
+ */ + if (!xas_nomem(&xas, GFP_KERNEL)) + break; + xa_lock_irq(&ent->mkeys); + } + if (xas_error(&xas)) + return xas_error(&xas); + if (WARN_ON(curr)) + return -EINVAL; + return 0; } -static void reg_mr_callback(int status, struct mlx5_async_work *context) +static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent) { - struct mlx5_ib_mr *mr = - container_of(context, struct mlx5_ib_mr, cb_work); - struct mlx5_ib_dev *dev = mr->dev; - struct mlx5_mr_cache *cache = &dev->cache; - int c = order2idx(dev, mr->order); - struct mlx5_cache_ent *ent = &cache->ent[c]; - u8 key; + void *old; + + ent->reserved--; + old = __xa_erase(&ent->mkeys, ent->reserved); + WARN_ON(old); +} + +static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey) +{ + void *old; + + old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0); + WARN_ON(old); + ent->stored++; +} + +static u32 pop_stored_mkey(struct mlx5_cache_ent *ent) +{ + void *old, *xa_mkey; + + ent->stored--; + ent->reserved--; + + if (ent->stored == ent->reserved) { + xa_mkey = __xa_erase(&ent->mkeys, ent->stored); + WARN_ON(!xa_mkey); + return (u32)xa_to_value(xa_mkey); + } + + xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY, + GFP_KERNEL); + WARN_ON(!xa_mkey || xa_is_err(xa_mkey)); + old = __xa_erase(&ent->mkeys, ent->reserved); + WARN_ON(old); + return (u32)xa_to_value(xa_mkey); +} + +static void create_mkey_callback(int status, struct mlx5_async_work *context) +{ + struct mlx5r_async_create_mkey *mkey_out = + container_of(context, struct mlx5r_async_create_mkey, cb_work); + struct mlx5_cache_ent *ent = mkey_out->ent; + struct mlx5_ib_dev *dev = ent->dev; unsigned long flags; - spin_lock_irqsave(&ent->lock, flags); - ent->pending--; - spin_unlock_irqrestore(&ent->lock, flags); if (status) { - mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); - kfree(mr); - dev->fill_delay = 1; + create_mkey_warn(dev, status, mkey_out->out); + kfree(mkey_out); + xa_lock_irqsave(&ent->mkeys, flags); + undo_push_reserve_mkey(ent); + WRITE_ONCE(dev->fill_delay, 1); + xa_unlock_irqrestore(&ent->mkeys, flags); mod_timer(&dev->delay_timer, jiffies + HZ); return; } - mr->mmkey.type = MLX5_MKEY_MR; - spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); - key = dev->mdev->priv.mkey_key++; - spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); - mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key; + mkey_out->mkey |= mlx5_idx_to_mkey( + MLX5_GET(create_mkey_out, mkey_out->out, mkey_index)); + WRITE_ONCE(dev->cache.last_add, jiffies); - cache->last_add = jiffies; + xa_lock_irqsave(&ent->mkeys, flags); + push_to_reserved(ent, mkey_out->mkey); + /* If we are doing fill_to_high_water then keep going. 
*/ + queue_adjust_cache_locked(ent); + xa_unlock_irqrestore(&ent->mkeys, flags); + kfree(mkey_out); +} - spin_lock_irqsave(&ent->lock, flags); - list_add_tail(&mr->list, &ent->head); - ent->cur++; - ent->size++; - spin_unlock_irqrestore(&ent->lock, flags); +static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs) +{ + int ret = 0; - if (!completion_done(&ent->compl)) - complete(&ent->compl); + switch (access_mode) { + case MLX5_MKC_ACCESS_MODE_MTT: + ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / + sizeof(struct mlx5_mtt)); + break; + case MLX5_MKC_ACCESS_MODE_KSM: + ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / + sizeof(struct mlx5_klm)); + break; + default: + WARN_ON(1); + } + return ret; } -static int add_keys(struct mlx5_ib_dev *dev, int c, int num) +static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) { - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent = &cache->ent[c]; - int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - struct mlx5_ib_mr *mr; + set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); + MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); + + MLX5_SET(mkc, mkc, translations_octword_size, + get_mkc_octo_size(ent->access_mode, ent->ndescs)); + MLX5_SET(mkc, mkc, log_page_size, ent->page); +} + +/* Asynchronously schedule new MRs to be populated in the cache. */ +static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) +{ + struct mlx5r_async_create_mkey *async_create; void *mkc; - u32 *in; int err = 0; int i; - in = kzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); for (i = 0; i < num; i++) { - if (ent->pending >= MAX_PENDING_REG_MR) { - err = -EAGAIN; - break; - } + async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey), + GFP_KERNEL); + if (!async_create) + return -ENOMEM; + mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in, + memory_key_mkey_entry); + set_cache_mkc(ent, mkc); + async_create->ent = ent; + + err = push_mkey(ent, true, NULL); + if (err) + goto free_async_create; - mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - err = -ENOMEM; - break; - } - mr->order = ent->order; - mr->allocated_from_cache = true; - mr->dev = dev; - - MLX5_SET(mkc, mkc, free, 1); - MLX5_SET(mkc, mkc, umr_en, 1); - MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); - MLX5_SET(mkc, mkc, access_mode_4_2, - (ent->access_mode >> 2) & 0x7); - - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); - MLX5_SET(mkc, mkc, log_page_size, ent->page); - - spin_lock_irq(&ent->lock); - ent->pending++; - spin_unlock_irq(&ent->lock); - err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey, - &dev->async_ctx, in, inlen, - mr->out, sizeof(mr->out), - reg_mr_callback, &mr->cb_work); + err = mlx5_ib_create_mkey_cb(async_create); if (err) { - spin_lock_irq(&ent->lock); - ent->pending--; - spin_unlock_irq(&ent->lock); - mlx5_ib_warn(dev, "create mkey failed %d\n", err); - kfree(mr); - break; + mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); + goto err_undo_reserve; } } + return 0; + +err_undo_reserve: + xa_lock_irq(&ent->mkeys); + undo_push_reserve_mkey(ent); + xa_unlock_irq(&ent->mkeys); +free_async_create: + kfree(async_create); + return err; +} + +/* Synchronously create a MR in the cache */ +static int create_cache_mkey(struct mlx5_cache_ent *ent, 
u32 *mkey) +{ + size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; + int err; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + set_cache_mkc(ent, mkc); + + err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); + if (err) + goto free_in; + + WRITE_ONCE(ent->dev->cache.last_add, jiffies); +free_in: kfree(in); return err; } -static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) +static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) { - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent = &cache->ent[c]; - struct mlx5_ib_mr *tmp_mr; - struct mlx5_ib_mr *mr; - LIST_HEAD(del_list); - int i; + u32 mkey; - for (i = 0; i < num; i++) { - spin_lock_irq(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock_irq(&ent->lock); - break; - } - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); - list_move(&mr->list, &del_list); - ent->cur--; - ent->size--; - spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); - } + lockdep_assert_held(&ent->mkeys.xa_lock); + if (!ent->stored) + return; + mkey = pop_stored_mkey(ent); + xa_unlock_irq(&ent->mkeys); + mlx5_core_destroy_mkey(ent->dev->mdev, mkey); + xa_lock_irq(&ent->mkeys); +} - list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { - list_del(&mr->list); - kfree(mr); +static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, + bool limit_fill) + __acquires(&ent->mkeys) __releases(&ent->mkeys) +{ + int err; + + lockdep_assert_held(&ent->mkeys.xa_lock); + + while (true) { + if (limit_fill) + target = ent->limit * 2; + if (target == ent->reserved) + return 0; + if (target > ent->reserved) { + u32 todo = target - ent->reserved; + + xa_unlock_irq(&ent->mkeys); + err = add_keys(ent, todo); + if (err == -EAGAIN) + usleep_range(3000, 5000); + xa_lock_irq(&ent->mkeys); + if (err) { + if (err != -EAGAIN) + return err; + } else + return 0; + } else { + remove_cache_mr_locked(ent); + } } } @@ -214,37 +413,38 @@ static ssize_t size_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct mlx5_cache_ent *ent = filp->private_data; - struct mlx5_ib_dev *dev = ent->dev; - char lbuf[20] = {0}; - u32 var; + u32 target; int err; - int c; - - count = min(count, sizeof(lbuf) - 1); - if (copy_from_user(lbuf, buf, count)) - return -EFAULT; - c = order2idx(dev, ent->order); - - if (sscanf(lbuf, "%u", &var) != 1) - return -EINVAL; - - if (var < ent->limit) - return -EINVAL; - - if (var > ent->size) { - do { - err = add_keys(dev, c, var - ent->size); - if (err && err != -EAGAIN) - return err; + err = kstrtou32_from_user(buf, count, 0, &target); + if (err) + return err; - usleep_range(3000, 5000); - } while (err); - } else if (var < ent->size) { - remove_keys(dev, c, ent->size - var); + /* + * Target is the new value of total_mrs the user requests, however we + * cannot free MRs that are in use. Compute the target value for stored + * mkeys. 
+ */ + xa_lock_irq(&ent->mkeys); + if (target < ent->in_use) { + err = -EINVAL; + goto err_unlock; + } + target = target - ent->in_use; + if (target < ent->limit || target > ent->limit*2) { + err = -EINVAL; + goto err_unlock; } + err = resize_available_mrs(ent, target, false); + if (err) + goto err_unlock; + xa_unlock_irq(&ent->mkeys); return count; + +err_unlock: + xa_unlock_irq(&ent->mkeys); + return err; } static ssize_t size_read(struct file *filp, char __user *buf, size_t count, @@ -254,7 +454,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count, char lbuf[20]; int err; - err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); + err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use); if (err < 0) return err; @@ -272,32 +472,23 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct mlx5_cache_ent *ent = filp->private_data; - struct mlx5_ib_dev *dev = ent->dev; - char lbuf[20] = {0}; u32 var; int err; - int c; - - count = min(count, sizeof(lbuf) - 1); - if (copy_from_user(lbuf, buf, count)) - return -EFAULT; - c = order2idx(dev, ent->order); - - if (sscanf(lbuf, "%u", &var) != 1) - return -EINVAL; - - if (var > ent->size) - return -EINVAL; + err = kstrtou32_from_user(buf, count, 0, &var); + if (err) + return err; + /* + * Upon set we immediately fill the cache to high water mark implied by + * the limit. + */ + xa_lock_irq(&ent->mkeys); ent->limit = var; - - if (ent->cur < ent->limit) { - err = add_keys(dev, c, 2 * ent->limit - ent->cur); - if (err) - return err; - } - + err = resize_available_mrs(ent, 0, true); + xa_unlock_irq(&ent->mkeys); + if (err) + return err; return count; } @@ -322,68 +513,120 @@ static const struct file_operations limit_fops = { .read = limit_read, }; -static int someone_adding(struct mlx5_mr_cache *cache) +static bool someone_adding(struct mlx5_mkey_cache *cache) { - int i; + unsigned int i; + + for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) { + struct mlx5_cache_ent *ent = &cache->ent[i]; + bool ret; - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - if (cache->ent[i].cur < cache->ent[i].limit) - return 1; + xa_lock_irq(&ent->mkeys); + ret = ent->stored < ent->limit; + xa_unlock_irq(&ent->mkeys); + if (ret) + return true; } + return false; +} - return 0; +/* + * Check if the bucket is outside the high/low water mark and schedule an async + * update. The cache refill has hysteresis, once the low water mark is hit it is + * refilled up to the high mark. + */ +static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) +{ + lockdep_assert_held(&ent->mkeys.xa_lock); + + if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) + return; + if (ent->stored < ent->limit) { + ent->fill_to_high_water = true; + mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); + } else if (ent->fill_to_high_water && + ent->reserved < 2 * ent->limit) { + /* + * Once we start populating due to hitting a low water mark + * continue until we pass the high water mark. 
+ */ + mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); + } else if (ent->stored == 2 * ent->limit) { + ent->fill_to_high_water = false; + } else if (ent->stored > 2 * ent->limit) { + /* Queue deletion of excess entries */ + ent->fill_to_high_water = false; + if (ent->stored != ent->reserved) + queue_delayed_work(ent->dev->cache.wq, &ent->dwork, + msecs_to_jiffies(1000)); + else + mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); + } } static void __cache_work_func(struct mlx5_cache_ent *ent) { struct mlx5_ib_dev *dev = ent->dev; - struct mlx5_mr_cache *cache = &dev->cache; - int i = order2idx(dev, ent->order); + struct mlx5_mkey_cache *cache = &dev->cache; int err; - if (cache->stopped) - return; + xa_lock_irq(&ent->mkeys); + if (ent->disabled) + goto out; - ent = &dev->cache.ent[i]; - if (ent->cur < 2 * ent->limit && !dev->fill_delay) { - err = add_keys(dev, i, 1); - if (ent->cur < 2 * ent->limit) { - if (err == -EAGAIN) { - mlx5_ib_dbg(dev, "returned eagain, order %d\n", - i + 2); - queue_delayed_work(cache->wq, &ent->dwork, - msecs_to_jiffies(3)); - } else if (err) { - mlx5_ib_warn(dev, "command failed order %d, err %d\n", - i + 2, err); + if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && + !READ_ONCE(dev->fill_delay)) { + xa_unlock_irq(&ent->mkeys); + err = add_keys(ent, 1); + xa_lock_irq(&ent->mkeys); + if (ent->disabled) + goto out; + if (err) { + /* + * EAGAIN only happens if there are pending MRs, so we + * will be rescheduled when storing them. The only + * failure path here is ENOMEM. + */ + if (err != -EAGAIN) { + mlx5_ib_warn( + dev, + "command failed order %d, err %d\n", + ent->order, err); queue_delayed_work(cache->wq, &ent->dwork, msecs_to_jiffies(1000)); - } else { - queue_work(cache->wq, &ent->work); } } - } else if (ent->cur > 2 * ent->limit) { + } else if (ent->stored > 2 * ent->limit) { + bool need_delay; + /* - * The remove_keys() logic is performed as garbage collection - * task. Such task is intended to be run when no other active - * processes are running. + * The remove_cache_mr() logic is performed as garbage + * collection task. Such task is intended to be run when no + * other active processes are running. * * The need_resched() will return TRUE if there are user tasks * to be activated in near future. * - * In such case, we don't execute remove_keys() and postpone - * the garbage collection work to try to run in next cycle, - * in order to free CPU resources to other tasks. + * In such case, we don't execute remove_cache_mr() and postpone + * the garbage collection work to try to run in next cycle, in + * order to free CPU resources to other tasks. 
*/ - if (!need_resched() && !someone_adding(cache) && - time_after(jiffies, cache->last_add + 300 * HZ)) { - remove_keys(dev, i, 1); - if (ent->cur > ent->limit) - queue_work(cache->wq, &ent->work); - } else { + xa_unlock_irq(&ent->mkeys); + need_delay = need_resched() || someone_adding(cache) || + !time_after(jiffies, + READ_ONCE(cache->last_add) + 300 * HZ); + xa_lock_irq(&ent->mkeys); + if (ent->disabled) + goto out; + if (need_delay) { queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); + goto out; } + remove_cache_mr_locked(ent); + queue_adjust_cache_locked(ent); } +out: + xa_unlock_irq(&ent->mkeys); } static void delayed_cache_work_func(struct work_struct *work) @@ -394,157 +637,64 @@ static void delayed_cache_work_func(struct work_struct *work) __cache_work_func(ent); } -static void cache_work_func(struct work_struct *work) +struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, + struct mlx5_cache_ent *ent, + int access_flags) { - struct mlx5_cache_ent *ent; - - ent = container_of(work, struct mlx5_cache_ent, work); - __cache_work_func(ent); -} - -struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) -{ - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; int err; - if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) { - mlx5_ib_err(dev, "cache entry %d is out of range\n", entry); - return ERR_PTR(-EINVAL); - } - - ent = &cache->ent[entry]; - while (1) { - spin_lock_irq(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock_irq(&ent->lock); - - err = add_keys(dev, entry, 1); - if (err && err != -EAGAIN) - return ERR_PTR(err); - - wait_for_completion(&ent->compl); - } else { - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, - list); - list_del(&mr->list); - ent->cur--; - spin_unlock_irq(&ent->lock); - if (ent->cur < ent->limit) - queue_work(cache->wq, &ent->work); - return mr; - } - } -} - -static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) -{ - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_ib_mr *mr = NULL; - struct mlx5_cache_ent *ent; - int last_umr_cache_entry; - int c; - int i; + if (!mlx5r_umr_can_reconfig(dev, 0, access_flags)) + return ERR_PTR(-EOPNOTSUPP); - c = order2idx(dev, order); - last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev)); - if (c < 0 || c > last_umr_cache_entry) { - mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); - return NULL; - } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); - for (i = c; i <= last_umr_cache_entry; i++) { - ent = &cache->ent[i]; + xa_lock_irq(&ent->mkeys); + ent->in_use++; - mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); - - spin_lock_irq(&ent->lock); - if (!list_empty(&ent->head)) { - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, - list); - list_del(&mr->list); - ent->cur--; - spin_unlock_irq(&ent->lock); - if (ent->cur < ent->limit) - queue_work(cache->wq, &ent->work); - break; + if (!ent->stored) { + queue_adjust_cache_locked(ent); + ent->miss++; + xa_unlock_irq(&ent->mkeys); + err = create_cache_mkey(ent, &mr->mmkey.key); + if (err) { + xa_lock_irq(&ent->mkeys); + ent->in_use--; + xa_unlock_irq(&ent->mkeys); + kfree(mr); + return ERR_PTR(err); } - spin_unlock_irq(&ent->lock); - - queue_work(cache->wq, &ent->work); + } else { + mr->mmkey.key = pop_stored_mkey(ent); + queue_adjust_cache_locked(ent); + xa_unlock_irq(&ent->mkeys); } - - if (!mr) - cache->ent[c].miss++; - + mr->mmkey.cache_ent = ent; + mr->mmkey.type = MLX5_MKEY_MR; + 
init_waitqueue_head(&mr->mmkey.wait); return mr; } -void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) -{ - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent; - int shrink = 0; - int c; - - if (!mr->allocated_from_cache) - return; - - c = order2idx(dev, mr->order); - WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES); - - if (mlx5_mr_cache_invalidate(mr)) { - mr->allocated_from_cache = false; - destroy_mkey(dev, mr); - ent = &cache->ent[c]; - if (ent->cur < ent->limit) - queue_work(cache->wq, &ent->work); - return; - } - - ent = &cache->ent[c]; - spin_lock_irq(&ent->lock); - list_add_tail(&mr->list, &ent->head); - ent->cur++; - if (ent->cur > 2 * ent->limit) - shrink = 1; - spin_unlock_irq(&ent->lock); - - if (shrink) - queue_work(cache->wq, &ent->work); -} - static void clean_keys(struct mlx5_ib_dev *dev, int c) { - struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_mkey_cache *cache = &dev->cache; struct mlx5_cache_ent *ent = &cache->ent[c]; - struct mlx5_ib_mr *tmp_mr; - struct mlx5_ib_mr *mr; - LIST_HEAD(del_list); + u32 mkey; cancel_delayed_work(&ent->dwork); - while (1) { - spin_lock_irq(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock_irq(&ent->lock); - break; - } - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); - list_move(&mr->list, &del_list); - ent->cur--; - ent->size--; - spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); - } - - list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { - list_del(&mr->list); - kfree(mr); + xa_lock_irq(&ent->mkeys); + while (ent->stored) { + mkey = pop_stored_mkey(ent); + xa_unlock_irq(&ent->mkeys); + mlx5_core_destroy_mkey(dev->mdev, mkey); + xa_lock_irq(&ent->mkeys); } + xa_unlock_irq(&ent->mkeys); } -static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) +static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) { if (!mlx5_debugfs_root || dev->is_rep) return; @@ -553,9 +703,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) dev->cache.root = NULL; } -static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) +static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev) { - struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_mkey_cache *cache = &dev->cache; struct mlx5_cache_ent *ent; struct dentry *dir; int i; @@ -563,15 +713,15 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root || dev->is_rep) return; - cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); + cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev)); - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) { ent = &cache->ent[i]; sprintf(ent->name, "%d", ent->order); dir = debugfs_create_dir(ent->name, cache->root); debugfs_create_file("size", 0600, dir, ent, &size_fops); debugfs_create_file("limit", 0600, dir, ent, &limit_fops); - debugfs_create_u32("cur", 0400, dir, &ent->cur); + debugfs_create_ulong("cur", 0400, dir, &ent->stored); debugfs_create_u32("miss", 0600, dir, &ent->miss); } } @@ -580,12 +730,12 @@ static void delay_time_func(struct timer_list *t) { struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer); - dev->fill_delay = 0; + WRITE_ONCE(dev->fill_delay, 0); } -int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) +int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) { - struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_mkey_cache *cache = &dev->cache; struct mlx5_cache_ent *ent; int i; @@ 
-598,58 +748,62 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); timer_setup(&dev->delay_timer, delay_time_func, 0); - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) { ent = &cache->ent[i]; - INIT_LIST_HEAD(&ent->head); - spin_lock_init(&ent->lock); + xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); ent->order = i + 2; ent->dev = dev; ent->limit = 0; - init_completion(&ent->compl); - INIT_WORK(&ent->work, cache_work_func); INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); - if (i > MR_CACHE_LAST_STD_ENTRY) { - mlx5_odp_init_mr_cache_entry(ent); + if (i > MKEY_CACHE_LAST_STD_ENTRY) { + mlx5_odp_init_mkey_cache_entry(ent); continue; } - if (ent->order > mr_cache_max_order(dev)) + if (ent->order > mkey_cache_max_order(dev)) continue; ent->page = PAGE_SHIFT; - ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / - MLX5_IB_UMR_OCTOWORD; + ent->ndescs = 1 << ent->order; ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; - if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && - !dev->is_rep && - mlx5_core_is_pf(dev->mdev)) - ent->limit = dev->mdev->profile->mr_cache[i].limit; + if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && + !dev->is_rep && mlx5_core_is_pf(dev->mdev) && + mlx5r_umr_can_load_pas(dev, 0)) + ent->limit = dev->mdev->profile.mr_cache[i].limit; else ent->limit = 0; - queue_work(cache->wq, &ent->work); + xa_lock_irq(&ent->mkeys); + queue_adjust_cache_locked(ent); + xa_unlock_irq(&ent->mkeys); } - mlx5_mr_cache_debugfs_init(dev); + mlx5_mkey_cache_debugfs_init(dev); return 0; } -int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) +int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) { - int i; + unsigned int i; if (!dev->cache.wq) return 0; - dev->cache.stopped = 1; - flush_workqueue(dev->cache.wq); + for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) { + struct mlx5_cache_ent *ent = &dev->cache.ent[i]; - mlx5_mr_cache_debugfs_cleanup(dev); + xa_lock_irq(&ent->mkeys); + ent->disabled = true; + xa_unlock_irq(&ent->mkeys); + cancel_delayed_work_sync(&ent->dwork); + } + + mlx5_mkey_cache_debugfs_cleanup(dev); mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) + for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) clean_keys(dev, i); destroy_workqueue(dev->cache.wq); @@ -658,34 +812,10 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) return 0; } -static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, - struct ib_pd *pd) -{ - struct mlx5_ib_dev *dev = to_mdev(pd->device); - - MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); - MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); - MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); - MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); - MLX5_SET(mkc, mkc, lr, 1); - - if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) - MLX5_SET(mkc, mkc, relaxed_ordering_write, - !!(acc & IB_ACCESS_RELAXED_ORDERING)); - if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) - MLX5_SET(mkc, mkc, relaxed_ordering_read, - !!(acc & IB_ACCESS_RELAXED_ORDERING)); - - MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET64(mkc, mkc, start_addr, start_addr); -} - struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) { struct mlx5_ib_dev *dev = to_mdev(pd->device); int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_ib_mr *mr; void *mkc; u32 *in; @@ -705,9 +835,10 @@ struct ib_mr 
*mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, length64, 1); - set_mkc_access_pd_addr_fields(mkc, acc, 0, pd); + set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0, + pd); - err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); + err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); if (err) goto err_in; @@ -739,301 +870,98 @@ static int get_octo_len(u64 addr, u64 len, int page_shift) return (npages + 1) / 2; } -static int mr_cache_max_order(struct mlx5_ib_dev *dev) +static int mkey_cache_max_order(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) - return MR_CACHE_LAST_STD_ENTRY + 2; + return MKEY_CACHE_LAST_STD_ENTRY + 2; return MLX5_MAX_UMR_SHIFT; } -static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length, - int access_flags, struct ib_umem **umem, int *npages, - int *page_shift, int *ncont, int *order) +static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev, + unsigned int order) { - struct ib_umem *u; + struct mlx5_mkey_cache *cache = &dev->cache; - *umem = NULL; - - if (access_flags & IB_ACCESS_ON_DEMAND) { - struct ib_umem_odp *odp; - - odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, - &mlx5_mn_ops); - if (IS_ERR(odp)) { - mlx5_ib_dbg(dev, "umem get failed (%ld)\n", - PTR_ERR(odp)); - return PTR_ERR(odp); - } - - u = &odp->umem; - - *page_shift = odp->page_shift; - *ncont = ib_umem_odp_num_pages(odp); - *npages = *ncont << (*page_shift - PAGE_SHIFT); - if (order) - *order = ilog2(roundup_pow_of_two(*ncont)); - } else { - u = ib_umem_get(&dev->ib_dev, start, length, access_flags); - if (IS_ERR(u)) { - mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u)); - return PTR_ERR(u); - } - - mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, - page_shift, ncont, order); - } - - if (!*npages) { - mlx5_ib_warn(dev, "avoid zero region\n"); - ib_umem_release(u); - return -EINVAL; - } - - *umem = u; - - mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", - *npages, *ncont, *order, *page_shift); - - return 0; -} - -static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) -{ - struct mlx5_ib_umr_context *context = - container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); - - context->status = wc->status; - complete(&context->done); + if (order < cache->ent[0].order) + return &cache->ent[0]; + order = order - cache->ent[0].order; + if (order > MKEY_CACHE_LAST_STD_ENTRY) + return NULL; + return &cache->ent[order]; } -static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) +static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, + u64 length, int access_flags, u64 iova) { - context->cqe.done = mlx5_ib_umr_done; - context->status = -1; - init_completion(&context->done); + mr->ibmr.lkey = mr->mmkey.key; + mr->ibmr.rkey = mr->mmkey.key; + mr->ibmr.length = length; + mr->ibmr.device = &dev->ib_dev; + mr->ibmr.iova = iova; + mr->access_flags = access_flags; } -static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, - struct mlx5_umr_wr *umrwr) +static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, + u64 iova) { - struct umr_common *umrc = &dev->umrc; - const struct ib_send_wr *bad; - int err; - struct mlx5_ib_umr_context umr_context; - - mlx5_ib_init_umr_context(&umr_context); - umrwr->wr.wr_cqe = &umr_context.cqe; - - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr->wr, &bad); - if (err) { - 
mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); - } else { - wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "reg umr failed (%u)\n", - umr_context.status); - err = -EFAULT; - } - } - up(&umrc->sem); - return err; + /* + * The alignment of iova has already been checked upon entering + * UVERBS_METHOD_REG_DMABUF_MR + */ + umem->iova = iova; + return PAGE_SIZE; } -static struct mlx5_ib_mr *alloc_mr_from_cache( - struct ib_pd *pd, struct ib_umem *umem, - u64 virt_addr, u64 len, int npages, - int page_shift, int order, int access_flags) +static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, + struct ib_umem *umem, u64 iova, + int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; - int err = 0; - int i; - - for (i = 0; i < 1; i++) { - mr = alloc_cached_mr(dev, order); - if (mr) - break; + unsigned int page_size; - err = add_keys(dev, order2idx(dev, order), 1); - if (err && err != -EAGAIN) { - mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); - break; - } + if (umem->is_dmabuf) + page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); + else + page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, + 0, iova); + if (WARN_ON(!page_size)) + return ERR_PTR(-EINVAL); + ent = mkey_cache_ent_from_order( + dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size))); + /* + * Matches access in alloc_cache_mr(). If the MR can't come from the + * cache then synchronously create an uncached one. + */ + if (!ent || ent->limit == 0 || + !mlx5r_umr_can_reconfig(dev, 0, access_flags) || + mlx5_umem_needs_ats(dev, umem, access_flags)) { + mutex_lock(&dev->slow_path_mutex); + mr = reg_create(pd, umem, iova, access_flags, page_size, false); + mutex_unlock(&dev->slow_path_mutex); + return mr; } - if (!mr) - return ERR_PTR(-EAGAIN); + mr = mlx5_mr_cache_alloc(dev, ent, access_flags); + if (IS_ERR(mr)) + return mr; mr->ibmr.pd = pd; mr->umem = umem; - mr->access_flags = access_flags; - mr->desc_size = sizeof(struct mlx5_mtt); - mr->mmkey.iova = virt_addr; - mr->mmkey.size = len; - mr->mmkey.pd = to_mpd(pd)->pdn; + mr->page_shift = order_base_2(page_size); + set_mr_fields(dev, mr, umem->length, access_flags, iova); return mr; } -#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \ - MLX5_UMR_MTT_ALIGNMENT) -#define MLX5_SPARE_UMR_CHUNK 0x10000 - -int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, - int page_shift, int flags) -{ - struct mlx5_ib_dev *dev = mr->dev; - struct device *ddev = dev->ib_dev.dev.parent; - int size; - void *xlt; - dma_addr_t dma; - struct mlx5_umr_wr wr; - struct ib_sge sg; - int err = 0; - int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT) - ? sizeof(struct mlx5_klm) - : sizeof(struct mlx5_mtt); - const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; - const int page_mask = page_align - 1; - size_t pages_mapped = 0; - size_t pages_to_map = 0; - size_t pages_iter = 0; - size_t size_to_map = 0; - gfp_t gfp; - bool use_emergency_page = false; - - if ((flags & MLX5_IB_UPD_XLT_INDIRECT) && - !umr_can_use_indirect_mkey(dev)) - return -EPERM; - - /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, - * so we need to align the offset and length accordingly - */ - if (idx & page_mask) { - npages += idx & page_mask; - idx &= ~page_mask; - } - - gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? 
GFP_ATOMIC : GFP_KERNEL; - gfp |= __GFP_ZERO | __GFP_NOWARN; - - pages_to_map = ALIGN(npages, page_align); - size = desc_size * pages_to_map; - size = min_t(int, size, MLX5_MAX_UMR_CHUNK); - - xlt = (void *)__get_free_pages(gfp, get_order(size)); - if (!xlt && size > MLX5_SPARE_UMR_CHUNK) { - mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n", - size, get_order(size), MLX5_SPARE_UMR_CHUNK); - - size = MLX5_SPARE_UMR_CHUNK; - xlt = (void *)__get_free_pages(gfp, get_order(size)); - } - - if (!xlt) { - mlx5_ib_warn(dev, "Using XLT emergency buffer\n"); - xlt = (void *)mlx5_ib_get_xlt_emergency_page(); - size = PAGE_SIZE; - memset(xlt, 0, size); - use_emergency_page = true; - } - pages_iter = size / desc_size; - dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE); - if (dma_mapping_error(ddev, dma)) { - mlx5_ib_err(dev, "unable to map DMA during XLT update.\n"); - err = -ENOMEM; - goto free_xlt; - } - - if (mr->umem->is_odp) { - if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) { - struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); - size_t max_pages = ib_umem_odp_num_pages(odp) - idx; - - pages_to_map = min_t(size_t, pages_to_map, max_pages); - } - } - - sg.addr = dma; - sg.lkey = dev->umrc.pd->local_dma_lkey; - - memset(&wr, 0, sizeof(wr)); - wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; - if (!(flags & MLX5_IB_UPD_XLT_ENABLE)) - wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; - wr.wr.sg_list = &sg; - wr.wr.num_sge = 1; - wr.wr.opcode = MLX5_IB_WR_UMR; - - wr.pd = mr->ibmr.pd; - wr.mkey = mr->mmkey.key; - wr.length = mr->mmkey.size; - wr.virt_addr = mr->mmkey.iova; - wr.access_flags = mr->access_flags; - wr.page_shift = page_shift; - - for (pages_mapped = 0; - pages_mapped < pages_to_map && !err; - pages_mapped += pages_iter, idx += pages_iter) { - npages = min_t(int, pages_iter, pages_to_map - pages_mapped); - size_to_map = npages * desc_size; - dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE); - if (mr->umem->is_odp) { - mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags); - } else { - __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx, - npages, xlt, - MLX5_IB_MTT_PRESENT); - /* Clear padding after the pages - * brought from the umem. - */ - memset(xlt + size_to_map, 0, size - size_to_map); - } - dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); - - sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT); - - if (pages_mapped + pages_iter >= pages_to_map) { - if (flags & MLX5_IB_UPD_XLT_ENABLE) - wr.wr.send_flags |= - MLX5_IB_SEND_UMR_ENABLE_MR | - MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS | - MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; - if (flags & MLX5_IB_UPD_XLT_PD || - flags & MLX5_IB_UPD_XLT_ACCESS) - wr.wr.send_flags |= - MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; - if (flags & MLX5_IB_UPD_XLT_ADDR) - wr.wr.send_flags |= - MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; - } - - wr.offset = idx * desc_size; - wr.xlt_size = sg.length; - - err = mlx5_ib_post_send_wait(dev, &wr); - } - dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); - -free_xlt: - if (use_emergency_page) - mlx5_ib_put_xlt_emergency_page(); - else - free_pages((unsigned long)xlt, get_order(size)); - - return err; -} - /* * If ibmr is NULL it will be allocated by reg_create. * Else, the given ibmr will be used. 
*/ -static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, - u64 virt_addr, u64 length, - struct ib_umem *umem, int npages, - int page_shift, int access_flags, - bool populate) +static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, + u64 iova, int access_flags, + unsigned int page_size, bool populate) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr; @@ -1044,67 +972,66 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, int err; bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); - mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL); + if (!page_size) + return ERR_PTR(-EINVAL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->ibmr.pd = pd; mr->access_flags = access_flags; + mr->page_shift = order_base_2(page_size); inlen = MLX5_ST_SZ_BYTES(create_mkey_in); if (populate) - inlen += sizeof(*pas) * roundup(npages, 2); + inlen += sizeof(*pas) * + roundup(ib_umem_num_dma_blocks(umem, page_size), 2); in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_1; } pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - if (populate && !(access_flags & IB_ACCESS_ON_DEMAND)) - mlx5_ib_populate_pas(dev, umem, page_shift, pas, + if (populate) { + if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) { + err = -EINVAL; + goto err_2; + } + mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, pg_cap ? MLX5_IB_MTT_PRESENT : 0); + } /* The pg_access bit allows setting the access flags * in the page list submitted with the command. */ MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + set_mkc_access_pd_addr_fields(mkc, access_flags, iova, + populate ? pd : dev->umrc.pd); MLX5_SET(mkc, mkc, free, !populate); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); - if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) - MLX5_SET(mkc, mkc, relaxed_ordering_write, - !!(access_flags & IB_ACCESS_RELAXED_ORDERING)); - if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) - MLX5_SET(mkc, mkc, relaxed_ordering_read, - !!(access_flags & IB_ACCESS_RELAXED_ORDERING)); - MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); - MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); - MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); - MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE)); - MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, umr_en, 1); - MLX5_SET64(mkc, mkc, start_addr, virt_addr); - MLX5_SET64(mkc, mkc, len, length); - MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET64(mkc, mkc, len, umem->length); MLX5_SET(mkc, mkc, bsf_octword_size, 0); MLX5_SET(mkc, mkc, translations_octword_size, - get_octo_len(virt_addr, length, page_shift)); - MLX5_SET(mkc, mkc, log_page_size, page_shift); - MLX5_SET(mkc, mkc, qpn, 0xffffff); + get_octo_len(iova, umem->length, mr->page_shift)); + MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); + if (mlx5_umem_needs_ats(dev, umem, access_flags)) + MLX5_SET(mkc, mkc, ma_translation_mode, 1); if (populate) { MLX5_SET(create_mkey_in, in, translations_octword_actual_size, - get_octo_len(virt_addr, length, page_shift)); + get_octo_len(iova, umem->length, mr->page_shift)); } - err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); + err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); if (err) { mlx5_ib_warn(dev, "create mkey failed\n"); goto err_2; } mr->mmkey.type = MLX5_MKEY_MR; - mr->desc_size = sizeof(struct 
mlx5_mtt); - mr->dev = dev; + mr->umem = umem; + set_mr_fields(dev, mr, umem->length, access_flags, iova); kvfree(in); mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); @@ -1113,31 +1040,16 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, err_2: kvfree(in); - err_1: - if (!ibmr) - kfree(mr); - + kfree(mr); return ERR_PTR(err); } -static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, - int npages, u64 length, int access_flags) -{ - mr->npages = npages; - atomic_add(npages, &dev->mdev->priv.reg_pages); - mr->ibmr.lkey = mr->mmkey.key; - mr->ibmr.rkey = mr->mmkey.key; - mr->ibmr.length = length; - mr->access_flags = access_flags; -} - static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, u64 length, int acc, int mode) { struct mlx5_ib_dev *dev = to_mdev(pd->device); int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_ib_mr *mr; void *mkc; u32 *in; @@ -1160,14 +1072,13 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, MLX5_SET64(mkc, mkc, len, length); set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd); - err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); + err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); if (err) goto err_in; kfree(in); - mr->umem = NULL; - set_mr_fields(dev, mr, 0, length, acc); + set_mr_fields(dev, mr, length, acc, start_addr); return &mr->ibmr; @@ -1188,7 +1099,8 @@ int mlx5_ib_advise_mr(struct ib_pd *pd, struct uverbs_attr_bundle *attrs) { if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH && - advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE) + advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && + advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT) return -EOPNOTSUPP; return mlx5_ib_advise_mr_prefetch(pd, advice, flags, @@ -1214,6 +1126,7 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, break; case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: + case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM: if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) return ERR_PTR(-EINVAL); @@ -1227,271 +1140,383 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, attr->access_flags, mode); } -struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int access_flags, - struct ib_udata *udata) +static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem, + u64 iova, int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr = NULL; - bool use_umr; - struct ib_umem *umem; - int page_shift; - int npages; - int ncont; - int order; + bool xlt_with_umr; int err; - if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) - return ERR_PTR(-EOPNOTSUPP); + xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length); + if (xlt_with_umr) { + mr = alloc_cacheable_mr(pd, umem, iova, access_flags); + } else { + unsigned int page_size = mlx5_umem_find_best_pgsz( + umem, mkc, log_page_size, 0, iova); + + mutex_lock(&dev->slow_path_mutex); + mr = reg_create(pd, umem, iova, access_flags, page_size, true); + mutex_unlock(&dev->slow_path_mutex); + } + if (IS_ERR(mr)) { + ib_umem_release(umem); + return ERR_CAST(mr); + } + + mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); + + atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); + + if (xlt_with_umr) { + /* + * If the MR was created with reg_create then it will be + * configured properly but left disabled. 
It is safe to go ahead + * and configure it again via UMR while enabling it. + */ + err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); + if (err) { + mlx5_ib_dereg_mr(&mr->ibmr, NULL); + return ERR_PTR(err); + } + } + return &mr->ibmr; +} + +static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length, + u64 iova, int access_flags, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_umem_odp *odp; + struct mlx5_ib_mr *mr; + int err; - mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", - start, virt_addr, length, access_flags); + if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) + return ERR_PTR(-EOPNOTSUPP); - if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start && - length == U64_MAX) { - if (virt_addr != start) + err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq); + if (err) + return ERR_PTR(err); + if (!start && length == U64_MAX) { + if (iova != 0) return ERR_PTR(-EINVAL); - if (!(access_flags & IB_ACCESS_ON_DEMAND) || - !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) + if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) return ERR_PTR(-EINVAL); - mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags); + mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); if (IS_ERR(mr)) return ERR_CAST(mr); return &mr->ibmr; } - err = mr_umem_get(dev, start, length, access_flags, &umem, - &npages, &page_shift, &ncont, &order); - - if (err < 0) - return ERR_PTR(err); + /* ODP requires xlt update via umr to work. */ + if (!mlx5r_umr_can_load_pas(dev, length)) + return ERR_PTR(-EINVAL); - use_umr = mlx5_ib_can_use_umr(dev, true, access_flags); + odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, + &mlx5_mn_ops); + if (IS_ERR(odp)) + return ERR_CAST(odp); - if (order <= mr_cache_max_order(dev) && use_umr) { - mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, - page_shift, order, access_flags); - if (PTR_ERR(mr) == -EAGAIN) { - mlx5_ib_dbg(dev, "cache empty for order %d\n", order); - mr = NULL; - } - } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { - if (access_flags & IB_ACCESS_ON_DEMAND) { - err = -EINVAL; - pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); - goto error; - } - use_umr = false; + mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); + if (IS_ERR(mr)) { + ib_umem_release(&odp->umem); + return ERR_CAST(mr); } + xa_init(&mr->implicit_children); - if (!mr) { - mutex_lock(&dev->slow_path_mutex); - mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, - page_shift, access_flags, !use_umr); - mutex_unlock(&dev->slow_path_mutex); - } + odp->private = mr; + err = mlx5r_store_odp_mkey(dev, &mr->mmkey); + if (err) + goto err_dereg_mr; - if (IS_ERR(mr)) { - err = PTR_ERR(mr); - goto error; - } + err = mlx5_ib_init_odp_mr(mr); + if (err) + goto err_dereg_mr; + return &mr->ibmr; - mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); +err_dereg_mr: + mlx5_ib_dereg_mr(&mr->ibmr, NULL); + return ERR_PTR(err); +} - mr->umem = umem; - set_mr_fields(dev, mr, npages, length, access_flags); +struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 iova, int access_flags, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_umem *umem; - if (use_umr) { - int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) + return ERR_PTR(-EOPNOTSUPP); - if (access_flags & IB_ACCESS_ON_DEMAND) - 
update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP; + mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n", + start, iova, length, access_flags); - err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, - update_xlt_flags); + if (access_flags & IB_ACCESS_ON_DEMAND) + return create_user_odp_mr(pd, start, length, iova, access_flags, + udata); + umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); + if (IS_ERR(umem)) + return ERR_CAST(umem); + return create_real_mr(pd, umem, iova, access_flags); +} - if (err) { - dereg_mr(dev, mr); - return ERR_PTR(err); - } +static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach) +{ + struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; + struct mlx5_ib_mr *mr = umem_dmabuf->private; + + dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); + + if (!umem_dmabuf->sgt) + return; + + mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); + ib_umem_dmabuf_unmap_pages(umem_dmabuf); +} + +static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = { + .allow_peer2peer = 1, + .move_notify = mlx5_ib_dmabuf_invalidate_cb, +}; + +struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, + u64 length, u64 virt_addr, + int fd, int access_flags, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_mr *mr = NULL; + struct ib_umem_dmabuf *umem_dmabuf; + int err; + + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || + !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) + return ERR_PTR(-EOPNOTSUPP); + + mlx5_ib_dbg(dev, + "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n", + offset, virt_addr, length, fd, access_flags); + + /* dmabuf requires xlt update via umr to work. */ + if (!mlx5r_umr_can_load_pas(dev, length)) + return ERR_PTR(-EINVAL); + + umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, + access_flags, + &mlx5_ib_dmabuf_attach_ops); + if (IS_ERR(umem_dmabuf)) { + mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n", + PTR_ERR(umem_dmabuf)); + return ERR_CAST(umem_dmabuf); } - if (is_odp_mr(mr)) { - to_ib_umem_odp(mr->umem)->private = mr; - atomic_set(&mr->num_deferred_work, 0); - err = xa_err(xa_store(&dev->odp_mkeys, - mlx5_base_mkey(mr->mmkey.key), &mr->mmkey, - GFP_KERNEL)); - if (err) { - dereg_mr(dev, mr); - return ERR_PTR(err); - } + mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, + access_flags); + if (IS_ERR(mr)) { + ib_umem_release(&umem_dmabuf->umem); + return ERR_CAST(mr); } + mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); + + atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); + umem_dmabuf->private = mr; + err = mlx5r_store_odp_mkey(dev, &mr->mmkey); + if (err) + goto err_dereg_mr; + + err = mlx5_ib_init_dmabuf_mr(mr); + if (err) + goto err_dereg_mr; return &mr->ibmr; -error: - ib_umem_release(umem); + +err_dereg_mr: + mlx5_ib_dereg_mr(&mr->ibmr, NULL); return ERR_PTR(err); } -/** - * mlx5_mr_cache_invalidate - Fence all DMA on the MR - * @mr: The MR to fence - * - * Upon return the NIC will not be doing any DMA to the pages under the MR, - * and any DMA inprogress will be completed. Failure of this function - * indicates the HW has failed catastrophically. +/* + * True if the change in access flags can be done via UMR, only some access + * flags can be updated. 
*/ -int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr) +static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev, + unsigned int current_access_flags, + unsigned int target_access_flags) { - struct mlx5_umr_wr umrwr = {}; + unsigned int diffs = current_access_flags ^ target_access_flags; - if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) - return 0; + if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING)) + return false; + return mlx5r_umr_can_reconfig(dev, current_access_flags, + target_access_flags); +} - umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | - MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; - umrwr.wr.opcode = MLX5_IB_WR_UMR; - umrwr.pd = mr->dev->umrc.pd; - umrwr.mkey = mr->mmkey.key; - umrwr.ignore_free_state = 1; +static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, + struct ib_umem *new_umem, + int new_access_flags, u64 iova, + unsigned long *page_size) +{ + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); - return mlx5_ib_post_send_wait(mr->dev, &umrwr); + /* We only track the allocated sizes of MRs from the cache */ + if (!mr->mmkey.cache_ent) + return false; + if (!mlx5r_umr_can_load_pas(dev, new_umem->length)) + return false; + + *page_size = + mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova); + if (WARN_ON(!*page_size)) + return false; + return (1ULL << mr->mmkey.cache_ent->order) >= + ib_umem_num_dma_blocks(new_umem, *page_size); } -static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, - int access_flags, int flags) +static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, + int access_flags, int flags, struct ib_umem *new_umem, + u64 iova, unsigned long page_size) { - struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_umr_wr umrwr = {}; + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); + int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE; + struct ib_umem *old_umem = mr->umem; int err; - umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; - - umrwr.wr.opcode = MLX5_IB_WR_UMR; - umrwr.mkey = mr->mmkey.key; + /* + * To keep everything simple the MR is revoked before we start to mess + * with it. This ensure the change is atomic relative to any use of the + * MR. + */ + err = mlx5r_umr_revoke_mr(mr); + if (err) + return err; - if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { - umrwr.pd = pd; - umrwr.access_flags = access_flags; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; + if (flags & IB_MR_REREG_PD) { + mr->ibmr.pd = pd; + upd_flags |= MLX5_IB_UPD_XLT_PD; + } + if (flags & IB_MR_REREG_ACCESS) { + mr->access_flags = access_flags; + upd_flags |= MLX5_IB_UPD_XLT_ACCESS; } - err = mlx5_ib_post_send_wait(dev, &umrwr); + mr->ibmr.iova = iova; + mr->ibmr.length = new_umem->length; + mr->page_shift = order_base_2(page_size); + mr->umem = new_umem; + err = mlx5r_umr_update_mr_pas(mr, upd_flags); + if (err) { + /* + * The MR is revoked at this point so there is no issue to free + * new_umem. 
+ */ + mr->umem = old_umem; + return err; + } - return err; + atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages); + ib_umem_release(old_umem); + atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages); + return 0; } -int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, - u64 length, u64 virt_addr, int new_access_flags, - struct ib_pd *new_pd, struct ib_udata *udata) +struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 length, u64 iova, int new_access_flags, + struct ib_pd *new_pd, + struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); struct mlx5_ib_mr *mr = to_mmr(ib_mr); - struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; - int access_flags = flags & IB_MR_REREG_ACCESS ? - new_access_flags : - mr->access_flags; - int page_shift = 0; - int upd_flags = 0; - int npages = 0; - int ncont = 0; - int order = 0; - u64 addr, len; int err; - mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", - start, virt_addr, length, access_flags); + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) + return ERR_PTR(-EOPNOTSUPP); - atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + mlx5_ib_dbg( + dev, + "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n", + start, iova, length, new_access_flags); - if (!mr->umem) - return -EINVAL; + if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) + return ERR_PTR(-EOPNOTSUPP); - if (is_odp_mr(mr)) - return -EOPNOTSUPP; + if (!(flags & IB_MR_REREG_ACCESS)) + new_access_flags = mr->access_flags; + if (!(flags & IB_MR_REREG_PD)) + new_pd = ib_mr->pd; - if (flags & IB_MR_REREG_TRANS) { - addr = virt_addr; - len = length; - } else { - addr = mr->umem->address; - len = mr->umem->length; - } + if (!(flags & IB_MR_REREG_TRANS)) { + struct ib_umem *umem; - if (flags != IB_MR_REREG_PD) { - /* - * Replace umem. This needs to be done whether or not UMR is - * used. - */ - flags |= IB_MR_REREG_TRANS; - ib_umem_release(mr->umem); - mr->umem = NULL; - err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, - &npages, &page_shift, &ncont, &order); - if (err) - goto err; - } + /* Fast path for PD/access change */ + if (can_use_umr_rereg_access(dev, mr->access_flags, + new_access_flags)) { + err = mlx5r_umr_rereg_pd_access(mr, new_pd, + new_access_flags); + if (err) + return ERR_PTR(err); + return NULL; + } + /* DM or ODP MR's don't have a normal umem so we can't re-use it */ + if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) + goto recreate; - if (!mlx5_ib_can_use_umr(dev, true, access_flags) || - (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) { /* - * UMR can't be used - MKey needs to be replaced. + * Only one active MR can refer to a umem at one time, revoke + * the old MR before assigning the umem to the new one. 
*/ - if (mr->allocated_from_cache) - err = mlx5_mr_cache_invalidate(mr); - else - err = destroy_mkey(dev, mr); + err = mlx5r_umr_revoke_mr(mr); if (err) - goto err; - - mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, - page_shift, access_flags, true); + return ERR_PTR(err); + umem = mr->umem; + mr->umem = NULL; + atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); - if (IS_ERR(mr)) { - err = PTR_ERR(mr); - mr = to_mmr(ib_mr); - goto err; - } + return create_real_mr(new_pd, umem, mr->ibmr.iova, + new_access_flags); + } - mr->allocated_from_cache = false; - } else { - /* - * Send a UMR WQE - */ - mr->ibmr.pd = pd; - mr->access_flags = access_flags; - mr->mmkey.iova = addr; - mr->mmkey.size = len; - mr->mmkey.pd = to_mpd(pd)->pdn; - - if (flags & IB_MR_REREG_TRANS) { - upd_flags = MLX5_IB_UPD_XLT_ADDR; - if (flags & IB_MR_REREG_PD) - upd_flags |= MLX5_IB_UPD_XLT_PD; - if (flags & IB_MR_REREG_ACCESS) - upd_flags |= MLX5_IB_UPD_XLT_ACCESS; - err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, - upd_flags); - } else { - err = rereg_umr(pd, mr, access_flags, flags); + /* + * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does + * but the logic around releasing the umem is different + */ + if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) + goto recreate; + + if (!(new_access_flags & IB_ACCESS_ON_DEMAND) && + can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { + struct ib_umem *new_umem; + unsigned long page_size; + + new_umem = ib_umem_get(&dev->ib_dev, start, length, + new_access_flags); + if (IS_ERR(new_umem)) + return ERR_CAST(new_umem); + + /* Fast path for PAS change */ + if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, + &page_size)) { + err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, + new_umem, iova, page_size); + if (err) { + ib_umem_release(new_umem); + return ERR_PTR(err); + } + return NULL; } - - if (err) - goto err; + return create_real_mr(new_pd, new_umem, iova, new_access_flags); } - set_mr_fields(dev, mr, npages, len, access_flags); - - return 0; - -err: - ib_umem_release(mr->umem); - mr->umem = NULL; - - clean_mr(dev, mr); - return err; + /* + * Everything else has no state we can preserve, just create a new MR + * from scratch + */ +recreate: + return mlx5_ib_reg_user_mr(new_pd, start, length, iova, + new_access_flags, udata); } static int @@ -1500,6 +1525,8 @@ mlx5_alloc_priv_descs(struct ib_device *device, int ndescs, int desc_size) { + struct mlx5_ib_dev *dev = to_mdev(device); + struct device *ddev = &dev->mdev->pdev->dev; int size = ndescs * desc_size; int add_size; int ret; @@ -1512,9 +1539,8 @@ mlx5_alloc_priv_descs(struct ib_device *device, mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); - mr->desc_map = dma_map_single(device->dev.parent, mr->descs, - size, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev.parent, mr->desc_map)) { + mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); + if (dma_mapping_error(ddev, mr->desc_map)) { ret = -ENOMEM; goto err; } @@ -1529,78 +1555,94 @@ err: static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - if (mr->descs) { + if (!mr->umem && mr->descs) { struct ib_device *device = mr->ibmr.device; int size = mr->max_descs * mr->desc_size; + struct mlx5_ib_dev *dev = to_mdev(device); - dma_unmap_single(device->dev.parent, mr->desc_map, - size, DMA_TO_DEVICE); + dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, + DMA_TO_DEVICE); kfree(mr->descs_alloc); mr->descs = NULL; } } -static void clean_mr(struct mlx5_ib_dev *dev, 
struct mlx5_ib_mr *mr) +int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { - int allocated_from_cache = mr->allocated_from_cache; + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct mlx5_ib_dev *dev = to_mdev(ibmr->device); + int rc; + + /* + * Any async use of the mr must hold the refcount, once the refcount + * goes to zero no other thread, such as ODP page faults, prefetch, any + * UMR activity, etc can touch the mkey. Thus it is safe to destroy it. + */ + if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && + refcount_read(&mr->mmkey.usecount) != 0 && + xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) + mlx5r_deref_wait_odp_mkey(&mr->mmkey); + + if (ibmr->type == IB_MR_TYPE_INTEGRITY) { + xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), + mr->sig, NULL, GFP_KERNEL); + + if (mr->mtt_mr) { + rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); + if (rc) + return rc; + mr->mtt_mr = NULL; + } + if (mr->klm_mr) { + rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); + if (rc) + return rc; + mr->klm_mr = NULL; + } - if (mr->sig) { if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", mr->sig->psv_memory.psv_idx); - if (mlx5_core_destroy_psv(dev->mdev, - mr->sig->psv_wire.psv_idx)) + if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", mr->sig->psv_wire.psv_idx); - xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key)); kfree(mr->sig); mr->sig = NULL; } - if (!allocated_from_cache) { - destroy_mkey(dev, mr); - mlx5_free_priv_descs(mr); - } -} - -static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) -{ - int npages = mr->npages; - struct ib_umem *umem = mr->umem; - - /* Stop all DMA */ - if (is_odp_mr(mr)) - mlx5_ib_fence_odp_mr(mr); - else - clean_mr(dev, mr); - - if (mr->allocated_from_cache) - mlx5_mr_cache_free(dev, mr); - else - kfree(mr); - - ib_umem_release(umem); - atomic_sub(npages, &dev->mdev->priv.reg_pages); + /* Stop DMA */ + if (mr->mmkey.cache_ent) { + xa_lock_irq(&mr->mmkey.cache_ent->mkeys); + mr->mmkey.cache_ent->in_use--; + xa_unlock_irq(&mr->mmkey.cache_ent->mkeys); -} - -int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) -{ - struct mlx5_ib_mr *mmr = to_mmr(ibmr); - - if (ibmr->type == IB_MR_TYPE_INTEGRITY) { - dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr); - dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr); + if (mlx5r_umr_revoke_mr(mr) || + push_mkey(mr->mmkey.cache_ent, false, + xa_mk_value(mr->mmkey.key))) + mr->mmkey.cache_ent = NULL; + } + if (!mr->mmkey.cache_ent) { + rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); + if (rc) + return rc; } - if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) { - mlx5_ib_free_implicit_mr(mmr); - return 0; + if (mr->umem) { + bool is_odp = is_odp_mr(mr); + + if (!is_odp) + atomic_sub(ib_umem_num_pages(mr->umem), + &dev->mdev->priv.reg_pages); + ib_umem_release(mr->umem); + if (is_odp) + mlx5_ib_free_odp_mr(mr); } - dereg_mr(to_mdev(ibmr->device), mmr); + if (!mr->mmkey.cache_ent) + mlx5_free_priv_descs(mr); + kfree(mr); return 0; } @@ -1611,9 +1653,9 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + /* This is only used from the kernel, so setting the PD is OK. 
*/ + set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd); MLX5_SET(mkc, mkc, free, 1); - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); MLX5_SET(mkc, mkc, translations_octword_size, ndescs); MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3); MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); @@ -1638,7 +1680,7 @@ static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift); - err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); + err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); if (err) goto err_free_descs; @@ -1772,10 +1814,10 @@ err_free_descs: destroy_mkey(dev, mr); mlx5_free_priv_descs(mr); err_free_mtt_mr: - dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr); + mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); mr->mtt_mr = NULL; err_free_klm_mr: - dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr); + mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); mr->klm_mr = NULL; err_destroy_psv: if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) @@ -1845,7 +1887,7 @@ err_free: } struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0); } @@ -1857,15 +1899,14 @@ struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, max_num_meta_sg); } -struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, - struct ib_udata *udata) +int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) { - struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_dev *dev = to_mdev(ibmw->device); int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - struct mlx5_ib_mw *mw = NULL; + struct mlx5_ib_mw *mw = to_mmw(ibmw); + unsigned int ndescs; u32 *in = NULL; void *mkc; - int ndescs; int err; struct mlx5_ib_alloc_mw req = {}; struct { @@ -1875,21 +1916,20 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); if (err) - return ERR_PTR(err); + return err; if (req.comp_mask || req.reserved1 || req.reserved2) - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; if (udata->inlen > sizeof(req) && !ib_is_udata_cleared(udata, sizeof(req), udata->inlen - sizeof(req))) - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; ndescs = req.num_klms ? 
roundup(req.num_klms, 4) : roundup(1, 4); - mw = kzalloc(sizeof(*mw), GFP_KERNEL); in = kzalloc(inlen, GFP_KERNEL); - if (!mw || !in) { + if (!in) { err = -ENOMEM; goto free; } @@ -1898,70 +1938,59 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, MLX5_SET(mkc, mkc, free, 1); MLX5_SET(mkc, mkc, translations_octword_size, ndescs); - MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); - MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2))); + MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); MLX5_SET(mkc, mkc, qpn, 0xffffff); - err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen); + err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); if (err) goto free; mw->mmkey.type = MLX5_MKEY_MW; - mw->ibmw.rkey = mw->mmkey.key; - mw->ndescs = ndescs; + ibmw->rkey = mw->mmkey.key; + mw->mmkey.ndescs = ndescs; - resp.response_length = min(offsetof(typeof(resp), response_length) + - sizeof(resp.response_length), udata->outlen); + resp.response_length = + min(offsetofend(typeof(resp), response_length), udata->outlen); if (resp.response_length) { err = ib_copy_to_udata(udata, &resp, resp.response_length); - if (err) { - mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); - goto free; - } + if (err) + goto free_mkey; } if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { - err = xa_err(xa_store(&dev->odp_mkeys, - mlx5_base_mkey(mw->mmkey.key), &mw->mmkey, - GFP_KERNEL)); + err = mlx5r_store_odp_mkey(dev, &mw->mmkey); if (err) goto free_mkey; } kfree(in); - return &mw->ibmw; + return 0; free_mkey: - mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); free: - kfree(mw); kfree(in); - return ERR_PTR(err); + return err; } int mlx5_ib_dealloc_mw(struct ib_mw *mw) { struct mlx5_ib_dev *dev = to_mdev(mw->device); struct mlx5_ib_mw *mmw = to_mmw(mw); - int err; - if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { - xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)); + if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && + xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key))) /* - * pagefault_single_data_segment() may be accessing mmw under - * SRCU if the user bound an ODP MR to this MW. + * pagefault_single_data_segment() may be accessing mmw + * if the user bound an ODP MR to this MW. 
*/ - synchronize_srcu(&dev->odp_srcu); - } + mlx5r_deref_wait_odp_mkey(&mmw->mmkey); - err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); - if (err) - return err; - kfree(mmw); - return 0; + return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); } int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, @@ -2018,7 +2047,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, mr->meta_length = 0; if (data_sg_nents == 1) { n++; - mr->ndescs = 1; + mr->mmkey.ndescs = 1; if (data_sg_offset) sg_offset = *data_sg_offset; mr->data_length = sg_dma_len(data_sg) - sg_offset; @@ -2071,7 +2100,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, if (sg_offset_p) *sg_offset_p = sg_offset; - mr->ndescs = i; + mr->mmkey.ndescs = i; mr->data_length = mr->ibmr.length; if (meta_sg_nents) { @@ -2104,11 +2133,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs; - if (unlikely(mr->ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs == mr->max_descs)) return -ENOMEM; descs = mr->descs; - descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); + descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); return 0; } @@ -2118,11 +2147,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs; - if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) return -ENOMEM; descs = mr->descs; - descs[mr->ndescs + mr->meta_ndescs++] = + descs[mr->mmkey.ndescs + mr->meta_ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); return 0; @@ -2138,7 +2167,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, struct mlx5_ib_mr *pi_mr = mr->mtt_mr; int n; - pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0; @@ -2172,7 +2201,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, * metadata offset at the first metadata page */ pi_mr->pi_iova = (iova & page_mask) + - pi_mr->ndescs * ibmr->page_size + + pi_mr->mmkey.ndescs * ibmr->page_size + (pi_mr->ibmr.iova & ~page_mask); /* * In order to use one MTT MR for data and metadata, we register @@ -2203,7 +2232,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, struct mlx5_ib_mr *pi_mr = mr->klm_mr; int n; - pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0; @@ -2238,7 +2267,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); - mr->ndescs = 0; + mr->mmkey.ndescs = 0; mr->data_length = 0; mr->data_iova = 0; mr->meta_ndescs = 0; @@ -2294,7 +2323,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct mlx5_ib_mr *mr = to_mmr(ibmr); int n; - mr->ndescs = 0; + mr->mmkey.ndescs = 0; ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, mr->desc_size * mr->max_descs, |