Author:    Eran Ben Elisha <eranbe@mellanox.com>  2020-05-03 11:01:39 +0300
Committer: Saeed Mahameed <saeedm@mellanox.com>   2020-05-15 15:44:15 -0700
Commit:    e7f860e2106a4c288d98308c59545ddd350e4739
Tree:      92328d715bf1e678bf498b6a7e69a295acd0a625 /drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
Parent:    net/mlx5: Dedicate fw page to the requesting function
net/mlx5: Fix a bug of releasing wrong chunks on > 4K page size systems
On systems with a page size larger than 4K, a fwp object holds several 4K chunks. Fix a bug in the fwp free flow where the chunk address was dropped and fwp->addr (the first chunk's address) was used instead. This caused a wrong update of fwp->bitmask, which can later cause errors in the re-alloc fwp chunk flow.

To fix it, re-factor the release flow:
- Free 4k: releases a specific 4K chunk inside the fwp, identified by its starting address.
- Free fwp: unconditionally releases the whole fwp and its resources.
The free 4k flow calls free fwp once all chunks have been released, in order to share code.

In addition, fix npages to correctly count all released chunks.

Fixes: c6168161f693 ("net/mlx5: Add support for release all pages event")
Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
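For illustration only (not part of the patch): a minimal user-space sketch of the bit-index bug, assuming MLX5_ADAPTER_PAGE_SHIFT is 12 (4K firmware chunks) and a 64K system page size; the addresses and the main() harness are invented for the example.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT              16	/* 64K system page, assumed for the example */
#define MLX5_ADAPTER_PAGE_SHIFT 12	/* 4K firmware chunk */
#define MLX5_U64_4K_PAGE_MASK   ((~(uint64_t)0U) << PAGE_SHIFT)

int main(void)
{
	uint64_t fwp_addr = 0xabc0000;			/* fwp->addr: first chunk of the fwp */
	uint64_t chunk_addr = fwp_addr + 3 * 4096;	/* the 4K chunk actually being freed */

	/* Old free_fwp(): index computed from fwp->addr, so always chunk 0. */
	int n_old = (fwp_addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;

	/* New free_4k(): index computed from the freed chunk's own address. */
	int n_new = (chunk_addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;

	/* Prints "old bit 0, new bit 3": the old flow marked the wrong chunk as free. */
	printf("old bit %d, new bit %d\n", n_old, n_new);
	return 0;
}

Under the same assumptions MLX5_NUM_4K_IN_PAGE is 16, so for a fwp with free_count == 3 the reworked release_all_pages() adds 16 - 3 = 13 chunks to npages (every chunk the firmware still holds), where the old code counted only 1.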
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 44
1 file changed, 22 insertions, 22 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 84f6356edbf8..5ddd18639a1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -188,35 +188,35 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
-static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+ bool in_free_list)
{
- int n = (fwp->addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
-
- fwp->free_count++;
- set_bit(n, &fwp->bitmask);
- if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
- rb_erase(&fwp->rb_node, &dev->priv.page_root);
- if (fwp->free_count != 1)
- list_del(&fwp->list);
- dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fwp->page);
- kfree(fwp);
- } else if (fwp->free_count == 1) {
- list_add(&fwp->list, &dev->priv.free_list);
- }
+ rb_erase(&fwp->rb_node, &dev->priv.page_root);
+ if (in_free_list)
+ list_del(&fwp->list);
+ dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(fwp->page);
+ kfree(fwp);
}
-static void free_addr(struct mlx5_core_dev *dev, u64 addr)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
struct fw_page *fwp;
+ int n;
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
}
- free_fwp(dev, fwp);
+ n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
+ fwp->free_count++;
+ set_bit(n, &fwp->bitmask);
+ if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+ free_fwp(dev, fwp, fwp->free_count != 1);
+ else if (fwp->free_count == 1)
+ list_add(&fwp->list, &dev->priv.free_list);
}
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
@@ -340,7 +340,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_addr(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
if (notify_fail)
@@ -361,8 +361,8 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
p = rb_next(p);
if (fwp->func_id != func_id)
continue;
- free_fwp(dev, fwp);
- npages++;
+ npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+ free_fwp(dev, fwp, fwp->free_count);
}
dev->priv.fw_pages -= npages;
@@ -446,7 +446,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
for (i = 0; i < num_claimed; i++)
- free_addr(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
if (nclaimed)
*nclaimed = num_claimed;