Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/alloc.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c   85
1 file changed, 13 insertions(+), 72 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 42198e64a7f4..6aca004e88cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -56,8 +56,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
size_t size, dma_addr_t *dma_handle,
int node)
{
+ struct device *device = mlx5_core_dma_dev(dev);
struct mlx5_priv *priv = &dev->priv;
- struct device *device = dev->device;
int original_node;
void *cpu_handle;
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}

-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
-{
- dma_addr_t t;
-
- buf->size = size;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-
- buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
- if (!buf->frags)
- return -ENOMEM;
-
- buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->frags->buf)
- goto err_out;
-
- buf->frags->map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
-
- return 0;
-err_out:
- kfree(buf->frags);
- return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf)
-{
- return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
- dma_free_coherent(dev->device, buf->size, buf->frags->buf,
- buf->frags->map);
-
- kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
{
@@ -140,7 +93,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
- dma_free_coherent(dev->device, frag_sz,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@@ -153,7 +106,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
- dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@@ -169,7 +122,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
- dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@@ -183,11 +136,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
if (!pgdir)
return NULL;
- pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
+ pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
if (!pgdir->bitmap) {
kfree(pgdir);
return NULL;
@@ -260,12 +213,6 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
-{
- return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
-}
-EXPORT_SYMBOL_GPL(mlx5_db_alloc);
-
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
u32 db_per_page = PAGE_SIZE / cache_line_size();
@@ -275,7 +222,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
- dma_free_coherent(dev->device, PAGE_SIZE,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
@@ -286,24 +233,18 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
- u64 addr;
int i;

- for (i = 0; i < buf->npages; i++) {
- addr = buf->frags->map + (i << buf->page_shift);
-
- pas[i] = cpu_to_be64(addr);
- }
+ WARN_ON(perm & 0xfc);
+ for (i = 0; i < buf->npages; i++)
+ pas[i] = cpu_to_be64(buf->frags[i].map | perm);
}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);

void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
- int i;
-
- for (i = 0; i < buf->npages; i++)
- pas[i] = cpu_to_be64(buf->frags[i].map);
+ mlx5_fill_page_frag_array_perm(buf, pas, 0);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
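
The following is a rough, self-contained userspace sketch of the packing that the new mlx5_fill_page_frag_array_perm() helper performs above: each fragment's DMA address is OR'ed with at most two low permission bits and stored big-endian in the PAS array. The struct frag, fill_pas(), the sample addresses and the 0x3 flag value are illustrative stand-ins only (not the driver's real mlx5_frag_buf layout or cpu_to_be64()), and the meaning of the low bits is device-specific.

#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's per-fragment DMA mapping. */
struct frag {
	uint64_t map;
};

/* Mirror of the helper's logic: only the two low bits may carry flags,
 * and each entry is stored big-endian, as cpu_to_be64() would do. */
static void fill_pas(const struct frag *frags, int npages,
		     uint64_t *pas, uint8_t perm)
{
	assert(!(perm & 0xfc));
	for (int i = 0; i < npages; i++)
		pas[i] = htobe64(frags[i].map | perm);
}

int main(void)
{
	/* Two page-aligned sample addresses; in the driver these come from
	 * dma_alloc_coherent(). */
	struct frag frags[2] = { { 0x100000 }, { 0x200000 } };
	uint64_t pas[2];

	fill_pas(frags, 2, pas, 0x3);	/* 0x3 is just a sample flag value */
	for (int i = 0; i < 2; i++)
		printf("pas[%d] = 0x%016llx\n", i,
		       (unsigned long long)be64toh(pas[i]));
	return 0;
}

Calling fill_pas() with perm == 0 reproduces what mlx5_fill_page_frag_array() now does, since the diff turns it into a thin wrapper around the _perm variant.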