Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/rl.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c | 266
1 file changed, 189 insertions(+), 77 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 01c380425f9d..9f8b4005f4bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -31,17 +31,15 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
/* Scheduling element fw management */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 *element_id)
{
- u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
void *schedc;
int err;
@@ -53,7 +51,7 @@ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
if (err)
return err;
@@ -66,8 +64,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 element_id,
u32 modify_bitmask)
{
- u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
void *schedc;
schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
@@ -82,14 +79,13 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id)
{
- u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};
MLX5_SET(destroy_scheduling_element_in, in, opcode,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
@@ -98,7 +94,14 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
hierarchy);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
+}
+
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+ u16 uid)
+{
+ return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+ entry->uid == uid);
}
/* Finds an entry where we can register the given rate
@@ -107,16 +110,29 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
* If the table is full, return NULL
*/
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
- struct mlx5_rate_limit *rl)
+ void *rl_in, u16 uid, bool dedicated)
{
struct mlx5_rl_entry *ret_entry = NULL;
bool empty_found = false;
int i;
+ lockdep_assert_held(&table->rl_lock);
+ WARN_ON(!table->rl_entry);
+
for (i = 0; i < table->max_size; i++) {
- if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
- return &table->rl_entry[i];
- if (!empty_found && !table->rl_entry[i].rl.rate) {
+ if (dedicated) {
+ if (!table->rl_entry[i].refcount)
+ return &table->rl_entry[i];
+ continue;
+ }
+
+ if (table->rl_entry[i].refcount) {
+ if (table->rl_entry[i].dedicated)
+ continue;
+ if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+ uid))
+ return &table->rl_entry[i];
+ } else if (!empty_found) {
empty_found = true;
ret_entry = &table->rl_entry[i];
}
@@ -126,19 +142,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
}
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
- u16 index,
- struct mlx5_rate_limit *rl)
+ struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+ void *pp_context;
+ pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
MLX5_SET(set_pp_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_PP_RATE_LIMIT);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
- MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
- MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+ if (set)
+ memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
+ return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
@@ -158,77 +174,189 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
- struct mlx5_rate_limit *rl)
+static int mlx5_rl_table_get(struct mlx5_rl_table *table)
+{
+ int i;
+
+ lockdep_assert_held(&table->rl_lock);
+
+ if (table->rl_entry) {
+ table->refcount++;
+ return 0;
+ }
+
+ table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+ GFP_KERNEL);
+ if (!table->rl_entry)
+ return -ENOMEM;
+
+ /* The index represents the index in HW rate limit table
+ * Index 0 is reserved for unlimited rate
+ */
+ for (i = 0; i < table->max_size; i++)
+ table->rl_entry[i].index = i + 1;
+
+ table->refcount++;
+ return 0;
+}
+
+static void mlx5_rl_table_put(struct mlx5_rl_table *table)
+{
+ lockdep_assert_held(&table->rl_lock);
+ if (--table->refcount)
+ return;
+
+ kfree(table->rl_entry);
+ table->rl_entry = NULL;
+}
+
+static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
+{
+ int i;
+
+ if (!table->rl_entry)
+ return;
+
+ /* Clear all configured rates */
+ for (i = 0; i < table->max_size; i++)
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false);
+ kfree(table->rl_entry);
+}
+
+static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
+{
+ entry->refcount++;
+}
+
+static void
+mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
+{
+ entry->refcount--;
+ if (!entry->refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+}
+
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
- int err = 0;
+ u32 rate;
+ int err;
- mutex_lock(&table->rl_lock);
+ if (!table->max_size)
+ return -EOPNOTSUPP;
- if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
- rl->rate, table->min_rate, table->max_rate);
- err = -EINVAL;
- goto out;
+ rate, table->min_rate, table->max_rate);
+ return -EINVAL;
}
- entry = find_rl_entry(table, rl);
+ mutex_lock(&table->rl_lock);
+ err = mlx5_rl_table_get(table);
+ if (err)
+ goto out;
+
+ entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
err = -ENOSPC;
- goto out;
+ goto rl_err;
}
- if (entry->refcount) {
- /* rate already configured */
- entry->refcount++;
- } else {
+ if (!entry->refcount) {
/* new rate limit */
- err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+ memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+ entry->uid = uid;
+ err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
- mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
- err, rl->rate, rl->max_burst_sz,
- rl->typical_pkt_sz);
- goto out;
+ mlx5_core_err(
+ dev,
+ "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, rate,
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ burst_upper_bound),
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ typical_packet_size));
+ goto rl_err;
}
- entry->rl = *rl;
- entry->refcount = 1;
+
+ entry->dedicated = dedicated_entry;
}
+ mlx5_rl_entry_get(entry);
*index = entry->index;
+ mutex_unlock(&table->rl_lock);
+ return 0;
+rl_err:
+ mlx5_rl_table_put(table);
out:
mutex_unlock(&table->rl_lock);
return err;
}
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+
+ mutex_lock(&table->rl_lock);
+ entry = &table->rl_entry[index - 1];
+ mlx5_rl_entry_put(dev, entry);
+ mlx5_rl_table_put(table);
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
+ return mlx5_rl_add_rate_raw(dev, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0,
+ false, index);
+}
EXPORT_SYMBOL(mlx5_rl_add_rate);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry = NULL;
- struct mlx5_rate_limit reset_rl = {0};
/* 0 is a reserved value for unlimited rate */
if (rl->rate == 0)
return;
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
mutex_lock(&table->rl_lock);
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0, false);
if (!entry || !entry->refcount) {
mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
goto out;
}
-
- entry->refcount--;
- if (!entry->refcount) {
- /* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
- entry->rl = reset_rl;
- }
-
+ mlx5_rl_entry_put(dev, entry);
+ mlx5_rl_table_put(table);
out:
mutex_unlock(&table->rl_lock);
}
@@ -237,31 +365,19 @@ EXPORT_SYMBOL(mlx5_rl_remove_rate);
int mlx5_init_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- int i;
- mutex_init(&table->rl_lock);
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
table->max_size = 0;
return 0;
}
+ mutex_init(&table->rl_lock);
+
/* First entry is reserved for unlimited rate */
table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
- table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
- GFP_KERNEL);
- if (!table->rl_entry)
- return -ENOMEM;
-
- /* The index represents the index in HW rate limit table
- * Index 0 is reserved for unlimited rate
- */
- for (i = 0; i < table->max_size; i++)
- table->rl_entry[i].index = i + 1;
-
- /* Index 0 is reserved */
mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
table->max_size,
table->min_rate >> 10,
@@ -273,14 +389,10 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- struct mlx5_rate_limit rl = {0};
- int i;
- /* Clear all configured rates */
- for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].rl.rate)
- mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
- &rl);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
+ return;
- kfree(dev->priv.rl_table.rl_entry);
+ mlx5_rl_table_free(dev, table);
+ mutex_destroy(&table->rl_lock);
}
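
For context, a minimal caller sketch of the raw API introduced above (the function name example_set_sq_rate and its caller are hypothetical; it simply mirrors what the mlx5_rl_add_rate() wrapper in this patch does, assuming a packet-pacing capable device):

/* Hypothetical caller: build a packet-pacing context and register it through
 * the raw interface. Identical non-dedicated contexts carrying the same UID
 * are refcounted inside the table instead of consuming a new hardware index.
 */
static int example_set_sq_rate(struct mlx5_core_dev *dev, u32 rate_mbps,
			       u16 *rl_index)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	u16 uid = MLX5_CAP_QOS(dev, packet_pacing_uid) ?
		  MLX5_SHARED_RESOURCE_UID : 0;

	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rate_mbps);

	return mlx5_rl_add_rate_raw(dev, rl_raw, uid, false, rl_index);
}

The matching teardown would call mlx5_rl_remove_rate_raw(dev, *rl_index), which drops the entry reference and, once the last rate is gone, releases the lazily allocated rate table.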