Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  1591
1 file changed, 863 insertions(+), 728 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 217e3b351dfe..48f1fa62a4fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
@@ -225,6 +226,64 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+struct mlxsw_sp_rif_counter_set_basic {
+ u64 good_unicast_packets;
+ u64 good_multicast_packets;
+ u64 good_broadcast_packets;
+ u64 good_unicast_bytes;
+ u64 good_multicast_bytes;
+ u64 good_broadcast_bytes;
+ u64 error_packets;
+ u64 discard_packets;
+ u64 error_bytes;
+ u64 discard_bytes;
+};
+
+static int
+mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ struct mlxsw_sp_rif_counter_set_basic *set)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ int err;
+
+ if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+
+ if (!set)
+ return 0;
+
+#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
+ (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
+
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
+
+#undef MLXSW_SP_RIF_COUNTER_EXTRACT
+
+ return 0;
+}
+
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
@@ -235,16 +294,20 @@ static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
int err;
+ if (mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return 0;
+
p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
if (!p_counter_index)
return -EINVAL;
+
err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
p_counter_index);
if (err)
@@ -268,10 +331,10 @@ err_counter_clear:
return err;
}
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
@@ -296,14 +359,12 @@ static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
if (!devlink_dpipe_table_counter_enabled(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
return;
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
@@ -382,72 +443,19 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
-static struct mlxsw_sp_fib_entry_priv *
-mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- struct mlxsw_sp_fib_entry_priv *priv;
-
- if (!ll_ops->fib_entry_priv_size)
- /* No need to have priv */
- return NULL;
-
- priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- refcount_set(&priv->refcnt, 1);
- return priv;
-}
-
-static void
-mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
-{
- kfree(priv);
-}
-
-static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
-{
- refcount_inc(&priv->refcnt);
-}
-
-static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv || !refcount_dec_and_test(&priv->refcnt))
- return;
- mlxsw_sp_fib_entry_priv_destroy(priv);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv)
- return;
- mlxsw_sp_fib_entry_priv_hold(priv);
- list_add(&priv->list, &op_ctx->fib_entry_priv_list);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_priv *priv, *tmp;
-
- list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
- mlxsw_sp_fib_entry_priv_put(priv);
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
- struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
struct fib_info *fi;
u32 tb_id;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -476,7 +484,6 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
- const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -490,45 +497,16 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
-static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- return 0;
-}
-
-static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
- xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
- xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
-}
-
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
- err = ll_ops->init(mlxsw_sp, vr->id, proto);
- if (err)
- return ERR_PTR(err);
-
lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
@@ -540,7 +518,6 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
- fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -579,36 +556,33 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralst_pl[MLXSW_REG_XRALST_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -616,20 +590,19 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -640,11 +613,12 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -655,15 +629,14 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -671,7 +644,6 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -685,7 +657,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -696,11 +668,8 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops =
- mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
-
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -790,23 +759,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -1307,6 +1276,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
addr_prefix_len = 32;
break;
case MLXSW_SP_L3_PROTO_IPV6:
+ addrp = &addr->addr6;
+ addr_len = 16;
+ addr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -1465,8 +1438,8 @@ static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
return false;
}
-bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
+static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
{
return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
@@ -1510,16 +1483,10 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
+static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
{
- bool is_ipip_ul;
-
- mutex_lock(&mlxsw_sp->router->lock);
- is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return is_ipip_ul;
+ return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}
static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
@@ -1852,7 +1819,7 @@ void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
/* The configuration where several tunnels have the same local address in the
* same underlay table needs special treatment in the HW. That is currently not
* implemented in the driver. This function finds and demotes the first tunnel
- * with a given source address, except the one passed in in the argument
+ * with a given source address, except the one passed in the argument
* `except'.
*/
bool
@@ -1895,16 +1862,15 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
}
}
-int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
- struct net_device *ol_dev,
- unsigned long event,
- struct netdev_notifier_info *info)
+static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
{
struct netdev_notifier_changeupper_info *chup;
struct netlink_ext_ack *extack;
int err = 0;
- mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_REGISTER:
err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
@@ -1935,7 +1901,6 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -1973,16 +1938,15 @@ __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-int
+static int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *ul_dev,
unsigned long event,
struct netdev_notifier_info *info)
{
struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
- int err = 0;
+ int err;
- mutex_lock(&mlxsw_sp->router->lock);
while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
ul_dev,
ipip_entry))) {
@@ -1995,7 +1959,7 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
ul_dev);
- break;
+ return err;
}
if (demote_this) {
@@ -2012,9 +1976,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
ipip_entry = prev;
}
}
- mutex_unlock(&mlxsw_sp->router->lock);
- return err;
+ return 0;
}
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
@@ -2295,6 +2258,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
goto err_neigh_entry_insert;
mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
+ atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
return neigh_entry;
@@ -2309,6 +2273,7 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
list_del(&neigh_entry->rif_list_node);
+ atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
mlxsw_sp_neigh_entry_free(neigh_entry);
@@ -2506,6 +2471,9 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
char *rauhtd_pl;
int err;
+ if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
+ return 0;
+
rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
if (!rauhtd_pl)
return -ENOMEM;
@@ -2885,6 +2853,7 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_neighs_update_work);
INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
mlxsw_sp_router_probe_unresolved_nexthops);
+ atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
return 0;
@@ -4354,6 +4323,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_nexthop_neigh_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -5321,7 +5292,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
{
const struct fib_nh *nh = fib_info_nh(fi, 0);
- return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ return nh->fib_nh_gw_family ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
@@ -5495,7 +5466,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- return !fib4_entry->tos;
+ return !fib4_entry->dscp;
}
static bool
@@ -5556,7 +5527,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -5581,7 +5552,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
@@ -5604,7 +5575,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
@@ -5717,14 +5688,13 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -5732,140 +5702,39 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
-struct mlxsw_sp_fib_entry_op_ctx_basic {
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-};
-
static void
-mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
+mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
+ const struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
- enum mlxsw_reg_ralxx_protocol ralxx_proto;
- char *ralue_pl = op_ctx_basic->ralue_pl;
- enum mlxsw_reg_ralue_op ralue_op;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+ enum mlxsw_reg_ralxx_protocol proto;
+ u32 *p_dip;
- ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
+ proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
-
- switch (proto) {
+ switch (fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, (u32 *) addr);
+ p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ *p_dip);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr);
break;
}
}
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
-}
-
-static int
-mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- op_ctx_basic->ralue_pl);
-}
-
-static bool
-mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- return true;
-}
-
-static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
-{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
-
- mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
- fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr,
- fib_entry->priv);
-}
-
-static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- bool postponed_for_bulk = false;
- int err;
-
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
- if (!postponed_for_bulk)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -5888,20 +5757,19 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -5913,64 +5781,61 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
int err;
if (WARN_ON(!ipip_entry))
@@ -5982,55 +5847,54 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
+ fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
if (err)
return err;
@@ -6040,35 +5904,18 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- bool is_new)
-{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
- MLXSW_SP_FIB_ENTRY_OP_UPDATE);
-}
-
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
-
- if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
- return 0;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- MLXSW_SP_FIB_ENTRY_OP_DELETE);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
@@ -6163,12 +6010,6 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
@@ -6186,7 +6027,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
- fib4_entry->tos = fen_info->tos;
+ fib4_entry->dscp = fen_info->dscp;
fib_entry->fib_node = fib_node;
@@ -6197,8 +6038,6 @@ err_fib4_entry_type_set:
err_nexthop_group_vr_link:
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -6213,7 +6052,6 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -6240,7 +6078,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib4_entry, common);
if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
+ fib4_entry->dscp == fen_info->dscp &&
fib4_entry->type == fen_info->type &&
fib4_entry->fi == fen_info->fi)
return fib4_entry;
@@ -6451,16 +6289,14 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_update;
@@ -6471,25 +6307,14 @@ err_fib_entry_update:
return err;
}
-static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- int err;
- err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
fib_node->fib_entry = NULL;
- return err;
-}
-
-static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -6511,7 +6336,6 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -6546,7 +6370,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -6571,23 +6395,20 @@ err_fib4_entry_create:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
- int err;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return 0;
+ return;
fib_node = fib4_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -6679,6 +6500,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
+ int err;
nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
@@ -6694,7 +6516,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_nexthop_type_init;
+
+ return 0;
+
+err_nexthop_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
}
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
@@ -6885,9 +6716,9 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
@@ -6910,8 +6741,7 @@ static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
- &fib6_entry->common, false);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_entry_update;
@@ -6935,7 +6765,6 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -6946,23 +6775,21 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
if (err)
- goto err_nexthop6_group_update;
+ goto err_rt6_unwind;
return 0;
-err_nexthop6_group_update:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
@@ -6974,7 +6801,6 @@ err_rt6_create:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -6992,7 +6818,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}
static int
@@ -7002,6 +6828,8 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
@@ -7015,6 +6843,14 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
ipip_entry);
}
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV6, &dip)) {
+ u32 tunnel_index;
+
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ }
return 0;
}
@@ -7068,19 +6904,13 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
fib6_entry->nrt6++;
@@ -7088,7 +6918,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
- goto err_nexthop6_group_get;
+ goto err_rt6_unwind;
err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
fib_node->fib);
@@ -7107,18 +6937,14 @@ err_fib6_entry_type_set:
mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
-err_nexthop6_group_get:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -7141,7 +6967,6 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
- mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -7199,8 +7024,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -7239,7 +7064,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -7263,8 +7088,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -7292,7 +7117,8 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -7303,17 +7129,16 @@ err_fib6_entry_nexthop_add:
return err;
}
-static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
- int err;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return 0;
+ return;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -7322,22 +7147,22 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return 0;
+ return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
- return 0;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ return;
}
fib_node = fib6_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static struct mlxsw_sp_mr_table *
@@ -7490,15 +7315,15 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
}
}
-struct mlxsw_sp_fib6_event {
+struct mlxsw_sp_fib6_event_work {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event {
- struct list_head list; /* node in fib queue */
+struct mlxsw_sp_fib_event_work {
+ struct work_struct work;
union {
- struct mlxsw_sp_fib6_event fib6_event;
+ struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -7507,12 +7332,11 @@ struct mlxsw_sp_fib_event {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
- int family;
};
static int
-mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -7526,8 +7350,8 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
if (!rt_arr)
return -ENOMEM;
- fib6_event->rt_arr = rt_arr;
- fib6_event->nrt6 = nrt6;
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -7549,242 +7373,182 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
}
static void
-mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
int i;
- for (i = 0; i < fib6_event->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
- kfree(fib6_event->rt_arr);
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
- &fib_event->fen_info);
+ &fib_work->fen_info);
}
- fib_info_put(fib_event->fen_info.fi);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- fib_info_put(fib_event->fen_info.fi);
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
- fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
- struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
- mr_cache_put(fib_event->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
- mr_cache_put(fib_event->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_event->ven_info);
+ &fib_work->ven_info);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
- dev_put(fib_event->ven_info.dev);
+ dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
- dev_put(fib_event->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
- struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
- struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
- struct mlxsw_sp_fib_event *next_fib_event;
- struct mlxsw_sp_fib_event *fib_event;
- int last_family = AF_UNSPEC;
- LIST_HEAD(fib_event_queue);
-
- spin_lock_bh(&router->fib_event_queue_lock);
- list_splice_init(&router->fib_event_queue, &fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
-
- /* Router lock is held here to make sure per-instance
- * operation context is not used in between FIB4/6 events
- * processing.
- */
- mutex_lock(&router->lock);
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- list_for_each_entry_safe(fib_event, next_fib_event,
- &fib_event_queue, list) {
- /* Check if the next entry in the queue exists and it is
- * of the same type (family and event) as the currect one.
- * In that case it is permitted to do the bulking
- * of multiple FIB entries to a single register write.
- */
- op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
- fib_event->family == next_fib_event->family &&
- fib_event->event == next_fib_event->event;
- op_ctx->event = fib_event->event;
-
- /* In case family of this and the previous entry are different, context
- * reinitialization is going to be needed now, indicate that.
- * Note that since last_family is initialized to AF_UNSPEC, this is always
- * going to happen for the first entry processed in the work.
- */
- if (fib_event->family != last_family)
- op_ctx->initialized = false;
-
- switch (fib_event->family) {
- case AF_INET:
- mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case AF_INET6:
- mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case RTNL_FAMILY_IP6MR:
- case RTNL_FAMILY_IPMR:
- /* Unlock here as inside FIBMR the lock is taken again
- * under RTNL. The per-instance operation context
- * is not used by FIBMR.
- */
- mutex_unlock(&router->lock);
- mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
- fib_event);
- mutex_lock(&router->lock);
- break;
- default:
- WARN_ON_ONCE(1);
- }
- last_family = fib_event->family;
- kfree(fib_event);
- cond_resched();
- }
- WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- mutex_unlock(&router->lock);
-}
-
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_event->fen_info = *fen_info;
+ fib_work->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while event is queued. Release it afterwards.
+ * freed while work is queued. Release it afterwards.
*/
- fib_info_hold(fib_event->fen_info.fi);
+ fib_info_hold(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_event->fnh_info = *fnh_info;
- fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
- fen6_info);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
if (err)
return err;
break;
@@ -7794,20 +7558,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
- mr_cache_hold(fib_event->men_info.mfc);
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
- dev_hold(fib_event->ven_info.dev);
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
@@ -7861,7 +7625,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event *fib_event;
+ struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -7893,39 +7657,37 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
}
- fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
- if (!fib_event)
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
return NOTIFY_BAD;
- fib_event->mlxsw_sp = router->mlxsw_sp;
- fib_event->event = event;
- fib_event->family = info->family;
+ fib_work->mlxsw_sp = router->mlxsw_sp;
+ fib_work->event = event;
switch (info->family) {
case AF_INET:
- mlxsw_sp_router_fib4_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
+ mlxsw_sp_router_fib4_event(fib_work, info);
break;
case AF_INET6:
- err = mlxsw_sp_router_fib6_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- mlxsw_sp_router_fibmr_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
break;
}
- /* Enqueue the event and trigger the work */
- spin_lock_bh(&router->fib_event_queue_lock);
- list_add_tail(&fib_event->list, &router->fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
- mlxsw_core_schedule_work(&router->fib_event_work);
+ mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_event);
+ kfree(fib_work);
return NOTIFY_BAD;
}
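
A minimal, hypothetical sketch of the deferred-work pattern the notifier above relies on: copy the event payload and allocate a work item with GFP_ATOMIC in (possibly atomic) notifier context, then process and free it later from a workqueue. The names below are illustrative only and are not taken from the driver.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_event_work {
	struct work_struct work;
	unsigned long event;		/* payload captured in atomic context */
};

static void example_event_work_fn(struct work_struct *work)
{
	struct example_event_work *ew =
		container_of(work, struct example_event_work, work);

	/* Sleepable context: take mutexes, talk to hardware, etc. */
	kfree(ew);
}

static int example_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct example_event_work *ew;

	/* Notifier chains may be invoked in atomic context, so only copy
	 * the payload here and defer the real processing to a workqueue.
	 */
	ew = kzalloc(sizeof(*ew), GFP_ATOMIC);
	if (!ew)
		return NOTIFY_BAD;

	ew->event = event;
	INIT_WORK(&ew->work, example_event_work_fn);
	schedule_work(&ew->work);

	return NOTIFY_DONE;
}
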
@@ -8134,6 +7896,166 @@ u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->ul_rif_id;
}
+static bool
+mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS) &&
+ mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ if (err)
+ return err;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ NULL);
+ if (err)
+ goto err_clear_ingress;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ goto err_alloc_egress;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ NULL);
+ if (err)
+ goto err_clear_egress;
+
+ return 0;
+
+err_clear_egress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+err_alloc_egress:
+err_clear_ingress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ return err;
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return;
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
+ struct rtnl_hw_stats64 *p_stats)
+{
+ struct mlxsw_sp_rif_counter_set_basic ingress;
+ struct mlxsw_sp_rif_counter_set_basic egress;
+ int err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ &ingress);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &egress);
+ if (err)
+ return err;
+
+#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
+ ((SET.good_unicast_ ## SFX) + \
+ (SET.good_multicast_ ## SFX) + \
+ (SET.good_broadcast_ ## SFX))
+
+ p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
+ p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
+ p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
+ p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
+ p_stats->rx_errors = ingress.error_packets;
+ p_stats->tx_errors = egress.error_packets;
+ p_stats->rx_dropped = ingress.discard_packets;
+ p_stats->tx_dropped = egress.discard_packets;
+ p_stats->multicast = ingress.good_multicast_packets +
+ ingress.good_broadcast_packets;
+
+#undef MLXSW_SP_ROUTER_ALL_GOOD
+
+ return 0;
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct rtnl_hw_stats64 stats = {};
+ int err;
+
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return 0;
+
+ err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
+ if (err)
+ return err;
+
+ netdev_offload_xstats_report_delta(info->report_delta, &stats);
+ return 0;
+}
+
+struct mlxsw_sp_router_hwstats_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work =
+ container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
+ work);
+
+ rtnl_lock();
+ rtnl_offload_xstats_notify(hws_work->dev);
+ rtnl_unlock();
+ dev_put(hws_work->dev);
+ kfree(hws_work);
+}
+
+static void
+mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work;
+
+ /* To collect notification payload, the core ends up sending another
+ * notifier block message, which would deadlock on the attempt to
+ * acquire the router lock again. Just postpone the notification until
+ * later.
+ */
+
+ hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
+ if (!hws_work)
+ return;
+
+ INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
+ dev_hold(dev);
+ hws_work->dev = dev;
+ mlxsw_core_schedule_work(&hws_work->work);
+}
+
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
return rif->dev->ifindex;
@@ -8144,6 +8066,16 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
+{
+ struct rtnl_hw_stats64 stats = {};
+
+ if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
+ netdev_offload_xstats_push_delta(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3,
+ &stats);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -8204,10 +8136,20 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
- mlxsw_sp_rif_counters_alloc(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ err = mlxsw_sp_router_port_l3_stats_enable(rif);
+ if (err)
+ goto err_stats_enable;
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_alloc(rif);
+ }
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return rif;
+err_stats_enable:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -8234,10 +8176,19 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *vr;
int i;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp_rif_counters_free(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ mlxsw_sp_rif_push_l3_stats(rif);
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_free(rif);
+ }
+
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
@@ -8369,9 +8320,6 @@ mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
int id;
idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
- if (!profile)
- continue;
-
if (ether_addr_equal_masked(profile->mac_prefix, mac,
mlxsw_sp->mac_mask))
return profile;
@@ -8387,6 +8335,13 @@ static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
+static u64 mlxsw_sp_rifs_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rifs_count);
+}
+
static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
struct netlink_ext_ack *extack)
@@ -8494,7 +8449,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
u8 mac_profile;
int err;
- if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+ !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
@@ -8634,9 +8590,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(port_dev) ||
- netif_is_lag_port(port_dev) ||
- netif_is_ovs_port(port_dev))
+ if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -9116,36 +9070,102 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
return -ENOBUFS;
}
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
- unsigned long event, void *ptr)
+static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return true;
+ }
+
+ return false;
+}
+
+static int
+mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ switch (info->type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ return mlxsw_sp_router_port_l3_stats_enable(rif);
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ mlxsw_sp_router_port_l3_stats_report_used(rif, info);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int
+mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
+}
+
+static bool mlxsw_sp_is_router_event(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_PRE_CHANGEADDR:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
- int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
- mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
- goto out;
+ return 0;
switch (event) {
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
- err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
- break;
+ return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
case NETDEV_PRE_CHANGEADDR:
- err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ default:
+ WARN_ON_ONCE(1);
break;
}
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return err;
+ return 0;
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
@@ -9176,8 +9196,18 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}
-int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
- struct netdev_notifier_changeupper_info *info)
+static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
+ return false;
+ return netif_is_l3_master(info->upper_dev);
+}
+
+static int
+mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
int err = 0;
@@ -9188,7 +9218,6 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
- mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_PRECHANGEUPPER:
break;
@@ -9203,11 +9232,42 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
}
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
+static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_router *router;
+ struct mlxsw_sp *mlxsw_sp;
+ int err = 0;
+
+ router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
+ mlxsw_sp = router->mlxsw_sp;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (mlxsw_sp_is_offload_xstats_event(event))
+ err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_is_router_event(event))
+ err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
+ else if (mlxsw_sp_is_vrf_event(event, ptr))
+ err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
+
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return notifier_from_errno(err);
+}
+
static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
struct netdev_nested_priv *priv)
{
@@ -9254,17 +9314,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
- rif_subport->vid);
-
+ efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9289,9 +9350,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9303,7 +9370,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9327,10 +9394,9 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
.fid_get = mlxsw_sp_rif_subport_fid_get,
};
-static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
- enum mlxsw_reg_ritr_if_type type,
- u16 vid_fid, bool enable)
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9338,12 +9404,12 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
- mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
@@ -9362,10 +9428,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return err;
rif->mac_profile_id = mac_profile;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
if (err)
- goto err_rif_vlan_fid_op;
+ goto err_rif_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9382,9 +9447,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9392,8 +9463,8 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-err_rif_vlan_fid_op:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9404,7 +9475,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9412,7 +9483,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
@@ -9489,11 +9560,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9568,7 +9747,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9611,6 +9790,7 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
if (err)
goto ul_rif_op_err;
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return ul_rif;
ul_rif_op_err:
@@ -9623,6 +9803,7 @@ static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
kfree(ul_rif);
@@ -9754,7 +9935,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
@@ -9778,10 +9959,15 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- mlxsw_sp_rif_mac_profiles_occ_get,
- mlxsw_sp);
+ atomic_set(&mlxsw_sp->router->rifs_count, 0);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIFS,
+ mlxsw_sp_rifs_occ_get,
+ mlxsw_sp);
return 0;
}
@@ -9791,11 +9977,13 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
@@ -9954,7 +10142,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
unsigned long *fields = config->fields;
u32 hash_fields;
- switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
case 0:
mlxsw_sp_mp4_hash_outer_addr(config);
break;
@@ -9972,7 +10160,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_mp_hash_inner_l3(config);
break;
case 3:
- hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
/* Outer */
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
@@ -10153,13 +10341,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
- bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
+ bool usp;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
@@ -10175,46 +10364,6 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
- .init = mlxsw_sp_router_ll_basic_init,
- .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
- .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
-};
-
-static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
-{
- size_t max_size = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
- size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
-
- if (size > max_size)
- max_size = size;
- }
- router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
- GFP_KERNEL);
- if (!router->ll_op_ctx)
- return -ENOMEM;
- INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
- return 0;
-}
-
-static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
-{
- WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- kfree(router->ll_op_ctx);
-}
-
static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
u16 lb_rif_index;
@@ -10288,23 +10437,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_router_ops_init;
- err = mlxsw_sp_router_xm_init(mlxsw_sp);
- if (err)
- goto err_xm_init;
-
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
- &mlxsw_sp_router_ll_xm_ops :
- &mlxsw_sp_router_ll_basic_ops;
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
-
- err = mlxsw_sp_router_ll_op_ctx_init(router);
- if (err)
- goto err_ll_op_ctx_init;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
mlxsw_sp_nh_grp_activity_work);
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -10357,10 +10492,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
- INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
- INIT_LIST_HEAD(&router->fib_event_queue);
- spin_lock_init(&router->fib_event_queue_lock);
-
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -10392,8 +10523,18 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_fib_notifier;
+ mlxsw_sp->router->netdevice_nb.notifier_call =
+ mlxsw_sp_router_netdevice_event;
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
+ if (err)
+ goto err_register_netdev_notifier;
+
return 0;
+err_register_netdev_notifier:
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
err_register_fib_notifier:
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb);
@@ -10405,7 +10546,6 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -10429,10 +10569,6 @@ err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(router);
-err_ll_op_ctx_init:
- mlxsw_sp_router_xm_fini(mlxsw_sp);
-err_xm_init:
err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
@@ -10441,6 +10577,8 @@ err_router_ops_init:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
@@ -10449,7 +10587,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -10461,8 +10598,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
- mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}