-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig               |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                 |  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c            | 774
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h            | 102
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c  | 234
-rw-r--r--  include/net/switchdev.h                                   |   2
-rw-r--r--  net/8021q/vlan_dev.c                                      |   7
-rw-r--r--  net/bridge/br_fdb.c                                       |   1
-rw-r--r--  net/bridge/br_stp.c                                       |   2
-rw-r--r--  net/bridge/br_stp_if.c                                    |   1
-rw-r--r--  net/bridge/br_vlan.c                                      |   2
-rw-r--r--  net/core/net-sysfs.c                                      |   1
-rw-r--r--  net/core/rtnetlink.c                                      |   1
-rw-r--r--  net/switchdev/switchdev.c                                 |  12
14 files changed, 1001 insertions(+), 162 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index ec8caf8fedc6..ce26adcb4988 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -41,7 +41,7 @@ config MLXSW_SWITCHX2
config MLXSW_SPECTRUM
tristate "Mellanox Technologies Spectrum support"
- depends on MLXSW_CORE && NET_SWITCHDEV
+ depends on MLXSW_CORE && NET_SWITCHDEV && VLAN_8021Q
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index af631df4603a..66d851d4dfb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -396,7 +396,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 vid,
+ const char *mac, u16 fid_vid,
enum mlxsw_reg_sfd_rec_action action,
u8 local_port)
{
@@ -404,16 +404,16 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST,
policy, mac, action);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
- mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
+ mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
- char *mac, u16 *p_vid,
+ char *mac, u16 *p_fid_vid,
u8 *p_local_port)
{
mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+ *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
*p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
}
@@ -438,6 +438,13 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_lag_lag_vid
+ * Indicates VID in case of vFIDs. Reserved for FIDs.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
/* reg_sfd_uc_lag_lag_id
* LAG Identifier - pointer into the LAG descriptor table.
* Access: RW
@@ -448,15 +455,16 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10,
static inline void
mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 vid,
- enum mlxsw_reg_sfd_rec_action action,
+ const char *mac, u16 fid_vid,
+ enum mlxsw_reg_sfd_rec_action action, u16 lag_vid,
u16 lag_id)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG,
policy, mac, action);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
- mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, vid);
+ mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
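
An illustrative sketch (not part of the patch) of how a caller is expected to fill the two fields after this change: fid_vid carries the FID or vFID the record belongs to, while lag_vid is only consumed for vFID-based records, per the item comment above. It mirrors the mlxsw_sp_port_fdb_uc_lag_op() helper updated later in this patch; the function name and the idea of calling it standalone are hypothetical.

/* Sketch only: write one unicast-LAG FDB record carrying both a (v)FID
 * and, for vFIDs, the VLAN to be used towards the LAG.
 */
static int example_sfd_uc_lag_write(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				    const char *mac, u16 fid, u16 lag_vid)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0,
				  MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS,
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
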
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 322ed544348f..c588c65e91f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -48,6 +48,7 @@
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
+#include <linux/list.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -186,33 +187,6 @@ static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
- int err;
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
- MLXSW_SP_VFID_BASE + vfid, 0);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-
- if (err)
- return err;
-
- set_bit(vfid, mlxsw_sp->active_vfids);
- return 0;
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- clear_bit(vfid, mlxsw_sp->active_vfids);
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
- MLXSW_SP_VFID_BASE + vfid, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
@@ -549,12 +523,132 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static struct mlxsw_sp_vfid *
+mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
+{
+ struct mlxsw_sp_vfid *vfid;
+
+ list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
+ if (vfid->vid == vid)
+ return vfid;
+ }
+
+ return NULL;
+}
+
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
+ MLXSW_SP_VFID_PORT_MAX);
+}
+
+static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
+ u16 vid)
+{
+ struct device *dev = mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_vfid *vfid;
+ u16 n_vfid;
+ int err;
+
+ n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
+ if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
+ dev_err(dev, "No available vFIDs\n");
+ return ERR_PTR(-ERANGE);
+ }
+
+ err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+ if (err) {
+ dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+ return ERR_PTR(err);
+ }
+
+ vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
+ if (!vfid)
+ goto err_allocate_vfid;
+
+ vfid->vfid = n_vfid;
+ vfid->vid = vid;
+
+ list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
+ set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
+
+ return vfid;
+
+err_allocate_vfid:
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vfid *vfid)
+{
+ clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
+ list_del(&vfid->list);
+
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+
+ kfree(vfid);
+}
+
+static struct mlxsw_sp_port *
+mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_vfid *vfid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
+ if (!mlxsw_sp_vport)
+ return NULL;
+
+ /* dev will be set correctly after the VLAN device is linked
+ * with the real device. In case of bridge SELF invocation, dev
+ * will remain as is.
+ */
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
+ mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
+ mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
+ mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
+ mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
+ mlxsw_sp_vport->vport.vfid = vfid;
+ mlxsw_sp_vport->vport.vid = vfid->vid;
+
+ list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
+
+ return mlxsw_sp_vport;
+}
+
+static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ list_del(&mlxsw_sp_vport->vport.list);
+ kfree(mlxsw_sp_vport);
+}
+
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *sftr_pl;
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_vfid *vfid;
int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
@@ -563,100 +657,105 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
if (!vid)
return 0;
- if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
netdev_warn(dev, "VID=%d already configured\n", vid);
return 0;
}
- if (!test_bit(vid, mlxsw_sp->active_vfids)) {
- err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (err) {
- netdev_err(dev, "Failed to create vFID=%d\n",
- MLXSW_SP_VFID_BASE + vid);
- return err;
+ vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
+ if (!vfid) {
+ vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+ if (IS_ERR(vfid)) {
+ netdev_err(dev, "Failed to create vFID for VID=%d\n",
+ vid);
+ return PTR_ERR(vfid);
}
+ }
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl) {
- err = -ENOMEM;
- goto err_flood_table_alloc;
- }
- mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
- MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
- MLXSW_PORT_CPU_PORT, true);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
- kfree(sftr_pl);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
+ if (!mlxsw_sp_vport) {
+ netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+ err = -ENOMEM;
+ goto err_port_vport_create;
+ }
+
+ if (!vfid->nr_vports) {
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
+ true, false);
if (err) {
- netdev_err(dev, "Failed to configure flood table\n");
- goto err_flood_table_config;
+ netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
+ vfid->vfid);
+ goto err_vport_flood_set;
}
}
- /* In case we fail in the following steps, we intentionally do not
- * destroy the associated vFID.
- */
-
/* When adding the first VLAN interface on a bridged port we need to
* transition all the active 802.1Q bridge VLANs to use explicit
* {Port, VID} to FID mappings and set the port's mode to Virtual mode.
*/
- if (!mlxsw_sp_port->nr_vfids) {
+ if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err) {
netdev_err(dev, "Failed to set to Virtual mode\n");
- return err;
+ goto err_port_vp_mode_trans;
}
}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true, MLXSW_SP_VFID_BASE + vid, vid);
+ true,
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
+ vid);
if (err) {
netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
- vid, MLXSW_SP_VFID_BASE + vid);
+ vid, vfid->vfid);
goto err_port_vid_to_fid_set;
}
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
if (err) {
netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
goto err_port_vid_learning_set;
}
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
vid);
goto err_port_add_vid;
}
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
MLXSW_REG_SPMS_STATE_FORWARDING);
if (err) {
netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
goto err_port_stp_state_set;
}
- mlxsw_sp_port->nr_vfids++;
- set_bit(vid, mlxsw_sp_port->active_vfids);
+ vfid->nr_vports++;
return 0;
-err_flood_table_config:
-err_flood_table_alloc:
- mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
- return err;
-
err_port_stp_state_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- MLXSW_SP_VFID_BASE + vid, vid);
+ mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ if (list_is_singular(&mlxsw_sp_port->vports_list))
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+err_port_vp_mode_trans:
+ if (!vfid->nr_vports)
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
+ false);
+err_vport_flood_set:
+ mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+err_port_vport_create:
+ if (!vfid->nr_vports)
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
return err;
}
@@ -664,6 +763,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_vfid *vfid;
int err;
/* VLAN 0 is removed from HW filter when device goes down, but
@@ -672,38 +773,42 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
if (!vid)
return 0;
- if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport) {
netdev_warn(dev, "VID=%d does not exist\n", vid);
return 0;
}
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ vfid = mlxsw_sp_vport->vport.vfid;
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
MLXSW_REG_SPMS_STATE_DISCARDING);
if (err) {
netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
return err;
}
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
if (err) {
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
vid);
return err;
}
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
if (err) {
netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
return err;
}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false, MLXSW_SP_VFID_BASE + vid,
+ false,
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
vid);
if (err) {
netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
- vid, MLXSW_SP_VFID_BASE + vid);
+ vid, vfid->vfid);
return err;
}
@@ -711,7 +816,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
* transition all active 802.1Q bridge VLANs to use VID to FID
* mappings and set port's mode to VLAN mode.
*/
- if (mlxsw_sp_port->nr_vfids == 1) {
+ if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
if (err) {
netdev_err(dev, "Failed to set to VLAN mode\n");
@@ -719,8 +824,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
}
}
- mlxsw_sp_port->nr_vfids--;
- clear_bit(vid, mlxsw_sp_port->active_vfids);
+ vfid->nr_vports--;
+ mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+
+ /* Destroy the vFID if no vPorts are assigned to it anymore. */
+ if (!vfid->nr_vports)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
return 0;
}
@@ -1245,6 +1354,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
bool usable;
+ size_t bytes;
int err;
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -1258,6 +1368,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port->learning_sync = 1;
mlxsw_sp_port->uc_flood = 1;
mlxsw_sp_port->pvid = 1;
+ bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
+ mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
+ if (!mlxsw_sp_port->active_vlans) {
+ err = -ENOMEM;
+ goto err_port_active_vlans_alloc;
+ }
+ INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -1359,16 +1476,27 @@ err_port_module_check:
err_dev_addr_init:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
+ kfree(mlxsw_sp_port->active_vlans);
+err_port_active_vlans_alloc:
free_netdev(dev);
return err;
}
-static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
- u16 vfid;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
- for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
- mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+ list_for_each_entry_safe(mlxsw_sp_vport, tmp,
+ &mlxsw_sp_port->vports_list, vport.list) {
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+ /* vPorts created for VLAN devices should already be gone
+ * by now, since we unregistered the port netdev.
+ */
+ WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
+ mlxsw_sp_port_kill_vid(dev, 0, vid);
+ }
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
@@ -1377,10 +1505,11 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
- mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_vports_fini(mlxsw_sp_port);
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
+ kfree(mlxsw_sp_port->active_vlans);
free_netdev(mlxsw_sp_port->dev);
}
@@ -1662,16 +1791,15 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
enum mlxsw_sp_flood_table flood_table;
char sfgc_pl[MLXSW_REG_SFGC_LEN];
- if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+ if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- flood_table = 0;
- } else {
+ else
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
- if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
- flood_table = MLXSW_SP_FLOOD_TABLE_UC;
- else
- flood_table = MLXSW_SP_FLOOD_TABLE_BM;
- }
+
+ if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+ flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+ else
+ flood_table = MLXSW_SP_FLOOD_TABLE_BM;
mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
flood_table);
@@ -1682,9 +1810,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
int type, err;
- /* For non-offloaded netdevs, flood all traffic types to CPU
- * port.
- */
for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
continue;
@@ -1693,15 +1818,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
if (err)
return err;
- }
-
- /* For bridged ports, use one flooding table for unknown unicast
- * traffic and a second table for unregistered multicast and
- * broadcast.
- */
- for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
- if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
- continue;
err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
@@ -1736,6 +1852,8 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
+ INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
+ INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -1746,7 +1864,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
- goto err_ports_create;
+ return err;
}
err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
@@ -1796,8 +1914,6 @@ err_rx_listener_register:
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
mlxsw_sp_ports_remove(mlxsw_sp);
-err_ports_create:
- mlxsw_sp_vfids_fini(mlxsw_sp);
return err;
}
@@ -1809,7 +1925,6 @@ static void mlxsw_sp_fini(void *priv)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
mlxsw_sp_ports_remove(mlxsw_sp);
- mlxsw_sp_vfids_fini(mlxsw_sp);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -1834,8 +1949,8 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.flood_mode = 3,
.max_fid_offset_flood_tables = 2,
.fid_offset_flood_table_size = VLAN_N_VID - 1,
- .max_fid_flood_tables = 1,
- .fid_flood_table_size = VLAN_N_VID,
+ .max_fid_flood_tables = 2,
+ .fid_flood_table_size = MLXSW_SP_VFID_MAX,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -2147,6 +2262,54 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev);
+
+static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport) {
+ WARN_ON(!mlxsw_sp_vport);
+ return -EINVAL;
+ }
+
+ mlxsw_sp_vport->dev = vlan_dev;
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport) {
+ WARN_ON(!mlxsw_sp_vport);
+ return -EINVAL;
+ }
+
+ /* When removing a VLAN device while still bridged we should first
+ * remove it from the bridge, as we receive the bridge's notification
+ * when the vPort is already gone.
+ */
+ if (mlxsw_sp_vport->bridged) {
+ struct net_device *br_dev;
+
+ br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+ }
+
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
+
+ return 0;
+}
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
unsigned long event, void *ptr)
{
@@ -2176,9 +2339,23 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master)
- break;
- if (netif_is_bridge_master(upper_dev)) {
+ if (is_vlan_dev(upper_dev)) {
+ if (info->linking) {
+ err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
+ upper_dev);
+ if (err) {
+ netdev_err(dev, "Failed to link VLAN device\n");
+ return NOTIFY_BAD;
+ }
+ } else {
+ err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
+ if (err) {
+ netdev_err(dev, "Failed to unlink VLAN device\n");
+ return NOTIFY_BAD;
+ }
+ }
+ } else if (netif_is_bridge_master(upper_dev)) {
if (info->linking) {
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
if (err)
@@ -2271,6 +2448,370 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
return NOTIFY_DONE;
}
+static struct mlxsw_sp_vfid *
+mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_vfid *vfid;
+
+ list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
+ if (vfid->br_dev == br_dev)
+ return vfid;
+ }
+
+ return NULL;
+}
+
+static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
+{
+ return vfid - MLXSW_SP_VFID_PORT_MAX;
+}
+
+static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
+{
+ return MLXSW_SP_VFID_PORT_MAX + br_vfid;
+}
+
+static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
+ MLXSW_SP_VFID_BR_MAX);
+}
+
+static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ struct device *dev = mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_vfid *vfid;
+ u16 n_vfid;
+ int err;
+
+ n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
+ if (n_vfid == MLXSW_SP_VFID_MAX) {
+ dev_err(dev, "No available vFIDs\n");
+ return ERR_PTR(-ERANGE);
+ }
+
+ err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+ if (err) {
+ dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+ return ERR_PTR(err);
+ }
+
+ vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
+ if (!vfid)
+ goto err_allocate_vfid;
+
+ vfid->vfid = n_vfid;
+ vfid->br_dev = br_dev;
+
+ list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
+ set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+
+ return vfid;
+
+err_allocate_vfid:
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vfid *vfid)
+{
+ u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+
+ clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
+ list_del(&vfid->list);
+
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+
+ kfree(vfid);
+}
+
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ struct net_device *dev = mlxsw_sp_vport->dev;
+ struct mlxsw_sp_vfid *vfid, *new_vfid;
+ int err;
+
+ vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
+ if (!vfid) {
+ WARN_ON(!vfid);
+ return -EINVAL;
+ }
+
+ /* We need a vFID to go back to after leaving the bridge's vFID. */
+ new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
+ if (!new_vfid) {
+ new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+ if (IS_ERR(new_vfid)) {
+ netdev_err(dev, "Failed to create vFID for VID=%d\n",
+ vid);
+ return PTR_ERR(new_vfid);
+ }
+ }
+
+ /* Invalidate existing {Port, VID} to vFID mapping and create a new
+ * one for the new vFID.
+ */
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ false,
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
+ vfid->vfid);
+ goto err_port_vid_to_fid_invalidate;
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ true,
+ mlxsw_sp_vfid_to_fid(new_vfid->vfid),
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
+ new_vfid->vfid);
+ goto err_port_vid_to_fid_validate;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+ if (err) {
+ netdev_err(dev, "Failed to disable learning\n");
+ goto err_port_vid_learning_set;
+ }
+
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
+ false);
+ if (err) {
+ netdev_err(dev, "Failed to clear flooding\n");
+ goto err_vport_flood_set;
+ }
+
+ /* Switch between the vFIDs and destroy the old one if needed. */
+ new_vfid->nr_vports++;
+ mlxsw_sp_vport->vport.vfid = new_vfid;
+ vfid->nr_vports--;
+ if (!vfid->nr_vports)
+ mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+
+ mlxsw_sp_vport->learning = 0;
+ mlxsw_sp_vport->learning_sync = 0;
+ mlxsw_sp_vport->uc_flood = 0;
+ mlxsw_sp_vport->bridged = 0;
+
+ return 0;
+
+err_vport_flood_set:
+err_port_vid_learning_set:
+err_port_vid_to_fid_validate:
+err_port_vid_to_fid_invalidate:
+ /* Rollback vFID only if new. */
+ if (!new_vfid->nr_vports)
+ mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
+ return err;
+}
+
+static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ struct net_device *dev = mlxsw_sp_vport->dev;
+ struct mlxsw_sp_vfid *vfid;
+ int err;
+
+ vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
+ if (!vfid) {
+ vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
+ if (IS_ERR(vfid)) {
+ netdev_err(dev, "Failed to create bridge vFID\n");
+ return PTR_ERR(vfid);
+ }
+ }
+
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
+ if (err) {
+ netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
+ vfid->vfid);
+ goto err_port_flood_set;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
+ if (err) {
+ netdev_err(dev, "Failed to enable learning\n");
+ goto err_port_vid_learning_set;
+ }
+
+ /* We need to invalidate existing {Port, VID} to vFID mapping and
+ * create a new one for the bridge's vFID.
+ */
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ false,
+ mlxsw_sp_vfid_to_fid(old_vfid->vfid),
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
+ old_vfid->vfid);
+ goto err_port_vid_to_fid_invalidate;
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ true,
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
+ vfid->vfid);
+ goto err_port_vid_to_fid_validate;
+ }
+
+ /* Switch between the vFIDs and destroy the old one if needed. */
+ vfid->nr_vports++;
+ mlxsw_sp_vport->vport.vfid = vfid;
+ old_vfid->nr_vports--;
+ if (!old_vfid->nr_vports)
+ mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
+
+ mlxsw_sp_vport->learning = 1;
+ mlxsw_sp_vport->learning_sync = 1;
+ mlxsw_sp_vport->uc_flood = 1;
+ mlxsw_sp_vport->bridged = 1;
+
+ return 0;
+
+err_port_vid_to_fid_validate:
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+ mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
+err_port_vid_to_fid_invalidate:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+err_port_vid_learning_set:
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
+err_port_flood_set:
+ if (!vfid->nr_vports)
+ mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ return err;
+}
+
+static bool
+mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+ vport.list) {
+ if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
+ return false;
+ }
+
+ return true;
+}
+
+static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
+ unsigned long event, void *ptr,
+ u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct net_device *upper_dev;
+ int err;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!info->master || !info->linking)
+ break;
+ if (!netif_is_bridge_master(upper_dev))
+ return NOTIFY_BAD;
+ /* We can't have multiple VLAN interfaces configured on
+ * the same port and being members in the same bridge.
+ */
+ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
+ upper_dev))
+ return NOTIFY_BAD;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!info->master)
+ break;
+ if (info->linking) {
+ if (!mlxsw_sp_vport) {
+ WARN_ON(!mlxsw_sp_vport);
+ return NOTIFY_BAD;
+ }
+ err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+ upper_dev);
+ if (err) {
+ netdev_err(dev, "Failed to join bridge\n");
+ return NOTIFY_BAD;
+ }
+ } else {
+ /* We ignore bridge's unlinking notifications if vPort
+ * is gone, since we already left the bridge when the
+ * VLAN device was unlinked from the real device.
+ */
+ if (!mlxsw_sp_vport)
+ return NOTIFY_DONE;
+ err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
+ upper_dev);
+ if (err) {
+ netdev_err(dev, "Failed to leave bridge\n");
+ return NOTIFY_BAD;
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
+ unsigned long event, void *ptr,
+ u16 vid)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+ int ret;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ if (mlxsw_sp_port_dev_check(dev)) {
+ ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
+ vid);
+ if (ret == NOTIFY_BAD)
+ return ret;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
+ vid);
+
+ return NOTIFY_DONE;
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -2282,6 +2823,9 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
if (netif_is_lag_master(dev))
return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ if (is_vlan_dev(dev))
+ return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+
return NOTIFY_DONE;
}
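
The spectrum.c changes above replace the per-port vFID bitmaps with reference-counted vFID objects shared by vPorts. An illustrative sketch (not part of the patch) of the lifecycle that mlxsw_sp_port_add_vid() and mlxsw_sp_port_kill_vid() implement, using only the static helpers added above; the wrapper function itself is hypothetical.

/* Sketch only: a vFID is looked up by VID and shared across ports; it is
 * created on first use and destroyed when its last vPort goes away.
 */
static void example_vfid_lifecycle(struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid))
			return;
	}

	vfid->nr_vports++;		/* one reference per vPort using it */

	/* ... the vPort maps {Port, VID} to this vFID and uses it ... */

	vfid->nr_vports--;
	if (!vfid->nr_vports)		/* last user gone */
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
}
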
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 48be5a63b9b5..463ed6dcc709 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -41,11 +41,16 @@
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/list.h>
#include <net/switchdev.h>
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
+#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
+#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */
+#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
+
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -56,8 +61,38 @@ struct mlxsw_sp_upper {
unsigned int ref_count;
};
+struct mlxsw_sp_vfid {
+ struct list_head list;
+ u16 nr_vports;
+ u16 vfid; /* Starting at 0 */
+ struct net_device *br_dev;
+ u16 vid;
+};
+
+static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
+{
+ return MLXSW_SP_VFID_BASE + vfid;
+}
+
+static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
+{
+ return fid - MLXSW_SP_VFID_BASE;
+}
+
+static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
+{
+ return fid >= MLXSW_SP_VFID_BASE;
+}
+
struct mlxsw_sp {
- unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct {
+ struct list_head list;
+ unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)];
+ } port_vfids;
+ struct {
+ struct list_head list;
+ unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)];
+ } br_vfids;
unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
@@ -102,11 +137,15 @@ struct mlxsw_sp_port {
lagged:1;
u16 pvid;
u16 lag_id;
+ struct {
+ struct list_head list;
+ struct mlxsw_sp_vfid *vfid;
+ u16 vid;
+ } vport;
/* 802.1Q bridge VLANs */
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long *active_vlans;
/* VLAN interfaces */
- unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
- u16 nr_vfids;
+ struct list_head vports_list;
};
static inline struct mlxsw_sp_port *
@@ -121,6 +160,59 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
+static inline bool
+mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->vport.vfid;
+}
+
+static inline struct net_device *
+mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ return mlxsw_sp_vport->vport.vfid->br_dev;
+}
+
+static inline u16
+mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ return mlxsw_sp_vport->vport.vid;
+}
+
+static inline u16
+mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ return mlxsw_sp_vport->vport.vfid->vfid;
+}
+
+static inline struct mlxsw_sp_port *
+mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+ vport.list) {
+ if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
+ return mlxsw_sp_vport;
+ }
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_port *
+mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vfid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+ vport.list) {
+ if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid)
+ return mlxsw_sp_vport;
+ }
+
+ return NULL;
+}
+
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BM,
@@ -143,5 +235,7 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
u16 vid);
int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid);
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
+ bool set, bool only_uc);
#endif
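
The defines and inline helpers above fix the FID numbering used throughout the patch. As an illustrative summary (not part of the patch), assuming VLAN_N_VID is 4096 so MLXSW_SP_VFID_BASE is 4096: FIDs 0-4095 are the regular 802.1Q FIDs where FID equals VID, FIDs 4096-4607 back the 512 port vFIDs (non-bridged VLAN interfaces), and FIDs 4608-12799 back the 8192 bridge vFIDs. The function below is hypothetical and only demonstrates the arithmetic.

static void example_fid_layout(void)
{
	u16 fid = mlxsw_sp_vfid_to_fid(10);	/* port vFID 10 -> FID 4096 + 10 = 4106 */

	WARN_ON(!mlxsw_sp_fid_is_vfid(fid));	/* 4106 >= MLXSW_SP_VFID_BASE */
	WARN_ON(mlxsw_sp_fid_to_vfid(fid) != 10);
}
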
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 406dab2f6b17..9476ff9237ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -51,12 +51,33 @@
#include "core.h"
#include "reg.h"
+static struct mlxsw_sp_port *
+mlxsw_sp_port_orig_get(struct net_device *dev,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ u16 vid;
+
+ if (!is_vlan_dev(dev))
+ return mlxsw_sp_port;
+
+ vid = vlan_dev_vlan_id(dev);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ WARN_ON(!mlxsw_sp_vport);
+
+ return mlxsw_sp_vport;
+}
+
static int mlxsw_sp_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
@@ -105,8 +126,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!spms_pl)
return -ENOMEM;
mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+ } else {
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+ }
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
kfree(spms_pl);
@@ -124,22 +151,38 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
+static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
+{
+ return vfid >= MLXSW_SP_VFID_PORT_MAX;
+}
+
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end, bool set,
+ u16 idx_begin, u16 idx_end, bool set,
bool only_uc)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u16 range = fid_end - fid_begin + 1;
+ u16 local_port = mlxsw_sp_port->local_port;
+ enum mlxsw_flood_table_type table_type;
+ u16 range = idx_end - idx_begin + 1;
char *sftr_pl;
int err;
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
+ if (mlxsw_sp_vfid_is_vport_br(idx_begin))
+ local_port = mlxsw_sp_port->local_port;
+ else
+ local_port = MLXSW_PORT_CPU_PORT;
+ } else {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+ }
+
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
- mlxsw_sp_port->local_port, set);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
+ table_type, range, local_port, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto buffer_out;
@@ -150,9 +193,8 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (only_uc)
goto buffer_out;
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
- mlxsw_sp_port->local_port, set);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
+ table_type, range, local_port, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
@@ -167,6 +209,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, last_visited_vid;
int err;
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
+ set, true);
+ }
+
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
true);
@@ -185,6 +234,16 @@ err_port_flood_set:
return err;
}
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
+ bool set, bool only_uc)
+{
+ /* In case of vFIDs, index into the flooding table is relative to
+ * the start of the vFIDs range.
+ */
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
+ only_uc);
+}
+
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
unsigned long brport_flags)
@@ -244,6 +303,10 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
+ mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
@@ -304,7 +367,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
enum mlxsw_reg_svfa_mt mt;
- if (mlxsw_sp_port->nr_vfids)
+ if (!list_empty(&mlxsw_sp_port->vports_list))
mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
else
mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
@@ -316,7 +379,7 @@ static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
enum mlxsw_reg_svfa_mt mt;
- if (!mlxsw_sp_port->nr_vfids)
+ if (list_empty(&mlxsw_sp_port->vports_list))
return 0;
mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
@@ -503,7 +566,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
- const char *mac, u16 vid, bool adding,
+ const char *mac, u16 fid, bool adding,
bool dynamic)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -516,7 +579,7 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_port->local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
@@ -525,8 +588,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
- const char *mac, u16 vid, bool adding,
- bool dynamic)
+ const char *mac, u16 fid, u16 lag_vid,
+ bool adding, bool dynamic)
{
char *sfd_pl;
int err;
@@ -537,8 +600,8 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
- lag_id);
+ mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ lag_vid, lag_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
@@ -550,21 +613,30 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- u16 vid = fdb->vid;
+ u16 fid = fdb->vid;
+ u16 lag_vid = 0;
if (switchdev_trans_ph_prepare(trans))
return 0;
- if (!vid)
- vid = mlxsw_sp_port->pvid;
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+
+ fid = mlxsw_sp_vfid_to_fid(vfid);
+ lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ }
+
+ if (!fid)
+ fid = mlxsw_sp_port->pvid;
if (!mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
- fdb->addr, vid, true, false);
+ fdb->addr, fid, true, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->lag_id,
- fdb->addr, vid, true, false);
+ fdb->addr, fid, lag_vid,
+ true, false);
}
static int mlxsw_sp_port_obj_add(struct net_device *dev,
@@ -574,8 +646,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
+ mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
+ return 0;
+
err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
@@ -679,14 +758,24 @@ static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb)
{
+ u16 fid = fdb->vid;
+ u16 lag_vid = 0;
+
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+
+ fid = mlxsw_sp_vfid_to_fid(vfid);
+ lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ }
+
if (!mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port,
- fdb->addr, fdb->vid,
+ fdb->addr, fid,
false, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->lag_id,
- fdb->addr, fdb->vid,
+ fdb->addr, fid, lag_vid,
false, false);
}
@@ -696,8 +785,15 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
+ mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
+ return 0;
+
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
@@ -732,9 +828,10 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
switchdev_obj_dump_cb_t *cb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 vport_vid = 0, vport_fid = 0;
char *sfd_pl;
char mac[ETH_ALEN];
- u16 vid;
+ u16 fid;
u8 local_port;
u16 lag_id;
u8 num_rec;
@@ -746,6 +843,14 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sfd_pl)
return -ENOMEM;
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ u16 tmp;
+
+ tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+ vport_fid = mlxsw_sp_vfid_to_fid(tmp);
+ vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ }
+
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do {
mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
@@ -764,12 +869,17 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
for (i = 0; i < num_rec; i++) {
switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
case MLXSW_REG_SFD_REC_TYPE_UNICAST:
- mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+ mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
&local_port);
if (local_port == mlxsw_sp_port->local_port) {
+ if (vport_fid && vport_fid != fid)
+ continue;
+ else if (vport_fid)
+ fdb->vid = vport_vid;
+ else
+ fdb->vid = fid;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
- fdb->vid = vid;
err = cb(&fdb->obj);
if (err)
stored_err = err;
@@ -777,12 +887,17 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
break;
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
- mac, &vid, &lag_id);
+ mac, &fid, &lag_id);
if (mlxsw_sp_port ==
mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
+ if (vport_fid && vport_fid != fid)
+ continue;
+ else if (vport_fid)
+ fdb->vid = vport_vid;
+ else
+ fdb->vid = fid;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
- fdb->vid = vid;
err = cb(&fdb->obj);
if (err)
stored_err = err;
@@ -804,6 +919,13 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid;
int err = 0;
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ vlan->flags = 0;
+ vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ return cb(&vlan->obj);
+ }
+
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
vlan->flags = 0;
if (vid == mlxsw_sp_port->pvid)
@@ -824,6 +946,10 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
+ mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
@@ -871,17 +997,35 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port *mlxsw_sp_port;
char mac[ETH_ALEN];
u8 local_port;
- u16 vid;
+ u16 vid, fid;
int err;
- mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+ mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
return;
}
- err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, vid,
+ if (mlxsw_sp_fid_is_vfid(fid)) {
+ u16 vfid = mlxsw_sp_fid_to_vfid(fid);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
+ vfid);
+ if (!mlxsw_sp_vport) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
+ return;
+ }
+
+ vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ /* Override the physical port with the vPort. */
+ mlxsw_sp_port = mlxsw_sp_vport;
+ } else {
+ vid = fid;
+ }
+
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port, mac, fid,
adding && mlxsw_sp_port->learning, true);
if (err) {
if (net_ratelimit())
@@ -900,18 +1044,38 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_port *mlxsw_sp_port;
char mac[ETH_ALEN];
+ u16 lag_vid = 0;
u16 lag_id;
- u16 vid;
+ u16 vid, fid;
int err;
- mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &vid, &lag_id);
+ mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
if (!mlxsw_sp_port) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
return;
}
- err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, vid,
+ if (mlxsw_sp_fid_is_vfid(fid)) {
+ u16 vfid = mlxsw_sp_fid_to_vfid(fid);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
+ vfid);
+ if (!mlxsw_sp_vport) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
+ return;
+ }
+
+ vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ lag_vid = vid;
+ /* Override the physical port with the vPort. */
+ mlxsw_sp_port = mlxsw_sp_vport;
+ } else {
+ vid = fid;
+ }
+
+ err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding && mlxsw_sp_port->learning,
true);
if (err) {
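
The FDB notification handlers above now receive a FID rather than a VID from the device. An illustrative sketch (not part of the patch) of the translation they perform before syncing a learned MAC to the bridge, built from the helpers this patch adds to spectrum.h; the function name is hypothetical.

/* Sketch only: for regular 802.1Q FIDs the FID is the VID; for vFIDs the
 * owning vPort must be found to recover the VLAN device's VID.
 */
static u16 example_fdb_notif_vid(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	if (!mlxsw_sp_fid_is_vfid(fid))
		return fid;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
							  mlxsw_sp_fid_to_vfid(fid));
	return mlxsw_sp_vport ? mlxsw_sp_vport_vid_get(mlxsw_sp_vport) : 0;
}
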
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 1d22ce9f352e..6612946167fe 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -50,6 +50,7 @@ enum switchdev_attr_id {
};
struct switchdev_attr {
+ struct net_device *orig_dev;
enum switchdev_attr_id id;
u32 flags;
union {
@@ -68,6 +69,7 @@ enum switchdev_obj_id {
};
struct switchdev_obj {
+ struct net_device *orig_dev;
enum switchdev_obj_id id;
u32 flags;
};
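
The new orig_dev field records the device a switchdev operation was originally requested on, so a driver reached through a stacked device (a VLAN upper here) can tell which upper is really targeted, as mlxsw does via mlxsw_sp_port_orig_get(). An illustrative caller-side sketch (not part of the patch); the function name is hypothetical.

static int example_query_parent_id(struct net_device *dev)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,	/* the device the request was made on */
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	return switchdev_port_attr_get(dev, &attr);
}
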
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fded86508117..9f4bd137e045 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -30,6 +30,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <net/arp.h>
+#include <net/switchdev.h>
#include "vlan.h"
#include "vlanproc.h"
@@ -774,6 +775,12 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
.ndo_fix_features = vlan_dev_fix_features,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
.ndo_get_iflink = vlan_dev_get_iflink,
};
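
Wiring the VLAN device's FDB and bridge ndos to the generic switchdev handlers means FDB add/del/dump and bridge link requests made on a VLAN upper are passed down with obj.orig_dev set to that VLAN device, which is what lets the mlxsw handlers earlier in this patch redirect them to the matching vPort. An illustrative sketch (not part of the patch) of the resulting call path; the function name and the idea of invoking the ndo directly are hypothetical.

static int example_vlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
				struct net_device *vlan_dev,
				const unsigned char *addr, u16 vid, u16 flags)
{
	/* rtnetlink invokes the VLAN device's ndo_fdb_add, which is now
	 * switchdev_port_fdb_add(); it records vlan_dev as obj.orig_dev and
	 * recurses to the switch port stacked below.
	 */
	return vlan_dev->netdev_ops->ndo_fdb_add(ndm, tb, vlan_dev, addr, vid,
						 flags);
}
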
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index a642bb829d09..82e3e9705017 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -135,6 +135,7 @@ static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
{
struct switchdev_obj_port_fdb fdb = {
.obj = {
+ .orig_dev = f->dst->dev,
.id = SWITCHDEV_OBJ_ID_PORT_FDB,
.flags = SWITCHDEV_F_DEFER,
},
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 5f3f64553179..b3cca126b103 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -40,6 +40,7 @@ void br_log_state(const struct net_bridge_port *p)
void br_set_state(struct net_bridge_port *p, unsigned int state)
{
struct switchdev_attr attr = {
+ .orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
.flags = SWITCHDEV_F_DEFER,
.u.stp_state = state,
@@ -570,6 +571,7 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
{
struct switchdev_attr attr = {
+ .orig_dev = br->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
.u.ageing_time = ageing_time,
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 5396ff08af32..775e00fbeb1e 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -37,6 +37,7 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
void br_init_port(struct net_bridge_port *p)
{
struct switchdev_attr attr = {
+ .orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
.u.ageing_time = p->br->ageing_time,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 1394da63614a..66c4549efbbb 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -73,6 +73,7 @@ static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
u16 vid, u16 flags)
{
struct switchdev_obj_port_vlan v = {
+ .obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.flags = flags,
.vid_begin = vid,
@@ -120,6 +121,7 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
u16 vid)
{
struct switchdev_obj_port_vlan v = {
+ .obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.vid_begin = vid,
.vid_end = vid,
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f88a62ab019d..bca8c350e7f3 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -471,6 +471,7 @@ static ssize_t phys_switch_id_show(struct device *dev,
if (dev_isalive(netdev)) {
struct switchdev_attr attr = {
+ .orig_dev = netdev,
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
.flags = SWITCHDEV_F_NO_RECURSE,
};
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 34ba7a08876d..d8b0113d3eec 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1027,6 +1027,7 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
struct switchdev_attr attr = {
+ .orig_dev = dev,
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
.flags = SWITCHDEV_F_NO_RECURSE,
};
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index f34e535e93bd..df790d3385a2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -723,6 +723,7 @@ static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
u32 filter_mask)
{
struct switchdev_vlan_dump dump = {
+ .vlan.obj.orig_dev = dev,
.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.skb = skb,
.filter_mask = filter_mask,
@@ -757,6 +758,7 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int nlflags)
{
struct switchdev_attr attr = {
+ .orig_dev = dev,
.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
};
u16 mode = BRIDGE_MODE_UNDEF;
@@ -778,6 +780,7 @@ static int switchdev_port_br_setflag(struct net_device *dev,
unsigned long brport_flag)
{
struct switchdev_attr attr = {
+ .orig_dev = dev,
.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
};
u8 flag = nla_get_u8(nlattr);
@@ -853,6 +856,7 @@ static int switchdev_port_br_afspec(struct net_device *dev,
struct nlattr *attr;
struct bridge_vlan_info *vinfo;
struct switchdev_obj_port_vlan vlan = {
+ .obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
};
int rem;
@@ -975,6 +979,7 @@ int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
u16 vid, u16 nlm_flags)
{
struct switchdev_obj_port_fdb fdb = {
+ .obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
.vid = vid,
};
@@ -1000,6 +1005,7 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
u16 vid)
{
struct switchdev_obj_port_fdb fdb = {
+ .obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
.vid = vid,
};
@@ -1077,6 +1083,7 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *filter_dev, int idx)
{
struct switchdev_fdb_dump dump = {
+ .fdb.obj.orig_dev = dev,
.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
.dev = dev,
.skb = skb,
@@ -1135,6 +1142,7 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
if (!dev)
return NULL;
+ attr.orig_dev = dev;
if (switchdev_port_attr_get(dev, &attr))
return NULL;
@@ -1194,6 +1202,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
if (!dev)
return 0;
+ ipv4_fib.obj.orig_dev = dev;
err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
if (!err)
fi->fib_flags |= RTNH_F_OFFLOAD;
@@ -1238,6 +1247,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
if (!dev)
return 0;
+ ipv4_fib.obj.orig_dev = dev;
err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
if (!err)
fi->fib_flags &= ~RTNH_F_OFFLOAD;
@@ -1270,10 +1280,12 @@ static bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
struct switchdev_attr a_attr = {
+ .orig_dev = a,
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
.flags = SWITCHDEV_F_NO_RECURSE,
};
struct switchdev_attr b_attr = {
+ .orig_dev = b,
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
.flags = SWITCHDEV_F_NO_RECURSE,
};