Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core')
100 files changed, 13958 insertions, 3794 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6e4d7bb7fea2..9d623e38d783 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -149,14 +149,14 @@ config MLX5_IPSEC
	  IPsec support for the Connect-X family.

 config MLX5_EN_IPSEC
-	bool "IPSec XFRM cryptography-offload accelaration"
+	bool "IPSec XFRM cryptography-offload acceleration"
	depends on MLX5_CORE_EN
	depends on XFRM_OFFLOAD
	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
	depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
	default n
	help
-	  Build support for IPsec cryptography-offload accelaration in the NIC.
+	  Build support for IPsec cryptography-offload acceleration in the NIC.
	  Note: Support for hardware with this capability needs to be selected
	  for this option to become available.

@@ -166,7 +166,6 @@ config MLX5_FPGA_TLS
	depends on TLS=y || MLX5_CORE=m
	depends on MLX5_CORE_EN
	depends on MLX5_FPGA
-	depends on XPS
	select MLX5_EN_TLS
	default n
	help
@@ -181,7 +180,6 @@ config MLX5_TLS
	depends on TLS_DEVICE
	depends on TLS=y || MLX5_CORE=m
	depends on MLX5_CORE_EN
-	depends on XPS
	select MLX5_ACCEL
	select MLX5_EN_TLS
	default n
@@ -192,7 +190,7 @@ config MLX5_EN_TLS
	bool
	help
-	  Build support for TLS cryptography-offload accelaration in the NIC.
+	  Build support for TLS cryptography-offload acceleration in the NIC.
	  Note: Support for hardware with this capability needs to be selected
	  for this option to become available.

@@ -203,3 +201,22 @@ config MLX5_SW_STEERING
	default y
	help
	  Build support for software-managed steering in the NIC.
+
+config MLX5_SF
+	bool "Mellanox Technologies subfunction device support using auxiliary device"
+	depends on MLX5_CORE && MLX5_CORE_EN
+	default n
+	help
+	  Build support for subfunction devices in the NIC. A Mellanox subfunction
+	  device can support RDMA, netdevice and vdpa device.
+	  It is similar to a SRIOV VF but it doesn't require SRIOV support.
+
+config MLX5_SF_MANAGER
+	bool
+	depends on MLX5_SF && MLX5_ESWITCH
+	default y
+	help
+	  Build support for subfunction ports in the NIC. A Mellanox subfunction
+	  port is managed through devlink. A subfunction supports RDMA, netdevice
+	  and vdpa device. It is similar to a SRIOV VF but it doesn't require
+	  SRIOV support.
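Both new symbols are bool options, so SF support is compiled into mlx5_core itself rather than built as a separate module. A minimal sketch of how such a symbol is typically consumed at compile time (illustrative only; the helper name below is not from this patch):

#include <linux/kconfig.h>

/* Illustrative helper, not from this patch: IS_ENABLED(CONFIG_MLX5_SF)
 * evaluates to 1 when the option is set, so the compiler can drop
 * SF-only code paths entirely on kernels built without it.
 */
static inline bool mlx5_sf_built_in(void)
{
	return IS_ENABLED(CONFIG_MLX5_SF);
}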
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 77961643d5a9..8cb2625472c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,8 @@ mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
		transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
		lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
-		diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o
+		diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
+		fw_reset.o qos.o

 #
 # Netdev basic
 #
@@ -25,7 +26,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
		en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
		en_selftest.o en/port.o en/monitor_stats.o en/health.o \
		en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
-		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o
+		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
+		en/qos.o en/trap.o

 #
 # Netdev extra
 #
@@ -38,6 +40,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
					en_rep.o en/rep/bond.o en/mod_hdr.o
 mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
					en/mapping.o lib/fs_chains.o en/tc_tun.o \
+					esw/indir_table.o en/tc_tun_encap.o \
					en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
					en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
 mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
@@ -83,5 +86,15 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
					steering/dr_matcher.o steering/dr_rule.o \
					steering/dr_icm_pool.o steering/dr_buddy.o \
					steering/dr_ste.o steering/dr_send.o \
+					steering/dr_ste_v0.o steering/dr_ste_v1.o \
					steering/dr_cmd.o steering/dr_fw.o \
					steering/dr_action.o steering/fs_dr.o
+#
+# SF device
+#
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+
+#
+# SF manager
+#
+mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 50c7b9ee80c3..e8cecd50558d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -333,6 +333,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+	case MLX5_CMD_OP_DEALLOC_SF:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,6 +465,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+	case MLX5_CMD_OP_QUERY_VHCA_STATE:
+	case MLX5_CMD_OP_MODIFY_VHCA_STATE:
+	case MLX5_CMD_OP_ALLOC_SF:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
@@ -657,6 +661,10 @@ const char *mlx5_command_str(int command)
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
+	MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
+	MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
+	MLX5_COMMAND_STR_CASE(ALLOC_SF);
+	MLX5_COMMAND_STR_CASE(DEALLOC_SF);
	default: return "unknown command opcode";
	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3261d0dc1104..d7d8a68ef23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,8 @@
 #include "fw_reset.h"
 #include "fs_core.h"
 #include "eswitch.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"

 static int mlx5_devlink_flash_update(struct devlink *devlink,
				     struct devlink_flash_update_params *params,
@@ -127,6 +129,22 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
				    struct netlink_ext_ack *extack)
 {
	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	bool sf_dev_allocated;
+
+	sf_dev_allocated = mlx5_sf_dev_allocated(dev);
+	if (sf_dev_allocated) {
+		/* Reload results in deleting SF device which further results in
+		 * unregistering devlink instance while holding devlink_mutex.
+		 * Hence, do not support reload.
+		 */
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (mlx5_lag_is_active(dev)) {
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+		return -EOPNOTSUPP;
+	}

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
@@ -168,6 +186,91 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
	return 0;
 }

+static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
+{
+	struct mlx5_devlink_trap *dl_trap;
+
+	list_for_each_entry(dl_trap, &dev->priv.traps, list)
+		if (dl_trap->trap.id == trap_id)
+			return dl_trap;
+
+	return NULL;
+}
+
+static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
+				  void *trap_ctx)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_devlink_trap *dl_trap;
+
+	dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
+	if (!dl_trap)
+		return -ENOMEM;
+
+	dl_trap->trap.id = trap->id;
+	dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
+	dl_trap->item = trap_ctx;
+
+	if (mlx5_find_trap_by_id(dev, trap->id)) {
+		kfree(dl_trap);
+		mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
+		return -EEXIST;
+	}
+
+	list_add_tail(&dl_trap->list, &dev->priv.traps);
+	return 0;
+}
+
+static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
+				   void *trap_ctx)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_devlink_trap *dl_trap;
+
+	dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+	if (!dl_trap) {
+		mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
+		return;
+	}
+	list_del(&dl_trap->list);
+	kfree(dl_trap);
+}
+
+static int mlx5_devlink_trap_action_set(struct devlink *devlink,
+					const struct devlink_trap *trap,
+					enum devlink_trap_action action,
+					struct netlink_ext_ack *extack)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	enum devlink_trap_action action_orig;
+	struct mlx5_devlink_trap *dl_trap;
+	int err = 0;
+
+	dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+	if (!dl_trap) {
+		mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (action == dl_trap->trap.action)
+		goto out;
+
+	action_orig = dl_trap->trap.action;
+	dl_trap->trap.action = action;
+	err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
+						&dl_trap->trap);
+	if (err)
+		dl_trap->trap.action = action_orig;
+out:
+	return err;
+}
+
 static const struct devlink_ops mlx5_devlink_ops = {
 #ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
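mlx5_devlink_trap_action_set() above does not touch hardware directly: it publishes the new action on the driver's blocking notifier chain and rolls the soft state back if a listener fails. A minimal sketch of such a listener (illustrative only; the real consumer is wired up in en_main.c elsewhere in this series, and struct mlx5_trap_ctx is defined in devlink.h further down in this diff):

#include <linux/notifier.h>
#include "devlink.h"	/* struct mlx5_trap_ctx */

/* Illustrative listener, not from this diff: react to a trap action change. */
static int example_trap_event_handler(struct notifier_block *nb,
				      unsigned long event, void *data)
{
	struct mlx5_trap_ctx *trap_ctx = data;

	if (event != MLX5_DRIVER_EVENT_TYPE_TRAP)
		return NOTIFY_DONE;

	/* Install or remove the steering rule redirecting the trapped
	 * traffic, according to trap_ctx->action (DROP or TRAP).
	 */
	return NOTIFY_OK;
}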
@@ -179,6 +282,12 @@ static const struct devlink_ops mlx5_devlink_ops = {
	.port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
	.port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
 #endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+	.port_new = mlx5_devlink_sf_port_new,
+	.port_del = mlx5_devlink_sf_port_del,
+	.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
+	.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
+#endif
	.flash_update = mlx5_devlink_flash_update,
	.info_get = mlx5_devlink_info_get,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -186,8 +295,59 @@ static const struct devlink_ops mlx5_devlink_ops = {
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = mlx5_devlink_reload_down,
	.reload_up = mlx5_devlink_reload_up,
+	.trap_init = mlx5_devlink_trap_init,
+	.trap_fini = mlx5_devlink_trap_fini,
+	.trap_action_set = mlx5_devlink_trap_action_set,
 };

+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+			      struct devlink_port *dl_port)
+{
+	struct devlink *devlink = priv_to_devlink(dev);
+	struct mlx5_devlink_trap *dl_trap;
+
+	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+	if (!dl_trap) {
+		mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
+		return;
+	}
+
+	if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
+		mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
+			      dl_trap->trap.action);
+		return;
+	}
+	devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
+}
+
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
+{
+	struct mlx5_devlink_trap *dl_trap;
+	int count = 0;
+
+	list_for_each_entry(dl_trap, &dev->priv.traps, list)
+		if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
+			count++;
+
+	return count;
+}
+
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+				  enum devlink_trap_action *action)
+{
+	struct mlx5_devlink_trap *dl_trap;
+
+	dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+	if (!dl_trap) {
+		mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
+			      trap_id);
+		return -EINVAL;
+	}
+
+	*action = dl_trap->trap.action;
+	return 0;
+}
+
 struct devlink *mlx5_devlink_alloc(void)
 {
	return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
@@ -273,6 +433,10 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
		return -EOPNOTSUPP;
	}
+	if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
+		NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
+		return -EOPNOTSUPP;
+	}

	return 0;
 }
@@ -358,6 +522,49 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
 #endif
 }

+#define MLX5_TRAP_DROP(_id, _group_id)					\
+	DEVLINK_TRAP_GENERIC(DROP, DROP, _id,				\
+			     DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
+
+static const struct devlink_trap mlx5_traps_arr[] = {
+	MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+	MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
+};
+
+static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
+	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+};
+
+static int mlx5_devlink_traps_register(struct devlink *devlink)
+{
+	struct mlx5_core_dev *core_dev = devlink_priv(devlink);
+	int err;
+
+	err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
+					   ARRAY_SIZE(mlx5_trap_groups_arr));
+	if (err)
+		return err;
+
+	err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+				     &core_dev->priv);
+	if (err)
+		goto err_trap_group;
+	return 0;
+
+err_trap_group:
+	devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+				       ARRAY_SIZE(mlx5_trap_groups_arr));
+	return err;
+}
+
+static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+{
+	devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+	devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+				       ARRAY_SIZE(mlx5_trap_groups_arr));
+}
+
 int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
 {
	int err;
@@ -372,8 +579,16 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
		goto params_reg_err;
	mlx5_devlink_set_params_init_values(devlink);
	devlink_params_publish(devlink);
+
+	err = mlx5_devlink_traps_register(devlink);
+	if (err)
+		goto traps_reg_err;
+
	return 0;

+traps_reg_err:
+	devlink_params_unregister(devlink, mlx5_devlink_params,
+				  ARRAY_SIZE(mlx5_devlink_params));
 params_reg_err:
	devlink_unregister(devlink);
	return err;
@@ -381,6 +596,7 @@ params_reg_err:

 void mlx5_devlink_unregister(struct devlink *devlink)
 {
+	mlx5_devlink_traps_unregister(devlink);
	devlink_params_unregister(devlink, mlx5_devlink_params,
				  ARRAY_SIZE(mlx5_devlink_params));
	devlink_unregister(devlink);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index f0de327a59be..eff107dad922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -12,6 +12,24 @@ enum mlx5_devlink_param_id {
	MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
 };

+struct mlx5_trap_ctx {
+	int id;
+	int action;
+};
+
+struct mlx5_devlink_trap {
+	struct mlx5_trap_ctx trap;
+	void *item;
+	struct list_head list;
+};
+
+struct mlx5_core_dev;
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+			      struct devlink_port *dl_port);
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+				  enum devlink_trap_action *action);
+
 struct devlink *mlx5_devlink_alloc(void);
 void mlx5_devlink_free(struct devlink *devlink);
 int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
index 1177860a2ee4..f15718db5d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -15,7 +15,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
	TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha,
		 bool neigh_connected),
	TP_ARGS(nhe, ha, neigh_connected),
-	TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+	TP_STRUCT__entry(__string(devname, nhe->neigh_dev->name)
			 __array(u8, ha, ETH_ALEN)
			 __array(u8, v4, 4)
			 __array(u8, v6, 16)
@@ -25,7 +25,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
			struct in6_addr *pin6;
			__be32 *p32;

-			__assign_str(devname, mn->dev->name);
+			__assign_str(devname, nhe->neigh_dev->name);
			__entry->neigh_connected = neigh_connected;
			memcpy(__entry->ha, ha, ETH_ALEN);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
index d4e6cfaaade3..ac52ef37f38a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -77,7 +77,7 @@ TRACE_EVENT(mlx5e_stats_flower,
 TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
	TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used),
	TP_ARGS(nhe, neigh_used),
-	TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+	TP_STRUCT__entry(__string(devname, nhe->neigh_dev->name)
			 __array(u8, v4, 4)
			 __array(u8, v6, 16)
			 __field(bool, neigh_used)
@@ -86,7 +86,7 @@ TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
			struct in6_addr *pin6;
			__be32 *p32;

-			__assign_str(devname, mn->dev->name);
+			__assign_str(devname, nhe->neigh_dev->name);
			__entry->neigh_used = neigh_used;

			p32 = (__be32 *)__entry->v4;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 055baf3b6cb1..7435fe6829b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -55,7 +55,9 @@
 #include "en_stats.h"
 #include "en/dcbnl.h"
 #include "en/fs.h"
+#include "en/qos.h"
 #include "lib/hv_vhca.h"
+#include "lib/clock.h"

 extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
@@ -136,10 +138,10 @@ struct page_pool;
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES	0x80
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW	0x2

-#define MLX5E_LOG_INDIR_RQT_SIZE	0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE	0x8
 #define MLX5E_INDIR_RQT_SIZE		BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 #define MLX5E_MIN_NUM_CHANNELS		0x1
-#define MLX5E_MAX_NUM_CHANNELS		MLX5E_INDIR_RQT_SIZE
+#define MLX5E_MAX_NUM_CHANNELS		(MLX5E_INDIR_RQT_SIZE / 2)
 #define MLX5E_MAX_NUM_SQS		(MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET		128
 #define MLX5E_TX_XSK_POLL_BUDGET	64
@@ -161,6 +163,9 @@ do {                                                            \
		##__VA_ARGS__);                                 \
 } while (0)

+#define mlx5e_state_dereference(priv, p) \
+	rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
+
 enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
@@ -389,6 +394,7 @@ struct mlx5e_txqsq {
	u32                        rate_limit;
	struct work_struct         recover_work;
	struct mlx5e_ptpsq        *ptpsq;
+	cqe_ts_to_ns               ptp_cyc2time;
 } ____cacheline_aligned_in_smp;

 struct mlx5e_dma_info {
@@ -560,6 +566,7 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);

 enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
@@ -650,6 +657,7 @@ struct mlx5e_rq {

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
+	cqe_ts_to_ns           ptp_cyc2time;
 } ____cacheline_aligned_in_smp;

 enum mlx5e_channel_state {
@@ -663,11 +671,13 @@ struct mlx5e_channel {
	struct mlx5e_xdpsq         rq_xdpsq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
+	struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
+	u16                        qos_sqs_size;
	u8                         num_tc;
	u8                         lag_port;

@@ -756,6 +766,8 @@ struct mlx5e_modify_sq_param {
	int next_state;
	int rl_update;
	int rl_index;
+	bool qos_update;
+	u16 qos_queue_group_id;
 };

 #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
@@ -788,10 +800,22 @@ struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
 };

+struct mlx5e_htb {
+	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+	struct mlx5e_sq_stats **qos_sq_stats;
+	u16 max_qos_sqs;
+	u16 maj_id;
+	u16 defcls;
+};
+
+struct mlx5e_trap;
+
 struct mlx5e_priv {
	/* priv data path fields - start */
	/* +1 for port ptp ts */
-	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
+	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
+				   MLX5E_QOS_MAX_LEAF_NODES];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
 #ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -826,8 +850,10 @@ struct mlx5e_priv {

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
+	struct mlx5e_trap         *en_trap;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+	struct mlx5e_channel_stats trap_stats;
	struct mlx5e_port_ptp_stats port_ptp_stats;
	u16                        max_nch;
	u8                         max_opened_tc;
@@ -836,6 +862,7 @@ struct mlx5e_priv {
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;
+	struct notifier_block      blocking_events_nb;
	int                        num_tc_x_num_ch;

	struct udp_tunnel_nic_info nic_info;
@@ -859,6 +886,7 @@ struct mlx5e_priv {
	struct mlx5e_hv_vhca_stats_agent stats_agent;
 #endif
	struct mlx5e_scratchpad    scratchpad;
+	struct mlx5e_htb           htb;
 };

 struct mlx5e_rx_handlers {
@@ -870,8 +898,7 @@ extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;

 struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
-			struct net_device *netdev,
-			const struct mlx5e_profile *profile, void *ppriv);
+			struct net_device *netdev);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
@@ -942,6 +969,8 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
 void mlx5e_close_rq(struct mlx5e_rq *rq);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+void mlx5e_destroy_rq(struct mlx5e_rq *rq);

 struct mlx5e_sq_param;
 int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -986,6 +1015,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context);
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
 int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
 int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
@@ -1010,6 +1040,9 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
+		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
@@ -1020,8 +1053,10 @@ struct mlx5e_create_sq_param;
 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
+			u16 qos_queue_group_id,
			u32 *sqn);
 void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);

 static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
 {
@@ -1047,6 +1082,8 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
+void mlx5e_free_di_list(struct mlx5e_rq *rq);

 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

@@ -1120,24 +1157,25 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);

 /* mlx5e generic netdev management API */
-int mlx5e_netdev_init(struct net_device *netdev,
-		      struct mlx5e_priv *priv,
-		      struct mlx5_core_dev *mdev,
-		      const struct mlx5e_profile *profile,
-		      void *ppriv);
-void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
-struct net_device*
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
-		    int nch, void *ppriv);
+static inline unsigned int
+mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
+{
+	return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+}
+
+int mlx5e_priv_init(struct mlx5e_priv *priv,
+		    struct net_device *netdev,
+		    struct mlx5_core_dev *mdev);
+void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
+struct net_device *
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
+				const struct mlx5e_profile *new_profile, void *new_ppriv);
 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5e_priv *priv,
-			    struct mlx5e_xsk *xsk,
-			    struct mlx5e_rss_params *rss_params,
-			    struct mlx5e_params *params,
-			    u16 mtu);
+void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
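The new mlx5e_state_dereference() helper above wraps rcu_dereference_protected() with a priv->state_lock lockdep expression, so control-path code can walk RCU-managed pointers without rcu_read_lock() while lockdep still verifies the lock is held. A minimal sketch of the intended usage, mirroring mlx5e_get_qos_sq() in en/qos.c further down in this diff (assumes the caller holds priv->state_lock; the function name is illustrative):

/* Sketch: control-path lookup of a QoS SQ; caller holds priv->state_lock. */
static struct mlx5e_txqsq *example_lookup_qos_sq(struct mlx5e_priv *priv,
						 struct mlx5e_channel *c, int qid)
{
	struct mlx5e_txqsq __rcu **qos_sqs;

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	return mlx5e_state_dereference(priv, qos_sqs[qid]);
}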
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 5749557749b0..a16297e7e2ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -44,6 +44,11 @@ struct mlx5e_l2_rule {

 #define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)

+struct mlx5e_promisc_table {
+	struct mlx5e_flow_table	ft;
+	struct mlx5_flow_handle	*rule;
+};
+
 struct mlx5e_vlan_table {
	struct mlx5e_flow_table		ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
@@ -53,6 +58,7 @@ struct mlx5e_vlan_table {
	struct mlx5_flow_handle	*untagged_rule;
	struct mlx5_flow_handle	*any_cvlan_rule;
	struct mlx5_flow_handle	*any_svlan_rule;
+	struct mlx5_flow_handle	*trap_rule;
	bool			cvlan_filter_disabled;
 };

@@ -62,7 +68,7 @@ struct mlx5e_l2_table {
	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
	struct mlx5e_l2_rule	   broadcast;
	struct mlx5e_l2_rule	   allmulti;
-	struct mlx5e_l2_rule	   promisc;
+	struct mlx5_flow_handle    *trap_rule;
	bool                       broadcast_enabled;
	bool                       allmulti_enabled;
	bool                       promisc_enabled;
@@ -126,7 +132,8 @@ struct mlx5e_ttc_table {

 /* NIC prio FTS */
 enum {
-	MLX5E_VLAN_FT_LEVEL = 0,
+	MLX5E_PROMISC_FT_LEVEL,
+	MLX5E_VLAN_FT_LEVEL,
	MLX5E_L2_FT_LEVEL,
	MLX5E_TTC_FT_LEVEL,
	MLX5E_INNER_TTC_FT_LEVEL,
@@ -241,6 +248,7 @@ struct mlx5e_flow_steering {
	struct mlx5e_ethtool_steering   ethtool;
 #endif
	struct mlx5e_tc_table          tc;
+	struct mlx5e_promisc_table     promisc;
	struct mlx5e_vlan_table        vlan;
	struct mlx5e_l2_table          l2;
	struct mlx5e_ttc_table         ttc;
@@ -288,6 +296,10 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);

 u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);

 #endif /* __MLX5E_FLOW_STEER_H__ */

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 43271a3856ca..36381a2ed5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -179,7 +179,7 @@ int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)

	stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
	if (stop_room >= sq_size) {
-		netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n",
+		netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n",
			   stop_room, sq_size);
		return -EINVAL;
	}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 807147d97a0f..ea2cfb04b31a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -118,6 +118,8 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
			  struct mlx5e_rq_param *param);
 void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param);
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+			  struct mlx5e_sq_param *param);
 void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 2a2bac30daaa..d57b6f06382f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -3,7 +3,6 @@

 #include "en/ptp.h"
 #include "en/txrx.h"
-#include "lib/clock.h"

 struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
@@ -70,6 +69,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    int budget)
 {
	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -77,7 +77,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
		goto out;
	}

-	hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe));
+	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

@@ -183,6 +183,9 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
+	sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
+			   mlx5_real_time_cyc2time :
+			   mlx5_timecounter_cyc2time;

	node = dev_to_node(mlx5_core_dma_dev(mdev));

@@ -261,7 +264,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

-	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
+	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

@@ -428,16 +431,13 @@ static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
	if (err)
		return err;

-	napi_enable(&c->napi);
-
	err = mlx5e_ptp_open_txqsqs(c, cparams);
	if (err)
-		goto disable_napi;
+		goto close_cqs;

	return 0;

-disable_napi:
-	napi_disable(&c->napi);
+close_cqs:
	mlx5e_ptp_close_cqs(c);

	return err;
@@ -446,7 +446,6 @@ disable_napi:
 static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
 {
	mlx5e_ptp_close_txqsqs(c);
-	napi_disable(&c->napi);
	mlx5e_ptp_close_cqs(c);
 }

@@ -515,6 +514,8 @@ void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
 {
	int tc;

+	napi_enable(&c->napi);
+
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
 }
@@ -525,4 +526,6 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+
+	napi_disable(&c->napi);
 }
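The ptp_cyc2time pointer assigned above lets each SQ pick its timestamp conversion once at creation time instead of branching on every CQE: devices whose hardware clock runs in real-time mode use mlx5_real_time_cyc2time(), others convert through the free-running timecounter. The cqe_ts_to_ns type and the mlx5e_cqe_ts_to_ns() wrapper come from lib/clock.h (not shown in this section); the typedef has roughly this shape (paraphrased):

/* Paraphrased from lib/clock.h: per-SQ/RQ CQE-timestamp conversion callback. */
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *clock, u64 timestamp);

The completion path then simply invokes it, as in the hunk above:
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));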
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
new file mode 100644
index 000000000000..12d7ad061237
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en.h"
+#include "params.h"
+#include "../qos.h"
+
+#define BYTES_IN_MBIT 125000
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+	return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
+}
+
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
+{
+	int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
+
+	return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
+}
+
+/* Software representation of the QoS tree (internal to this file) */
+
+static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
+{
+	int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
+	int res;
+
+	WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+	res = find_first_zero_bit(priv->htb.qos_used_qids, size);
+
+	return res == size ? -ENOSPC : res;
+}
+
+struct mlx5e_qos_node {
+	struct hlist_node hnode;
+	struct rcu_head rcu;
+	struct mlx5e_qos_node *parent;
+	u64 rate;
+	u32 bw_share;
+	u32 max_average_bw;
+	u32 hw_id;
+	u32 classid; /* 16-bit, except root. */
+	u16 qid;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+static struct mlx5e_qos_node *
+mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
+			  struct mlx5e_qos_node *parent)
+{
+	struct mlx5e_qos_node *node;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+
+	node->parent = parent;
+
+	node->qid = qid;
+	__set_bit(qid, priv->htb.qos_used_qids);
+
+	node->classid = classid;
+	hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
+
+	mlx5e_update_tx_netdev_queues(priv);
+
+	return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
+{
+	struct mlx5e_qos_node *node;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+
+	node->qid = MLX5E_QOS_QID_INNER;
+	node->classid = MLX5E_HTB_CLASSID_ROOT;
+	hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
+
+	return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
+{
+	struct mlx5e_qos_node *node = NULL;
+
+	hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
+		if (node->classid == classid)
+			break;
+	}
+
+	return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
+{
+	struct mlx5e_qos_node *node = NULL;
+
+	hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
+		if (node->classid == classid)
+			break;
+	}
+
+	return node;
+}
+
+static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+	hash_del_rcu(&node->hnode);
+	if (node->qid != MLX5E_QOS_QID_INNER) {
+		__clear_bit(node->qid, priv->htb.qos_used_qids);
+		mlx5e_update_tx_netdev_queues(priv);
+	}
+	kfree_rcu(node, rcu);
+}
+
+/* TX datapath API */
+
+static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+{
+	/* These channel params are safe to access from the datapath, because:
+	 * 1. This function is called only after checking priv->htb.maj_id != 0,
+	 *    and the number of queues can't change while HTB offload is active.
+	 * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+	 *    mlx5e_select_queue to finish while holding priv->state_lock,
+	 *    preventing other code from changing the number of queues.
+	 */
+	bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
+
+	return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
+}
+
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
+{
+	struct mlx5e_qos_node *node;
+	u16 qid;
+	int res;
+
+	rcu_read_lock();
+
+	node = mlx5e_sw_node_find_rcu(priv, classid);
+	if (!node) {
+		res = -ENOENT;
+		goto out;
+	}
+	qid = READ_ONCE(node->qid);
+	if (qid == MLX5E_QOS_QID_INNER) {
+		res = -EINVAL;
+		goto out;
+	}
+	res = mlx5e_qid_from_qos(&priv->channels, qid);
+
+out:
+	rcu_read_unlock();
+	return res;
+}
+
+static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
+{
+	struct mlx5e_params *params = &priv->channels.params;
+	struct mlx5e_txqsq __rcu **qos_sqs;
+	struct mlx5e_channel *c;
+	int ix;
+
+	ix = qid % params->num_channels;
+	qid /= params->num_channels;
+	c = priv->channels.c[ix];
+
+	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+	return mlx5e_state_dereference(priv, qos_sqs[qid]);
+}
+
+/* SQ lifecycle */
+
+static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+			     struct mlx5e_qos_node *node)
+{
+	struct mlx5e_create_cq_param ccp = {};
+	struct mlx5e_txqsq __rcu **qos_sqs;
+	struct mlx5e_sq_param param_sq;
+	struct mlx5e_cq_param param_cq;
+	int txq_ix, ix, qid, err = 0;
+	struct mlx5e_params *params;
+	struct mlx5e_channel *c;
+	struct mlx5e_txqsq *sq;
+
+	params = &chs->params;
+
+	txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+
+	WARN_ON(node->qid > priv->htb.max_qos_sqs);
+	if (node->qid == priv->htb.max_qos_sqs) {
+		struct mlx5e_sq_stats *stats, **stats_list = NULL;
+
+		if (priv->htb.max_qos_sqs == 0) {
+			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+					      sizeof(*stats_list),
+					      GFP_KERNEL);
+			if (!stats_list)
+				return -ENOMEM;
+		}
+		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+		if (!stats) {
+			kvfree(stats_list);
+			return -ENOMEM;
+		}
+		if (stats_list)
+			WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
+		WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
+		/* Order max_qos_sqs increment after writing the array pointer.
+		 * Pairs with smp_load_acquire in en_stats.c.
+		 */
+		smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+	}
+
+	ix = node->qid % params->num_channels;
+	qid = node->qid / params->num_channels;
+	c = chs->c[ix];
+
+	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+
+	if (!sq)
+		return -ENOMEM;
+
+	mlx5e_build_create_cq_param(&ccp, c);
+
+	memset(&param_sq, 0, sizeof(param_sq));
+	memset(&param_cq, 0, sizeof(param_cq));
+	mlx5e_build_sq_param(priv, params, &param_sq);
+	mlx5e_build_tx_cq_param(priv, params, &param_cq);
+	err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+	if (err)
+		goto err_free_sq;
+	err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
+			       &param_sq, sq, 0, node->hw_id, node->qid);
+	if (err)
+		goto err_close_cq;
+
+	rcu_assign_pointer(qos_sqs[qid], sq);
+
+	return 0;
+
+err_close_cq:
+	mlx5e_close_cq(&sq->cq);
+err_free_sq:
+	kfree(sq);
+	return err;
+}
+
+static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+	struct mlx5e_txqsq *sq;
+
+	sq = mlx5e_get_qos_sq(priv, node->qid);
+
+	WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+
+	/* Make the change to txq2sq visible before the queue is started.
+	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+	 * which pairs with this barrier.
+	 */
+	smp_wmb();
+
+	qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+	mlx5e_activate_txqsq(sq);
+}
+
+static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+	struct mlx5e_txqsq *sq;
+
+	sq = mlx5e_get_qos_sq(priv, qid);
+	if (!sq) /* Handle the case when the SQ failed to open. */
+		return;
+
+	qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+	mlx5e_deactivate_txqsq(sq);
+
+	/* The queue is disabled, no synchronization with datapath is needed. */
+	priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+}
+
+static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+	struct mlx5e_txqsq __rcu **qos_sqs;
+	struct mlx5e_params *params;
+	struct mlx5e_channel *c;
+	struct mlx5e_txqsq *sq;
+	int ix;
+
+	params = &priv->channels.params;
+
+	ix = qid % params->num_channels;
+	qid /= params->num_channels;
+	c = priv->channels.c[ix];
+	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+	sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
+	if (!sq) /* Handle the case when the SQ failed to open. */
+		return;
+
+	synchronize_rcu(); /* Sync with NAPI. */
+
+	mlx5e_close_txqsq(sq);
+	mlx5e_close_cq(&sq->cq);
+	kfree(sq);
+}
+
+void mlx5e_qos_close_queues(struct mlx5e_channel *c)
+{
+	struct mlx5e_txqsq __rcu **qos_sqs;
+	int i;
+
+	qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
+	if (!qos_sqs)
+		return;
+	synchronize_rcu(); /* Sync with NAPI. */
+
+	for (i = 0; i < c->qos_sqs_size; i++) {
+		struct mlx5e_txqsq *sq;
+
+		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+		if (!sq) /* Handle the case when the SQ failed to open. */
+			continue;
+
+		mlx5e_close_txqsq(sq);
+		mlx5e_close_cq(&sq->cq);
+		kfree(sq);
+	}
+
+	kvfree(qos_sqs);
+}
+
+static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+{
+	int i;
+
+	for (i = 0; i < chs->num; i++)
+		mlx5e_qos_close_queues(chs->c[i]);
+}
+
+static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+	u16 qos_sqs_size;
+	int i;
+
+	qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);
+
+	for (i = 0; i < chs->num; i++) {
+		struct mlx5e_txqsq **sqs;
+
+		sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
+		if (!sqs)
+			goto err_free;
+
+		WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
+		smp_wmb(); /* Pairs with mlx5e_napi_poll. */
+		rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
+	}
+
+	return 0;
+
+err_free:
+	while (--i >= 0) {
+		struct mlx5e_txqsq **sqs;
+
+		sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
+					  lockdep_is_held(&priv->state_lock));
+
+		synchronize_rcu(); /* Sync with NAPI. */
+		kvfree(sqs);
+	}
+	return -ENOMEM;
+}
+
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+	struct mlx5e_qos_node *node = NULL;
+	int bkt, err;
+
+	if (!priv->htb.maj_id)
+		return 0;
+
+	err = mlx5e_qos_alloc_queues(priv, chs);
+	if (err)
+		return err;
+
+	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+		if (node->qid == MLX5E_QOS_QID_INNER)
+			continue;
+		err = mlx5e_open_qos_sq(priv, chs, node);
+		if (err) {
+			mlx5e_qos_close_all_queues(chs);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
+{
+	struct mlx5e_qos_node *node = NULL;
+	int bkt;
+
+	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+		if (node->qid == MLX5E_QOS_QID_INNER)
+			continue;
+		mlx5e_activate_qos_sq(priv, node);
+	}
+}
+
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
+{
+	struct mlx5e_params *params = &c->priv->channels.params;
+	struct mlx5e_txqsq __rcu **qos_sqs;
+	int i;
+
+	qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
+	if (!qos_sqs)
+		return;
+
+	for (i = 0; i < c->qos_sqs_size; i++) {
+		u16 qid = params->num_channels * i + c->ix;
+		struct mlx5e_txqsq *sq;
+
+		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+		if (!sq) /* Handle the case when the SQ failed to open. */
+			continue;
+
+		qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
+		mlx5e_deactivate_txqsq(sq);
+
+		/* The queue is disabled, no synchronization with datapath is needed. */
+		c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+	}
+}
+
+static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+{
+	int i;
+
+	for (i = 0; i < chs->num; i++)
+		mlx5e_qos_deactivate_queues(chs->c[i]);
+}
+
+/* HTB API */
+
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+		       struct netlink_ext_ack *extack)
+{
+	struct mlx5e_qos_node *root;
+	bool opened;
+	int err;
+
+	qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+	if (!mlx5_qos_is_supported(priv->mdev)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+		return -EOPNOTSUPP;
+	}
+
+	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (opened) {
+		err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+		if (err)
+			return err;
+	}
+
+	root = mlx5e_sw_node_create_root(priv);
+	if (IS_ERR(root)) {
+		err = PTR_ERR(root);
+		goto err_free_queues;
+	}
+
+	err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+		goto err_sw_node_delete;
+	}
+
+	WRITE_ONCE(priv->htb.defcls, htb_defcls);
+	/* Order maj_id after defcls - pairs with
+	 * mlx5e_select_queue/mlx5e_select_htb_queues.
+	 */
+	smp_store_release(&priv->htb.maj_id, htb_maj_id);
+
+	return 0;
+
+err_sw_node_delete:
+	mlx5e_sw_node_delete(priv, root);
+
+err_free_queues:
+	if (opened)
+		mlx5e_qos_close_all_queues(&priv->channels);
+	return err;
+}
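A worked example of the txq layout implied by mlx5e_qid_from_qos() earlier in this file, with assumed numbers (8 channels, 2 TCs, PTP SQ disabled):

/* Assumed configuration: num_channels = 8, num_tc = 2, no PTP SQ.
 * Regular txqs occupy indices 0..15 ((8 + 0) * 2 queues);
 * QoS SQs follow them, so HTB qid 3 maps to txq (8 + 0) * 2 + 3 = 19.
 * With the PTP SQ enabled, one extra block of num_tc queues is skipped first.
 */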
+int mlx5e_htb_root_del(struct mlx5e_priv *priv)
+{
+	struct mlx5e_qos_node *root;
+	int err;
+
+	qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+
+	WRITE_ONCE(priv->htb.maj_id, 0);
+	synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
+
+	root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
+	if (!root) {
+		qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
+		return -ENOENT;
+	}
+	err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
+	if (err)
+		qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
+			root->hw_id, err);
+	mlx5e_sw_node_delete(priv, root);
+
+	mlx5e_qos_deactivate_all_queues(&priv->channels);
+	mlx5e_qos_close_all_queues(&priv->channels);
+
+	return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
+				  struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+	u64 share = 0;
+
+	while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+		parent = parent->parent;
+
+	if (parent->max_average_bw)
+		share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+				  parent->max_average_bw);
+	else
+		share = 101;
+
+	*bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+	qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+		rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+	return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
+{
+	*max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+
+	qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+		ceil, *max_average_bw);
+}
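A worked example of the two conversions above, under assumed HTB parameters. rate and ceil arrive in bytes per second; BYTES_IN_MBIT (125000) converts them to Mbit/s, and bw_share is a relative weight against the nearest ancestor that has a ceiling:

/* Assumed: parent ceil = 100 Mbit/s, so parent->max_average_bw = 100.
 * Child rate = 25 Mbit/s = 3125000 B/s:
 *   share = (3125000 * 100 / 125000) / 100 = 2500 / 100 = 25 -> bw_share 25
 * A rate above the ancestor ceiling yields share > 100 and bw_share 0
 * (no guaranteed share); a tiny non-zero rate rounds up to the minimum
 * weight of 1. With no ceiling anywhere up the tree, share = 101 -> 0.
 */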
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+			       u32 parent_classid, u64 rate, u64 ceil,
+			       struct netlink_ext_ack *extack)
+{
+	struct mlx5e_qos_node *node, *parent;
+	int qid;
+	int err;
+
+	qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+		classid, parent_classid, rate, ceil);
+
+	qid = mlx5e_find_unused_qos_qid(priv);
+	if (qid < 0) {
+		NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
+		return qid;
+	}
+
+	parent = mlx5e_sw_node_find(priv, parent_classid);
+	if (!parent)
+		return -EINVAL;
+
+	node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
+	if (IS_ERR(node))
+		return PTR_ERR(node);
+
+	node->rate = rate;
+	mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
+	mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
+
+	err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
+					node->bw_share, node->max_average_bw,
+					&node->hw_id);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+			classid, err);
+		mlx5e_sw_node_delete(priv, node);
+		return err;
+	}
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+		if (err) {
+			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+				 classid, err);
+		} else {
+			mlx5e_activate_qos_sq(priv, node);
+		}
+	}
+
+	return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+			    u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+	struct mlx5e_qos_node *node, *child;
+	int err, tmp_err;
+	u32 new_hw_id;
+	u16 qid;
+
+	qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+		classid, child_classid, rate, ceil);
+
+	node = mlx5e_sw_node_find(priv, classid);
+	if (!node)
+		return -ENOENT;
+
+	err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
+					 node->bw_share, node->max_average_bw,
+					 &new_hw_id);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+		qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+			classid, err);
+		return err;
+	}
+
+	/* Intentionally reuse the qid for the upcoming first child. */
+	child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
+	if (IS_ERR(child)) {
+		err = PTR_ERR(child);
+		goto err_destroy_hw_node;
+	}
+
+	child->rate = rate;
+	mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
+	mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
+
+	err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
+					child->max_average_bw, &child->hw_id);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+			classid, err);
+		goto err_delete_sw_node;
+	}
+
+	/* No fail point. */
+
+	qid = node->qid;
+	/* Pairs with mlx5e_get_txq_by_classid. */
+	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		mlx5e_deactivate_qos_sq(priv, qid);
+		mlx5e_close_qos_sq(priv, qid);
+	}
+
+	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+	if (err) /* Not fatal. */
+		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+			 node->hw_id, classid, err);
+
+	node->hw_id = new_hw_id;
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		err = mlx5e_open_qos_sq(priv, &priv->channels, child);
+		if (err) {
+			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+				 classid, err);
+		} else {
+			mlx5e_activate_qos_sq(priv, child);
+		}
+	}
+
+	return 0;
+
+err_delete_sw_node:
+	child->qid = MLX5E_QOS_QID_INNER;
+	mlx5e_sw_node_delete(priv, child);
+
+err_destroy_hw_node:
+	tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
+	if (tmp_err) /* Not fatal. */
+		qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+			 new_hw_id, classid, tmp_err);
+	return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
+{
+	struct mlx5e_qos_node *node = NULL;
+	int bkt;
+
+	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
+		if (node->qid == qid)
+			break;
+
+	return node;
+}
+
+static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+{
+	qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
+	netdev_tx_reset_queue(txq);
+	netif_tx_start_queue(txq);
+}
+
+static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+{
+	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (!qdisc)
+		return;
+
+	spin_lock_bh(qdisc_lock(qdisc));
+	qdisc_reset(qdisc);
+	spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+		       u16 *new_qid, struct netlink_ext_ack *extack)
+{
+	struct mlx5e_qos_node *node;
+	struct netdev_queue *txq;
+	u16 qid, moved_qid;
+	bool opened;
+	int err;
+
+	qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
+
+	*old_qid = *new_qid = 0;
+
+	node = mlx5e_sw_node_find(priv, classid);
+	if (!node)
+		return -ENOENT;
+
+	/* Store qid for reuse. */
+	qid = node->qid;
+
+	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (opened) {
+		txq = netdev_get_tx_queue(priv->netdev,
+					  mlx5e_qid_from_qos(&priv->channels, qid));
+		mlx5e_deactivate_qos_sq(priv, qid);
+		mlx5e_close_qos_sq(priv, qid);
+	}
+
+	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+	if (err) /* Not fatal. */
+		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+			 node->hw_id, classid, err);
+
+	mlx5e_sw_node_delete(priv, node);
+
+	moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
+
+	if (moved_qid == 0) {
+		/* The last QoS SQ was just destroyed. */
+		if (opened)
+			mlx5e_reactivate_qos_sq(priv, qid, txq);
+		return 0;
+	}
+	moved_qid--;
+
+	if (moved_qid < qid) {
+		/* The highest QoS SQ was just destroyed. */
+		WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
+		     qid, moved_qid);
+		if (opened)
+			mlx5e_reactivate_qos_sq(priv, qid, txq);
+		return 0;
+	}
+
+	WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+	qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+	node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
+	WARN(!node, "Could not find a node with qid %u to move to queue %u",
+	     moved_qid, qid);
+
+	/* Stop traffic to the old queue. */
+	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+	__clear_bit(moved_qid, priv->htb.qos_used_qids);
+
+	if (opened) {
+		txq = netdev_get_tx_queue(priv->netdev,
+					  mlx5e_qid_from_qos(&priv->channels, moved_qid));
+		mlx5e_deactivate_qos_sq(priv, moved_qid);
+		mlx5e_close_qos_sq(priv, moved_qid);
+	}
+
+	/* Prevent packets from the old class from getting into the new one. */
+	mlx5e_reset_qdisc(priv->netdev, moved_qid);
+
+	__set_bit(qid, priv->htb.qos_used_qids);
+	WRITE_ONCE(node->qid, qid);
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+		if (err) {
+			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+				 node->classid, moved_qid, qid, err);
+		} else {
+			mlx5e_activate_qos_sq(priv, node);
+		}
+	}
+
+	mlx5e_update_tx_netdev_queues(priv);
+	if (opened)
+		mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+	*old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
+	*new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
+	return 0;
+}
+
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+			    struct netlink_ext_ack *extack)
+{
+	struct mlx5e_qos_node *node, *parent;
+	u32 old_hw_id, new_hw_id;
+	int err, saved_err = 0;
+	u16 qid;
+
+	qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+		force ? "_FORCE" : "", classid);
+
+	node = mlx5e_sw_node_find(priv, classid);
+	if (!node)
+		return -ENOENT;
+
+	err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
+					node->parent->bw_share,
+					node->parent->max_average_bw,
+					&new_hw_id);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+			classid, err);
+		if (!force)
+			return err;
+		saved_err = err;
+	}
+
+	/* Store qid for reuse and prevent clearing the bit. */
+	qid = node->qid;
+	/* Pairs with mlx5e_get_txq_by_classid. */
+	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		mlx5e_deactivate_qos_sq(priv, qid);
+		mlx5e_close_qos_sq(priv, qid);
+	}
+
+	/* Prevent packets from the old class from getting into the new one. */
+	mlx5e_reset_qdisc(priv->netdev, qid);
+
+	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+	if (err) /* Not fatal. */
+		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+			 node->hw_id, classid, err);
+
+	parent = node->parent;
+	mlx5e_sw_node_delete(priv, node);
+
+	node = parent;
+	WRITE_ONCE(node->qid, qid);
+
+	/* Early return on error in force mode. Parent will still be an inner
+	 * node to be deleted by a following delete operation.
+	 */
+	if (saved_err)
+		return saved_err;
+
+	old_hw_id = node->hw_id;
+	node->hw_id = new_hw_id;
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+		if (err) {
+			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+				 classid, err);
+		} else {
+			mlx5e_activate_qos_sq(priv, node);
+		}
+	}
+
+	err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
+	if (err) /* Not fatal. */
+		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+			 node->hw_id, classid, err);
+
+	return 0;
+}
+
+static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
+				     struct netlink_ext_ack *extack)
+{
+	struct mlx5e_qos_node *child;
+	int err = 0;
+	int bkt;
+
+	hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
+		u32 old_bw_share = child->bw_share;
+		int err_one;
+
+		if (child->parent != node)
+			continue;
+
+		mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
+		if (child->bw_share == old_bw_share)
+			continue;
+
+		err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
+					       child->max_average_bw, child->hw_id);
+		if (!err && err_one) {
+			err = err_one;
+
+			NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+			qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+				node->classid, err);
+		}
+	}
+
+	return err;
+}
+
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+			  struct netlink_ext_ack *extack)
+{
+	u32 bw_share, max_average_bw;
+	struct mlx5e_qos_node *node;
+	bool ceil_changed = false;
+	int err;
+
+	qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+		classid, rate, ceil);
+
+	node = mlx5e_sw_node_find(priv, classid);
+	if (!node)
+		return -ENOENT;
+
+	node->rate = rate;
+	mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
+	mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
+
+	err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
+				   max_average_bw, node->hw_id);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+		qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
+			classid, err);
+		return err;
+	}
+
+	if (max_average_bw != node->max_average_bw)
+		ceil_changed = true;
+
+	node->bw_share = bw_share;
+	node->max_average_bw = max_average_bw;
+
+	if (ceil_changed)
+		err = mlx5e_qos_update_children(priv, node, extack);
+
+	return err;
+}
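mlx5e_get_txq_by_classid() above is the entry point the TX datapath uses once HTB offload is active. A minimal sketch of such a caller (illustrative only; the real hook is the mlx5e_select_queue() path in en_tx.c, outside this section):

#include <net/pkt_sched.h>	/* TC_H_MAJ/TC_H_MIN */

/* Illustrative: map an skb to an HTB txq; 0 means "no HTB queue". */
static u16 example_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	u16 classid;
	u16 htb_maj_id;
	int txq_ix;

	/* Pairs with smp_store_release() in mlx5e_htb_root_add(). */
	htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
	if (!htb_maj_id)
		return 0;

	if (TC_H_MAJ(skb->priority) == (u32)htb_maj_id << 16)
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(priv->htb.defcls);

	if (!classid)
		return 0;

	txq_ix = mlx5e_get_txq_by_classid(priv, classid);
	return txq_ix < 0 ? 0 : txq_ix;
}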
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5E_EN_QOS_H +#define __MLX5E_EN_QOS_H + +#include <linux/mlx5/driver.h> + +#define MLX5E_QOS_MAX_LEAF_NODES 256 + +struct mlx5e_priv; +struct mlx5e_channels; +struct mlx5e_channel; + +int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); +int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv); + +/* TX datapath API */ +int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid); +struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid); + +/* SQ lifecycle */ +int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs); +void mlx5e_qos_activate_queues(struct mlx5e_priv *priv); +void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c); +void mlx5e_qos_close_queues(struct mlx5e_channel *c); + +/* HTB API */ +int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, + struct netlink_ext_ack *extack); +int mlx5e_htb_root_del(struct mlx5e_priv *priv); +int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid, + u32 parent_classid, u64 rate, u64 ceil, + struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid, + u64 rate, u64 ceil, struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, + u16 *new_qid, struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, + struct netlink_ext_ack *extack); +int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, + struct netlink_ext_ack *extack); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c index 58e27038c947..be0ee03de721 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c @@ -129,10 +129,10 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) work); struct mlx5e_neigh_hash_entry *nhe = update_work->nhe; struct neighbour *n = update_work->n; + bool neigh_connected, same_dev; struct mlx5e_encap_entry *e; unsigned char ha[ETH_ALEN]; struct mlx5e_priv *priv; - bool neigh_connected; u8 nud_state, dead; rtnl_lock(); @@ -146,12 +146,16 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) memcpy(ha, n->ha, ETH_ALEN); nud_state = n->nud_state; dead = n->dead; + same_dev = READ_ONCE(nhe->neigh_dev) == n->dev; read_unlock_bh(&n->lock); neigh_connected = (nud_state & NUD_VALID) && !dead; trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected); + if (!same_dev) + goto out; + list_for_each_entry(e, &nhe->encap_list, encap_list) { if (!mlx5e_encap_take(e)) continue; @@ -160,6 +164,7 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) mlx5e_rep_update_flows(priv, e, neigh_connected, ha); mlx5e_encap_put(priv, e); } +out: rtnl_unlock(); mlx5e_release_neigh_update_work(update_work); } @@ -175,7 +180,6 @@ static struct neigh_update_work *mlx5e_alloc_neigh_update_work(struct mlx5e_priv if (WARN_ON(!update_work)) return NULL; - m_neigh.dev = n->dev; m_neigh.family = n->ops->family; memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len); @@ -246,7 +250,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, rcu_read_lock(); list_for_each_entry_rcu(nhe, &neigh_update->neigh_list, neigh_list) { - if (p->dev == nhe->m_neigh.dev) { + if (p->dev == 
READ_ONCE(nhe->neigh_dev)) { found = true; break; } @@ -279,7 +283,7 @@ int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params); if (err) - return err; + goto out_err; INIT_LIST_HEAD(&neigh_update->neigh_list); mutex_init(&neigh_update->encap_lock); @@ -287,14 +291,19 @@ int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) mlx5e_rep_neigh_stats_work); mlx5e_rep_neigh_update_init_interval(rpriv); - rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event; - err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb); + neigh_update->netevent_nb.notifier_call = mlx5e_rep_netevent_event; + err = register_netevent_notifier(&neigh_update->netevent_nb); if (err) - goto out_err; + goto out_notifier; return 0; -out_err: +out_notifier: + neigh_update->netevent_nb.notifier_call = NULL; rhashtable_destroy(&neigh_update->neigh_ht); +out_err: + netdev_warn(rpriv->netdev, + "Failed to initialize neighbours handling for vport %d\n", + rpriv->rep->vport); return err; } @@ -303,6 +312,9 @@ void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + if (!rpriv->neigh_update.netevent_nb.notifier_call) + return; + unregister_netevent_notifier(&neigh_update->netevent_nb); flush_workqueue(priv->wq); /* flush neigh update works */ @@ -361,7 +373,8 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, } int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev, struct mlx5e_neigh_hash_entry **nhe) { int err; @@ -371,10 +384,11 @@ int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, return -ENOMEM; (*nhe)->priv = priv; - memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); + memcpy(&(*nhe)->m_neigh, m_neigh, sizeof(*m_neigh)); spin_lock_init(&(*nhe)->encap_list_lock); INIT_LIST_HEAD(&(*nhe)->encap_list); refcount_set(&(*nhe)->refcnt, 1); + WRITE_ONCE((*nhe)->neigh_dev, neigh_dev); err = mlx5e_rep_neigh_entry_insert(priv, *nhe); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h index 32b239189c95..6fe0ab970943 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h @@ -16,7 +16,8 @@ struct mlx5e_neigh_hash_entry * mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, struct mlx5e_neigh *m_neigh); int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev, struct mlx5e_neigh_hash_entry **nhe); void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 76177f7c5ec2..065126370acd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -26,7 +26,9 @@ struct mlx5e_rep_indr_block_priv { }; int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e) + struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev) { struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; @@ -39,9 +41,9 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, 
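The neigh rework above replaces the m_neigh.dev hash key with an nhe->neigh_dev pointer that the netevent notifier reads without taking the neighbour lock; writers therefore publish the pointer with WRITE_ONCE() and readers consume it with READ_ONCE(). A minimal sketch of the pairing, reusing names from the diff:

/* Illustrative only. Paired with WRITE_ONCE((*nhe)->neigh_dev, neigh_dev)
 * at entry creation and WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev) on
 * encap header updates.
 */
static bool example_nhe_matches_event(struct mlx5e_neigh_hash_entry *nhe,
				      struct neighbour *n)
{
	/* lockless read: safe because the pointer is always published
	 * with WRITE_ONCE() by the update paths above
	 */
	return READ_ONCE(nhe->neigh_dev) == n->dev;
}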
return err; mutex_lock(&rpriv->neigh_update.encap_lock); - nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); + nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh); if (!nhe) { - err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); + err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe); if (err) { mutex_unlock(&rpriv->neigh_update.encap_lock); mlx5_tun_entropy_refcount_dec(tun_entropy, @@ -122,7 +124,7 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv, } unlock: mutex_unlock(&esw->offloads.encap_tbl_lock); - mlx5e_put_encap_flow_list(priv, &flow_list); + mlx5e_put_flow_list(priv, &flow_list); } static int @@ -651,7 +653,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, tc_skb_ext->chain = chain; - zone_restore_id = reg_c1 & ZONE_RESTORE_MAX; + zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK; uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; @@ -660,7 +662,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, return false; } - tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG); + tunnel_id = reg_c1 >> ESW_TUN_OFFSET; return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); #endif /* CONFIG_NET_TC_SKB_EXT */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h index fdf9702c2d7d..d0661578467b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h @@ -27,7 +27,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv, unsigned char ha[ETH_ALEN]); int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e); + struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev); void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 6bc6b48a56dc..f3f6eb081948 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -12,6 +12,7 @@ #include <net/flow_offload.h> #include <net/netfilter/nf_flow_table.h> #include <linux/workqueue.h> +#include <linux/refcount.h> #include <linux/xarray.h> #include "lib/fs_chains.h" @@ -27,6 +28,7 @@ #define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1) #define MLX5_CT_STATE_TRK_BIT BIT(2) #define MLX5_CT_STATE_NAT_BIT BIT(3) +#define MLX5_CT_STATE_REPLY_BIT BIT(4) #define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8) #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) @@ -51,11 +53,11 @@ struct mlx5_tc_ct_priv { struct mlx5_flow_table *ct_nat; struct mlx5_flow_table *post_ct; struct mutex control_lock; /* guards parallel adds/dels */ - struct mutex shared_counter_lock; struct mapping_ctx *zone_mapping; struct mapping_ctx *labels_mapping; enum mlx5_flow_namespace_type ns_type; struct mlx5_fs_chains *chains; + spinlock_t ht_lock; /* protects ft entries */ }; struct mlx5_ct_flow { @@ -124,6 +126,10 @@ struct mlx5_ct_counter { bool is_shared; }; +enum { + MLX5_CT_ENTRY_FLAG_VALID, +}; + struct mlx5_ct_entry { struct rhash_head node; struct rhash_head tuple_node; @@ -134,6 +140,12 @@ struct mlx5_ct_entry { struct mlx5_ct_tuple tuple; struct mlx5_ct_tuple tuple_nat; struct mlx5_ct_zone_rule zone_rules[2]; + + struct mlx5_tc_ct_priv *ct_priv; + struct work_struct work; + + refcount_t refcnt; + unsigned long flags; }; static const struct rhashtable_params cts_ht_params 
= { @@ -641,6 +653,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, } ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT; + ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT; err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts, ct_state, meta->ct_metadata.mark, @@ -709,11 +722,11 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, attr->outer_match_level = MLX5_MATCH_L4; attr->counter = entry->counter->counter; attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT; + if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) + attr->esw_attr->in_mdev = priv->mdev; mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule); - mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, - entry->tuple.zone & MLX5_CT_ZONE_MASK, - MLX5_CT_ZONE_MASK); + mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK); zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr); if (IS_ERR(zone_rule->rule)) { @@ -740,6 +753,87 @@ err_attr: return err; } +static bool +mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry) +{ + return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags); +} + +static struct mlx5_ct_entry * +mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple) +{ + struct mlx5_ct_entry *entry; + + entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple, + tuples_ht_params); + if (entry && mlx5_tc_ct_entry_valid(entry) && + refcount_inc_not_zero(&entry->refcnt)) { + return entry; + } else if (!entry) { + entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht, + tuple, tuples_nat_ht_params); + if (entry && mlx5_tc_ct_entry_valid(entry) && + refcount_inc_not_zero(&entry->refcnt)) + return entry; + } + + return entry ? ERR_PTR(-EINVAL) : NULL; +} + +static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry) +{ + struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv; + + rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); + rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, + tuples_ht_params); +} + +static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry) +{ + struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv; + + mlx5_tc_ct_entry_del_rules(ct_priv, entry); + + spin_lock_bh(&ct_priv->ht_lock); + mlx5_tc_ct_entry_remove_from_tuples(entry); + spin_unlock_bh(&ct_priv->ht_lock); + + mlx5_tc_ct_counter_put(ct_priv, entry); + kfree(entry); +} + +static void +mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry) +{ + if (!refcount_dec_and_test(&entry->refcnt)) + return; + + mlx5_tc_ct_entry_del(entry); +} + +static void mlx5_tc_ct_entry_del_work(struct work_struct *work) +{ + struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work); + + mlx5_tc_ct_entry_del(entry); +} + +static void +__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry) +{ + struct mlx5e_priv *priv; + + if (!refcount_dec_and_test(&entry->refcnt)) + return; + + priv = netdev_priv(entry->ct_priv->netdev); + INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work); + queue_work(priv->wq, &entry->work); +} + static struct mlx5_ct_counter * mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv) { @@ -770,7 +864,6 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_counter *shared_counter; struct mlx5_ct_entry *rev_entry; __be16 tmp_port; - int ret; /* get the reversed tuple */ tmp_port = rev_tuple.port.src; @@ -792,23 +885,31 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv, } /* Use the same 
counter as the reverse direction */ - mutex_lock(&ct_priv->shared_counter_lock); - rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple, - tuples_ht_params); - if (rev_entry) { - if (refcount_inc_not_zero(&rev_entry->counter->refcount)) { - mutex_unlock(&ct_priv->shared_counter_lock); - return rev_entry->counter; - } + spin_lock_bh(&ct_priv->ht_lock); + rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple); + + if (IS_ERR(rev_entry)) { + spin_unlock_bh(&ct_priv->ht_lock); + goto create_counter; } - mutex_unlock(&ct_priv->shared_counter_lock); - shared_counter = mlx5_tc_ct_counter_create(ct_priv); - if (IS_ERR(shared_counter)) { - ret = PTR_ERR(shared_counter); - return ERR_PTR(ret); + if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) { + ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry); + shared_counter = rev_entry->counter; + spin_unlock_bh(&ct_priv->ht_lock); + + mlx5_tc_ct_entry_put(rev_entry); + return shared_counter; } + spin_unlock_bh(&ct_priv->ht_lock); + +create_counter: + + shared_counter = mlx5_tc_ct_counter_create(ct_priv); + if (IS_ERR(shared_counter)) + return shared_counter; + shared_counter->is_shared = true; refcount_set(&shared_counter->refcount, 1); return shared_counter; @@ -866,10 +967,14 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, if (!meta_action) return -EOPNOTSUPP; - entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, - cts_ht_params); - if (entry) - return 0; + spin_lock_bh(&ct_priv->ht_lock); + entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params); + if (entry && refcount_inc_not_zero(&entry->refcnt)) { + spin_unlock_bh(&ct_priv->ht_lock); + mlx5_tc_ct_entry_put(entry); + return -EEXIST; + } + spin_unlock_bh(&ct_priv->ht_lock); entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) @@ -878,6 +983,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, entry->tuple.zone = ft->zone; entry->cookie = flow->cookie; entry->restore_cookie = meta_action->ct_metadata.cookie; + refcount_set(&entry->refcnt, 2); + entry->ct_priv = ct_priv; err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule); if (err) @@ -888,35 +995,40 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, if (err) goto err_set; - err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht, - &entry->tuple_node, - tuples_ht_params); + spin_lock_bh(&ct_priv->ht_lock); + + err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node, + cts_ht_params); + if (err) + goto err_entries; + + err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht, + &entry->tuple_node, + tuples_ht_params); if (err) goto err_tuple; if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) { - err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, - tuples_nat_ht_params); + err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); if (err) goto err_tuple_nat; } + spin_unlock_bh(&ct_priv->ht_lock); err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry, ft->zone_restore_id); if (err) goto err_rules; - err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node, - cts_ht_params); - if (err) - goto err_insert; + set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags); + mlx5_tc_ct_entry_put(entry); /* this function reference */ return 0; -err_insert: - mlx5_tc_ct_entry_del_rules(ct_priv, entry); err_rules: + spin_lock_bh(&ct_priv->ht_lock); if (mlx5_tc_ct_entry_has_nat(entry)) 
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, &entry->tuple_nat_node, tuples_nat_ht_params); @@ -925,47 +1037,43 @@ err_tuple_nat: &entry->tuple_node, tuples_ht_params); err_tuple: + rhashtable_remove_fast(&ft->ct_entries_ht, + &entry->node, + cts_ht_params); +err_entries: + spin_unlock_bh(&ct_priv->ht_lock); err_set: kfree(entry); - netdev_warn(ct_priv->netdev, - "Failed to offload ct entry, err: %d\n", err); + if (err != -EEXIST) + netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err); return err; } -static void -mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv, - struct mlx5_ct_entry *entry) -{ - mlx5_tc_ct_entry_del_rules(ct_priv, entry); - mutex_lock(&ct_priv->shared_counter_lock); - if (mlx5_tc_ct_entry_has_nat(entry)) - rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, - tuples_nat_ht_params); - rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, - tuples_ht_params); - mutex_unlock(&ct_priv->shared_counter_lock); - mlx5_tc_ct_counter_put(ct_priv, entry); - -} - static int mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft, struct flow_cls_offload *flow) { + struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv; unsigned long cookie = flow->cookie; struct mlx5_ct_entry *entry; - entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, - cts_ht_params); - if (!entry) + spin_lock_bh(&ct_priv->ht_lock); + entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params); + if (!entry) { + spin_unlock_bh(&ct_priv->ht_lock); return -ENOENT; + } - mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry); - WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht, - &entry->node, - cts_ht_params)); - kfree(entry); + if (!mlx5_tc_ct_entry_valid(entry)) { + spin_unlock_bh(&ct_priv->ht_lock); + return -EINVAL; + } + + rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params); + mlx5_tc_ct_entry_remove_from_tuples(entry); + spin_unlock_bh(&ct_priv->ht_lock); + + mlx5_tc_ct_entry_put(entry); return 0; } @@ -974,19 +1082,30 @@ static int mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, struct flow_cls_offload *f) { + struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv; unsigned long cookie = f->cookie; struct mlx5_ct_entry *entry; u64 lastuse, packets, bytes; - entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, - cts_ht_params); - if (!entry) + spin_lock_bh(&ct_priv->ht_lock); + entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params); + if (!entry) { + spin_unlock_bh(&ct_priv->ht_lock); return -ENOENT; + } + + if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) { + spin_unlock_bh(&ct_priv->ht_lock); + return -EINVAL; + } + + spin_unlock_bh(&ct_priv->ht_lock); mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse); flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); + mlx5_tc_ct_entry_put(entry); return 0; } @@ -1088,8 +1207,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, struct netlink_ext_ack *extack) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); + bool trk, est, untrk, unest, new, rpl, unrpl; struct flow_dissector_key_ct *mask, *key; - bool trk, est, untrk, unest, new; u32 ctstate = 0, ctstate_mask = 0; u16 ct_state_on, ct_state_off; u16 ct_state, ct_state_mask; @@ -1115,9 +1234,10 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | - TCA_FLOWER_KEY_CT_FLAGS_NEW)) { + 
TCA_FLOWER_KEY_CT_FLAGS_NEW | + TCA_FLOWER_KEY_CT_FLAGS_REPLY)) { NL_SET_ERR_MSG_MOD(extack, - "only ct_state trk, est and new are supported for offload"); + "only ct_state trk, est, new and rpl are supported for offload"); return -EOPNOTSUPP; } @@ -1126,13 +1246,17 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW; est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; + rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY; untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; + unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY; ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0; ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0; + ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0; ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0; ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0; + ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0; if (new) { NL_SET_ERR_MSG_MOD(extack, @@ -1247,9 +1371,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft, pre_ct->flow_rule = rule; /* add miss rule */ - memset(spec, 0, sizeof(*spec)); dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); ct_dbg("Failed to add pre ct miss rule zone %d", zone); @@ -1478,11 +1601,9 @@ err_mapping: static void mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) { - struct mlx5_tc_ct_priv *ct_priv = arg; struct mlx5_ct_entry *entry = ptr; - mlx5_tc_ct_del_ft_entry(ct_priv, entry); - kfree(entry); + mlx5_tc_ct_entry_put(entry); } static void @@ -1760,7 +1881,6 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, goto err_set_registers; } - dealloc_mod_hdr_actions(mod_acts); pre_ct_attr->modify_hdr = mod_hdr; pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; @@ -1960,6 +2080,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, goto err_mapping_labels; } + spin_lock_init(&ct_priv->ht_lock); ct_priv->ns_type = ns_type; ct_priv->chains = chains; ct_priv->netdev = priv->netdev; @@ -1994,7 +2115,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, idr_init(&ct_priv->fte_ids); mutex_init(&ct_priv->control_lock); - mutex_init(&ct_priv->shared_counter_lock); rhashtable_init(&ct_priv->zone_ht, &zone_params); rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params); @@ -2037,7 +2157,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); rhashtable_destroy(&ct_priv->zone_ht); mutex_destroy(&ct_priv->control_lock); - mutex_destroy(&ct_priv->shared_counter_lock); idr_destroy(&ct_priv->fte_ids); kfree(ct_priv); } @@ -2059,14 +2178,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone)) return false; - entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple, - tuples_ht_params); - if (!entry) - entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht, - &tuple, tuples_nat_ht_params); - if (!entry) + spin_lock(&ct_priv->ht_lock); + + entry = mlx5_tc_ct_entry_get(ct_priv, &tuple); + if (!entry) { + spin_unlock(&ct_priv->ht_lock); + return false; + } + + if (IS_ERR(entry)) { + spin_unlock(&ct_priv->ht_lock); return false; + } + 
spin_unlock(&ct_priv->ht_lock); tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie); + __mlx5_tc_ct_entry_put(entry); + return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 6503b614337c..69e618d17071 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -73,7 +73,7 @@ struct mlx5_ct_attr { #define zone_restore_to_reg_ct {\ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\ .moffset = 0,\ - .mlen = 1,\ + .mlen = (ESW_ZONE_ID_BITS / 8),\ .soffset = MLX5_BYTE_OFF(fte_match_param,\ misc_parameters_2.metadata_reg_c_1) + 3,\ } @@ -81,14 +81,12 @@ struct mlx5_ct_attr { #define nic_zone_restore_to_reg_ct {\ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\ .moffset = 2,\ - .mlen = 1,\ + .mlen = (ESW_ZONE_ID_BITS / 8),\ } #define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen) #define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset) #define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8) -#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8) -#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0) #if IS_ENABLED(CONFIG_MLX5_TC_CT) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h new file mode 100644 index 000000000000..c223591ffc22 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies. */ + +#ifndef __MLX5_EN_TC_PRIV_H__ +#define __MLX5_EN_TC_PRIV_H__ + +#include "en_tc.h" + +#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) + +#define MLX5E_TC_MAX_SPLITS 1 + +enum { + MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, + MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, + MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, + MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2, + MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3, + MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4, + MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, + MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, + MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7, + MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8, + MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9, + MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10, +}; + +struct mlx5e_tc_flow_parse_attr { + const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; + struct net_device *filter_dev; + struct mlx5_flow_spec spec; + struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; + int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; + struct ethhdr eth; +}; + +/* Helper struct for accessing a struct containing list_head array. + * Containing struct + * |- Helper array + * [0] Helper item 0 + * |- list_head item 0 + * |- index (0) + * [1] Helper item 1 + * |- list_head item 1 + * |- index (1) + * To access the containing struct from one of the list_head items: + * 1. Get the helper item from the list_head item using + * helper item = + * container_of(list_head item, helper struct type, list_head field) + * 2. 
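The conntrack rework above moves entry lifetime to reference counting: lookups run under the new ht_lock and only hand out entries that are still marked valid and whose refcount can be raised, while the last put from atomic (datapath restore) context defers the blocking teardown (rule removal, counter release) to a workqueue via __mlx5_tc_ct_entry_put(). A condensed sketch of the lookup side, reusing names from the diff:

static struct mlx5_ct_entry *
example_ct_lookup_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft,
		      unsigned long cookie)
{
	struct mlx5_ct_entry *entry;

	spin_lock_bh(&ct_priv->ht_lock);
	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
	if (entry && (!mlx5_tc_ct_entry_valid(entry) ||
		      !refcount_inc_not_zero(&entry->refcnt)))
		entry = NULL;	/* entry is being torn down concurrently */
	spin_unlock_bh(&ct_priv->ht_lock);

	return entry;		/* caller must mlx5_tc_ct_entry_put() */
}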
Get the containing struct from the helper item and its index in the array: + * containing struct = + * container_of(helper item, containing struct type, helper field[index]) + */ +struct encap_flow_item { + struct mlx5e_encap_entry *e; /* attached encap instance */ + struct list_head list; + int index; +}; + +struct encap_route_flow_item { + struct mlx5e_route_entry *r; /* attached route instance */ + int index; +}; + +struct mlx5e_tc_flow { + struct rhash_head node; + struct mlx5e_priv *priv; + u64 cookie; + unsigned long flags; + struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; + + /* flows sharing the same reformat object - currently mpls decap */ + struct list_head l3_to_l2_reformat; + struct mlx5e_decap_entry *decap_reformat; + + /* flows sharing same route entry */ + struct list_head decap_routes; + struct mlx5e_route_entry *decap_route; + struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS]; + + /* Flow can be associated with multiple encap IDs. + * The number of encaps is bounded by the number of supported + * destinations. + */ + struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5e_tc_flow *peer_flow; + struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ + struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ + struct list_head hairpin; /* flows sharing the same hairpin */ + struct list_head peer; /* flows with peer flow */ + struct list_head unready; /* flows not ready to be offloaded (e.g. + * due to missing route) + */ + struct net_device *orig_dev; /* netdev adding flow first */ + int tmp_entry_index; + struct list_head tmp_list; /* temporary flow list used by neigh update */ + refcount_t refcnt; + struct rcu_head rcu_head; + struct completion init_done; + int tunnel_id; /* the mapped tunnel id of this flow */ + struct mlx5_flow_attr *attr; +}; + +u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer); + +struct mlx5_flow_handle * +mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr); + +bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow); + +static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* Complete all memory stores before setting bit. */ + smp_mb__before_atomic(); + set_bit(flag, &flow->flags); +} + +#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag) + +static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, + unsigned long flag) +{ + /* test_and_set_bit() provides all necessary barriers */ + return test_and_set_bit(flag, &flow->flags); +} + +#define flow_flag_test_and_set(flow, flag) \ + __flow_flag_test_and_set(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* Complete all memory stores before clearing bit. */ + smp_mb__before_atomic(); + clear_bit(flag, &flow->flags); +} + +#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + bool ret = test_bit(flag, &flow->flags); + + /* Read fields of flow structure only after checking flags. 
*/ + smp_mb__after_atomic(); + return ret; +} + +#define flow_flag_test(flow, flag) __flow_flag_test(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow); +struct mlx5_flow_handle * +mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec); +void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); + +struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow); +void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); + +struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow); + +#endif /* __MLX5_EN_TC_PRIV_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 90930e54b6f2..f8075a604605 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -6,10 +6,32 @@ #include <net/geneve.h> #include <net/bareudp.h> #include "en/tc_tun.h" +#include "en/tc_priv.h" #include "en_tc.h" #include "rep/tc.h" #include "rep/neigh.h" +struct mlx5e_tc_tun_route_attr { + struct net_device *out_dev; + struct net_device *route_dev; + union { + struct flowi4 fl4; + struct flowi6 fl6; + } fl; + struct neighbour *n; + u8 ttl; +}; + +#define TC_TUN_ROUTE_ATTR_INIT(name) struct mlx5e_tc_tun_route_attr name = {} + +static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr) +{ + if (attr->n) + neigh_release(attr->n); + if (attr->route_dev) + dev_put(attr->route_dev); +} + struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev) { if (netif_is_vxlan(tunnel_dev)) @@ -79,12 +101,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, struct net_device *mirred_dev, - struct net_device **out_dev, - struct net_device **route_dev, - struct flowi4 *fl4, - struct neighbour **out_n, - u8 *out_ttl) + struct mlx5e_tc_tun_route_attr *attr) { + struct net_device *route_dev; + struct net_device *out_dev; struct neighbour *n; struct rtable *rt; @@ -97,46 +117,50 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = mdev->priv.eswitch; uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - fl4->flowi4_oif = uplink_dev->ifindex; + attr->fl.fl4.flowi4_oif = uplink_dev->ifindex; } - rt = ip_route_output_key(dev_net(mirred_dev), fl4); + rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4); if (IS_ERR(rt)) return PTR_ERR(rt); if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) { - ip_rt_put(rt); - return -ENETUNREACH; + ret = -ENETUNREACH; + goto err_rt_release; } #else return -EOPNOTSUPP; #endif - ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev); - if (ret < 0) { - ip_rt_put(rt); - return ret; - } - dev_hold(*route_dev); + ret = get_route_and_out_devs(priv, rt->dst.dev, &route_dev, &out_dev); + if (ret < 0) + goto err_rt_release; + dev_hold(route_dev); - if (!(*out_ttl)) - *out_ttl = ip4_dst_hoplimit(&rt->dst); - n = dst_neigh_lookup(&rt->dst, &fl4->daddr); - ip_rt_put(rt); + if (!attr->ttl) + attr->ttl = ip4_dst_hoplimit(&rt->dst); + n = dst_neigh_lookup(&rt->dst, &attr->fl.fl4.daddr); if (!n) { - dev_put(*route_dev); - return -ENOMEM; + ret = -ENOMEM; + goto err_dev_release; } - *out_n = n; + ip_rt_put(rt); + attr->route_dev = route_dev; + attr->out_dev = out_dev; + 
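The flow_flag helpers in tc_priv.h above encode an acquire/release-style contract: smp_mb__before_atomic() in __flow_flag_set() orders the writer's stores before the flag becomes visible, and smp_mb__after_atomic() in __flow_flag_test() orders the reader's loads after the flag check. A sketch of how the pair is meant to be used (illustrative, not driver code):

static void example_publish(struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_handle *rule)
{
	flow->rule[0] = rule;		/* store the state first */
	flow_flag_set(flow, OFFLOADED);	/* barrier + set_bit() publishes it */
}

static struct mlx5_flow_handle *example_consume(struct mlx5e_tc_flow *flow)
{
	if (!flow_flag_test(flow, OFFLOADED))	/* test_bit() + barrier */
		return NULL;
	return flow->rule[0];	/* safe: read is ordered after the flag */
}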
attr->n = n; return 0; + +err_dev_release: + dev_put(route_dev); +err_rt_release: + ip_rt_put(rt); + return ret; } -static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev, - struct neighbour *n) +static void mlx5e_route_lookup_ipv4_put(struct mlx5e_tc_tun_route_attr *attr) { - neigh_release(n); - dev_put(route_dev); + mlx5e_tc_tun_route_attr_cleanup(attr); } static const char *mlx5e_netdev_kind(struct net_device *dev) @@ -188,28 +212,26 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); const struct ip_tunnel_key *tun_key = &e->tun_info->key; - struct net_device *out_dev, *route_dev; - struct flowi4 fl4 = {}; - struct neighbour *n; + struct mlx5e_neigh m_neigh = {}; + TC_TUN_ROUTE_ATTR_INIT(attr); int ipv4_encap_size; char *encap_header; - u8 nud_state, ttl; struct iphdr *ip; + u8 nud_state; int err; /* add the IP fields */ - fl4.flowi4_tos = tun_key->tos; - fl4.daddr = tun_key->u.ipv4.dst; - fl4.saddr = tun_key->u.ipv4.src; - ttl = tun_key->ttl; + attr.fl.fl4.flowi4_tos = tun_key->tos; + attr.fl.fl4.daddr = tun_key->u.ipv4.dst; + attr.fl.fl4.saddr = tun_key->u.ipv4.src; + attr.ttl = tun_key->ttl; - err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev, - &fl4, &n, &ttl); + err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr); if (err) return err; ipv4_encap_size = - (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + + (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + sizeof(struct iphdr) + e->tunnel->calc_hlen(e); @@ -226,40 +248,36 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, goto release_neigh; } - /* used by mlx5e_detach_encap to lookup a neigh hash table - * entry in the neigh hash table when a user deletes a rule - */ - e->m_neigh.dev = n->dev; - e->m_neigh.family = n->ops->family; - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - e->out_dev = out_dev; - e->route_dev_ifindex = route_dev->ifindex; + m_neigh.family = attr.n->ops->family; + memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); + e->out_dev = attr.out_dev; + e->route_dev_ifindex = attr.route_dev->ifindex; /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the * neigh changes its validity state, we would find the relevant neigh * in the hash. 
*/ - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev); if (err) goto free_encap; - read_lock_bh(&n->lock); - nud_state = n->nud_state; - ether_addr_copy(e->h_dest, n->ha); - read_unlock_bh(&n->lock); + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + read_unlock_bh(&attr.n->lock); /* add ethernet header */ - ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, + ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, ETH_P_IP); /* add ip header */ ip->tos = tun_key->tos; ip->version = 0x4; ip->ihl = 0x5; - ip->ttl = ttl; - ip->daddr = fl4.daddr; - ip->saddr = fl4.saddr; + ip->ttl = attr.ttl; + ip->daddr = attr.fl.fl4.daddr; + ip->saddr = attr.fl.fl4.saddr; /* add tunneling protocol header */ err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), @@ -271,7 +289,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->encap_header = encap_header; if (!(nud_state & NUD_VALID)) { - neigh_event_send(n, NULL); + neigh_event_send(attr.n, NULL); /* the encap entry will be made valid on neigh update event * and not used before that. */ @@ -287,8 +305,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, } e->flags |= MLX5_ENCAP_ENTRY_VALID; - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); - mlx5e_route_lookup_ipv4_put(route_dev, n); + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv4_put(&attr); return err; destroy_neigh_entry: @@ -296,55 +314,155 @@ destroy_neigh_entry: free_encap: kfree(encap_header); release_neigh: - mlx5e_route_lookup_ipv4_put(route_dev, n); + mlx5e_route_lookup_ipv4_put(&attr); + return err; +} + +int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + const struct ip_tunnel_key *tun_key = &e->tun_info->key; + TC_TUN_ROUTE_ATTR_INIT(attr); + int ipv4_encap_size; + char *encap_header; + struct iphdr *ip; + u8 nud_state; + int err; + + /* add the IP fields */ + attr.fl.fl4.flowi4_tos = tun_key->tos; + attr.fl.fl4.daddr = tun_key->u.ipv4.dst; + attr.fl.fl4.saddr = tun_key->u.ipv4.src; + attr.ttl = tun_key->ttl; + + err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr); + if (err) + return err; + + ipv4_encap_size = + (is_vlan_dev(attr.route_dev) ? 
VLAN_ETH_HLEN : ETH_HLEN) + + sizeof(struct iphdr) + + e->tunnel->calc_hlen(e); + + if (max_encap_size < ipv4_encap_size) { + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", + ipv4_encap_size, max_encap_size); + err = -EOPNOTSUPP; + goto release_neigh; + } + + encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); + if (!encap_header) { + err = -ENOMEM; + goto release_neigh; + } + + e->route_dev_ifindex = attr.route_dev->ifindex; + + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev); + read_unlock_bh(&attr.n->lock); + + /* add ethernet header */ + ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, + ETH_P_IP); + + /* add ip header */ + ip->tos = tun_key->tos; + ip->version = 0x4; + ip->ihl = 0x5; + ip->ttl = attr.ttl; + ip->daddr = attr.fl.fl4.daddr; + ip->saddr = attr.fl.fl4.saddr; + + /* add tunneling protocol header */ + err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), + &ip->protocol, e); + if (err) + goto free_encap; + + e->encap_size = ipv4_encap_size; + kfree(e->encap_header); + e->encap_header = encap_header; + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(attr.n, NULL); + /* the encap entry will be made valid on neigh update event + * and not used before that. + */ + goto release_neigh; + } + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv4_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(e->pkt_reformat)) { + err = PTR_ERR(e->pkt_reformat); + goto free_encap; + } + + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv4_put(&attr); + return err; + +free_encap: + kfree(encap_header); +release_neigh: + mlx5e_route_lookup_ipv4_put(&attr); return err; } #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv, struct net_device *mirred_dev, - struct net_device **out_dev, - struct net_device **route_dev, - struct flowi6 *fl6, - struct neighbour **out_n, - u8 *out_ttl) + struct mlx5e_tc_tun_route_attr *attr) { + struct net_device *route_dev; + struct net_device *out_dev; struct dst_entry *dst; struct neighbour *n; - int ret; - dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6, NULL); if (IS_ERR(dst)) return PTR_ERR(dst); - if (!(*out_ttl)) - *out_ttl = ip6_dst_hoplimit(dst); + if (!attr->ttl) + attr->ttl = ip6_dst_hoplimit(dst); - ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); - if (ret < 0) { - dst_release(dst); - return ret; - } + ret = get_route_and_out_devs(priv, dst->dev, &route_dev, &out_dev); + if (ret < 0) + goto err_dst_release; - dev_hold(*route_dev); - n = dst_neigh_lookup(dst, &fl6->daddr); - dst_release(dst); + dev_hold(route_dev); + n = dst_neigh_lookup(dst, &attr->fl.fl6.daddr); if (!n) { - dev_put(*route_dev); - return -ENOMEM; + ret = -ENOMEM; + goto err_dev_release; } - *out_n = n; + dst_release(dst); + attr->out_dev = out_dev; + attr->route_dev = route_dev; + attr->n = n; return 0; + +err_dev_release: + dev_put(route_dev); +err_dst_release: + dst_release(dst); + return ret; } -static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev, - struct neighbour *n) +static void mlx5e_route_lookup_ipv6_put(struct mlx5e_tc_tun_route_attr *attr) { - neigh_release(n); - dev_put(route_dev); + 
mlx5e_tc_tun_route_attr_cleanup(attr); } int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, @@ -353,28 +471,25 @@ { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); const struct ip_tunnel_key *tun_key = &e->tun_info->key; - struct net_device *out_dev, *route_dev; - struct flowi6 fl6 = {}; + struct mlx5e_neigh m_neigh = {}; + TC_TUN_ROUTE_ATTR_INIT(attr); struct ipv6hdr *ip6h; - struct neighbour *n = NULL; int ipv6_encap_size; char *encap_header; - u8 nud_state, ttl; + u8 nud_state; int err; - ttl = tun_key->ttl; + attr.ttl = tun_key->ttl; + attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + attr.fl.fl6.daddr = tun_key->u.ipv6.dst; + attr.fl.fl6.saddr = tun_key->u.ipv6.src; - fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); - fl6.daddr = tun_key->u.ipv6.dst; - fl6.saddr = tun_key->u.ipv6.src; - - err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev, - &fl6, &n, &ttl); + err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr); if (err) return err; ipv6_encap_size = - (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + + (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + sizeof(struct ipv6hdr) + e->tunnel->calc_hlen(e); @@ -391,39 +506,35 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, goto release_neigh; } - /* used by mlx5e_detach_encap to lookup a neigh hash table - * entry in the neigh hash table when a user deletes a rule - */ - e->m_neigh.dev = n->dev; - e->m_neigh.family = n->ops->family; - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - e->out_dev = out_dev; - e->route_dev_ifindex = route_dev->ifindex; + m_neigh.family = attr.n->ops->family; + memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); + e->out_dev = attr.out_dev; + e->route_dev_ifindex = attr.route_dev->ifindex; /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the * neigh changes its validity state, we would find the relevant neigh * in the hash. */ - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev); if (err) goto free_encap; - read_lock_bh(&n->lock); - nud_state = n->nud_state; - ether_addr_copy(e->h_dest, n->ha); - read_unlock_bh(&n->lock); + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + read_unlock_bh(&attr.n->lock); /* add ethernet header */ - ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, + ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, ETH_P_IPV6); /* add ip header */ ip6_flow_hdr(ip6h, tun_key->tos, 0); /* the HW fills up ipv6 payload len */ - ip6h->hop_limit = ttl; - ip6h->daddr = fl6.daddr; - ip6h->saddr = fl6.saddr; + ip6h->hop_limit = attr.ttl; + ip6h->daddr = attr.fl.fl6.daddr; + ip6h->saddr = attr.fl.fl6.saddr; /* add tunneling protocol header */ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), @@ -435,7 +546,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->encap_header = encap_header; if (!(nud_state & NUD_VALID)) { - neigh_event_send(n, NULL); + neigh_event_send(attr.n, NULL); /* the encap entry will be made valid on neigh update event * and not used before that. 
*/ @@ -452,8 +563,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, } e->flags |= MLX5_ENCAP_ENTRY_VALID; - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); - mlx5e_route_lookup_ipv6_put(route_dev, n); + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv6_put(&attr); return err; destroy_neigh_entry: @@ -461,10 +572,160 @@ destroy_neigh_entry: free_encap: kfree(encap_header); release_neigh: - mlx5e_route_lookup_ipv6_put(route_dev, n); + mlx5e_route_lookup_ipv6_put(&attr); return err; } + +int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + const struct ip_tunnel_key *tun_key = &e->tun_info->key; + TC_TUN_ROUTE_ATTR_INIT(attr); + struct ipv6hdr *ip6h; + int ipv6_encap_size; + char *encap_header; + u8 nud_state; + int err; + + attr.ttl = tun_key->ttl; + + attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + attr.fl.fl6.daddr = tun_key->u.ipv6.dst; + attr.fl.fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr); + if (err) + return err; + + ipv6_encap_size = + (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + + sizeof(struct ipv6hdr) + + e->tunnel->calc_hlen(e); + + if (max_encap_size < ipv6_encap_size) { + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", + ipv6_encap_size, max_encap_size); + err = -EOPNOTSUPP; + goto release_neigh; + } + + encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); + if (!encap_header) { + err = -ENOMEM; + goto release_neigh; + } + + e->route_dev_ifindex = attr.route_dev->ifindex; + + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev); + read_unlock_bh(&attr.n->lock); + + /* add ethernet header */ + ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, + ETH_P_IPV6); + + /* add ip header */ + ip6_flow_hdr(ip6h, tun_key->tos, 0); + /* the HW fills up ipv6 payload len */ + ip6h->hop_limit = attr.ttl; + ip6h->daddr = attr.fl.fl6.daddr; + ip6h->saddr = attr.fl.fl6.saddr; + + /* add tunneling protocol header */ + err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), + &ip6h->nexthdr, e); + if (err) + goto free_encap; + + e->encap_size = ipv6_encap_size; + kfree(e->encap_header); + e->encap_header = encap_header; + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(attr.n, NULL); + /* the encap entry will be made valid on neigh update event + * and not used before that. 
+ */ + goto release_neigh; + } + + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv6_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(e->pkt_reformat)) { + err = PTR_ERR(e->pkt_reformat); + goto free_encap; + } + + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv6_put(&attr); + return err; + +free_encap: + kfree(encap_header); +release_neigh: + mlx5e_route_lookup_ipv6_put(&attr); + return err; +} +#endif + +int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *flow_attr) +{ + struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; + TC_TUN_ROUTE_ATTR_INIT(attr); + u16 vport_num; + int err = 0; + + if (flow_attr->ip_version == 4) { + /* Addresses are swapped for decap */ + attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; + attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; + err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); + } +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + else if (flow_attr->ip_version == 6) { + /* Addresses are swapped for decap */ + attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; + attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; + err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr); + } #endif + else + return 0; + + if (err) + return err; + + if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev)) + goto out; + + err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num); + if (err) + goto out; + + esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, + misc_parameters.vxlan_vni); + esw_attr->rx_tun_attr->decap_vport = vport_num; + +out: + if (flow_attr->ip_version == 4) + mlx5e_route_lookup_ipv4_put(&attr); +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + else if (flow_attr->ip_version == 6) + mlx5e_route_lookup_ipv6_put(&attr); +#endif + return err; +} bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, struct net_device *netdev) @@ -625,14 +886,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, } } - /* Enforce DMAC when offloading incoming tunneled flows. - * Flow counters require a match on the DMAC. 
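mlx5e_tc_tun_route_attr collapses what used to be five output parameters into one structure with a single cleanup point. A condensed sketch of the IPv4 caller pattern established above (the body comment marks where real callers build the encap header):

static int example_route_attr_usage(struct mlx5e_priv *priv,
				    struct net_device *mirred_dev,
				    const struct ip_tunnel_key *tun_key)
{
	TC_TUN_ROUTE_ATTR_INIT(attr);
	int err;

	attr.ttl = tun_key->ttl;
	attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
	attr.fl.fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr);
	if (err)
		return err;

	/* ... use attr.out_dev, attr.route_dev and attr.n here ... */

	/* one call releases the neighbour and the route_dev reference */
	mlx5e_route_lookup_ipv4_put(&attr);
	return 0;
}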
- */ - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - dmac_47_16), priv->netdev->dev_addr); - /* let software handle IP fragments */ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 704359df6095..67de2bf36861 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -11,6 +11,8 @@ #include "en.h" #include "en_rep.h" +#ifdef CONFIG_MLX5_ESWITCH + enum { MLX5E_TC_TUNNEL_TYPE_UNKNOWN, MLX5E_TC_TUNNEL_TYPE_VXLAN, @@ -59,17 +61,30 @@ int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev, int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5e_encap_entry *e); +int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5e_encap_entry *e); +int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); #else static inline int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; } +static inline int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ return -EOPNOTSUPP; } #endif +int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr); bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, struct net_device *netdev); @@ -86,4 +101,6 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv, void *headers_c, void *headers_v); +#endif /* CONFIG_MLX5_ESWITCH */ + #endif //__MLX5_EN_TC_TUNNEL_H__ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c new file mode 100644 index 000000000000..6a116335bb21 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -0,0 +1,1653 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021 Mellanox Technologies. 
*/ + +#include <net/fib_notifier.h> +#include "tc_tun_encap.h" +#include "en_tc.h" +#include "tc_tun.h" +#include "rep/tc.h" +#include "diag/en_tc_tracepoint.h" + +enum { + MLX5E_ROUTE_ENTRY_VALID = BIT(0), +}; + +struct mlx5e_route_key { + int ip_version; + union { + __be32 v4; + struct in6_addr v6; + } endpoint_ip; +}; + +struct mlx5e_route_entry { + struct mlx5e_route_key key; + struct list_head encap_entries; + struct list_head decap_flows; + u32 flags; + struct hlist_node hlist; + refcount_t refcnt; + int tunnel_dev_index; + struct rcu_head rcu; +}; + +struct mlx5e_tc_tun_encap { + struct mlx5e_priv *priv; + struct notifier_block fib_nb; + spinlock_t route_lock; /* protects route_tbl */ + unsigned long route_tbl_last_update; + DECLARE_HASHTABLE(route_tbl, 8); +}; + +static bool mlx5e_route_entry_valid(struct mlx5e_route_entry *r) +{ + return r->flags & MLX5E_ROUTE_ENTRY_VALID; +} + +int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec) +{ + struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; + struct mlx5_rx_tun_attr *tun_attr; + void *daddr, *saddr; + u8 ip_version; + + tun_attr = kvzalloc(sizeof(*tun_attr), GFP_KERNEL); + if (!tun_attr) + return -ENOMEM; + + esw_attr->rx_tun_attr = tun_attr; + ip_version = mlx5e_tc_get_ip_version(spec, true); + + if (ip_version == 4) { + daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + tun_attr->dst_ip.v4 = *(__be32 *)daddr; + tun_attr->src_ip.v4 = *(__be32 *)saddr; + if (!tun_attr->dst_ip.v4 || !tun_attr->src_ip.v4) + return 0; + } +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + else if (ip_version == 6) { + int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6); + struct in6_addr zerov6 = {}; + + daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6); + memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size); + memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size); + if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) || + !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6))) + return 0; + } +#endif + /* Only set the flag if both src and dst ip addresses exist. They are + * required to establish routing. + */ + flow_flag_set(flow, TUN_RX); + return 0; +} + +static bool mlx5e_tc_flow_all_encaps_valid(struct mlx5_esw_flow_attr *esw_attr) +{ + bool all_flow_encaps_valid = true; + int i; + + /* Flow can be associated with multiple encap entries. + * Before offloading the flow verify that all of them have + * a valid neighbour. 
+ */ + for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { + if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)) + continue; + if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) { + all_flow_encaps_valid = false; + break; + } + } + + return all_flow_encaps_valid; +} + +void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct list_head *flow_list) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *attr; + struct mlx5_flow_spec *spec; + struct mlx5e_tc_flow *flow; + int err; + + if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE) + return; + + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + e->encap_size, e->encap_header, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(e->pkt_reformat)) { + mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %ld\n", + PTR_ERR(e->pkt_reformat)); + return; + } + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(priv); + + list_for_each_entry(flow, flow_list, tmp_list) { + if (!mlx5e_is_offloaded_flow(flow)) + continue; + attr = flow->attr; + esw_attr = attr->esw_attr; + spec = &attr->parse_attr->spec; + + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat; + esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + + /* Do not offload flows with unresolved neighbors */ + if (!mlx5e_tc_flow_all_encaps_valid(esw_attr)) + continue; + /* update from slow path rule to encap rule */ + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", + err); + continue; + } + + mlx5e_tc_unoffload_from_slow_path(esw, flow); + flow->rule[0] = rule; + /* was unset when slow path rule removed */ + flow_flag_set(flow, OFFLOADED); + } +} + +void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct list_head *flow_list) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *attr; + struct mlx5_flow_spec *spec; + struct mlx5e_tc_flow *flow; + int err; + + list_for_each_entry(flow, flow_list, tmp_list) { + if (!mlx5e_is_offloaded_flow(flow)) + continue; + attr = flow->attr; + esw_attr = attr->esw_attr; + spec = &attr->parse_attr->spec; + + /* update from encap rule to slow path rule */ + rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); + /* mark the flow's encap dest as non-valid */ + esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n", + err); + continue; + } + + mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); + flow->rule[0] = rule; + /* was unset when fast path rule removed */ + flow_flag_set(flow, OFFLOADED); + } + + /* the encap is known to be valid here, so release its pkt_reformat */ + e->flags &= ~MLX5_ENCAP_ENTRY_VALID; + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); +} + +static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, + struct list_head *flow_list, + int index) +{ + if (IS_ERR(mlx5e_flow_get(flow))) + return; + wait_for_completion(&flow->init_done); + + flow->tmp_entry_index = index; + list_add(&flow->tmp_list, flow_list); +} + +/* Takes reference to all flows attached to encap and adds the flows to + * flow_list using 'tmp_list' 
list_head in mlx5e_tc_flow. + */ +void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list) +{ + struct encap_flow_item *efi; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(efi, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + mlx5e_take_tmp_flow(flow, flow_list, efi->index); + } +} + +/* Takes reference to all flows attached to route and adds the flows to + * flow_list using 'tmp_list' list_head in mlx5e_tc_flow. + */ +static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r, + struct list_head *flow_list) +{ + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, &r->decap_flows, decap_routes) + mlx5e_take_tmp_flow(flow, flow_list, 0); +} + +static struct mlx5e_encap_entry * +mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_encap_entry *next = NULL; + +retry: + rcu_read_lock(); + + /* find encap with non-zero reference counter value */ + for (next = e ? + list_next_or_null_rcu(&nhe->encap_list, + &e->encap_list, + struct mlx5e_encap_entry, + encap_list) : + list_first_or_null_rcu(&nhe->encap_list, + struct mlx5e_encap_entry, + encap_list); + next; + next = list_next_or_null_rcu(&nhe->encap_list, + &next->encap_list, + struct mlx5e_encap_entry, + encap_list)) + if (mlx5e_encap_take(next)) + break; + + rcu_read_unlock(); + + /* release starting encap */ + if (e) + mlx5e_encap_put(netdev_priv(e->out_dev), e); + if (!next) + return next; + + /* wait for encap to be fully initialized */ + wait_for_completion(&next->res_ready); + /* continue searching if encap entry is not in valid state after completion */ + if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) { + e = next; + goto retry; + } + + return next; +} + +void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_neigh *m_neigh = &nhe->m_neigh; + struct mlx5e_encap_entry *e = NULL; + struct mlx5e_tc_flow *flow; + struct mlx5_fc *counter; + struct neigh_table *tbl; + bool neigh_used = false; + struct neighbour *n; + u64 lastuse; + + if (m_neigh->family == AF_INET) + tbl = &arp_tbl; +#if IS_ENABLED(CONFIG_IPV6) + else if (m_neigh->family == AF_INET6) + tbl = ipv6_stub->nd_tbl; +#endif + else + return; + + /* mlx5e_get_next_valid_encap() releases previous encap before returning + * next one. 
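+	 * Hence the loop below holds at most one encap reference at any
+	 * point in time.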
+ */ + while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) { + struct mlx5e_priv *priv = netdev_priv(e->out_dev); + struct encap_flow_item *efi, *tmp; + struct mlx5_eswitch *esw; + LIST_HEAD(flow_list); + + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); + list_for_each_entry_safe(efi, tmp, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, + encaps[efi->index]); + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + list_add(&flow->tmp_list, &flow_list); + + if (mlx5e_is_offloaded_flow(flow)) { + counter = mlx5e_tc_get_counter(flow); + lastuse = mlx5_fc_query_lastuse(counter); + if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { + neigh_used = true; + break; + } + } + } + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_put_flow_list(priv, &flow_list); + if (neigh_used) { + /* release current encap before breaking the loop */ + mlx5e_encap_put(priv, e); + break; + } + } + + trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used); + + if (neigh_used) { + nhe->reported_lastuse = jiffies; + + /* find the relevant neigh according to the cached device and + * dst ip pair + */ + n = neigh_lookup(tbl, &m_neigh->dst_ip, READ_ONCE(nhe->neigh_dev)); + if (!n) + return; + + neigh_event_send(n, NULL); + neigh_release(n); + } +} + +static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) +{ + WARN_ON(!list_empty(&e->flows)); + + if (e->compl_result > 0) { + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); + + if (e->flags & MLX5_ENCAP_ENTRY_VALID) + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); + } + + kfree(e->tun_info); + kfree(e->encap_header); + kfree_rcu(e, rcu); +} + +static void mlx5e_decap_dealloc(struct mlx5e_priv *priv, + struct mlx5e_decap_entry *d) +{ + WARN_ON(!list_empty(&d->flows)); + + if (!d->compl_result) + mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat); + + kfree_rcu(d, rcu); +} + +void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock)) + return; + list_del(&e->route_list); + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); +} + +static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock)) + return; + hash_del_rcu(&d->hlist); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + mlx5e_decap_dealloc(priv, d); +} + +static void mlx5e_detach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index); + +void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, int out_index) +{ + struct mlx5e_encap_entry *e = flow->encaps[out_index].e; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (flow->attr->esw_attr->dests[out_index].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + mlx5e_detach_encap_route(priv, flow, out_index); + + /* flow wasn't fully initialized */ + if (!e) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + list_del(&flow->encaps[out_index].list); + flow->encaps[out_index].e = NULL; + if (!refcount_dec_and_test(&e->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; + } + list_del(&e->route_list); + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + 
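/* refcount reached zero and the entry was unhashed above, so it is
+	 * safe to free it outside of encap_tbl_lock
+	 */
+	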
mlx5e_encap_dealloc(priv, e); +} + +void mlx5e_detach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_decap_entry *d = flow->decap_reformat; + + if (!d) + return; + + mutex_lock(&esw->offloads.decap_tbl_lock); + list_del(&flow->l3_to_l2_reformat); + flow->decap_reformat = NULL; + + if (!refcount_dec_and_test(&d->refcnt)) { + mutex_unlock(&esw->offloads.decap_tbl_lock); + return; + } + hash_del_rcu(&d->hlist); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + mlx5e_decap_dealloc(priv, d); +} + +struct encap_key { + const struct ip_tunnel_key *ip_tun_key; + struct mlx5e_tc_tunnel *tc_tunnel; +}; + +static int cmp_encap_info(struct encap_key *a, + struct encap_key *b) +{ + return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || + a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type; +} + +static int cmp_decap_info(struct mlx5e_decap_key *a, + struct mlx5e_decap_key *b) +{ + return memcmp(&a->key, &b->key, sizeof(b->key)); +} + +static int hash_encap_info(struct encap_key *key) +{ + return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), + key->tc_tunnel->tunnel_type); +} + +static int hash_decap_info(struct mlx5e_decap_key *key) +{ + return jhash(&key->key, sizeof(key->key), 0); +} + +bool mlx5e_encap_take(struct mlx5e_encap_entry *e) +{ + return refcount_inc_not_zero(&e->refcnt); +} + +static bool mlx5e_decap_take(struct mlx5e_decap_entry *e) +{ + return refcount_inc_not_zero(&e->refcnt); +} + +static struct mlx5e_encap_entry * +mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, + uintptr_t hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_entry *e; + struct encap_key e_key; + + hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, + encap_hlist, hash_key) { + e_key.ip_tun_key = &e->tun_info->key; + e_key.tc_tunnel = e->tunnel; + if (!cmp_encap_info(&e_key, key) && + mlx5e_encap_take(e)) + return e; + } + + return NULL; +} + +static struct mlx5e_decap_entry * +mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key, + uintptr_t hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_decap_key r_key; + struct mlx5e_decap_entry *e; + + hash_for_each_possible_rcu(esw->offloads.decap_tbl, e, + hlist, hash_key) { + r_key = e->key; + if (!cmp_decap_info(&r_key, key) && + mlx5e_decap_take(e)) + return e; + } + return NULL; +} + +struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info) +{ + size_t tun_size = sizeof(*tun_info) + tun_info->options_len; + + return kmemdup(tun_info, tun_size, GFP_KERNEL); +} + +static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < out_index; i++) { + if (flow->encaps[i].e != e) + continue; + NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); + netdev_err(priv->netdev, "can't duplicate encap action\n"); + return true; + } + + return false; +} + +static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + struct net_device *out_dev, + int route_dev_ifindex, + int out_index) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *route_dev; + u16 vport_num; + int err = 0; + u32 data; + + route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex); + + if (!route_dev || route_dev->netdev_ops != 
&mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) + goto out; + + err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num); + if (err) + goto out; + + attr->dest_chain = 0; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE; + data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch, + vport_num); + err = mlx5e_tc_match_to_reg_set_and_get_id(esw->dev, mod_hdr_acts, + MLX5_FLOW_NAMESPACE_FDB, + VPORT_TO_REG, data); + if (err >= 0) { + esw_attr->dests[out_index].src_port_rewrite_act_id = err; + err = 0; + } + +out: + if (route_dev) + dev_put(route_dev); + return err; +} + +static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw, + struct mlx5_esw_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + struct net_device *out_dev, + int route_dev_ifindex, + int out_index) +{ + int act_id = attr->dests[out_index].src_port_rewrite_act_id; + struct net_device *route_dev; + u16 vport_num; + int err = 0; + u32 data; + + route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex); + + if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) { + err = -ENODEV; + goto out; + } + + err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num); + if (err) + goto out; + + data = mlx5_eswitch_get_vport_metadata_for_set(attr->in_mdev->priv.eswitch, + vport_num); + mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data); + +out: + if (route_dev) + dev_put(route_dev); + return err; +} + +static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_tc_tun_encap *encap; + unsigned int ret; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + encap = uplink_priv->encap; + + spin_lock_bh(&encap->route_lock); + ret = encap->route_tbl_last_update; + spin_unlock_bh(&encap->route_lock); + return ret; +} + +static int mlx5e_attach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5e_encap_entry *e, + bool new_encap_entry, + unsigned long tbl_time_before, + int out_index); + +int mlx5e_attach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, + int out_index, + struct netlink_ext_ack *extack, + struct net_device **encap_dev, + bool *encap_valid) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + const struct ip_tunnel_info *tun_info; + unsigned long tbl_time_before = 0; + struct encap_key key; + struct mlx5e_encap_entry *e; + bool entry_created = false; + unsigned short family; + uintptr_t hash_key; + int err = 0; + + parse_attr = attr->parse_attr; + tun_info = parse_attr->tun_info[out_index]; + family = ip_tunnel_info_af(tun_info); + key.ip_tun_key = &tun_info->key; + key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); + if (!key.tc_tunnel) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel"); + return -EOPNOTSUPP; + } + + hash_key = hash_encap_info(&key); + + mutex_lock(&esw->offloads.encap_tbl_lock); + e = mlx5e_encap_get(priv, &key, hash_key); + + /* must verify if encap is valid or not */ + if (e) { + /* Check that entry was not already attached to this flow */ + if 
(is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { + err = -EOPNOTSUPP; + goto out_err; + } + + mutex_unlock(&esw->offloads.encap_tbl_lock); + wait_for_completion(&e->res_ready); + + /* Protect against concurrent neigh update. */ + mutex_lock(&esw->offloads.encap_tbl_lock); + if (e->compl_result < 0) { + err = -EREMOTEIO; + goto out_err; + } + goto attach_flow; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + err = -ENOMEM; + goto out_err; + } + + refcount_set(&e->refcnt, 1); + init_completion(&e->res_ready); + entry_created = true; + INIT_LIST_HEAD(&e->route_list); + + tun_info = mlx5e_dup_tun_info(tun_info); + if (!tun_info) { + err = -ENOMEM; + goto out_err_init; + } + e->tun_info = tun_info; + err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); + if (err) + goto out_err_init; + + INIT_LIST_HEAD(&e->flows); + hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + tbl_time_before = mlx5e_route_tbl_get_last_update(priv); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + if (family == AF_INET) + err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); + else if (family == AF_INET6) + err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); + + /* Protect against concurrent neigh update. */ + mutex_lock(&esw->offloads.encap_tbl_lock); + complete_all(&e->res_ready); + if (err) { + e->compl_result = err; + goto out_err; + } + e->compl_result = 1; + +attach_flow: + err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before, + out_index); + if (err) + goto out_err; + + flow->encaps[out_index].e = e; + list_add(&flow->encaps[out_index].list, &e->flows); + flow->encaps[out_index].index = out_index; + *encap_dev = e->out_dev; + if (e->flags & MLX5_ENCAP_ENTRY_VALID) { + attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat; + attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + *encap_valid = true; + } else { + *encap_valid = false; + } + mutex_unlock(&esw->offloads.encap_tbl_lock); + + return err; + +out_err: + mutex_unlock(&esw->offloads.encap_tbl_lock); + if (e) + mlx5e_encap_put(priv, e); + return err; + +out_err_init: + mutex_unlock(&esw->offloads.encap_tbl_lock); + kfree(tun_info); + kfree(e); + return err; +} + +int mlx5e_attach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_decap_entry *d; + struct mlx5e_decap_key key; + uintptr_t hash_key; + int err = 0; + + parse_attr = flow->attr->parse_attr; + if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) { + NL_SET_ERR_MSG_MOD(extack, + "encap header larger than max supported"); + return -EOPNOTSUPP; + } + + key.key = parse_attr->eth; + hash_key = hash_decap_info(&key); + mutex_lock(&esw->offloads.decap_tbl_lock); + d = mlx5e_decap_get(priv, &key, hash_key); + if (d) { + mutex_unlock(&esw->offloads.decap_tbl_lock); + wait_for_completion(&d->res_ready); + mutex_lock(&esw->offloads.decap_tbl_lock); + if (d->compl_result) { + err = -EREMOTEIO; + goto out_free; + } + goto found; + } + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + err = -ENOMEM; + goto out_err; + } + + d->key = key; + refcount_set(&d->refcnt, 1); + init_completion(&d->res_ready); + INIT_LIST_HEAD(&d->flows); + hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + 
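/* allocate the reformat context without holding decap_tbl_lock;
+	 * concurrent callers that find this entry wait on res_ready
+	 */
+	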
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2, + sizeof(parse_attr->eth), + &parse_attr->eth, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(d->pkt_reformat)) { + err = PTR_ERR(d->pkt_reformat); + d->compl_result = err; + } + mutex_lock(&esw->offloads.decap_tbl_lock); + complete_all(&d->res_ready); + if (err) + goto out_free; + +found: + flow->decap_reformat = d; + attr->decap_pkt_reformat = d->pkt_reformat; + list_add(&flow->l3_to_l2_reformat, &d->flows); + mutex_unlock(&esw->offloads.decap_tbl_lock); + return 0; + +out_free: + mutex_unlock(&esw->offloads.decap_tbl_lock); + mlx5e_decap_put(priv, d); + return err; + +out_err: + mutex_unlock(&esw->offloads.decap_tbl_lock); + return err; +} + +static int cmp_route_info(struct mlx5e_route_key *a, + struct mlx5e_route_key *b) +{ + if (a->ip_version == 4 && b->ip_version == 4) + return memcmp(&a->endpoint_ip.v4, &b->endpoint_ip.v4, + sizeof(a->endpoint_ip.v4)); + else if (a->ip_version == 6 && b->ip_version == 6) + return memcmp(&a->endpoint_ip.v6, &b->endpoint_ip.v6, + sizeof(a->endpoint_ip.v6)); + return 1; +} + +static u32 hash_route_info(struct mlx5e_route_key *key) +{ + if (key->ip_version == 4) + return jhash(&key->endpoint_ip.v4, sizeof(key->endpoint_ip.v4), 0); + return jhash(&key->endpoint_ip.v6, sizeof(key->endpoint_ip.v6), 0); +} + +static void mlx5e_route_dealloc(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r) +{ + WARN_ON(!list_empty(&r->decap_flows)); + WARN_ON(!list_empty(&r->encap_entries)); + + kfree_rcu(r, rcu); +} + +static void mlx5e_route_put(struct mlx5e_priv *priv, struct mlx5e_route_entry *r) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&r->refcnt, &esw->offloads.encap_tbl_lock)) + return; + + hash_del_rcu(&r->hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_route_dealloc(priv, r); +} + +static void mlx5e_route_put_locked(struct mlx5e_priv *priv, struct mlx5e_route_entry *r) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + lockdep_assert_held(&esw->offloads.encap_tbl_lock); + + if (!refcount_dec_and_test(&r->refcnt)) + return; + hash_del_rcu(&r->hlist); + mlx5e_route_dealloc(priv, r); +} + +static struct mlx5e_route_entry * +mlx5e_route_get(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key, + u32 hash_key) +{ + struct mlx5e_route_key r_key; + struct mlx5e_route_entry *r; + + hash_for_each_possible(encap->route_tbl, r, hlist, hash_key) { + r_key = r->key; + if (!cmp_route_info(&r_key, key) && + refcount_inc_not_zero(&r->refcnt)) + return r; + } + return NULL; +} + +static struct mlx5e_route_entry * +mlx5e_route_get_create(struct mlx5e_priv *priv, + struct mlx5e_route_key *key, + int tunnel_dev_index, + unsigned long *route_tbl_change_time) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_tc_tun_encap *encap; + struct mlx5e_route_entry *r; + u32 hash_key; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + encap = uplink_priv->encap; + + hash_key = hash_route_info(key); + spin_lock_bh(&encap->route_lock); + r = mlx5e_route_get(encap, key, hash_key); + spin_unlock_bh(&encap->route_lock); + if (r) { + if (!mlx5e_route_entry_valid(r)) { + mlx5e_route_put_locked(priv, r); + return ERR_PTR(-EINVAL); + } + return r; + } + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return ERR_PTR(-ENOMEM); + + r->key = *key; + r->flags |= 
MLX5E_ROUTE_ENTRY_VALID; + r->tunnel_dev_index = tunnel_dev_index; + refcount_set(&r->refcnt, 1); + INIT_LIST_HEAD(&r->decap_flows); + INIT_LIST_HEAD(&r->encap_entries); + + spin_lock_bh(&encap->route_lock); + *route_tbl_change_time = encap->route_tbl_last_update; + hash_add(encap->route_tbl, &r->hlist, hash_key); + spin_unlock_bh(&encap->route_lock); + + return r; +} + +static struct mlx5e_route_entry * +mlx5e_route_lookup_for_update(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key) +{ + u32 hash_key = hash_route_info(key); + struct mlx5e_route_entry *r; + + spin_lock_bh(&encap->route_lock); + encap->route_tbl_last_update = jiffies; + r = mlx5e_route_get(encap, key, hash_key); + spin_unlock_bh(&encap->route_lock); + + return r; +} + +struct mlx5e_tc_fib_event_data { + struct work_struct work; + unsigned long event; + struct mlx5e_route_entry *r; + struct net_device *ul_dev; +}; + +static void mlx5e_tc_fib_event_work(struct work_struct *work); +static struct mlx5e_tc_fib_event_data * +mlx5e_tc_init_fib_work(unsigned long event, struct net_device *ul_dev, gfp_t flags) +{ + struct mlx5e_tc_fib_event_data *fib_work; + + fib_work = kzalloc(sizeof(*fib_work), flags); + if (WARN_ON(!fib_work)) + return NULL; + + INIT_WORK(&fib_work->work, mlx5e_tc_fib_event_work); + fib_work->event = event; + fib_work->ul_dev = ul_dev; + + return fib_work; +} + +static int +mlx5e_route_enqueue_update(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r, + unsigned long event) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_fib_event_data *fib_work; + struct mlx5e_rep_priv *uplink_rpriv; + struct net_device *ul_dev; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + ul_dev = uplink_rpriv->netdev; + + fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_KERNEL); + if (!fib_work) + return -ENOMEM; + + dev_hold(ul_dev); + refcount_inc(&r->refcnt); + fib_work->r = r; + queue_work(priv->wq, &fib_work->work); + + return 0; +} + +int mlx5e_attach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + unsigned long tbl_time_before, tbl_time_after; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + int err = 0; + + esw_attr = attr->esw_attr; + parse_attr = attr->parse_attr; + mutex_lock(&esw->offloads.encap_tbl_lock); + if (!esw_attr->rx_tun_attr) + goto out; + + tbl_time_before = mlx5e_route_tbl_get_last_update(priv); + tbl_time_after = tbl_time_before; + err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr); + if (err || !esw_attr->rx_tun_attr->decap_vport) + goto out; + + key.ip_version = attr->ip_version; + if (key.ip_version == 4) + key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4; + else + key.endpoint_ip.v6 = esw_attr->rx_tun_attr->dst_ip.v6; + + r = mlx5e_route_get_create(priv, &key, parse_attr->filter_dev->ifindex, + &tbl_time_after); + if (IS_ERR(r)) { + err = PTR_ERR(r); + goto out; + } + /* Routing changed concurrently. FIB event handler might have missed new + * entry, schedule update. 
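+	 * The queued work takes encap_tbl_lock and re-offloads the flows
+	 * attached to this route entry.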
+ */ + if (tbl_time_before != tbl_time_after) { + err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE); + if (err) { + mlx5e_route_put_locked(priv, r); + goto out; + } + } + + flow->decap_route = r; + list_add(&flow->decap_routes, &r->decap_flows); + mutex_unlock(&esw->offloads.encap_tbl_lock); + return 0; + +out: + mutex_unlock(&esw->offloads.encap_tbl_lock); + return err; +} + +static int mlx5e_attach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5e_encap_entry *e, + bool new_encap_entry, + unsigned long tbl_time_before, + int out_index) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + unsigned long tbl_time_after = tbl_time_before; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + const struct ip_tunnel_info *tun_info; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + unsigned short family; + int err = 0; + + esw_attr = attr->esw_attr; + parse_attr = attr->parse_attr; + tun_info = parse_attr->tun_info[out_index]; + family = ip_tunnel_info_af(tun_info); + + if (family == AF_INET) { + key.endpoint_ip.v4 = tun_info->key.u.ipv4.src; + key.ip_version = 4; + } else if (family == AF_INET6) { + key.endpoint_ip.v6 = tun_info->key.u.ipv6.src; + key.ip_version = 6; + } + + err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev, + e->route_dev_ifindex, out_index); + if (err || !(esw_attr->dests[out_index].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)) + return err; + + r = mlx5e_route_get_create(priv, &key, parse_attr->mirred_ifindex[out_index], + &tbl_time_after); + if (IS_ERR(r)) + return PTR_ERR(r); + /* Routing changed concurrently. FIB event handler might have missed new + * entry, schedule update. 
+ */ + if (tbl_time_before != tbl_time_after) { + err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE); + if (err) { + mlx5e_route_put_locked(priv, r); + return err; + } + } + + flow->encap_routes[out_index].r = r; + if (new_encap_entry) + list_add(&e->route_list, &r->encap_entries); + flow->encap_routes[out_index].index = out_index; + return 0; +} + +void mlx5e_detach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_route_entry *r = flow->decap_route; + + if (!r) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + list_del(&flow->decap_routes); + flow->decap_route = NULL; + + if (!refcount_dec_and_test(&r->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; + } + hash_del_rcu(&r->hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_route_dealloc(priv, r); +} + +static void mlx5e_detach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index) +{ + struct mlx5e_route_entry *r = flow->encap_routes[out_index].r; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_entry *e, *tmp; + + if (!r) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + flow->encap_routes[out_index].r = NULL; + + if (!refcount_dec_and_test(&r->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; + } + list_for_each_entry_safe(e, tmp, &r->encap_entries, route_list) + list_del_init(&e->route_list); + hash_del_rcu(&r->hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_route_dealloc(priv, r); +} + +static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct list_head *encap_flows) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, encap_flows, tmp_list) { + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + + if (!mlx5e_is_offloaded_flow(flow)) + continue; + esw_attr = attr->esw_attr; + + if (flow_flag_test(flow, SLOW)) + mlx5e_tc_unoffload_from_slow_path(esw, flow); + else + mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); + mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); + attr->modify_hdr = NULL; + + esw_attr->dests[flow->tmp_entry_index].flags &= + ~MLX5_ESW_DEST_ENCAP_VALID; + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL; + } + + e->flags |= MLX5_ENCAP_ENTRY_NO_ROUTE; + if (e->flags & MLX5_ENCAP_ENTRY_VALID) { + e->flags &= ~MLX5_ENCAP_ENTRY_VALID; + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); + e->pkt_reformat = NULL; + } +} + +static void mlx5e_reoffload_encap(struct mlx5e_priv *priv, + struct net_device *tunnel_dev, + struct mlx5e_encap_entry *e, + struct list_head *encap_flows) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + int err; + + err = ip_tunnel_info_af(e->tun_info) == AF_INET ? 
+ mlx5e_tc_tun_update_header_ipv4(priv, tunnel_dev, e) : + mlx5e_tc_tun_update_header_ipv6(priv, tunnel_dev, e); + if (err) + mlx5_core_warn(priv->mdev, "Failed to update encap header, %d", err); + e->flags &= ~MLX5_ENCAP_ENTRY_NO_ROUTE; + + list_for_each_entry(flow, encap_flows, tmp_list) { + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + + if (flow_flag_test(flow, FAILED)) + continue; + + esw_attr = attr->esw_attr; + parse_attr = attr->parse_attr; + spec = &parse_attr->spec; + + err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts, + e->out_dev, e->route_dev_ifindex, + flow->tmp_entry_index); + if (err) { + mlx5_core_warn(priv->mdev, "Failed to update VF tunnel err=%d", err); + continue; + } + + err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow); + if (err) { + mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d", + err); + continue; + } + + if (e->flags & MLX5_ENCAP_ENTRY_VALID) { + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat; + esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + if (!mlx5e_tc_flow_all_encaps_valid(esw_attr)) + goto offload_to_slow_path; + /* update from slow path rule to encap rule */ + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", + err); + } else { + flow->rule[0] = rule; + } + } else { +offload_to_slow_path: + rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); + /* mark the flow's encap dest as non-valid */ + esw_attr->dests[flow->tmp_entry_index].flags &= + ~MLX5_ESW_DEST_ENCAP_VALID; + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n", + err); + } else { + flow->rule[0] = rule; + } + } + flow_flag_set(flow, OFFLOADED); + } +} + +static int mlx5e_update_route_encaps(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r, + struct list_head *flow_list, + bool replace) +{ + struct net_device *tunnel_dev; + struct mlx5e_encap_entry *e; + + tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index); + if (!tunnel_dev) + return -ENODEV; + + list_for_each_entry(e, &r->encap_entries, route_list) { + LIST_HEAD(encap_flows); + + mlx5e_take_all_encap_flows(e, &encap_flows); + if (list_empty(&encap_flows)) + continue; + + if (mlx5e_route_entry_valid(r)) + mlx5e_invalidate_encap(priv, e, &encap_flows); + + if (!replace) { + list_splice(&encap_flows, flow_list); + continue; + } + + mlx5e_reoffload_encap(priv, tunnel_dev, e, &encap_flows); + list_splice(&encap_flows, flow_list); + } + + return 0; +} + +static void mlx5e_unoffload_flow_list(struct mlx5e_priv *priv, + struct list_head *flow_list) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, flow_list, tmp_list) + if (mlx5e_is_offloaded_flow(flow)) + mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); +} + +static void mlx5e_reoffload_decap(struct mlx5e_priv *priv, + struct list_head *decap_flows) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, decap_flows, tmp_list) { + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + 
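/* skip flows that previously failed to offload */
+		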
if (flow_flag_test(flow, FAILED)) + continue; + + parse_attr = attr->parse_attr; + spec = &parse_attr->spec; + err = mlx5e_tc_tun_route_lookup(priv, spec, attr); + if (err) { + mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n", + err); + continue; + } + + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(priv->mdev, "Failed to update cached decap flow, %d\n", + err); + } else { + flow->rule[0] = rule; + flow_flag_set(flow, OFFLOADED); + } + } +} + +static int mlx5e_update_route_decap_flows(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r, + struct list_head *flow_list, + bool replace) +{ + struct net_device *tunnel_dev; + LIST_HEAD(decap_flows); + + tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index); + if (!tunnel_dev) + return -ENODEV; + + mlx5e_take_all_route_decap_flows(r, &decap_flows); + if (mlx5e_route_entry_valid(r)) + mlx5e_unoffload_flow_list(priv, &decap_flows); + if (replace) + mlx5e_reoffload_decap(priv, &decap_flows); + + list_splice(&decap_flows, flow_list); + + return 0; +} + +static void mlx5e_tc_fib_event_work(struct work_struct *work) +{ + struct mlx5e_tc_fib_event_data *event_data = + container_of(work, struct mlx5e_tc_fib_event_data, work); + struct net_device *ul_dev = event_data->ul_dev; + struct mlx5e_priv *priv = netdev_priv(ul_dev); + struct mlx5e_route_entry *r = event_data->r; + struct mlx5_eswitch *esw; + LIST_HEAD(flow_list); + bool replace; + int err; + + /* sync with concurrent neigh updates */ + rtnl_lock(); + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); + replace = event_data->event == FIB_EVENT_ENTRY_REPLACE; + + if (!mlx5e_route_entry_valid(r) && !replace) + goto out; + + err = mlx5e_update_route_encaps(priv, r, &flow_list, replace); + if (err) + mlx5_core_warn(priv->mdev, "Failed to update route encaps, %d\n", + err); + + err = mlx5e_update_route_decap_flows(priv, r, &flow_list, replace); + if (err) + mlx5_core_warn(priv->mdev, "Failed to update route decap flows, %d\n", + err); + + if (replace) + r->flags |= MLX5E_ROUTE_ENTRY_VALID; +out: + mutex_unlock(&esw->offloads.encap_tbl_lock); + rtnl_unlock(); + + mlx5e_put_flow_list(priv, &flow_list); + mlx5e_route_put(priv, event_data->r); + dev_put(event_data->ul_dev); + kfree(event_data); +} + +static struct mlx5e_tc_fib_event_data * +mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv, + struct net_device *ul_dev, + struct mlx5e_tc_tun_encap *encap, + unsigned long event, + struct fib_notifier_info *info) +{ + struct fib_entry_notifier_info *fen_info; + struct mlx5e_tc_fib_event_data *fib_work; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + struct net_device *fib_dev; + + fen_info = container_of(info, struct fib_entry_notifier_info, info); + fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; + if (fib_dev->netdev_ops != &mlx5e_netdev_ops || + fen_info->dst_len != 32) + return NULL; + + fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC); + if (!fib_work) + return ERR_PTR(-ENOMEM); + + key.endpoint_ip.v4 = htonl(fen_info->dst); + key.ip_version = 4; + + /* Can't fail after this point because releasing reference to r + * requires obtaining sleeping mutex which we can't do in atomic + * context. 
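+	 * The route reference taken below is released by the FIB event
+	 * work handler.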
+ */ + r = mlx5e_route_lookup_for_update(encap, &key); + if (!r) + goto out; + fib_work->r = r; + dev_hold(ul_dev); + + return fib_work; + +out: + kfree(fib_work); + return NULL; +} + +static struct mlx5e_tc_fib_event_data * +mlx5e_init_fib_work_ipv6(struct mlx5e_priv *priv, + struct net_device *ul_dev, + struct mlx5e_tc_tun_encap *encap, + unsigned long event, + struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen_info; + struct mlx5e_tc_fib_event_data *fib_work; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + struct net_device *fib_dev; + + fen_info = container_of(info, struct fib6_entry_notifier_info, info); + fib_dev = fib6_info_nh_dev(fen_info->rt); + if (fib_dev->netdev_ops != &mlx5e_netdev_ops || + fen_info->rt->fib6_dst.plen != 128) + return NULL; + + fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC); + if (!fib_work) + return ERR_PTR(-ENOMEM); + + memcpy(&key.endpoint_ip.v6, &fen_info->rt->fib6_dst.addr, + sizeof(fen_info->rt->fib6_dst.addr)); + key.ip_version = 6; + + /* Can't fail after this point because releasing reference to r + * requires obtaining sleeping mutex which we can't do in atomic + * context. + */ + r = mlx5e_route_lookup_for_update(encap, &key); + if (!r) + goto out; + fib_work->r = r; + dev_hold(ul_dev); + + return fib_work; + +out: + kfree(fib_work); + return NULL; +} + +static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct mlx5e_tc_fib_event_data *fib_work; + struct fib_notifier_info *info = ptr; + struct mlx5e_tc_tun_encap *encap; + struct net_device *ul_dev; + struct mlx5e_priv *priv; + + encap = container_of(nb, struct mlx5e_tc_tun_encap, fib_nb); + priv = encap->priv; + ul_dev = priv->netdev; + priv = netdev_priv(ul_dev); + + switch (event) { + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_DEL: + if (info->family == AF_INET) + fib_work = mlx5e_init_fib_work_ipv4(priv, ul_dev, encap, event, info); + else if (info->family == AF_INET6) + fib_work = mlx5e_init_fib_work_ipv6(priv, ul_dev, encap, event, info); + else + return NOTIFY_DONE; + + if (!IS_ERR_OR_NULL(fib_work)) { + queue_work(priv->wq, &fib_work->work); + } else if (IS_ERR(fib_work)) { + NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work"); + mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n", + PTR_ERR(fib_work)); + } + + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_DONE; +} + +struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_tun_encap *encap; + int err; + + encap = kvzalloc(sizeof(*encap), GFP_KERNEL); + if (!encap) + return ERR_PTR(-ENOMEM); + + encap->priv = priv; + encap->fib_nb.notifier_call = mlx5e_tc_tun_fib_event; + spin_lock_init(&encap->route_lock); + hash_init(encap->route_tbl); + err = register_fib_notifier(dev_net(priv->netdev), &encap->fib_nb, + NULL, NULL); + if (err) { + kvfree(encap); + return ERR_PTR(err); + } + + return encap; +} + +void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap) +{ + if (!encap) + return; + + unregister_fib_notifier(dev_net(encap->priv->netdev), &encap->fib_nb); + flush_workqueue(encap->priv->wq); /* flush fib event works */ + kvfree(encap); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h new file mode 100644 index 000000000000..3391504d9a08 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 
+/* Copyright (c) 2021 Mellanox Technologies. */ + +#ifndef __MLX5_EN_TC_TUN_ENCAP_H__ +#define __MLX5_EN_TC_TUN_ENCAP_H__ + +#include "tc_priv.h" + +void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, int out_index); + +int mlx5e_attach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, + int out_index, + struct netlink_ext_ack *extack, + struct net_device **encap_dev, + bool *encap_valid); +int mlx5e_attach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); +void mlx5e_detach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + +int mlx5e_attach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); +void mlx5e_detach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + +struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info); + +int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec); + +struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv); +void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap); + +#endif /* __MLX5_EN_TC_TUN_ENCAP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c index 1f9526244222..3479672e84cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c @@ -81,8 +81,8 @@ static int parse_tunnel(struct mlx5e_priv *priv, if (!enc_keyid.mask->keyid) return 0; - if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_UDP)) + if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) && + !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP)) return -EOPNOTSUPP; flow_rule_match_mpls(rule, &match); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c new file mode 100644 index 000000000000..37fc1d77ded7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies */ + +#include <net/page_pool.h> +#include "en/txrx.h" +#include "en/params.h" +#include "en/trap.h" + +static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi); + struct mlx5e_ch_stats *ch_stats = trap_ctx->stats; + struct mlx5e_rq *rq = &trap_ctx->rq; + bool busy = false; + int work_done = 0; + + ch_stats->poll++; + + work_done = mlx5e_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + busy |= rq->post_wqes(rq); + + if (busy) + return budget; + + if (unlikely(!napi_complete_done(napi, work_done))) + return work_done; + + mlx5e_cq_arm(&rq->cq); + return work_done; +} + +static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp, + struct mlx5e_rq_stats *stats, struct mlx5e_params *params, + struct mlx5e_ch_stats *ch_stats, + struct mlx5e_rq *rq) +{ + void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq); + struct mlx5_core_dev *mdev = priv->mdev; + struct page_pool_params pp_params = {}; + int node = dev_to_node(mdev->device); + u32 pool_size; + int wq_sz; + int err; + int i; + + rqp->wq.db_numa_node = node; + + rq->wq_type = params->rq_wq_type; + rq->pdev = mdev->device; + rq->netdev = priv->netdev; + rq->mdev = mdev; + rq->priv = priv; + rq->stats = 
stats; + rq->clock = &mdev->clock; + rq->tstamp = &priv->tstamp; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + + xdp_rxq_info_unused(&rq->xdp_rxq); + + rq->buff.map_dir = DMA_FROM_DEVICE; + rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL); + pool_size = 1 << params->log_rq_mtu_frames; + + err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); + if (err) + return err; + + rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); + + rq->wqe.info = rqp->frags_info; + rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride; + rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags), + (wq_sz << rq->wqe.info.log_num_frags)), + GFP_KERNEL, node); + if (!rq->wqe.frags) { + err = -ENOMEM; + goto err_wq_cyc_destroy; + } + + err = mlx5e_init_di_list(rq, wq_sz, node); + if (err) + goto err_free_frags; + + rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + + mlx5e_rq_set_trap_handlers(rq, params); + + /* Create a page_pool and register it with rxq */ + pp_params.order = 0; + pp_params.flags = 0; /* No internal DMA mapping in page_pool */ + pp_params.pool_size = pool_size; + pp_params.nid = node; + pp_params.dev = mdev->device; + pp_params.dma_dir = rq->buff.map_dir; + + /* page_pool can be used even when there is no rq->xdp_prog; since + * page_pool does not handle DMA mapping, there is no required state + * to clear. And page_pool gracefully handles elevated refcnt. + */ + rq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rq->page_pool)) { + err = PTR_ERR(rq->page_pool); + rq->page_pool = NULL; + goto err_free_di_list; + } + for (i = 0; i < wq_sz; i++) { + struct mlx5e_rx_wqe_cyc *wqe = + mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++) { + u32 frag_size = rq->wqe.info.arr[f].frag_size | + MLX5_HW_START_PADDING; + + wqe->data[f].byte_count = cpu_to_be32(frag_size); + wqe->data[f].lkey = rq->mkey_be; + } + /* check if num_frags is not a power of two */ + if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { + wqe->data[f].byte_count = 0; + wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + wqe->data[f].addr = 0; + } + } + return 0; + +err_free_di_list: + mlx5e_free_di_list(rq); +err_free_frags: + kvfree(rq->wqe.frags); +err_wq_cyc_destroy: + mlx5_wq_destroy(&rq->wq_ctrl); + + return err; +} + +static void mlx5e_free_trap_rq(struct mlx5e_rq *rq) +{ + page_pool_destroy(rq->page_pool); + mlx5e_free_di_list(rq); + kvfree(rq->wqe.frags); + mlx5_wq_destroy(&rq->wq_ctrl); +} + +static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi, + struct mlx5e_rq_stats *stats, struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param, + struct mlx5e_ch_stats *ch_stats, + struct mlx5e_rq *rq) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_create_cq_param ccp = {}; + struct dim_cq_moder trap_moder = {}; + struct mlx5e_cq *cq = &rq->cq; + int err; + + ccp.node = dev_to_node(mdev->device); + ccp.ch_stats = ch_stats; + ccp.napi = napi; + ccp.ix = 0; + err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq); + if (err) + return err; + + err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq); + if (err) + goto err_destroy_cq; + + err = mlx5e_create_rq(rq, rq_param); + if (err) + goto err_free_rq; + + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) + goto err_destroy_rq; + + return 0; + +err_destroy_rq: + mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); 
+err_free_rq: + mlx5e_free_trap_rq(rq); +err_destroy_cq: + mlx5e_close_cq(cq); + + return err; +} + +static void mlx5e_close_trap_rq(struct mlx5e_rq *rq) +{ + mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); + mlx5e_free_trap_rq(rq); + mlx5e_close_cq(&rq->cq); +} + +static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, + u32 rqn) +{ + void *tirc; + int inlen; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, inline_rqn, rqn); + err = mlx5e_create_tir(mdev, tir, in); + kvfree(in); + + return err; +} + +static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) +{ + mlx5e_destroy_tir(mdev, tir); +} + +static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq) +{ + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); +} + +static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq) +{ + clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); +} + +static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t) +{ + struct mlx5e_params *params = &t->params; + + params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; + mlx5e_init_rq_type_params(priv->mdev, params); + params->sw_mtu = priv->netdev->max_mtu; + mlx5e_build_rq_param(priv, params, NULL, &t->rq_param); +} + +static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) +{ + int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0)); + struct net_device *netdev = priv->netdev; + struct mlx5e_trap *t; + int err; + + t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu)); + if (!t) + return ERR_PTR(-ENOMEM); + + mlx5e_build_trap_params(priv, t); + + t->priv = priv; + t->mdev = priv->mdev; + t->tstamp = &priv->tstamp; + t->pdev = mlx5_core_dma_dev(priv->mdev); + t->netdev = priv->netdev; + t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + t->stats = &priv->trap_stats.ch; + + netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64); + + err = mlx5e_open_trap_rq(priv, &t->napi, + &priv->trap_stats.rq, + &t->params, &t->rq_param, + &priv->trap_stats.ch, + &t->rq); + if (unlikely(err)) + goto err_napi_del; + + err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn); + if (err) + goto err_close_trap_rq; + + return t; + +err_close_trap_rq: + mlx5e_close_trap_rq(&t->rq); +err_napi_del: + netif_napi_del(&t->napi); + kvfree(t); + return ERR_PTR(err); +} + +void mlx5e_close_trap(struct mlx5e_trap *trap) +{ + mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir); + mlx5e_close_trap_rq(&trap->rq); + netif_napi_del(&trap->napi); + kvfree(trap); +} + +static void mlx5e_activate_trap(struct mlx5e_trap *trap) +{ + napi_enable(&trap->napi); + mlx5e_activate_trap_rq(&trap->rq); + napi_schedule(&trap->napi); +} + +void mlx5e_deactivate_trap(struct mlx5e_priv *priv) +{ + struct mlx5e_trap *trap = priv->en_trap; + + mlx5e_deactivate_trap_rq(&trap->rq); + napi_disable(&trap->napi); +} + +static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv) +{ + struct mlx5e_trap *trap; + + trap = mlx5e_open_trap(priv); + if (IS_ERR(trap)) + goto out; + + mlx5e_activate_trap(trap); +out: + return trap; +} + +static void mlx5e_del_trap_queue(struct mlx5e_priv *priv) +{ + mlx5e_deactivate_trap(priv); + 
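/* NAPI was disabled above, so the trap queue can be torn down safely */
+	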
mlx5e_close_trap(priv->en_trap); + priv->en_trap = NULL; +} + +static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap) +{ + return en_trap->tir.tirn; +} + +static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id) +{ + bool open_queue = !priv->en_trap; + struct mlx5e_trap *trap; + int err; + + if (open_queue) { + trap = mlx5e_add_trap_queue(priv); + if (IS_ERR(trap)) + return PTR_ERR(trap); + priv->en_trap = trap; + } + + switch (trap_id) { + case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER: + err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap)); + if (err) + goto err_out; + break; + case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER: + err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap)); + if (err) + goto err_out; + break; + default: + netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id); + err = -EINVAL; + goto err_out; + } + return 0; + +err_out: + if (open_queue) + mlx5e_del_trap_queue(priv); + return err; +} + +static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id) +{ + switch (trap_id) { + case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER: + mlx5e_remove_vlan_trap(priv); + break; + case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER: + mlx5e_remove_mac_trap(priv); + break; + default: + netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id); + return -EINVAL; + } + if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev)) + mlx5e_del_trap_queue(priv); + + return 0; +} + +int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx) +{ + int err = 0; + + /* Traps are unarmed when interface is down, no need to update + * them. The configuration is saved in the core driver, + * queried and applied upon interface up operation in + * mlx5e_open_locked(). + */ + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + switch (trap_ctx->action) { + case DEVLINK_TRAP_ACTION_TRAP: + err = mlx5e_handle_action_trap(priv, trap_ctx->id); + break; + case DEVLINK_TRAP_ACTION_DROP: + err = mlx5e_handle_action_drop(priv, trap_ctx->id); + break; + default: + netdev_warn(priv->netdev, "%s: Unsupported action %d\n", __func__, + trap_ctx->action); + err = -EINVAL; + } + return err; +} + +static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable) +{ + enum devlink_trap_action action; + int err; + + err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action); + if (err) + return err; + if (action == DEVLINK_TRAP_ACTION_TRAP) + err = enable ? 
mlx5e_handle_action_trap(priv, trap_id) : + mlx5e_handle_action_drop(priv, trap_id); + return err; +} + +static const int mlx5e_traps_arr[] = { + DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER, + DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER, +}; + +int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable) +{ + int err; + int i; + + for (i = 0; i < ARRAY_SIZE(mlx5e_traps_arr); i++) { + err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable); + if (err) + return err; + } + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h new file mode 100644 index 000000000000..aa3f17658c6d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies */ + +#ifndef __MLX5E_TRAP_H__ +#define __MLX5E_TRAP_H__ + +#include "../en.h" +#include "../devlink.h" + +struct mlx5e_trap { + /* data path */ + struct mlx5e_rq rq; + struct mlx5e_tir tir; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + + /* data path - accessed per napi poll */ + struct mlx5e_ch_stats *stats; + + /* control */ + struct mlx5e_priv *priv; + struct mlx5_core_dev *mdev; + struct hwtstamp_config *tstamp; + DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); + + struct mlx5e_params params; + struct mlx5e_rq_param rq_param; +}; + +void mlx5e_close_trap(struct mlx5e_trap *trap); +void mlx5e_deactivate_trap(struct mlx5e_priv *priv); +int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx); +int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 4880f2179273..2371b83dad9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -26,6 +26,13 @@ #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) +static inline +ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts) +{ + return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time, + clock, cqe_ts); +} + enum mlx5e_icosq_wqe_type { MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_UMR_RX, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index d487e5e37162..8d991c3b7a50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -83,7 +83,7 @@ static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); /* Let other device's napi(s) and XSK wakeups see our new state. */ - synchronize_rcu(); + synchronize_net(); } static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index d87c345878d3..f4bce1365639 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -111,7 +111,7 @@ err_free_cparam: void mlx5e_close_xsk(struct mlx5e_channel *c) { clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); - synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */ + synchronize_net(); /* Sync with the XSK wakeup and with NAPI. 
*/ mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 1fae7fab8297..cc0efac7b812 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -85,7 +85,7 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ih } mlx5e_set_eseg_swp(skb, eseg, &swp_spec); - if (skb_vlan_tag_present(skb) && ihs) + if (skb_vlan_tag_present(skb) && ihs) mlx5e_eseg_swp_offsets_add_vlan(eseg); } @@ -144,9 +144,9 @@ static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *sta { #ifdef CONFIG_MLX5_EN_IPSEC return mlx5e_ipsec_is_tx_flow(&state->ipsec); -#endif - +#else return false; +#endif } static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq, @@ -173,7 +173,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv, #endif #if IS_ENABLED(CONFIG_GENEVE) - if (skb->encapsulation) + if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) mlx5e_tx_tunnel_accel(skb, eseg, ihs); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index a9b45606dbdb..a97e8d205094 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -497,20 +497,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, } } -bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, - netdev_features_t features) -{ - struct sec_path *sp = skb_sec_path(skb); - struct xfrm_state *x; - - if (sp && sp->len) { - x = sp->xvec[0]; - if (x && x->xso.offload_handle) - return true; - } - return false; -} - void mlx5e_ipsec_build_inverse_table(void) { u16 mss_inv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 9df9b9a8e09b..3e80742a3caf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -57,8 +57,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, u32 *cqe_bcnt); void mlx5e_ipsec_inverse_table_init(void); -bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, - netdev_features_t features); void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_offload *xo); void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, @@ -87,8 +85,28 @@ static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ips return ipsec_st->x; } +static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg) +{ + return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); +} + void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg); + +static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, + netdev_features_t features) +{ + struct sec_path *sp = skb_sec_path(skb); + + if (sp && sp->len) { + struct xfrm_state *x = sp->xvec[0]; + + if (x && x->xso.offload_handle) + return true; + } + return false; +} + #else static inline void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, @@ -96,7 +114,14 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct 
net_device *netdev, struct mlx5_cqe64 *cqe) {} +static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg) +{ + return false; +} + static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } +static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, + netdev_features_t features) { return false; } #endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5E_IPSEC_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 1b392696280d..95293ee0d38d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -2,6 +2,7 @@ // Copyright (c) 2019 Mellanox Technologies. #include "en.h" +#include "en_accel/tls.h" #include "en_accel/ktls.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" @@ -86,16 +87,33 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable) int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) { - int err = 0; + int err; - if (priv->netdev->features & NETIF_F_HW_TLS_RX) + if (!mlx5_accel_is_ktls_rx(priv->mdev)) + return 0; + + priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); + if (!priv->tls->rx_wq) + return -ENOMEM; + + if (priv->netdev->features & NETIF_F_HW_TLS_RX) { err = mlx5e_accel_fs_tcp_create(priv); + if (err) { + destroy_workqueue(priv->tls->rx_wq); + return err; + } + } - return err; + return 0; } void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) { + if (!mlx5_accel_is_ktls_rx(priv->mdev)) + return; + if (priv->netdev->features & NETIF_F_HW_TLS_RX) mlx5e_accel_fs_tcp_destroy(priv); + + destroy_workqueue(priv->tls->rx_wq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 6a1d82503ef8..d06532d0baa4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -57,6 +57,20 @@ struct mlx5e_ktls_offload_context_rx { struct mlx5e_ktls_rx_resync_ctx resync; }; +static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx) +{ + if (!refcount_dec_and_test(&priv_rx->resync.refcnt)) + return false; + + kfree(priv_rx); + return true; +} + +static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx) +{ + refcount_inc(&priv_rx->resync.refcnt); +} + static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) { int err, inlen; @@ -326,7 +340,7 @@ static void resync_handle_work(struct work_struct *work) priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync); if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { - refcount_dec(&resync->refcnt); + mlx5e_ktls_priv_rx_put(priv_rx); return; } @@ -334,7 +348,7 @@ static void resync_handle_work(struct work_struct *work) sq = &c->async_icosq; if (resync_post_get_progress_params(sq, priv_rx)) - refcount_dec(&resync->refcnt); + mlx5e_ktls_priv_rx_put(priv_rx); } static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, @@ -377,7 +391,11 @@ unlock: return err; } -/* Function is called with elevated refcount, it decreases it. */ +/* Function can be called with the refcount being either elevated or not. + * It decreases the refcount and may free the kTLS priv context. + * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was + * already in flight. 
+ */ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, struct mlx5e_icosq *sq) { @@ -410,7 +428,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); priv_rx->stats->tls_resync_req_end++; out: - refcount_dec(&resync->refcnt); + mlx5e_ktls_priv_rx_put(priv_rx); dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); kfree(buf); } @@ -431,9 +449,9 @@ static bool resync_queue_get_psv(struct sock *sk) return false; resync = &priv_rx->resync; - refcount_inc(&resync->refcnt); + mlx5e_ktls_priv_rx_get(priv_rx); if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) - refcount_dec(&resync->refcnt); + mlx5e_ktls_priv_rx_put(priv_rx); return true; } @@ -625,31 +643,6 @@ err_create_key: return err; } -/* Elevated refcount on the resync object means there are - * outstanding operations (uncompleted GET_PSV WQEs) that - * will read the resync / priv_rx objects once completed. - * Wait for them to avoid use-after-free. - */ -static void wait_for_resync(struct net_device *netdev, - struct mlx5e_ktls_rx_resync_ctx *resync) -{ -#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */ - unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT); - unsigned int refcnt; - - do { - refcnt = refcount_read(&resync->refcnt); - if (refcnt == 1) - return; - - msleep(20); - } while (time_before(jiffies, exp_time)); - - netdev_warn(netdev, - "Failed waiting for kTLS RX resync refcnt to be released (%u).\n", - refcnt); -} - void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) { struct mlx5e_ktls_offload_context_rx *priv_rx; @@ -663,7 +656,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx); set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags); mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL); - synchronize_rcu(); /* Sync with NAPI */ + synchronize_net(); /* Sync with NAPI */ if (!cancel_work_sync(&priv_rx->rule.work)) /* completion is needed, as the priv_rx in the add flow * is maintained on the wqe info (wi), not on the socket. @@ -671,8 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) wait_for_completion(&priv_rx->add_ctx); resync = &priv_rx->resync; if (cancel_work_sync(&resync->work)) - refcount_dec(&resync->refcnt); - wait_for_resync(netdev, resync); + mlx5e_ktls_priv_rx_put(priv_rx); priv_rx->stats->tls_del++; if (priv_rx->rule.rule) @@ -680,5 +672,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) mlx5_core_destroy_tir(mdev, priv_rx->tirn); mlx5_ktls_destroy_key(mdev, priv_rx->key_id); - kfree(priv_rx); + /* priv_rx should normally be freed here, but if there is an outstanding + * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is + * processed. 
+ */ + mlx5e_ktls_priv_rx_put(priv_rx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index fee991f5ee7c..d6b21b899dbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -231,12 +231,6 @@ int mlx5e_tls_init(struct mlx5e_priv *priv) if (!tls) return -ENOMEM; - tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); - if (!tls->rx_wq) { - kfree(tls); - return -ENOMEM; - } - priv->tls = tls; return 0; } @@ -248,7 +242,6 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv) if (!tls) return; - destroy_workqueue(tls->rx_wq); kfree(tls); priv->tls = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 302001d6661e..abdf721bb264 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -447,6 +447,17 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } + /* Don't allow changing the number of channels if HTB offload is active, + * because the numeration of the QoS SQs will change, while per-queue + * qdiscs are attached. + */ + if (priv->htb.maj_id) { + err = -EINVAL; + netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n", + __func__); + goto out; + } + new_channels.params = *cur_params; new_channels.params.num_channels = count; @@ -525,7 +536,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev, #define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT static void -mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) +mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { struct mlx5_core_dev *mdev = priv->mdev; int tc; @@ -540,6 +551,17 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc coal->tx_coalesce_usecs, coal->tx_max_coalesced_frames); } + } +} + +static void +mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int i; + + for (i = 0; i < priv->channels.num; ++i) { + struct mlx5e_channel *c = priv->channels.c[i]; mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, coal->rx_coalesce_usecs, @@ -586,21 +608,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, tx_moder->pkts = coal->tx_max_coalesced_frames; new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - goto out; - } - /* we are opened */ - reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; - if (!reset_rx && !reset_tx) { - mlx5e_set_priv_channels_coalesce(priv, coal); - priv->channels.params = new_channels.params; - goto out; - } - if (reset_rx) { u8 mode = MLX5E_GET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER); @@ -614,6 +624,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, mlx5e_reset_tx_moderation(&new_channels.params, mode); } + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto out; + } + + if (!reset_rx && !reset_tx) { + if (!coal->use_adaptive_rx_coalesce) + mlx5e_set_priv_channels_rx_coalesce(priv, coal); + if (!coal->use_adaptive_tx_coalesce) + 
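/* [Editor's illustration -- not part of the patch] The set_coalesce rework
 * here has two effects: RX and TX moderation are updated independently,
 * and manual values are written to the live CQs instead of restarting the
 * channels; mlx5e_safe_switch_channels() is now needed only when an
 * adaptive (DIM) flag actually flips. The per-channel TX helper reduces
 * to this loop (condensed from the hunk; mlx5_core_modify_cq_moderation()
 * is the existing FW command wrapper):
 *
 *	for (i = 0; i < priv->channels.num; ++i) {
 *		struct mlx5e_channel *c = priv->channels.c[i];
 *
 *		for (tc = 0; tc < priv->channels.params.num_tc; tc++)
 *			mlx5_core_modify_cq_moderation(mdev,
 *					&c->sq[tc].cq.mcq,
 *					coal->tx_coalesce_usecs,
 *					coal->tx_max_coalesced_frames);
 *	}
 */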
mlx5e_set_priv_channels_tx_coalesce(priv, coal); + priv->channels.params = new_channels.params; + goto out; + } + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); out: @@ -1972,6 +1996,16 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) return -EOPNOTSUPP; + /* Don't allow changing the PTP state if HTB offload is active, because + * the numeration of the QoS SQs will change, while per-queue qdiscs are + * attached. + */ + if (priv->htb.maj_id) { + netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n", + __func__); + return -EINVAL; + } + new_channels.params = priv->channels.params; MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable); /* No need to verify SQ stop room as diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index e02e5895703d..16ce7756ac43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -46,7 +46,6 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, enum { MLX5E_FULLMATCH = 0, MLX5E_ALLMULTI = 1, - MLX5E_PROMISC = 2, }; enum { @@ -306,6 +305,79 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); } +static struct mlx5_flow_handle * +mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num) +{ + struct mlx5_flow_destination dest = {}; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG; + spec->flow_context.flow_tag = trap_id; + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = tir_num; + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + kvfree(spec); + return rule; +} + +int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) +{ + struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; + struct mlx5_flow_handle *rule; + int err; + + rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + priv->fs.vlan.trap_rule = NULL; + netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n", + __func__, err); + return err; + } + priv->fs.vlan.trap_rule = rule; + return 0; +} + +void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv) +{ + if (priv->fs.vlan.trap_rule) { + mlx5_del_flow_rules(priv->fs.vlan.trap_rule); + priv->fs.vlan.trap_rule = NULL; + } +} + +int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) +{ + struct mlx5_flow_table *ft = priv->fs.l2.ft.t; + struct mlx5_flow_handle *rule; + int err; + + rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + priv->fs.l2.trap_rule = NULL; + netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n", + __func__, err); + return err; + } + priv->fs.l2.trap_rule = rule; + return 0; +} + +void mlx5e_remove_mac_trap(struct mlx5e_priv *priv) +{ + if (priv->fs.l2.trap_rule) { + mlx5_del_flow_rules(priv->fs.l2.trap_rule); + priv->fs.l2.trap_rule = NULL; + } +} + void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) { if (!priv->fs.vlan.cvlan_filter_disabled) @@ -419,6 +491,8 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); + mlx5e_remove_vlan_trap(priv); + /* must be 
called after DESTROY bit is set and * set_rx_mode is called and flushed */ @@ -596,6 +670,83 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) mlx5e_apply_netdev_addr(priv); } +#define MLX5E_PROMISC_GROUP0_SIZE BIT(0) +#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE + +static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table *ft = priv->fs.promisc.ft.t; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_handle **rule_p; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.ttc.ft.t; + + rule_p = &priv->fs.promisc.rule; + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(*rule_p)) { + err = PTR_ERR(*rule_p); + *rule_p = NULL; + netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__); + } + kvfree(spec); + return err; +} + +static int mlx5e_create_promisc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fs.promisc.ft; + struct mlx5_flow_table_attr ft_attr = {}; + int err; + + ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.level = MLX5E_PROMISC_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err); + return err; + } + + err = mlx5e_add_promisc_rule(priv); + if (err) + goto err_destroy_promisc_table; + + return 0; + +err_destroy_promisc_table: + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; + + return err; +} + +static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv) +{ + if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule")) + return; + mlx5_del_flow_rules(priv->fs.promisc.rule); + priv->fs.promisc.rule = NULL; +} + +static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv) +{ + if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table")) + return; + mlx5e_del_promisc_rule(priv); + mlx5_destroy_flow_table(priv->fs.promisc.ft.t); + priv->fs.promisc.ft.t = NULL; +} + void mlx5e_set_rx_mode_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, @@ -615,14 +766,15 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + int err; if (enable_promisc) { - if (!priv->channels.params.vlan_strip_disable) + err = mlx5e_create_promisc_table(priv); + if (err) + enable_promisc = false; + if (!priv->channels.params.vlan_strip_disable && !err) netdev_warn_once(ndev, "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); - mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->fs.vlan.cvlan_filter_disabled) - mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); @@ -635,11 +787,8 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_l2_flow_rule(priv, &ea->broadcast); if (disable_allmulti) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); - if (disable_promisc) { - if (!priv->fs.vlan.cvlan_filter_disabled) - mlx5e_del_any_vid_rules(priv); - 
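/* [Editor's note -- illustration only] Promiscuous mode moves out of the
 * L2 table into a dedicated single-FTE flow table that is created on
 * demand and destroyed when promisc is cleared, so enabling promisc no
 * longer drags in the any-VID VLAN rules. The catch-all rule is an empty
 * match spec steering straight to the TTC table (condensed from
 * mlx5e_add_promisc_rule() above, error handling dropped):
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);	// no match criteria
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 *	dest.ft = priv->fs.ttc.ft.t;
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 */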
mlx5e_del_l2_flow_rule(priv, &ea->promisc); - } + if (disable_promisc) + mlx5e_destroy_promisc_table(priv); ea->promisc_enabled = promisc_enabled; ea->allmulti_enabled = allmulti_enabled; @@ -1306,9 +1455,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, mc_dmac[0] = 0x01; mv_dmac[0] = 0x01; break; - - case MLX5E_PROMISC: - break; } ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); @@ -1325,12 +1471,12 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, } #define MLX5E_NUM_L2_GROUPS 3 -#define MLX5E_L2_GROUP1_SIZE BIT(0) -#define MLX5E_L2_GROUP2_SIZE BIT(15) -#define MLX5E_L2_GROUP3_SIZE BIT(0) +#define MLX5E_L2_GROUP1_SIZE BIT(15) +#define MLX5E_L2_GROUP2_SIZE BIT(0) +#define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */ #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\ MLX5E_L2_GROUP2_SIZE +\ - MLX5E_L2_GROUP3_SIZE) + MLX5E_L2_GROUP_TRAP_SIZE) static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -1353,7 +1499,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, outer_headers.dmac_47_16); - /* Flow Group for promiscuous */ + /* Flow Group for full match */ + eth_broadcast_addr(mc_dmac); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_L2_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1362,9 +1510,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) goto err_destroy_groups; ft->num_groups++; - /* Flow Group for full match */ - eth_broadcast_addr(mc_dmac); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + /* Flow Group for allmulti */ + eth_zero_addr(mc_dmac); + mc_dmac[0] = 0x01; MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_L2_GROUP2_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1373,11 +1521,10 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) goto err_destroy_groups; ft->num_groups++; - /* Flow Group for allmulti */ - eth_zero_addr(mc_dmac); - mc_dmac[0] = 0x01; + /* Flow Group for l2 traps */ + memset(in, 0, inlen); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_L2_GROUP3_SIZE; + ix += MLX5E_L2_GROUP_TRAP_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) @@ -1435,15 +1582,17 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 4 +#define MLX5E_NUM_VLAN_GROUPS 5 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP1_SIZE BIT(12) #define MLX5E_VLAN_GROUP2_SIZE BIT(1) #define MLX5E_VLAN_GROUP3_SIZE BIT(0) +#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */ #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ MLX5E_VLAN_GROUP1_SIZE +\ MLX5E_VLAN_GROUP2_SIZE +\ - MLX5E_VLAN_GROUP3_SIZE) + MLX5E_VLAN_GROUP3_SIZE +\ + MLX5E_VLAN_GROUP_TRAP_SIZE) static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -1498,6 +1647,15 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP_TRAP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto 
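/* [Editor's note -- illustration only] Both steering tables grow a trap
 * flow group, and it must stay the last group because group indices are
 * carved out of the table linearly. Worked sizing for the VLAN table
 * after this patch (group roles inferred from the surrounding code):
 *
 *	MLX5E_VLAN_GROUP0_SIZE     BIT(12) = 4096	// C-tag VID match
 *	MLX5E_VLAN_GROUP1_SIZE     BIT(12) = 4096	// S-tag VID match
 *	MLX5E_VLAN_GROUP2_SIZE     BIT(1)  = 2		// any C/S-tag VID
 *	MLX5E_VLAN_GROUP3_SIZE     BIT(0)  = 1		// untagged
 *	MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0)  = 1		// trap rule
 *	MLX5E_VLAN_TABLE_SIZE = 4096 + 4096 + 2 + 1 + 1 = 8196 FTEs
 */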
err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3fc7d18ac868..ec2fcb2a2977 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -65,6 +65,9 @@ #include "en/devlink.h" #include "lib/mlx5.h" #include "en/ptp.h" +#include "qos.h" +#include "en/trap.h" +#include "fpga/ipsec.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -106,7 +109,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) return false; - if (MLX5_IPSEC_DEV(mdev)) + if (mlx5_fpga_is_ipsec_device(mdev)) return false; if (params->xdp_prog) { @@ -211,6 +214,33 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) mlx5_notifier_unregister(priv->mdev, &priv->events_nb); } +static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb); + int err; + + switch (event) { + case MLX5_DRIVER_EVENT_TYPE_TRAP: + err = mlx5e_handle_trap_event(priv, data); + break; + default: + netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event); + err = -EINVAL; + } + return err; +} + +static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv) +{ + priv->blocking_events_nb.notifier_call = blocking_event; + mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb); +} + +static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv) +{ + mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb); +} + static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *wqe) @@ -342,13 +372,11 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) prev->last_in_page = true; } -static int mlx5e_init_di_list(struct mlx5e_rq *rq, - int wq_sz, int cpu) +int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node) { int len = wq_sz << rq->wqe.info.log_num_frags; - rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), - GFP_KERNEL, cpu_to_node(cpu)); + rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node); if (!rq->wqe.di) return -ENOMEM; @@ -357,7 +385,7 @@ static int mlx5e_init_di_list(struct mlx5e_rq *rq, return 0; } -static void mlx5e_free_di_list(struct mlx5e_rq *rq) +void mlx5e_free_di_list(struct mlx5e_rq *rq) { kvfree(rq->wqe.di); } @@ -422,6 +450,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->xdpsq = &c->rq_xdpsq; rq->xsk_pool = xsk_pool; + rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ? 
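/* [Editor's illustration -- not part of the patch] The queue caches a
 * pointer to the right cycles-to-time converter so the datapath does not
 * re-test the clock mode per CQE; mlx5e_cqe_ts_to_ns() from the en/txrx.h
 * hunk above then devirtualizes the call via INDIRECT_CALL_2(). Expected
 * RX-completion usage, sketched under the assumption that get_cqe_ts()
 * extracts the raw CQE timestamp:
 *
 *	ktime_t hwts = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
 *					  &rq->mdev->clock,
 *					  get_cqe_ts(cqe));
 *	skb_hwtstamps(skb)->hwtstamp = hwts;
 */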
+ mlx5_real_time_cyc2time : + mlx5_timecounter_cyc2time; if (rq->xsk_pool) rq->stats = &c->priv->channel_stats[c->ix].xskrq; @@ -499,7 +530,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - err = mlx5e_init_di_list(rq, wq_sz, c->cpu); + err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu)); if (err) goto err_rq_frags; @@ -650,11 +681,10 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) mlx5_wq_destroy(&rq->wq_ctrl); } -static int mlx5e_create_rq(struct mlx5e_rq *rq, - struct mlx5e_rq_param *param) +int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) { struct mlx5_core_dev *mdev = rq->mdev; - + u8 ts_format; void *in; void *rqc; void *wq; @@ -667,6 +697,9 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, if (!in) return -ENOMEM; + ts_format = mlx5_is_real_time_rq(mdev) ? + MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING; rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); wq = MLX5_ADDR_OF(rqc, rqc, wq); @@ -674,6 +707,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); + MLX5_SET(rqc, rqc, ts_format, ts_format); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); @@ -774,7 +808,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) return err; } -static void mlx5e_destroy_rq(struct mlx5e_rq *rq) +void mlx5e_destroy_rq(struct mlx5e_rq *rq) { mlx5_core_destroy_rq(rq->mdev, rq->rqn); } @@ -914,7 +948,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq) void mlx5e_deactivate_rq(struct mlx5e_rq *rq) { clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */ + synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */ } void mlx5e_close_rq(struct mlx5e_rq *rq) @@ -1143,7 +1177,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); @@ -1154,6 +1187,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); sq->stop_room = param->stop_room; + sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ? + mlx5_real_time_cyc2time : + mlx5_timecounter_cyc2time; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1187,6 +1223,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, struct mlx5e_create_sq_param *csp, u32 *sqn) { + u8 ts_format; void *in; void *sqc; void *wq; @@ -1199,6 +1236,9 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, if (!in) return -ENOMEM; + ts_format = mlx5_is_real_time_sq(mdev) ? 
+ MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING; sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); wq = MLX5_ADDR_OF(sqc, sqc, wq); @@ -1207,6 +1247,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, tis_num_0, csp->tisn); MLX5_SET(sqc, sqc, cqn, csp->cqn); MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn); + MLX5_SET(sqc, sqc, ts_format, ts_format); + if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); @@ -1233,6 +1275,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, struct mlx5e_modify_sq_param *p) { + u64 bitmask = 0; void *in; void *sqc; int inlen; @@ -1248,9 +1291,14 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, MLX5_SET(modify_sq_in, in, sq_state, p->curr_state); MLX5_SET(sqc, sqc, state, p->next_state); if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) { - MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); - MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); + bitmask |= 1; + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); + } + if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) { + bitmask |= 1 << 2; + MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id); } + MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask); err = mlx5_core_modify_sq(mdev, sqn, in); @@ -1267,6 +1315,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, struct mlx5e_sq_param *param, struct mlx5e_create_sq_param *csp, + u16 qos_queue_group_id, u32 *sqn) { struct mlx5e_modify_sq_param msp = {0}; @@ -1278,6 +1327,10 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, msp.curr_state = MLX5_SQC_STATE_RST; msp.next_state = MLX5_SQC_STATE_RDY; + if (qos_queue_group_id) { + msp.qos_update = true; + msp.qos_queue_group_id = qos_queue_group_id; + } err = mlx5e_modify_sq(mdev, *sqn, &msp); if (err) mlx5e_destroy_sq(mdev, *sqn); @@ -1288,13 +1341,9 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, static int mlx5e_set_sq_maxrate(struct net_device *dev, struct mlx5e_txqsq *sq, u32 rate); -static int mlx5e_open_txqsq(struct mlx5e_channel *c, - u32 tisn, - int txq_ix, - struct mlx5e_params *params, - struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, - int tc) +int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, + struct mlx5e_params *params, struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; @@ -1304,12 +1353,17 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, if (err) return err; + if (qos_queue_group_id) + sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; + else + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + csp.tisn = tisn; csp.tis_lst_sz = 1; csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn); if (err) goto err_free_txqsq; @@ -1348,7 +1402,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) struct mlx5_wq_cyc *wq = &sq->wq; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */ + synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. 
*/ mlx5e_tx_disable_queue(sq->txq); @@ -1366,7 +1420,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) } } -static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) { struct mlx5_core_dev *mdev = sq->mdev; struct mlx5_rate_limit rl = {0}; @@ -1403,7 +1457,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = params->tx_min_inline_mode; - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); if (err) goto err_free_icosq; @@ -1423,7 +1477,7 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq) void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) { clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); - synchronize_rcu(); /* Sync with NAPI. */ + synchronize_net(); /* Sync with NAPI. */ } void mlx5e_close_icosq(struct mlx5e_icosq *sq) @@ -1452,7 +1506,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); if (err) goto err_free_xdpsq; @@ -1502,7 +1556,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) struct mlx5e_channel *c = sq->channel; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - synchronize_rcu(); /* Sync with NAPI. */ + synchronize_net(); /* Sync with NAPI. */ mlx5e_destroy_sq(c->mdev, sq->sqn); mlx5e_free_xdpsq_descs(sq); @@ -1703,7 +1757,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, int txq_ix = c->ix + tc * params->num_channels; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - params, &cparam->txq_sq, &c->sq[tc], tc); + params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); if (err) goto err_close_sqs; } @@ -1826,12 +1880,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, mlx5e_build_create_cq_param(&ccp, c); - err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, + err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, &c->async_icosq.cq); if (err) return err; - err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, + err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, &c->icosq.cq); if (err) goto err_close_async_icosq_cq; @@ -1855,13 +1909,11 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, if (err) goto err_close_rx_cq; - napi_enable(&c->napi); - spin_lock_init(&c->async_icosq_lock); err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq); if (err) - goto err_disable_napi; + goto err_close_xdpsq_cq; err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); if (err) @@ -1904,9 +1956,7 @@ err_close_icosq: err_close_async_icosq: mlx5e_close_icosq(&c->async_icosq); -err_disable_napi: - napi_disable(&c->napi); - +err_close_xdpsq_cq: if (c->xdp) mlx5e_close_cq(&c->rq_xdpsq.cq); @@ -1937,7 +1987,6 @@ static void mlx5e_close_queues(struct mlx5e_channel *c) mlx5e_close_sqs(c); mlx5e_close_icosq(&c->icosq); mlx5e_close_icosq(&c->async_icosq); - napi_disable(&c->napi); if (c->xdp) mlx5e_close_cq(&c->rq_xdpsq.cq); mlx5e_close_cq(&c->rq.cq); @@ -2022,6 +2071,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) { int tc; + napi_enable(&c->napi); + for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_icosq(&c->icosq); @@ -2044,6 +2095,9 @@ 
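/* [Editor's note -- illustration only] napi_enable()/napi_disable() move
 * from queue open/close into channel (de)activation, so a channel can be
 * built and torn down fully quiescent. Resulting order:
 *
 *	activate:   napi_enable(&c->napi) first, then activate SQs,
 *	            ICOSQ and RQ
 *	deactivate: deactivate RQ/ICOSQ/SQs, mlx5e_qos_deactivate_queues(c),
 *	            napi_disable(&c->napi) last
 *
 * This is also why the error path above was renamed from err_disable_napi
 * to err_close_xdpsq_cq: there is no armed NAPI left to undo during open.
 */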
static void mlx5e_deactivate_channel(struct mlx5e_channel *c) mlx5e_deactivate_icosq(&c->icosq); for (tc = 0; tc < c->num_tc; tc++) mlx5e_deactivate_txqsq(&c->sq[tc]); + mlx5e_qos_deactivate_queues(c); + + napi_disable(&c->napi); } static void mlx5e_close_channel(struct mlx5e_channel *c) @@ -2051,6 +2105,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) mlx5e_close_xsk(c); mlx5e_close_queues(c); + mlx5e_qos_close_queues(c); netif_napi_del(&c->napi); kvfree(c); @@ -2068,10 +2123,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, u32 buf_size = 0; int i; -#ifdef CONFIG_MLX5_EN_IPSEC - if (MLX5_IPSEC_DEV(mdev)) + if (mlx5_fpga_is_ipsec_device(mdev)) byte_count += MLX5E_METADATA_ETHER_LEN; -#endif if (mlx5e_rx_is_linear_skb(params, xsk)) { int frag_stride; @@ -2200,9 +2253,8 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev)); } -static void mlx5e_build_sq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_sq_param *param) +void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, + struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); @@ -2381,10 +2433,18 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } + err = mlx5e_qos_open_queues(priv, chs); + if (err) + goto err_close_ptp; + mlx5e_health_channels_update(priv); kvfree(cparam); return 0; +err_close_ptp: + if (chs->port_ptp) + mlx5e_port_ptp_close(chs->port_ptp); + err_close_channels: for (i--; i >= 0; i--) mlx5e_close_channel(chs->c[i]); @@ -2917,11 +2977,31 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc) netdev_set_tc_queue(netdev, tc, nch, 0); } +int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) +{ + int qos_queues, nch, ntc, num_txqs, err; + + qos_queues = mlx5e_qos_cur_leaf_nodes(priv); + + nch = priv->channels.params.num_channels; + ntc = priv->channels.params.num_tc; + num_txqs = nch * ntc + qos_queues; + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) + num_txqs += ntc; + + mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs); + err = netif_set_real_num_tx_queues(priv->netdev, num_txqs); + if (err) + netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err); + + return err; +} + static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; - int num_txqs, num_rxqs, nch, ntc; int old_num_txqs, old_ntc; + int num_rxqs, nch, ntc; int err; old_num_txqs = netdev->real_num_tx_queues; @@ -2929,18 +3009,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) nch = priv->channels.params.num_channels; ntc = priv->channels.params.num_tc; - num_txqs = nch * ntc; - if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) - num_txqs += ntc; num_rxqs = nch * priv->profile->rq_groups; mlx5e_netdev_set_tcs(netdev, nch, ntc); - err = netif_set_real_num_tx_queues(netdev, num_txqs); - if (err) { - netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err); + err = mlx5e_update_tx_netdev_queues(priv); + if (err) goto err_tcs; - } err = netif_set_real_num_rx_queues(netdev, num_rxqs); if (err) { netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); @@ -3044,6 +3119,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_update_num_tc_x_num_ch(priv); mlx5e_build_txq_maps(priv); 
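/* [Editor's illustration -- not part of the patch] With HTB offload the
 * real TX queue count becomes dynamic: mlx5e_update_tx_netdev_queues()
 * above recomputes it as channels * TCs, plus the PTP queues, plus one
 * queue per active HTB leaf. With hypothetical numbers -- 8 channels,
 * 2 TCs, PTP enabled, 3 HTB leaves:
 *
 *	num_txqs = nch * ntc + qos_queues + ntc (PTP)
 *	         = 8 * 2 + 3 + 2 = 21
 *
 * The QoS SQs and PTP queues are appended after the nch * ntc per-channel
 * queues, which is why the ethtool paths earlier refuse to change the
 * channel count or the PTP flag while priv->htb.maj_id is set: renumbering
 * would shift queues under the per-queue qdiscs that HTB attaches.
 */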
mlx5e_activate_channels(&priv->channels); + mlx5e_qos_activate_queues(priv); mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); @@ -3186,6 +3262,7 @@ int mlx5e_open_locked(struct net_device *netdev) priv->profile->update_rx(priv); mlx5e_activate_priv_channels(priv); + mlx5e_apply_traps(priv, true); if (priv->profile->update_carrier) priv->profile->update_carrier(priv); @@ -3221,6 +3298,7 @@ int mlx5e_close_locked(struct net_device *netdev) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; + mlx5e_apply_traps(priv, false); clear_bit(MLX5E_STATE_OPENED, &priv->state); netif_carrier_off(priv->netdev); @@ -3610,6 +3688,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); + /* MQPRIO is another toplevel qdisc that can't be attached + * simultaneously with the offloaded HTB. + */ + if (WARN_ON(priv->htb.maj_id)) { + err = -EINVAL; + goto out; + } + new_channels.params = priv->channels.params; new_channels.params.num_tc = tc ? tc : 1; @@ -3635,12 +3721,55 @@ out: return err; } +static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb) +{ + int res; + + switch (htb->command) { + case TC_HTB_CREATE: + return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid, + htb->extack); + case TC_HTB_DESTROY: + return mlx5e_htb_root_del(priv); + case TC_HTB_LEAF_ALLOC_QUEUE: + res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid, + htb->rate, htb->ceil, htb->extack); + if (res < 0) + return res; + htb->qid = res; + return 0; + case TC_HTB_LEAF_TO_INNER: + return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid, + htb->rate, htb->ceil, htb->extack); + case TC_HTB_LEAF_DEL: + return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid, + htb->extack); + case TC_HTB_LEAF_DEL_LAST: + case TC_HTB_LEAF_DEL_LAST_FORCE: + return mlx5e_htb_leaf_del_last(priv, htb->classid, + htb->command == TC_HTB_LEAF_DEL_LAST_FORCE, + htb->extack); + case TC_HTB_NODE_MODIFY: + return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil, + htb->extack); + case TC_HTB_LEAF_QUERY_QUEUE: + res = mlx5e_get_txq_by_classid(priv, htb->classid); + if (res < 0) + return res; + htb->qid = res; + return 0; + default: + return -EOPNOTSUPP; + } +} + static LIST_HEAD(mlx5e_block_cb_list); static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct mlx5e_priv *priv = netdev_priv(dev); + int err; switch (type) { case TC_SETUP_BLOCK: { @@ -3654,6 +3783,11 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, } case TC_SETUP_QDISC_MQPRIO: return mlx5e_setup_tc_mqprio(priv, type_data); + case TC_SETUP_QDISC_HTB: + mutex_lock(&priv->state_lock); + err = mlx5e_setup_tc_htb(priv, type_data); + mutex_unlock(&priv->state_lock); + return err; default: return -EOPNOTSUPP; } @@ -3769,7 +3903,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable) mutex_lock(&priv->state_lock); if (enable && priv->xsk.refcnt) { - netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n", + netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n", priv->xsk.refcnt); err = -EINVAL; goto out; @@ -3823,20 +3957,25 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) return 0; } -#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) -static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) +static int set_feature_hw_tc(struct net_device *netdev, bool enable) { struct 
mlx5e_priv *priv = netdev_priv(netdev); +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) { netdev_err(netdev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); return -EINVAL; } +#endif + + if (!enable && priv->htb.maj_id) { + netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n"); + return -EINVAL; + } return 0; } -#endif static int set_feature_rx_all(struct net_device *netdev, bool enable) { @@ -3934,9 +4073,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); -#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) - err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); -#endif + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); @@ -3969,6 +4106,7 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, if (!params->vlan_strip_disable) netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); } + if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { if (features & NETIF_F_LRO) { netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); @@ -4016,7 +4154,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk); max_mtu = min(max_mtu_frame, max_mtu_page); - netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n", + netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. 
Try MTU <= %d\n", new_params->sw_mtu, ix, max_mtu); return false; } @@ -4393,10 +4531,8 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb, features = vlan_features_check(skb, features); features = vxlan_features_check(skb, features); -#ifdef CONFIG_MLX5_EN_IPSEC if (mlx5e_ipsec_feature_check(skb, netdev, features)) return features; -#endif /* Validate if the tunneled packet is being offloaded by HW */ if (skb->encapsulation && @@ -4455,8 +4591,9 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return -EINVAL; } - if (MLX5_IPSEC_DEV(priv->mdev)) { - netdev_warn(netdev, "can't set XDP with IPSec offload\n"); + if (mlx5_fpga_is_ipsec_device(priv->mdev)) { + netdev_warn(netdev, + "XDP is not available on Innova cards with IPsec support\n"); return -EINVAL; } @@ -4639,8 +4776,6 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_change_mtu = mlx5e_change_nic_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, - .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = mlx5e_features_check, .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, @@ -4808,15 +4943,15 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, tirc_default_config[tt].rx_hash_fields; } -void mlx5e_build_nic_params(struct mlx5e_priv *priv, - struct mlx5e_xsk *xsk, - struct mlx5e_rss_params *rss_params, - struct mlx5e_params *params, - u16 mtu) +void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu) { + struct mlx5e_rss_params *rss_params = &priv->rss_params; + struct mlx5e_params *params = &priv->channels.params; struct mlx5_core_dev *mdev = priv->mdev; u8 rx_cq_period_mode; + priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile); + params->sw_mtu = mtu; params->hard_mtu = MLX5E_ETH_HARD_MTU; params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, @@ -4874,6 +5009,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, /* AF_XDP */ params->xsk = xsk; + + /* Do not update netdev->features directly in here + * on mlx5e_attach_netdev() we will call mlx5e_update_features() + * To update netdev->features please modify mlx5e_fix_features() + */ } static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) @@ -4975,8 +5115,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - mlx5e_vxlan_set_netdev_info(priv); - if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -5026,18 +5164,12 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_RXFCS; netdev->features = netdev->hw_features; - if (!priv->channels.params.lro_en) - netdev->features &= ~NETIF_F_LRO; + /* Defaults */ if (fcs_enabled) netdev->features &= ~NETIF_F_RXALL; - - if (!priv->channels.params.scatter_fcs_en) - netdev->features &= ~NETIF_F_RXFCS; - - /* prefere CQE compression over rxhash */ - if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) - netdev->features &= ~NETIF_F_RXHASH; + netdev->features &= ~NETIF_F_LRO; + netdev->features &= ~NETIF_F_RXFCS; #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) if (FT_CAP(flow_modify_en) && @@ -5051,6 +5183,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_NTUPLE; #endif } + if 
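/* [Editor's note -- illustration only] NETIF_F_HW_TC is now advertised
 * when the device supports the NIC QoS scheduling used for HTB offload,
 * not only when flower offload is compiled in. A sketch of what the gate
 * is assumed to check (the QOS capability field names here are
 * assumptions, not taken from this diff):
 *
 *	bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev)
 *	{
 *		return MLX5_CAP_GEN(mdev, qos) &&
 *		       MLX5_CAP_QOS(mdev, nic_sq_scheduling) &&
 *		       MLX5_CAP_QOS(mdev, nic_bw_share) &&
 *		       MLX5_CAP_QOS(mdev, nic_rate_limit);
 *	}
 *
 * Userspace reaches the HTB path with something like
 * "tc qdisc replace dev eth0 root handle 1: htb offload".
 */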
(mlx5_qos_is_supported(mdev)) + netdev->features |= NETIF_F_HW_TC; netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; @@ -5101,33 +5235,28 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) } static int mlx5e_nic_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) + struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rss_params *rss = &priv->rss_params; int err; - err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); - if (err) - return err; - - mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params, - netdev->mtu); + mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu); + mlx5e_vxlan_set_netdev_info(priv); mlx5e_timestamp_init(priv); err = mlx5e_ipsec_init(priv); if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); + err = mlx5e_tls_init(priv); if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); - mlx5e_build_nic_netdev(netdev); + err = mlx5e_devlink_port_register(priv); if (err) mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); + mlx5e_health_create_reporters(priv); return 0; @@ -5139,7 +5268,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) mlx5e_devlink_port_unregister(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); - mlx5e_netdev_cleanup(priv->netdev, priv); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) @@ -5268,6 +5396,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5_lag_add(mdev, netdev); mlx5e_enable_async_events(priv); + mlx5e_enable_blocking_events(priv); if (mlx5e_monitor_counter_supported(priv)) mlx5e_monitor_counter_init(priv); @@ -5305,6 +5434,12 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) if (mlx5e_monitor_counter_supported(priv)) mlx5e_monitor_counter_cleanup(priv); + mlx5e_disable_blocking_events(priv); + if (priv->en_trap) { + mlx5e_deactivate_trap(priv); + mlx5e_close_trap(priv->en_trap); + priv->en_trap = NULL; + } mlx5e_disable_async_events(priv); mlx5_lag_remove(mdev); mlx5_vxlan_reset_to_default(mdev->vxlan); @@ -5335,27 +5470,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = { }; /* mlx5e generic netdev management API (move to en_common.c) */ - -/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */ -int mlx5e_netdev_init(struct net_device *netdev, - struct mlx5e_priv *priv, - struct mlx5_core_dev *mdev, - const struct mlx5e_profile *profile, - void *ppriv) +int mlx5e_priv_init(struct mlx5e_priv *priv, + struct net_device *netdev, + struct mlx5_core_dev *mdev) { + memset(priv, 0, sizeof(*priv)); + /* priv init */ priv->mdev = mdev; priv->netdev = netdev; - priv->profile = profile; - priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; - priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); priv->max_opened_tc = 1; if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL)) return -ENOMEM; mutex_init(&priv->state_lock); + hash_init(priv->htb.qos_tc2node); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); @@ -5365,9 +5496,6 @@ int mlx5e_netdev_init(struct net_device *netdev, if (!priv->wq) goto err_free_cpumask; - /* netdev init */ - netif_carrier_off(netdev); - return 0; err_free_cpumask: @@ -5376,38 +5504,39 @@ err_free_cpumask: return -ENOMEM; } -void mlx5e_netdev_cleanup(struct net_device 
*netdev, struct mlx5e_priv *priv) +void mlx5e_priv_cleanup(struct mlx5e_priv *priv) { + int i; + destroy_workqueue(priv->wq); free_cpumask_var(priv->scratchpad.cpumask); + + for (i = 0; i < priv->htb.max_qos_sqs; i++) + kfree(priv->htb.qos_sq_stats[i]); + kvfree(priv->htb.qos_sq_stats); } -struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, - const struct mlx5e_profile *profile, - int nch, - void *ppriv) +struct net_device * +mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs) { struct net_device *netdev; - unsigned int ptp_txqs = 0; int err; - if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) - ptp_txqs = profile->max_tc; - - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * profile->max_tc + ptp_txqs, - nch * profile->rq_groups); + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); return NULL; } - err = profile->init(mdev, netdev, profile, ppriv); + err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev); if (err) { - mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err); + mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err); goto err_free_netdev; } + netif_carrier_off(netdev); + dev_net_set(netdev, mlx5_core_net(mdev)); + return netdev; err_free_netdev: @@ -5416,14 +5545,23 @@ err_free_netdev: return NULL; } +static void mlx5e_update_features(struct net_device *netdev) +{ + if (netdev->reg_state != NETREG_REGISTERED) + return; /* features will be updated on netdev registration */ + + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); +} + int mlx5e_attach_netdev(struct mlx5e_priv *priv) { const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED; - const struct mlx5e_profile *profile; + const struct mlx5e_profile *profile = priv->profile; int max_nch; int err; - profile = priv->profile; clear_bit(MLX5E_STATE_DESTROYING, &priv->state); /* max number of channels may have changed */ @@ -5463,6 +5601,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (profile->enable) profile->enable(priv); + mlx5e_update_features(priv->netdev); + return 0; err_cleanup_tx: @@ -5489,13 +5629,76 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) cancel_work_sync(&priv->update_stats_work); } +static int +mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, + const struct mlx5e_profile *new_profile, void *new_ppriv) +{ + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5e_priv_init(priv, netdev, mdev); + if (err) { + mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err); + return err; + } + netif_carrier_off(netdev); + priv->profile = new_profile; + priv->ppriv = new_ppriv; + err = new_profile->init(priv->mdev, priv->netdev); + if (err) + return err; + err = mlx5e_attach_netdev(priv); + if (err) + new_profile->cleanup(priv); + return err; +} + +int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, + const struct mlx5e_profile *new_profile, void *new_ppriv) +{ + unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile); + const struct mlx5e_profile *orig_profile = priv->profile; + void *orig_ppriv = priv->ppriv; + int err, rollback_err; + + /* sanity */ + if (new_max_nch != priv->max_nch) { + netdev_warn(priv->netdev, + "%s: Replacing profile with different max channels\n", + __func__); + return -EINVAL; + } + + /* cleanup old profile */ + mlx5e_detach_netdev(priv); + priv->profile->cleanup(priv); + mlx5e_priv_cleanup(priv); + + err = 
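/* [Editor's note -- illustration only] This call is the attach step of the
 * new profile-swap flow: detach and old-profile cleanup already ran above,
 * and on failure the rollback label below re-attaches the original
 * profile/ppriv pair. It lets one net_device be re-purposed without
 * reallocation; the max-channel sanity check exists because the txq/rxq
 * counts were fixed at alloc_etherdev_mqs() time. A hypothetical caller:
 *
 *	// e.g. turning a NIC netdev into an uplink representor
 *	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
 *					  rpriv);
 */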
mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv); + if (err) { /* roll back to original profile */ + netdev_warn(priv->netdev, "%s: new profile init failed, %d\n", + __func__, err); + goto rollback; + } + + return 0; + +rollback: + rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv); + if (rollback_err) { + netdev_err(priv->netdev, + "%s: failed to rollback to orig profile, %d\n", + __func__, rollback_err); + } + return err; +} + void mlx5e_destroy_netdev(struct mlx5e_priv *priv) { - const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev; - if (profile->cleanup) - profile->cleanup(priv); + mlx5e_priv_cleanup(priv); free_netdev(netdev); } @@ -5541,28 +5744,48 @@ static int mlx5e_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + const struct mlx5e_profile *profile = &mlx5e_nic_profile; struct mlx5_core_dev *mdev = edev->mdev; struct net_device *netdev; pm_message_t state = {}; - void *priv; + unsigned int txqs, rxqs, ptp_txqs = 0; + struct mlx5e_priv *priv; + int qos_sqs = 0; int err; int nch; + if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) + ptp_txqs = profile->max_tc; + + if (mlx5_qos_is_supported(mdev)) + qos_sqs = mlx5e_qos_max_leaf_nodes(mdev); + nch = mlx5e_get_max_num_channels(mdev); - netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL); + txqs = nch * profile->max_tc + ptp_txqs + qos_sqs; + rxqs = nch * profile->rq_groups; + netdev = mlx5e_create_netdev(mdev, txqs, rxqs); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); return -ENOMEM; } - dev_net_set(netdev, mlx5_core_net(mdev)); + mlx5e_build_nic_netdev(netdev); + priv = netdev_priv(netdev); dev_set_drvdata(&adev->dev, priv); + priv->profile = profile; + priv->ppriv = NULL; + err = profile->init(mdev, netdev); + if (err) { + mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err); + goto err_destroy_netdev; + } + err = mlx5e_resume(adev); if (err) { mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err); - goto err_destroy_netdev; + goto err_profile_cleanup; } err = register_netdev(netdev); @@ -5578,6 +5801,8 @@ static int mlx5e_probe(struct auxiliary_device *adev, err_resume: mlx5e_suspend(adev, state); +err_profile_cleanup: + profile->cleanup(priv); err_destroy_netdev: mlx5e_destroy_netdev(priv); return err; @@ -5591,6 +5816,7 @@ static void mlx5e_remove(struct auxiliary_device *adev) mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); mlx5e_suspend(adev, state); + priv->profile->cleanup(priv); mlx5e_destroy_netdev(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f0ceae65f6cf..a132fff7a980 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <generated/utsrelease.h> #include <linux/mlx5/fs.h> #include <net/switchdev.h> #include <net/pkt_cls.h> @@ -653,8 +652,6 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, .ndo_change_mtu = mlx5e_uplink_rep_change_mtu, - .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = mlx5e_features_check, .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_rate = mlx5e_set_vf_rate, @@ -686,7 +683,10 @@ static void mlx5e_build_rep_params(struct net_device *netdev) MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile); params = &priv->channels.params; + + params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; params->hard_mtu = MLX5E_ETH_HARD_MTU; params->sw_mtu = netdev->mtu; @@ -712,20 +712,16 @@ static void mlx5e_build_rep_params(struct net_device *netdev) mlx5e_build_rss_params(&priv->rss_params, params->num_channels); } -static void mlx5e_build_rep_netdev(struct net_device *netdev) +static void mlx5e_build_rep_netdev(struct net_device *netdev, + struct mlx5_core_dev *mdev, + struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_core_dev *mdev = priv->mdev; - SET_NETDEV_DEV(netdev, mdev->device); if (rep->vport == MLX5_VPORT_UPLINK) { netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; /* we want a persistent mac for the uplink rep */ mlx5_query_mac_address(mdev, netdev->dev_addr); netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops; - mlx5e_vxlan_set_netdev_info(priv); mlx5e_dcbnl_build_rep_netdev(netdev); } else { netdev->netdev_ops = &mlx5e_netdev_ops_rep; @@ -757,30 +753,27 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) } static int mlx5e_init_rep(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) + struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - int err; - - err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); - if (err) - return err; - - priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; mlx5e_build_rep_params(netdev); - mlx5e_build_rep_netdev(netdev); - mlx5e_timestamp_init(priv); return 0; } +static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, + struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_vxlan_set_netdev_info(priv); + return mlx5e_init_rep(mdev, netdev); +} + static void mlx5e_cleanup_rep(struct mlx5e_priv *priv) { - mlx5e_netdev_cleanup(priv->netdev, priv); } static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) @@ -1057,7 +1050,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) static void mlx5e_rep_enable(struct mlx5e_priv *priv) { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + mlx5e_set_netdev_mtu_boundaries(priv); + mlx5e_rep_neigh_init(rpriv); +} + +static void mlx5e_rep_disable(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + mlx5e_rep_neigh_cleanup(rpriv); } static int mlx5e_update_rep_rx(struct mlx5e_priv *priv) @@ -1092,6 +1095,7 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) { + struct mlx5e_rep_priv *rpriv = priv->ppriv; struct net_device 
*netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; u16 max_mtu; @@ -1110,12 +1114,15 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5_notifier_register(mdev, &priv->events_nb); mlx5e_dcbnl_initialize(priv); mlx5e_dcbnl_init_app(priv); + mlx5e_rep_neigh_init(rpriv); } static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) { + struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; + mlx5e_rep_neigh_cleanup(rpriv); mlx5e_dcbnl_delete_app(priv); mlx5_notifier_unregister(mdev, &priv->events_nb); mlx5e_rep_tc_disable(priv); @@ -1167,6 +1174,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_rep_tx, .enable = mlx5e_rep_enable, + .disable = mlx5e_rep_disable, .update_rx = mlx5e_update_rep_rx, .update_stats = mlx5e_stats_update_ndo_stats, .rx_handlers = &mlx5e_rx_handlers_rep, @@ -1177,7 +1185,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { }; static const struct mlx5e_profile mlx5e_uplink_rep_profile = { - .init = mlx5e_init_rep, + .init = mlx5e_init_ul_rep, .cleanup = mlx5e_cleanup_rep, .init_rx = mlx5e_init_ul_rep_rx, .cleanup_rx = mlx5e_cleanup_ul_rep_rx, @@ -1203,6 +1211,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) struct mlx5e_rep_priv *rpriv; struct devlink_port *dl_port; struct net_device *netdev; + struct mlx5e_priv *priv; + unsigned int txqs, rxqs; int nch, err; rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); @@ -1212,10 +1222,13 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) /* rpriv->rep to be looked up when profile->init() is called */ rpriv->rep = rep; - nch = mlx5e_get_max_num_channels(dev); profile = (rep->vport == MLX5_VPORT_UPLINK) ? 
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile; - netdev = mlx5e_create_netdev(dev, profile, nch, rpriv); + + nch = mlx5e_get_max_num_channels(dev); + txqs = nch * profile->max_tc; + rxqs = nch * profile->rq_groups; + netdev = mlx5e_create_netdev(dev, txqs, rxqs); if (!netdev) { mlx5_core_warn(dev, "Failed to create representor netdev for vport %d\n", @@ -1224,7 +1237,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) return -EINVAL; } - dev_net_set(netdev, mlx5_core_net(dev)); + mlx5e_build_rep_netdev(netdev, dev, rep); + rpriv->netdev = netdev; rep->rep_data[REP_ETH].priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); @@ -1235,20 +1249,21 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) goto err_destroy_netdev; } - err = mlx5e_attach_netdev(netdev_priv(netdev)); + priv = netdev_priv(netdev); + priv->profile = profile; + priv->ppriv = rpriv; + err = profile->init(dev, netdev); if (err) { - netdev_warn(netdev, - "Failed to attach representor netdev for vport %d\n", - rep->vport); + netdev_warn(netdev, "rep profile init failed, %d\n", err); goto err_destroy_mdev_resources; } - err = mlx5e_rep_neigh_init(rpriv); + err = mlx5e_attach_netdev(netdev_priv(netdev)); if (err) { netdev_warn(netdev, - "Failed to initialized neighbours handling for vport %d\n", + "Failed to attach representor netdev for vport %d\n", rep->vport); - goto err_detach_netdev; + goto err_cleanup_profile; } err = register_netdev(netdev); @@ -1256,7 +1271,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) netdev_warn(netdev, "Failed to register representor netdev for vport %d\n", rep->vport); - goto err_neigh_cleanup; + goto err_detach_netdev; } dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport); @@ -1264,12 +1279,12 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) devlink_port_type_eth_set(dl_port, netdev); return 0; -err_neigh_cleanup: - mlx5e_rep_neigh_cleanup(rpriv); - err_detach_netdev: mlx5e_detach_netdev(netdev_priv(netdev)); +err_cleanup_profile: + priv->profile->cleanup(priv); + err_destroy_mdev_resources: if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_destroy_mdev_resources(dev); @@ -1294,8 +1309,8 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) if (dl_port) devlink_port_type_clear(dl_port); unregister_netdev(netdev); - mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); + priv->profile->cleanup(priv); if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_destroy_mdev_resources(priv->mdev); mlx5e_destroy_netdev(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 988195ab1c54..d1696404cca9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -59,6 +59,8 @@ struct mlx5e_neigh_update_table { struct mlx5_tc_ct_priv; struct mlx5e_rep_bond; +struct mlx5e_tc_tun_encap; + struct mlx5_rep_uplink_priv { /* Filters DB - instantiated by the uplink representor and shared by * the uplink's VFs @@ -90,6 +92,9 @@ struct mlx5_rep_uplink_priv { /* support eswitch vports bonding */ struct mlx5e_rep_bond *bond; + + /* tc tunneling encapsulation private data */ + struct mlx5e_tc_tun_encap *encap; }; struct mlx5e_rep_priv { @@ -110,7 +115,6 @@ struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep) } struct mlx5e_neigh { - struct net_device *dev; union { __be32 v4; struct in6_addr v6; @@ -122,6 +126,7 @@ struct 
mlx5e_neigh_hash_entry { struct rhash_head rhash_node; struct mlx5e_neigh m_neigh; struct mlx5e_priv *priv; + struct net_device *neigh_dev; /* Save the neigh hash entry in a list on the representor in * addition to the hash table. In order to iterate easily over the @@ -153,6 +158,7 @@ enum { /* set when the encap entry is successfully offloaded into HW */ MLX5_ENCAP_ENTRY_VALID = BIT(0), MLX5_REFORMAT_DECAP = BIT(1), + MLX5_ENCAP_ENTRY_NO_ROUTE = BIT(2), }; struct mlx5e_decap_key { @@ -175,12 +181,12 @@ struct mlx5e_encap_entry { struct mlx5e_neigh_hash_entry *nhe; /* neigh hash entry list of encaps sharing the same neigh */ struct list_head encap_list; - struct mlx5e_neigh m_neigh; /* a node of the eswitch encap hash table which keeping all the encap * entries */ struct hlist_node encap_hlist; struct list_head flows; + struct list_head route_list; struct mlx5_pkt_reformat *pkt_reformat; const struct ip_tunnel_info *tun_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ca4b55839a8a..1b6ad94ebb10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -47,11 +47,11 @@ #include "fpga/ipsec.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" -#include "lib/clock.h" #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/health.h" #include "en/params.h" +#include "devlink.h" static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, @@ -212,11 +212,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; } -static inline bool mlx5e_page_is_reserved(struct page *page) -{ - return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); -} - static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { @@ -229,7 +224,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, return false; } - if (unlikely(mlx5e_page_is_reserved(dma_info->page))) { + if (!dev_page_is_reusable(dma_info->page)) { stats->cache_waive++; return false; } @@ -1066,9 +1061,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, } if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) - skb_hwtstamps(skb)->hwtstamp = - mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe)); - + skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, + rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); if (likely(netdev->features & NETIF_F_RXHASH)) @@ -1126,12 +1120,8 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, u32 len, struct xdp_buff *xdp) { - xdp->data_hard_start = va; - xdp->data = va + headroom; - xdp_set_data_meta_invalid(xdp); - xdp->data_end = xdp->data + len; - xdp->rxq = &rq->xdp_rxq; - xdp->frame_sz = rq->buff.frame0_sz; + xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq); + xdp_prepare_buff(xdp, va, headroom, len, false); } static struct sk_buff * @@ -1675,9 +1665,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, } if (unlikely(mlx5e_rx_hw_stamp(tstamp))) - skb_hwtstamps(skb)->hwtstamp = - mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe)); - + skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, + rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); if (likely(netdev->features & 
NETIF_F_RXHASH)) @@ -1794,12 +1783,10 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; -#ifdef CONFIG_MLX5_EN_IPSEC - if (MLX5_IPSEC_DEV(mdev)) { - netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n"); + if (mlx5_fpga_is_ipsec_device(mdev)) { + netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n"); return -EINVAL; } -#endif if (!rq->handle_rx_cqe) { netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); return -EINVAL; @@ -1829,3 +1816,48 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool return 0; } + +static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + struct mlx5e_priv *priv = netdev_priv(rq->netdev); + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + struct mlx5e_wqe_frag_info *wi; + struct sk_buff *skb; + u32 cqe_bcnt; + u16 trap_id; + u16 ci; + + trap_id = get_cqe_flow_tag(cqe); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + rq->stats->wqe_err++; + goto free_wqe; + } + + skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt); + if (!skb) + goto free_wqe; + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + skb_push(skb, ETH_HLEN); + + mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port); + dev_kfree_skb_any(skb); + +free_wqe: + mlx5e_free_rx_wqe(rq, wi, false); + mlx5_wq_cyc_pop(wq); +} + +void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params) +{ + rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ? + mlx5e_skb_from_cqe_linear : + mlx5e_skb_from_cqe_nonlinear; + rq->post_wqes = mlx5e_post_rx_wqes; + rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; + rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 2cf2042b37c7..92c5b81427b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -420,6 +420,25 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv, } } +static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv, + struct mlx5e_sw_stats *s) +{ + struct mlx5e_sq_stats **stats; + u16 max_qos_sqs; + int i; + + /* Pairs with smp_store_release in mlx5e_open_qos_sq. 
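+ * The release there publishes each qos_sq_stats[i] pointer before
+ * max_qos_sqs is raised, so every index below the value read here
+ * points at fully initialised stats.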
*/ + max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs); + stats = READ_ONCE(priv->htb.qos_sq_stats); + + for (i = 0; i < max_qos_sqs; i++) { + mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i])); + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); + } +} + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) { struct mlx5e_sw_stats *s = &priv->stats.sw; @@ -449,6 +468,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) } } mlx5e_stats_grp_sw_update_stats_ptp(priv, s); + mlx5e_stats_grp_sw_update_stats_qos(priv, s); } static const struct counter_desc q_stats_desc[] = { @@ -1740,6 +1760,41 @@ static const struct counter_desc ptp_cq_stats_desc[] = { { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, }; +static const struct counter_desc qos_sq_stats_desc[] = { + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) }, +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, +#endif + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, +}; + #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) @@ -1750,6 +1805,49 @@ static const struct counter_desc ptp_cq_stats_desc[] = { #define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc) #define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc) #define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc) +#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc) + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos) +{ + /* Pairs with smp_store_release in mlx5e_open_qos_sq. 
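+ * max_qos_sqs is only ever increased by the SQ open path, and the
+ * stats arrays stay allocated until mlx5e_priv_cleanup(), so a count
+ * taken here can lag behind a concurrent queue creation but can
+ * never overrun qos_sq_stats.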
*/ + return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs); +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos) +{ + /* Pairs with smp_store_release in mlx5e_open_qos_sq. */ + u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs); + int i, qid; + + for (qid = 0; qid < max_qos_sqs; qid++) + for (i = 0; i < NUM_QOS_SQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + qos_sq_stats_desc[i].format, qid); + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos) +{ + struct mlx5e_sq_stats **stats; + u16 max_qos_sqs; + int i, qid; + + /* Pairs with smp_store_release in mlx5e_open_qos_sq. */ + max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs); + stats = READ_ONCE(priv->htb.qos_sq_stats); + + for (qid = 0; qid < max_qos_sqs; qid++) { + struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]); + + for (i = 0; i < NUM_QOS_SQ_STATS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i); + } + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp) { @@ -1932,6 +2030,7 @@ MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); MLX5E_DEFINE_STATS_GRP(eth_ext, 0); static MLX5E_DEFINE_STATS_GRP(tls, 0); static MLX5E_DEFINE_STATS_GRP(ptp, 0); +static MLX5E_DEFINE_STATS_GRP(qos, 0); /* The stats groups order is opposite to the update_stats() order calls */ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { @@ -1955,6 +2054,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(per_port_buff_congest), &MLX5E_STATS_GRP(ptp), + &MLX5E_STATS_GRP(qos), }; unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index e41fc11f2ce7..93c41312fb03 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -55,6 +55,8 @@ #define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld) + struct counter_desc { char format[ETH_GSTRING_LEN]; size_t offset; /* Byte offset */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index dd0bfbacad47..0da69b98f38f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -63,6 +63,8 @@ #include "en/mapping.h" #include "en/tc_ct.h" #include "en/mod_hdr.h" +#include "en/tc_priv.h" +#include "en/tc_tun_encap.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -71,90 +73,6 @@ #define nic_chains(priv) ((priv)->fs.tc.chains) #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) -#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) - -enum { - MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, - MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, - MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, - MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, - MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, - MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, - MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, - MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2, - MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3, - MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4, - 
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, - MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, - MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7, - MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8, -}; - -#define MLX5E_TC_MAX_SPLITS 1 - -/* Helper struct for accessing a struct containing list_head array. - * Containing struct - * |- Helper array - * [0] Helper item 0 - * |- list_head item 0 - * |- index (0) - * [1] Helper item 1 - * |- list_head item 1 - * |- index (1) - * To access the containing struct from one of the list_head items: - * 1. Get the helper item from the list_head item using - * helper item = - * container_of(list_head item, helper struct type, list_head field) - * 2. Get the containing struct from the helper item and its index in the array: - * containing struct = - * container_of(helper item, containing struct type, helper field[index]) - */ -struct encap_flow_item { - struct mlx5e_encap_entry *e; /* attached encap instance */ - struct list_head list; - int index; -}; - -struct mlx5e_tc_flow { - struct rhash_head node; - struct mlx5e_priv *priv; - u64 cookie; - unsigned long flags; - struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; - - /* flows sharing the same reformat object - currently mpls decap */ - struct list_head l3_to_l2_reformat; - struct mlx5e_decap_entry *decap_reformat; - - /* Flow can be associated with multiple encap IDs. - * The number of encaps is bounded by the number of supported - * destinations. - */ - struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; - struct mlx5e_tc_flow *peer_flow; - struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ - struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ - struct list_head hairpin; /* flows sharing the same hairpin */ - struct list_head peer; /* flows with peer flow */ - struct list_head unready; /* flows not ready to be offloaded (e.g. due to missing route) */ - struct net_device *orig_dev; /* netdev adding flow first */ - int tmp_efi_index; - struct list_head tmp_list; /* temporary flow list used by neigh update */ - refcount_t refcnt; - struct rcu_head rcu_head; - struct completion init_done; - int tunnel_id; /* the mapped tunnel id of this flow */ - struct mlx5_flow_attr *attr; -}; - -struct mlx5e_tc_flow_parse_attr { - const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; - struct net_device *filter_dev; - struct mlx5_flow_spec spec; - struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; - int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; - struct ethhdr eth; -}; #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) @@ -165,10 +83,15 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { .moffset = 0, .mlen = 2, }, + [VPORT_TO_REG] = { + .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, + .moffset = 2, + .mlen = 2, + }, [TUNNEL_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1, .moffset = 1, - .mlen = 3, + .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8), .soffset = MLX5_BYTE_OFF(fte_match_param, misc_parameters_2.metadata_reg_c_1), }, @@ -190,6 +113,14 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct, }; +/* To avoid a false lock dependency warning, set the tc_ht lock + * class different from the lock class of the ht being used: when deleting + * the last flow from a group and then deleting the group, we get into del_sw_flow_group(), + * which calls rhashtable_destroy on fg->ftes_hash; that will take ht->mutex, but + *
it's different than the ht->mutex here. + */ +static struct lock_class_key tc_ht_lock_key; + static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); void @@ -239,11 +170,11 @@ mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, } int -mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, - enum mlx5_flow_namespace_type ns, - enum mlx5e_tc_attr_to_reg type, - u32 data) +mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5_flow_namespace_type ns, + enum mlx5e_tc_attr_to_reg type, + u32 data) { int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield; @@ -267,9 +198,10 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, MLX5_SET(set_action_in, modact, offset, moffset * 8); MLX5_SET(set_action_in, modact, length, mlen * 8); MLX5_SET(set_action_in, modact, data, data); + err = mod_hdr_acts->num_actions; mod_hdr_acts->num_actions++; - return 0; + return err; } static struct mlx5_tc_ct_priv * @@ -318,6 +250,41 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv, mlx5e_del_offloaded_nic_rule(priv, rule, attr); } +int +mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5_flow_namespace_type ns, + enum mlx5e_tc_attr_to_reg type, + u32 data) +{ + int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data); + + return ret < 0 ? ret : 0; +} + +void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5e_tc_attr_to_reg type, + int act_id, u32 data) +{ + int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; + int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield; + int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen; + char *modact; + + modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ); + + /* Firmware has 5bit length field and 0 means 32bits */ + if (mlen == 4) + mlen = 0; + + MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, modact, field, mfield); + MLX5_SET(set_action_in, modact, offset, moffset * 8); + MLX5_SET(set_action_in, modact, length, mlen * 8); + MLX5_SET(set_action_in, modact, data, data); +} + struct mlx5e_hairpin { struct mlx5_hairpin *pair; @@ -355,15 +322,14 @@ struct mlx5e_hairpin_entry { static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); -static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) +struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) { if (!flow || !refcount_inc_not_zero(&flow->refcnt)) return ERR_PTR(-EINVAL); return flow; } -static void mlx5e_flow_put(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) +void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { if (refcount_dec_and_test(&flow->refcnt)) { mlx5e_tc_del_flow(priv, flow); @@ -371,48 +337,6 @@ static void mlx5e_flow_put(struct mlx5e_priv *priv, } } -static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) -{ - /* Complete all memory stores before setting bit. 
*/ - smp_mb__before_atomic(); - set_bit(flag, &flow->flags); -} - -#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag) - -static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, - unsigned long flag) -{ - /* test_and_set_bit() provides all necessary barriers */ - return test_and_set_bit(flag, &flow->flags); -} - -#define flow_flag_test_and_set(flow, flag) \ - __flow_flag_test_and_set(flow, \ - MLX5E_TC_FLOW_FLAG_##flag) - -static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) -{ - /* Complete all memory stores before clearing bit. */ - smp_mb__before_atomic(); - clear_bit(flag, &flow->flags); -} - -#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ - MLX5E_TC_FLOW_FLAG_##flag) - -static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) -{ - bool ret = test_bit(flag, &flow->flags); - - /* Read fields of flow structure only after checking flags. */ - smp_mb__after_atomic(); - return ret; -} - -#define flow_flag_test(flow, flag) __flow_flag_test(flow, \ - MLX5E_TC_FLOW_FLAG_##flag) - bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) { return flow_flag_test(flow, ESWITCH); @@ -423,7 +347,7 @@ static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, FT); } -static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) +bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) { return flow_flag_test(flow, OFFLOADED); } @@ -1138,23 +1062,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, kfree(flow->attr); } -static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, int out_index); - -static int mlx5e_attach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct net_device *mirred_dev, - int out_index, - struct netlink_ext_ack *extack, - struct net_device **encap_dev, - bool *encap_valid); -static int mlx5e_attach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack); -static void mlx5e_detach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow); - -static struct mlx5_flow_handle * +struct mlx5_flow_handle * mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec, @@ -1189,10 +1097,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, return rule; } -static void -mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr) +void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) { flow_flag_clear(flow, OFFLOADED); @@ -1211,7 +1118,7 @@ offload_rule_0: mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } -static struct mlx5_flow_handle * +struct mlx5_flow_handle * mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec) @@ -1237,9 +1144,8 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, return rule; } -static void -mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, - struct mlx5e_tc_flow *flow) +void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow) { struct mlx5_flow_attr *slow_attr; @@ -1307,6 +1213,63 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow) mutex_unlock(&uplink_priv->unready_flows_lock); } +static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv); + +bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device 
*route_dev) +{ + struct mlx5_core_dev *out_mdev, *route_mdev; + struct mlx5e_priv *out_priv, *route_priv; + + out_priv = netdev_priv(out_dev); + out_mdev = out_priv->mdev; + route_priv = netdev_priv(route_dev); + route_mdev = route_priv->mdev; + + if (out_mdev->coredev_type != MLX5_COREDEV_PF || + route_mdev->coredev_type != MLX5_COREDEV_VF) + return false; + + return same_hw_devs(out_priv, route_priv); +} + +int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) +{ + struct mlx5e_priv *out_priv, *route_priv; + struct mlx5_core_dev *route_mdev; + struct mlx5_eswitch *esw; + u16 vhca_id; + int err; + + out_priv = netdev_priv(out_dev); + esw = out_priv->mdev->priv.eswitch; + route_priv = netdev_priv(route_dev); + route_mdev = route_priv->mdev; + + vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id); + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); + return err; +} + +int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow) +{ + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts; + struct mlx5_modify_hdr *mod_hdr; + + mod_hdr = mlx5_modify_header_alloc(priv->mdev, + get_flow_name_space(flow), + mod_hdr_acts->num_actions, + mod_hdr_acts->actions); + if (IS_ERR(mod_hdr)) + return PTR_ERR(mod_hdr); + + WARN_ON(flow->attr->modify_hdr); + flow->attr->modify_hdr = mod_hdr; + + return 0; +} + static int mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, @@ -1316,21 +1279,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct net_device *out_dev, *encap_dev = NULL; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; + bool vf_tun = false, encap_valid = true; struct mlx5_esw_flow_attr *esw_attr; struct mlx5_fc *counter = NULL; struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; - bool encap_valid = true; u32 max_prio, max_chain; int err = 0; int out_index; - if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) { - NL_SET_ERR_MSG_MOD(extack, - "E-switch priorities unsupported, upgrade FW"); - return -EOPNOTSUPP; - } - /* We check chain range only for tc flows. * For ft flows, we checked attr->chain was originally 0 and set it to * FDB_FT_CHAIN which is outside tc range. 
@@ -1340,20 +1297,28 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) { NL_SET_ERR_MSG_MOD(extack, "Requested chain is out of supported range"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } max_prio = mlx5_chains_get_prio_range(esw_chains(esw)); if (attr->prio > max_prio) { NL_SET_ERR_MSG_MOD(extack, "Requested priority is out of supported range"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; + } + + if (flow_flag_test(flow, TUN_RX)) { + err = mlx5e_attach_decap_route(priv, flow); + if (err) + goto err_out; } if (flow_flag_test(flow, L3_TO_L2_DECAP)) { err = mlx5e_attach_decap(priv, flow, extack); if (err) - return err; + goto err_out; } parse_attr = attr->parse_attr; @@ -1371,8 +1336,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5e_attach_encap(priv, flow, out_dev, out_index, extack, &encap_dev, &encap_valid); if (err) - return err; + goto err_out; + if (esw_attr->dests[out_index].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + vf_tun = true; out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; esw_attr->dests[out_index].rep = rpriv->rep; @@ -1381,20 +1349,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5_eswitch_add_vlan_action(esw, attr); if (err) - return err; + goto err_out; if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) { - err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); - if (err) - return err; + if (vf_tun) { + err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow); + if (err) + goto err_out; + } else { + err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); + if (err) + goto err_out; + } } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(esw_attr->counter_dev, true); - if (IS_ERR(counter)) - return PTR_ERR(counter); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_out; + } attr->counter = counter; } @@ -1408,12 +1383,17 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, else flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); - if (IS_ERR(flow->rule[0])) - return PTR_ERR(flow->rule[0]); - else - flow_flag_set(flow, OFFLOADED); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); + goto err_out; + } + flow_flag_set(flow, OFFLOADED); return 0; + +err_out: + flow_flag_set(flow, FAILED); + return err; } static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow) @@ -1434,8 +1414,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + bool vf_tun = false; int out_index; + esw_attr = attr->esw_attr; mlx5e_put_flow_tunnel_id(flow); if (flow_flag_test(flow, NOT_READY)) @@ -1453,20 +1436,33 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, mlx5_eswitch_del_vlan_action(esw, attr); - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) - if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) { + if (flow->decap_route) + mlx5e_detach_decap_route(priv, flow); + + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + if (esw_attr->dests[out_index].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + vf_tun = true; + if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) { mlx5e_detach_encap(priv, flow, out_index); kfree(attr->parse_attr->tun_info[out_index]); } - 
kvfree(attr->parse_attr); + } mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr); - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - mlx5e_detach_mod_hdr(priv, flow); + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts); + if (vf_tun && attr->modify_hdr) + mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); + else + mlx5e_detach_mod_hdr(priv, flow); + } + kvfree(attr->parse_attr); + kvfree(attr->esw_attr->rx_tun_attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) - mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter); + mlx5_fc_destroy(esw_attr->counter_dev, attr->counter); if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); @@ -1474,141 +1470,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, kfree(flow->attr); } -void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, - struct list_head *flow_list) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_flow_handle *rule; - struct mlx5_flow_attr *attr; - struct mlx5_flow_spec *spec; - struct mlx5e_tc_flow *flow; - int err; - - e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, - e->reformat_type, - e->encap_size, e->encap_header, - MLX5_FLOW_NAMESPACE_FDB); - if (IS_ERR(e->pkt_reformat)) { - mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n", - PTR_ERR(e->pkt_reformat)); - return; - } - e->flags |= MLX5_ENCAP_ENTRY_VALID; - mlx5e_rep_queue_neigh_stats_work(priv); - - list_for_each_entry(flow, flow_list, tmp_list) { - bool all_flow_encaps_valid = true; - int i; - - if (!mlx5e_is_offloaded_flow(flow)) - continue; - attr = flow->attr; - esw_attr = attr->esw_attr; - spec = &attr->parse_attr->spec; - - esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat; - esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; - /* Flow can be associated with multiple encap entries. - * Before offloading the flow verify that all of them have - * a valid neighbour. 
- */ - for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { - if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)) - continue; - if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) { - all_flow_encaps_valid = false; - break; - } - } - /* Do not offload flows with unresolved neighbors */ - if (!all_flow_encaps_valid) - continue; - /* update from slow path rule to encap rule */ - rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", - err); - continue; - } - - mlx5e_tc_unoffload_from_slow_path(esw, flow); - flow->rule[0] = rule; - /* was unset when slow path rule removed */ - flow_flag_set(flow, OFFLOADED); - } -} - -void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, - struct list_head *flow_list) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_flow_handle *rule; - struct mlx5_flow_attr *attr; - struct mlx5_flow_spec *spec; - struct mlx5e_tc_flow *flow; - int err; - - list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow)) - continue; - attr = flow->attr; - esw_attr = attr->esw_attr; - spec = &attr->parse_attr->spec; - - /* update from encap rule to slow path rule */ - rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); - /* mark the flow's encap dest as non-valid */ - esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; - - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n", - err); - continue; - } - - mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); - flow->rule[0] = rule; - /* was unset when fast path rule removed */ - flow_flag_set(flow, OFFLOADED); - } - - /* we know that the encap is valid */ - e->flags &= ~MLX5_ENCAP_ENTRY_VALID; - mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); -} - -static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) +struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) { return flow->attr->counter; } -/* Takes reference to all flows attached to encap and adds the flows to - * flow_list using 'tmp_list' list_head in mlx5e_tc_flow. - */ -void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list) -{ - struct encap_flow_item *efi; - struct mlx5e_tc_flow *flow; - - list_for_each_entry(efi, &e->flows, list) { - flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); - if (IS_ERR(mlx5e_flow_get(flow))) - continue; - wait_for_completion(&flow->init_done); - - flow->tmp_efi_index = efi->index; - list_add(&flow->tmp_list, flow_list); - } -} - /* Iterate over tmp_list of flows attached to flow_list head. */ -void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list) +void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list) { struct mlx5e_tc_flow *flow, *tmp; @@ -1616,222 +1484,6 @@ void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_l mlx5e_flow_put(priv, flow); } -static struct mlx5e_encap_entry * -mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, - struct mlx5e_encap_entry *e) -{ - struct mlx5e_encap_entry *next = NULL; - -retry: - rcu_read_lock(); - - /* find encap with non-zero reference counter value */ - for (next = e ? 
- list_next_or_null_rcu(&nhe->encap_list, - &e->encap_list, - struct mlx5e_encap_entry, - encap_list) : - list_first_or_null_rcu(&nhe->encap_list, - struct mlx5e_encap_entry, - encap_list); - next; - next = list_next_or_null_rcu(&nhe->encap_list, - &next->encap_list, - struct mlx5e_encap_entry, - encap_list)) - if (mlx5e_encap_take(next)) - break; - - rcu_read_unlock(); - - /* release starting encap */ - if (e) - mlx5e_encap_put(netdev_priv(e->out_dev), e); - if (!next) - return next; - - /* wait for encap to be fully initialized */ - wait_for_completion(&next->res_ready); - /* continue searching if encap entry is not in valid state after completion */ - if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) { - e = next; - goto retry; - } - - return next; -} - -void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) -{ - struct mlx5e_neigh *m_neigh = &nhe->m_neigh; - struct mlx5e_encap_entry *e = NULL; - struct mlx5e_tc_flow *flow; - struct mlx5_fc *counter; - struct neigh_table *tbl; - bool neigh_used = false; - struct neighbour *n; - u64 lastuse; - - if (m_neigh->family == AF_INET) - tbl = &arp_tbl; -#if IS_ENABLED(CONFIG_IPV6) - else if (m_neigh->family == AF_INET6) - tbl = ipv6_stub->nd_tbl; -#endif - else - return; - - /* mlx5e_get_next_valid_encap() releases previous encap before returning - * next one. - */ - while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) { - struct mlx5e_priv *priv = netdev_priv(e->out_dev); - struct encap_flow_item *efi, *tmp; - struct mlx5_eswitch *esw; - LIST_HEAD(flow_list); - - esw = priv->mdev->priv.eswitch; - mutex_lock(&esw->offloads.encap_tbl_lock); - list_for_each_entry_safe(efi, tmp, &e->flows, list) { - flow = container_of(efi, struct mlx5e_tc_flow, - encaps[efi->index]); - if (IS_ERR(mlx5e_flow_get(flow))) - continue; - list_add(&flow->tmp_list, &flow_list); - - if (mlx5e_is_offloaded_flow(flow)) { - counter = mlx5e_tc_get_counter(flow); - lastuse = mlx5_fc_query_lastuse(counter); - if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { - neigh_used = true; - break; - } - } - } - mutex_unlock(&esw->offloads.encap_tbl_lock); - - mlx5e_put_encap_flow_list(priv, &flow_list); - if (neigh_used) { - /* release current encap before breaking the loop */ - mlx5e_encap_put(priv, e); - break; - } - } - - trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used); - - if (neigh_used) { - nhe->reported_lastuse = jiffies; - - /* find the relevant neigh according to the cached device and - * dst ip pair - */ - n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); - if (!n) - return; - - neigh_event_send(n, NULL); - neigh_release(n); - } -} - -static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) -{ - WARN_ON(!list_empty(&e->flows)); - - if (e->compl_result > 0) { - mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); - - if (e->flags & MLX5_ENCAP_ENTRY_VALID) - mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); - } - - kfree(e->tun_info); - kfree(e->encap_header); - kfree_rcu(e, rcu); -} - -static void mlx5e_decap_dealloc(struct mlx5e_priv *priv, - struct mlx5e_decap_entry *d) -{ - WARN_ON(!list_empty(&d->flows)); - - if (!d->compl_result) - mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat); - - kfree_rcu(d, rcu); -} - -void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock)) - return; - hash_del_rcu(&e->encap_hlist); - 
mutex_unlock(&esw->offloads.encap_tbl_lock); - - mlx5e_encap_dealloc(priv, e); -} - -static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock)) - return; - hash_del_rcu(&d->hlist); - mutex_unlock(&esw->offloads.decap_tbl_lock); - - mlx5e_decap_dealloc(priv, d); -} - -static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, int out_index) -{ - struct mlx5e_encap_entry *e = flow->encaps[out_index].e; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - /* flow wasn't fully initialized */ - if (!e) - return; - - mutex_lock(&esw->offloads.encap_tbl_lock); - list_del(&flow->encaps[out_index].list); - flow->encaps[out_index].e = NULL; - if (!refcount_dec_and_test(&e->refcnt)) { - mutex_unlock(&esw->offloads.encap_tbl_lock); - return; - } - hash_del_rcu(&e->encap_hlist); - mutex_unlock(&esw->offloads.encap_tbl_lock); - - mlx5e_encap_dealloc(priv, e); -} - -static void mlx5e_detach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_decap_entry *d = flow->decap_reformat; - - if (!d) - return; - - mutex_lock(&esw->offloads.decap_tbl_lock); - list_del(&flow->l3_to_l2_reformat); - flow->decap_reformat = NULL; - - if (!refcount_dec_and_test(&d->refcnt)) { - mutex_unlock(&esw->offloads.decap_tbl_lock); - return; - } - hash_del_rcu(&d->hlist); - mutex_unlock(&esw->offloads.decap_tbl_lock); - - mlx5e_decap_dealloc(priv, d); -} - static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; @@ -2089,6 +1741,29 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, } } +u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer) +{ + void *headers_v; + u16 ethertype; + u8 ip_version; + + if (outer) + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + else + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); + + ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version); + /* Return ip_version converted from ethertype anyway */ + if (!ip_version) { + ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); + if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP) + ip_version = 4; + else if (ethertype == ETH_P_IPV6) + ip_version = 6; + } + return ip_version; +} + static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec, @@ -2097,6 +1772,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, u8 *match_level, bool *match_inner) { + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev); struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct netlink_ext_ack *extack = f->common.extack; bool needs_mapping, sets_mapping; @@ -2134,6 +1810,31 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, */ if (!netif_is_bareudp(filter_dev)) flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; + err = mlx5e_tc_set_attr_rx_tun(flow, spec); + if (err) + return err; + } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + struct mlx5_flow_spec *tmp_spec; + + tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL); + if (!tmp_spec) { + NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec"); + netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec"); + return -ENOMEM; + } + 
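/* Run the tunnel parser on a scratch copy of the match spec: only
+ * the tunnel attributes extracted from it below are kept, while the
+ * original spec stays untouched. */
+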
memcpy(tmp_spec, spec, sizeof(*tmp_spec)); + + err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level); + if (err) { + kvfree(tmp_spec); + NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes"); + netdev_warn(priv->netdev, "Failed to parse tunnel attributes"); + return err; + } + err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec); + kvfree(tmp_spec); + if (err) + return err; } if (!needs_mapping && !sets_mapping) @@ -3582,35 +3283,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, return 0; } -struct encap_key { - const struct ip_tunnel_key *ip_tun_key; - struct mlx5e_tc_tunnel *tc_tunnel; -}; - -static inline int cmp_encap_info(struct encap_key *a, - struct encap_key *b) -{ - return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || - a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type; -} - -static inline int cmp_decap_info(struct mlx5e_decap_key *a, - struct mlx5e_decap_key *b) -{ - return memcmp(&a->key, &b->key, sizeof(b->key)); -} - -static inline int hash_encap_info(struct encap_key *key) -{ - return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), - key->tc_tunnel->tunnel_type); -} - -static inline int hash_decap_info(struct mlx5e_decap_key *key) -{ - return jhash(&key->key, sizeof(key->key), 0); -} - static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, struct net_device *peer_netdev) { @@ -3624,277 +3296,6 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, same_hw_devs(priv, peer_priv)); } -bool mlx5e_encap_take(struct mlx5e_encap_entry *e) -{ - return refcount_inc_not_zero(&e->refcnt); -} - -static bool mlx5e_decap_take(struct mlx5e_decap_entry *e) -{ - return refcount_inc_not_zero(&e->refcnt); -} - -static struct mlx5e_encap_entry * -mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, - uintptr_t hash_key) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_encap_entry *e; - struct encap_key e_key; - - hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, - encap_hlist, hash_key) { - e_key.ip_tun_key = &e->tun_info->key; - e_key.tc_tunnel = e->tunnel; - if (!cmp_encap_info(&e_key, key) && - mlx5e_encap_take(e)) - return e; - } - - return NULL; -} - -static struct mlx5e_decap_entry * -mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key, - uintptr_t hash_key) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_decap_key r_key; - struct mlx5e_decap_entry *e; - - hash_for_each_possible_rcu(esw->offloads.decap_tbl, e, - hlist, hash_key) { - r_key = e->key; - if (!cmp_decap_info(&r_key, key) && - mlx5e_decap_take(e)) - return e; - } - return NULL; -} - -static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info) -{ - size_t tun_size = sizeof(*tun_info) + tun_info->options_len; - - return kmemdup(tun_info, tun_size, GFP_KERNEL); -} - -static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - int out_index, - struct mlx5e_encap_entry *e, - struct netlink_ext_ack *extack) -{ - int i; - - for (i = 0; i < out_index; i++) { - if (flow->encaps[i].e != e) - continue; - NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); - netdev_err(priv->netdev, "can't duplicate encap action\n"); - return true; - } - - return false; -} - -static int mlx5e_attach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct net_device *mirred_dev, - int out_index, - struct netlink_ext_ack *extack, - struct net_device **encap_dev, - bool *encap_valid) -{ - struct mlx5_eswitch *esw = 
priv->mdev->priv.eswitch; - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_flow_attr *attr = flow->attr; - const struct ip_tunnel_info *tun_info; - struct encap_key key; - struct mlx5e_encap_entry *e; - unsigned short family; - uintptr_t hash_key; - int err = 0; - - parse_attr = attr->parse_attr; - tun_info = parse_attr->tun_info[out_index]; - family = ip_tunnel_info_af(tun_info); - key.ip_tun_key = &tun_info->key; - key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); - if (!key.tc_tunnel) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel"); - return -EOPNOTSUPP; - } - - hash_key = hash_encap_info(&key); - - mutex_lock(&esw->offloads.encap_tbl_lock); - e = mlx5e_encap_get(priv, &key, hash_key); - - /* must verify if encap is valid or not */ - if (e) { - /* Check that entry was not already attached to this flow */ - if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { - err = -EOPNOTSUPP; - goto out_err; - } - - mutex_unlock(&esw->offloads.encap_tbl_lock); - wait_for_completion(&e->res_ready); - - /* Protect against concurrent neigh update. */ - mutex_lock(&esw->offloads.encap_tbl_lock); - if (e->compl_result < 0) { - err = -EREMOTEIO; - goto out_err; - } - goto attach_flow; - } - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) { - err = -ENOMEM; - goto out_err; - } - - refcount_set(&e->refcnt, 1); - init_completion(&e->res_ready); - - tun_info = dup_tun_info(tun_info); - if (!tun_info) { - err = -ENOMEM; - goto out_err_init; - } - e->tun_info = tun_info; - err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); - if (err) - goto out_err_init; - - INIT_LIST_HEAD(&e->flows); - hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); - mutex_unlock(&esw->offloads.encap_tbl_lock); - - if (family == AF_INET) - err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); - else if (family == AF_INET6) - err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); - - /* Protect against concurrent neigh update. 
*/ - mutex_lock(&esw->offloads.encap_tbl_lock); - complete_all(&e->res_ready); - if (err) { - e->compl_result = err; - goto out_err; - } - e->compl_result = 1; - -attach_flow: - flow->encaps[out_index].e = e; - list_add(&flow->encaps[out_index].list, &e->flows); - flow->encaps[out_index].index = out_index; - *encap_dev = e->out_dev; - if (e->flags & MLX5_ENCAP_ENTRY_VALID) { - attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat; - attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; - *encap_valid = true; - } else { - *encap_valid = false; - } - mutex_unlock(&esw->offloads.encap_tbl_lock); - - return err; - -out_err: - mutex_unlock(&esw->offloads.encap_tbl_lock); - if (e) - mlx5e_encap_put(priv, e); - return err; - -out_err_init: - mutex_unlock(&esw->offloads.encap_tbl_lock); - kfree(tun_info); - kfree(e); - return err; -} - -static int mlx5e_attach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5e_decap_entry *d; - struct mlx5e_decap_key key; - uintptr_t hash_key; - int err = 0; - - parse_attr = flow->attr->parse_attr; - if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) { - NL_SET_ERR_MSG_MOD(extack, - "encap header larger than max supported"); - return -EOPNOTSUPP; - } - - key.key = parse_attr->eth; - hash_key = hash_decap_info(&key); - mutex_lock(&esw->offloads.decap_tbl_lock); - d = mlx5e_decap_get(priv, &key, hash_key); - if (d) { - mutex_unlock(&esw->offloads.decap_tbl_lock); - wait_for_completion(&d->res_ready); - mutex_lock(&esw->offloads.decap_tbl_lock); - if (d->compl_result) { - err = -EREMOTEIO; - goto out_free; - } - goto found; - } - - d = kzalloc(sizeof(*d), GFP_KERNEL); - if (!d) { - err = -ENOMEM; - goto out_err; - } - - d->key = key; - refcount_set(&d->refcnt, 1); - init_completion(&d->res_ready); - INIT_LIST_HEAD(&d->flows); - hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key); - mutex_unlock(&esw->offloads.decap_tbl_lock); - - d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, - MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2, - sizeof(parse_attr->eth), - &parse_attr->eth, - MLX5_FLOW_NAMESPACE_FDB); - if (IS_ERR(d->pkt_reformat)) { - err = PTR_ERR(d->pkt_reformat); - d->compl_result = err; - } - mutex_lock(&esw->offloads.decap_tbl_lock); - complete_all(&d->res_ready); - if (err) - goto out_free; - -found: - flow->decap_reformat = d; - attr->decap_pkt_reformat = d->pkt_reformat; - list_add(&flow->l3_to_l2_reformat, &d->flows); - mutex_unlock(&esw->offloads.decap_tbl_lock); - return 0; - -out_free: - mutex_unlock(&esw->offloads.decap_tbl_lock); - mlx5e_decap_put(priv, d); - return err; - -out_err: - mutex_unlock(&esw->offloads.decap_tbl_lock); - return err; -} - static int parse_tc_vlan_action(struct mlx5e_priv *priv, const struct flow_action_entry *act, struct mlx5_esw_flow_attr *attr, @@ -4247,7 +3648,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (encap) { parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex; - parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info); + parse_attr->tun_info[esw_attr->out_count] = + mlx5e_dup_tun_info(info); if (!parse_attr->tun_info[esw_attr->out_count]) return -ENOMEM; encap = false; @@ -4384,6 +3786,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, } } + /* always set IP version for indirect table handling 
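+ * (mlx5e_tc_get_ip_version() falls back to deriving it from the
+ * ethertype, so the attribute is valid even for filters that do not
+ * match on ip_version directly)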
*/ + attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); + if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { /* For prio tag mode, replace vlan pop with rewrite vlan prio @@ -4664,7 +4069,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, return flow; err_free: - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); mlx5e_flow_put(priv, flow); out: return ERR_PTR(err); @@ -4809,6 +4213,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, return 0; err_free: + flow_flag_set(flow, FAILED); dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); mlx5e_flow_put(priv, flow); out: @@ -5040,7 +4445,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, */ if (rate) { rate = (rate * BITS_PER_BYTE) + 500000; - rate_mbps = max_t(u32, do_div(rate, 1000000), 1); + rate_mbps = max_t(u64, do_div(rate, 1000000), 1); } err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); @@ -5221,6 +4626,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) if (err) return err; + lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); + if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED | MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; @@ -5328,7 +4735,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) } uplink_priv->tunnel_mapping = mapping; - mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true); + /* 0xFFF is reserved for stack devices slow path table mark */ + mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5339,8 +4747,18 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) if (err) goto err_ht_init; - return err; + lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); + + uplink_priv->encap = mlx5e_tc_tun_init(priv); + if (IS_ERR(uplink_priv->encap)) { + err = PTR_ERR(uplink_priv->encap); + goto err_register_fib_notifier; + } + return 0; + +err_register_fib_notifier: + rhashtable_destroy(tc_ht); err_ht_init: mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); err_enc_opts_mapping: @@ -5357,10 +4775,11 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) { struct mlx5_rep_uplink_priv *uplink_priv; - rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); - uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); + rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); + mlx5e_tc_tun_cleanup(uplink_priv->encap); + mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); mapping_destroy(uplink_priv->tunnel_mapping); @@ -5460,7 +4879,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, tc_skb_ext->chain = chain; zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) & - ZONE_RESTORE_MAX; + ESW_ZONE_ID_MASK; if (!mlx5e_tc_ct_restore_flow(tc->ct, skb, zone_restore_id)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 4a2ce241522e..89003ae7775a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -37,6 +37,8 @@ #include "en.h" #include "eswitch.h" #include "en/tc_ct.h" +#include "en/tc_tun.h" +#include "en_rep.h" #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff @@ -76,6 +78,7 @@ struct mlx5_flow_attr { struct mlx5_flow_table *dest_ft; u8 inner_match_level; u8 outer_match_level; + u8 ip_version; u32 flags; union { struct mlx5_esw_flow_attr esw_attr[0]; @@ -83,6 +86,19 @@ struct mlx5_flow_attr { }; }; +struct mlx5_rx_tun_attr { + u16 
decap_vport; + union { + __be32 v4; + struct in6_addr v6; + } src_ip; /* Valid if decap_vport is not zero */ + union { + __be32 v4; + struct in6_addr v6; + } dst_ip; /* Valid if decap_vport is not zero */ + u32 vni; +}; + #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0) @@ -158,7 +174,7 @@ bool mlx5e_encap_take(struct mlx5e_encap_entry *e); void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list); -void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list); +void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list); struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); @@ -167,6 +183,7 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work); enum mlx5e_tc_attr_to_reg { CHAIN_TO_REG, + VPORT_TO_REG, TUNNEL_TO_REG, CTSTATE_TO_REG, ZONE_TO_REG, @@ -197,6 +214,11 @@ int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, enum mlx5e_tc_attr_to_reg type, u32 data); +void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5e_tc_attr_to_reg type, + int act_id, u32 data); + void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, enum mlx5e_tc_attr_to_reg type, u32 data, @@ -207,6 +229,16 @@ void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, u32 *data, u32 *mask); +int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5_flow_namespace_type ns, + enum mlx5e_tc_attr_to_reg type, + u32 data); + +int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow); + int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, int namespace, struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); @@ -242,6 +274,10 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr); +bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev); +int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, + u16 *vport); + #else /* CONFIG_MLX5_CLS_ACT */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} @@ -283,7 +319,7 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) reg_b = be32_to_cpu(cqe->ft_metadata); - if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS)) + if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS)) return false; chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 61ed671fe741..bdbffe484fce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -39,7 +39,6 @@ #include "en/txrx.h" #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" -#include "lib/clock.h" #include "en/ptp.h" static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) @@ -106,28 +105,53 @@ return_txq: return priv->port_ptp_tc2realtxq[up]; } +static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb, + u16 htb_maj_id) +{ + u16 classid; + + if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id) + classid = 
TC_H_MIN(skb->priority); + else + classid = READ_ONCE(priv->htb.defcls); + + if (!classid) + return 0; + + return mlx5e_get_txq_by_classid(priv, classid); +} + u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct mlx5e_priv *priv = netdev_priv(dev); + int num_tc_x_num_ch; int txq_ix; int up = 0; int ch_ix; - if (unlikely(priv->channels.port_ptp)) { - int num_tc_x_num_ch; + /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */ + num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch); + if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) { + /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */ + u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id); - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - mlx5e_use_ptpsq(skb)) - return mlx5e_select_ptpsq(dev, skb); + if (unlikely(htb_maj_id)) { + txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id); + if (txq_ix > 0) + return txq_ix; + } - /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */ - num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch); + if (unlikely(priv->channels.port_ptp)) + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + mlx5e_use_ptpsq(skb)) + return mlx5e_select_ptpsq(dev, skb); txq_ix = netdev_pick_tx(dev, skb, NULL); - /* Fix netdev_pick_tx() not to choose ptp_channel txqs. + /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs. * If they are selected, switch to regular queues. - * Driver to select these queues only at mlx5e_select_ptpsq(). + * Driver to select these queues only at mlx5e_select_ptpsq() + * and mlx5e_select_htb_queue(). */ if (unlikely(txq_ix >= num_tc_x_num_ch)) txq_ix %= num_tc_x_num_ch; @@ -241,9 +265,8 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; sq->stats->csum_partial++; #endif - } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) { + } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) { ipsec_txwqe_build_eseg_csum(sq, skb, eseg); - } else sq->stats->csum_none++; } @@ -703,6 +726,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) u16 pi; sq = priv->txq2sq[skb_get_queue_mapping(skb)]; + if (unlikely(!sq)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } /* May send SKBs and WQEs. 
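The HTB path above selects the txq from the classid embedded in skb->priority: the upper 16 bits carry the qdisc major handle, the lower 16 bits the class minor. A standalone sketch of that decomposition (the TC_H_* macros mirror include/uapi/linux/pkt_sched.h; the priv->htb.defcls fallback is elided):

	#include <stdio.h>

	#define TC_H_MAJ_MASK 0xFFFF0000U
	#define TC_H_MIN_MASK 0x0000FFFFU
	#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
	#define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)

	int main(void)
	{
		unsigned int priority = 0x00010010;	/* tc classid 1:10 */
		unsigned short htb_maj_id = 0x1;	/* as read from priv->htb.maj_id */

		if ((TC_H_MAJ(priority) >> 16) == htb_maj_id)
			printf("classid 0x%x maps to an HTB txq\n", TC_H_MIN(priority));
		return 0;
	}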
*/ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel))) @@ -774,7 +801,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct skb_shared_hwtstamps hwts = {}; u64 ts = get_cqe_ts(cqe); - hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts); + hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts); if (sq->ptpsq) mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP, hwts.hwtstamp, sq->ptpsq->cq_stats); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index a3cfe06d5116..d54da3797c30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -115,17 +115,21 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) napi); struct mlx5e_ch_stats *ch_stats = c->stats; struct mlx5e_xdpsq *xsksq = &c->xsksq; + struct mlx5e_txqsq __rcu **qos_sqs; struct mlx5e_rq *xskrq = &c->xskrq; struct mlx5e_rq *rq = &c->rq; bool aff_change = false; bool busy_xsk = false; bool busy = false; int work_done = 0; + u16 qos_sqs_size; bool xsk_open; int i; rcu_read_lock(); + qos_sqs = rcu_dereference(c->qos_sqs); + xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); ch_stats->poll++; @@ -133,6 +137,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); + if (unlikely(qos_sqs)) { + smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */ + qos_sqs_size = READ_ONCE(c->qos_sqs_size); + + for (i = 0; i < qos_sqs_size; i++) { + struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]); + + if (sq) + busy |= mlx5e_poll_tx_cq(&sq->cq, budget); + } + } + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); if (c->xdp) @@ -186,6 +202,16 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); } + if (unlikely(qos_sqs)) { + for (i = 0; i < qos_sqs_size; i++) { + struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]); + + if (sq) { + mlx5e_handle_tx_dim(sq); + mlx5e_cq_arm(&sq->cq); + } + } + } mlx5e_handle_rx_dim(rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index fc0afa03d407..174dfbc996c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -467,7 +467,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++) ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); - eq_table->irq_table = dev->priv.irq_table; + eq_table->irq_table = mlx5_irq_table_get(dev); return 0; } @@ -595,6 +595,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) async_event_mask |= (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED); + if (MLX5_CAP_GEN_MAX(dev, vhca_state)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE); + mask[0] = async_event_mask; if (MLX5_CAP_GEN(dev, event_cap)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c index 4c74e2690d57..26b37a0f8762 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -150,7 +150,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport) static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num) { - return mlx5_eswitch_is_vf_vport(esw, vport_num); + return 
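The qos_sqs handling added to mlx5e_napi_poll() above is the usual RCU recipe for a resizable array of SQ pointers: the writer stores the size, then publishes the array pointer; the reader dereferences the pointer under rcu_read_lock() and only reads the size after the paired barrier. A reduced sketch (the writer side is an assumption about mlx5e_qos_alloc_queues, which is not shown in this hunk):

	struct mlx5e_txqsq __rcu **qos_sqs;
	u16 n, i;

	/* Reader side, as in the NAPI poll loop above: */
	rcu_read_lock();
	qos_sqs = rcu_dereference(c->qos_sqs);
	if (qos_sqs) {
		smp_rmb();	/* order the size load after the pointer load */
		n = READ_ONCE(c->qos_sqs_size);
		for (i = 0; i < n; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq)
				busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
		}
	}
	rcu_read_unlock();

	/* Writer side (sketch): store the size first, then publish the
	 * array; rcu_assign_pointer() supplies the release ordering that
	 * the reader's smp_rmb() pairs with.
	 */
	WRITE_ONCE(c->qos_sqs_size, n);
	rcu_assign_pointer(c->qos_sqs, new_sqs);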
mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num); } int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index ffff11baa3d0..cb1e181f4c6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -122,3 +122,44 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 vport = mlx5_eswitch_get_vport(esw, vport_num); return vport->dl_port; } + +int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, + u16 vport_num, u32 sfnum) +{ + struct mlx5_core_dev *dev = esw->dev; + struct netdev_phys_item_id ppid = {}; + unsigned int dl_port_index; + struct mlx5_vport *vport; + struct devlink *devlink; + u16 pfnum; + int err; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + pfnum = PCI_FUNC(dev->pdev->devfn); + mlx5_esw_get_port_parent_id(dev, &ppid); + memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); + dl_port->attrs.switch_id.id_len = ppid.id_len; + devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum); + devlink = priv_to_devlink(dev); + dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); + err = devlink_port_register(devlink, dl_port, dl_port_index); + if (err) + return err; + + vport->dl_port = dl_port; + return 0; +} + +void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return; + devlink_port_unregister(vport->dl_port); + vport->dl_port = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c new file mode 100644 index 000000000000..6f6772bf61a2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021 Mellanox Technologies. 
*/ + +#include <linux/etherdevice.h> +#include <linux/idr.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "mlx5_core.h" +#include "eswitch.h" +#include "en.h" +#include "en_tc.h" +#include "fs_core.h" +#include "esw/indir_table.h" +#include "lib/fs_chains.h" + +#define MLX5_ESW_INDIR_TABLE_SIZE 128 +#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2) +#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1) + +struct mlx5_esw_indir_table_rule { + struct list_head list; + struct mlx5_flow_handle *handle; + union { + __be32 v4; + struct in6_addr v6; + } dst_ip; + u32 vni; + struct mlx5_modify_hdr *mh; + refcount_t refcnt; +}; + +struct mlx5_esw_indir_table_entry { + struct hlist_node hlist; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *recirc_grp; + struct mlx5_flow_group *fwd_grp; + struct mlx5_flow_handle *fwd_rule; + struct list_head recirc_rules; + int recirc_cnt; + int fwd_ref; + + u16 vport; + u8 ip_version; +}; + +struct mlx5_esw_indir_table { + struct mutex lock; /* protects table */ + DECLARE_HASHTABLE(table, 8); +}; + +struct mlx5_esw_indir_table * +mlx5_esw_indir_table_init(void) +{ + struct mlx5_esw_indir_table *indir = kvzalloc(sizeof(*indir), GFP_KERNEL); + + if (!indir) + return ERR_PTR(-ENOMEM); + + mutex_init(&indir->lock); + hash_init(indir->table); + return indir; +} + +void +mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir) +{ + mutex_destroy(&indir->lock); + kvfree(indir); +} + +bool +mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport_num, + struct mlx5_core_dev *dest_mdev) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + /* Use indirect table for all IP traffic from UL to VF with vport + * destination when source rewrite flag is set. + */ + return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK && + mlx5_eswitch_is_vf_vport(esw, vport_num) && + esw->dev == dest_mdev && + attr->ip_version && + attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE; +} + +u16 +mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + return esw_attr->rx_tun_attr ? 
esw_attr->rx_tun_attr->decap_vport : 0; +} + +static struct mlx5_esw_indir_table_rule * +mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_esw_indir_table_rule *rule; + + list_for_each_entry(rule, &e->recirc_rules, list) + if (rule->vni == attr->rx_tun_attr->vni && + !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip, + sizeof(attr->rx_tun_attr->dst_ip))) + goto found; + return NULL; + +found: + refcount_inc(&rule->refcnt); + return rule; +} + +static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + struct mlx5_esw_indir_table_entry *e) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_fs_chains *chains = esw_chains(esw); + struct mlx5e_tc_mod_hdr_acts mod_acts = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_esw_indir_table_rule *rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *rule_spec; + struct mlx5_flow_handle *handle; + int err = 0; + u32 data; + + rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr); + if (rule) + return 0; + + if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX) + return -EINVAL; + + rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL); + if (!rule_spec) + return -ENOMEM; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) { + err = -ENOMEM; + goto out; + } + + rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS | + MLX5_MATCH_MISC_PARAMETERS_2; + if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) { + MLX5_SET(fte_match_param, rule_spec->match_criteria, + outer_headers.ip_version, 0xf); + MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, + attr->ip_version); + } else if (attr->ip_version) { + MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, + outer_headers.ethertype); + MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype, + (attr->ip_version == 4 ? 
ETH_P_IP : ETH_P_IPV6));
+	} else {
+		err = -EOPNOTSUPP;
+		goto err_ethertype;
+	}
+
+	if (attr->ip_version == 4) {
+		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+		MLX5_SET(fte_match_param, rule_spec->match_value,
+			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+			 ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
+	} else if (attr->ip_version == 6) {
+		int len = sizeof(struct in6_addr);
+
+		memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       0xff, len);
+		memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       &esw_attr->rx_tun_attr->dst_ip.v6, len);
+	}
+
+	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+			 misc_parameters.vxlan_vni);
+	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
+		 MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
+
+	MLX5_SET(fte_match_param, rule_spec->match_criteria,
+		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+		 mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
+							   MLX5_VPORT_UPLINK));
+
+	/* Modify flow source to recirculate packet */
+	data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
+	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+					VPORT_TO_REG, data);
+	if (err)
+		goto err_mod_hdr_regc0;
+
+	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+					TUNNEL_TO_REG, ESW_TUN_SLOW_TABLE_GOTO_VPORT);
+	if (err)
+		goto err_mod_hdr_regc1;
+
+	flow_act.modify_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+						       mod_acts.num_actions, mod_acts.actions);
+	if (IS_ERR(flow_act.modify_hdr)) {
+		err = PTR_ERR(flow_act.modify_hdr);
+		goto err_mod_hdr_alloc;
+	}
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
+	if (IS_ERR(dest.ft)) {
+		err = PTR_ERR(dest.ft);
+		goto err_table;
+	}
+	handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+	if (IS_ERR(handle)) {
+		err = PTR_ERR(handle);
+		goto err_handle;
+	}
+
+	dealloc_mod_hdr_actions(&mod_acts);
+	rule->handle = handle;
+	rule->vni = esw_attr->rx_tun_attr->vni;
+	rule->mh = flow_act.modify_hdr;
+	memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+	       sizeof(esw_attr->rx_tun_attr->dst_ip));
+	refcount_set(&rule->refcnt, 1);
+	list_add(&rule->list, &e->recirc_rules);
+	e->recirc_cnt++;
+	goto out;
+
+err_handle:
+	mlx5_chains_put_table(chains, 0, 1, 0);
+err_table:
+	mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
+err_mod_hdr_alloc:
+err_mod_hdr_regc1:
+	dealloc_mod_hdr_actions(&mod_acts);
+err_mod_hdr_regc0:
+err_ethertype:
+	kfree(rule);
+out:
+	kvfree(rule_spec);
+	return err;
+}
+
+static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
+					  struct mlx5_flow_attr *attr,
+					  struct mlx5_esw_indir_table_entry *e)
+{
+	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	struct mlx5_fs_chains *chains = esw_chains(esw);
+	struct mlx5_esw_indir_table_rule *rule;
+
+	list_for_each_entry(rule, &e->recirc_rules, list)
+		if (rule->vni == esw_attr->rx_tun_attr->vni &&
+		    !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+			    sizeof(esw_attr->rx_tun_attr->dst_ip)))
+			goto found;
+
+	return;
+
+found:
+	if (!refcount_dec_and_test(&rule->refcnt))
+		return;
+
+	mlx5_del_flow_rules(rule->handle);
+	mlx5_chains_put_table(chains, 0, 1, 0);
+	mlx5_modify_header_dealloc(esw->dev, rule->mh);
+	list_del(&rule->list);
+	kfree(rule);
+	e->recirc_cnt--;
+}
+
+static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
+					  struct mlx5_flow_attr *attr,
+					  struct mlx5_flow_spec *spec,
+					  struct mlx5_esw_indir_table_entry *e)
+{
+	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	u32 *in, *match;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+		 MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
+	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
+		MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
+	else
+		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
+
+	if (attr->ip_version == 4) {
+		MLX5_SET_TO_ONES(fte_match_param, match,
+				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+	} else if (attr->ip_version == 6) {
+		memset(MLX5_ADDR_OF(fte_match_param, match,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       0xff, sizeof(struct in6_addr));
+	} else {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
+	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+		 mlx5_eswitch_get_vport_metadata_mask());
+	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+	e->recirc_grp = mlx5_create_flow_group(e->ft, in);
+	if (IS_ERR(e->recirc_grp)) {
+		err = PTR_ERR(e->recirc_grp);
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&e->recirc_rules);
+	e->recirc_cnt = 0;
+
+out:
+	kvfree(in);
+	return err;
+}
+
+static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
+				       struct mlx5_esw_indir_table_entry *e)
+{
+	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_spec *spec;
+	u32 *in;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec) {
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	/* Hold one entry */
+	MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
+	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
+	e->fwd_grp = mlx5_create_flow_group(e->ft, in);
+	if (IS_ERR(e->fwd_grp)) {
+		err = PTR_ERR(e->fwd_grp);
+		goto err_out;
+	}
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport.num = e->vport;
+	dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+	e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+	if (IS_ERR(e->fwd_rule)) {
+		mlx5_destroy_flow_group(e->fwd_grp);
+		err = PTR_ERR(e->fwd_rule);
+	}
+
+err_out:
+	kvfree(spec);
+	kvfree(in);
+	return err;
+}
+
+static struct mlx5_esw_indir_table_entry *
+mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+				  struct mlx5_flow_spec *spec, u16 vport, bool decap)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_namespace *root_ns;
+	struct
mlx5_esw_indir_table_entry *e; + struct mlx5_flow_table *ft; + int err = 0; + + root_ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) + return ERR_PTR(-ENOENT); + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + + ft_attr.prio = FDB_TC_OFFLOAD; + ft_attr.max_fte = MLX5_ESW_INDIR_TABLE_SIZE; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.level = 1; + + ft = mlx5_create_flow_table(root_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + goto tbl_err; + } + e->ft = ft; + e->vport = vport; + e->ip_version = attr->ip_version; + e->fwd_ref = !decap; + + err = mlx5_create_indir_recirc_group(esw, attr, spec, e); + if (err) + goto recirc_grp_err; + + if (decap) { + err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e); + if (err) + goto recirc_rule_err; + } + + err = mlx5_create_indir_fwd_group(esw, e); + if (err) + goto fwd_grp_err; + + hash_add(esw->fdb_table.offloads.indir->table, &e->hlist, + vport << 16 | attr->ip_version); + + return e; + +fwd_grp_err: + if (decap) + mlx5_esw_indir_table_rule_put(esw, attr, e); +recirc_rule_err: + mlx5_destroy_flow_group(e->recirc_grp); +recirc_grp_err: + mlx5_destroy_flow_table(e->ft); +tbl_err: + kfree(e); + return ERR_PTR(err); +} + +static struct mlx5_esw_indir_table_entry * +mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version) +{ + struct mlx5_esw_indir_table_entry *e; + u32 key = vport << 16 | ip_version; + + hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key) + if (e->vport == vport && e->ip_version == ip_version) + return e; + + return NULL; +} + +struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + u16 vport, bool decap) +{ + struct mlx5_esw_indir_table_entry *e; + int err; + + mutex_lock(&esw->fdb_table.offloads.indir->lock); + e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version); + if (e) { + if (!decap) { + e->fwd_ref++; + } else { + err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e); + if (err) + goto out_err; + } + } else { + e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap); + if (IS_ERR(e)) { + err = PTR_ERR(e); + esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err); + goto out_err; + } + } + mutex_unlock(&esw->fdb_table.offloads.indir->lock); + return e->ft; + +out_err: + mutex_unlock(&esw->fdb_table.offloads.indir->lock); + return ERR_PTR(err); +} + +void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport, bool decap) +{ + struct mlx5_esw_indir_table_entry *e; + + mutex_lock(&esw->fdb_table.offloads.indir->lock); + e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version); + if (!e) + goto out; + + if (!decap) + e->fwd_ref--; + else + mlx5_esw_indir_table_rule_put(esw, attr, e); + + if (e->fwd_ref || e->recirc_cnt) + goto out; + + hash_del(&e->hlist); + mlx5_destroy_flow_group(e->recirc_grp); + mlx5_del_flow_rules(e->fwd_rule); + mlx5_destroy_flow_group(e->fwd_grp); + mlx5_destroy_flow_table(e->ft); + kfree(e); +out: + mutex_unlock(&esw->fdb_table.offloads.indir->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h new file mode 100644 index 000000000000..cb9eafd1b4ee --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright 
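mlx5_esw_indir_table_entry_lookup() above packs (vport, ip_version) into the single key `vport << 16 | ip_version`. Since hash_for_each_possible() only narrows the search to one of the 256 buckets, the walk still re-checks both fields. The same idiom in isolation (my_tbl/my_entry/my_lookup are illustrative names, not driver symbols):

	#include <linux/hashtable.h>

	struct my_entry {
		struct hlist_node hlist;
		u16 vport;
		u8 ip_version;
	};

	static DEFINE_HASHTABLE(my_tbl, 8);	/* 1 << 8 buckets */

	static struct my_entry *my_lookup(u16 vport, u8 ip_version)
	{
		u32 key = (u32)vport << 16 | ip_version;
		struct my_entry *e;

		hash_for_each_possible(my_tbl, e, hlist, key)
			if (e->vport == vport && e->ip_version == ip_version)
				return e;
		return NULL;
	}

Insertion uses the matching hash_add(my_tbl, &e->hlist, key), exactly as the entry_create path above does.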
(c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_FT_H__
+#define __MLX5_ESW_FT_H__
+
+#ifdef CONFIG_MLX5_CLS_ACT
+
+struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void);
+void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
+
+struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+						 struct mlx5_flow_attr *attr,
+						 struct mlx5_flow_spec *spec,
+						 u16 vport, bool decap);
+void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+			      struct mlx5_flow_attr *attr,
+			      u16 vport, bool decap);
+
+bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+			    struct mlx5_flow_attr *attr,
+			    u16 vport_num,
+			    struct mlx5_core_dev *dest_mdev);
+
+u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr);
+
+#else
+/* indir API stubs */
+static inline struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void)
+{
+	return NULL;
+}
+
+static inline void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+{
+}
+
+static inline struct mlx5_flow_table *
+mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+			 struct mlx5_flow_attr *attr,
+			 struct mlx5_flow_spec *spec,
+			 u16 vport, bool decap)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+			 struct mlx5_flow_attr *attr,
+			 u16 vport, bool decap)
+{
+}
+
+static inline bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+			    struct mlx5_flow_attr *attr,
+			    u16 vport_num,
+			    struct mlx5_core_dev *dest_mdev)
+{
+	return false;
+}
+
+static inline u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+{
+	return 0;
+}
+#endif
+
+#endif /* __MLX5_ESW_FT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da901e364656..aba17835465b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1042,8 +1042,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
 	void *vport_elem;
 	int err = 0;
 
-	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
-	    !MLX5_CAP_QOS(dev, esw_scheduling))
+	if (!esw->qos.enabled)
 		return 0;
 
 	if (vport->qos.enabled)
@@ -1273,8 +1272,8 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
 	esw_vport_cleanup_acl(esw, vport);
 }
 
-static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
-			    enum mlx5_eswitch_vport_event enabled_events)
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+			  enum mlx5_eswitch_vport_event enabled_events)
 {
 	struct mlx5_vport *vport;
 	int ret;
@@ -1301,6 +1300,13 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
 	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
 		vport->info.trusted = true;
 
+	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+		if (ret)
+			goto err_vhca_mapping;
+	}
+
 	esw_vport_change_handle_locked(vport);
 	esw->enabled_vports++;
@@ -1308,9 +1314,14 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
 done:
 	mutex_unlock(&esw->state_lock);
 	return ret;
+
+err_vhca_mapping:
+	esw_vport_cleanup(esw, vport);
+	mutex_unlock(&esw->state_lock);
+	return ret;
 }
 
-static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	struct mlx5_vport *vport;
 
@@ -1326,6 +1337,11 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
 
 	/* Disable events from this vport */
 	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+
+	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+		mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+
 	/* We don't assume VFs will cleanup after themselves.
 	 * Calling vport change handler while vport is disabled will cleanup
 	 * the vport resources.
@@ -1366,9 +1382,15 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 {
 	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
 	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+	u16 max_sf_vports;
 	u32 *out;
 	int err;
 
+	max_sf_vports = mlx5_sf_max_functions(dev);
+	/* The device returns the SF bitmap as an array of 64-bit (__be64) words */
+	if (max_sf_vports)
+		outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
+
 	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return ERR_PTR(-ENOMEM);
@@ -1376,7 +1398,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 	MLX5_SET(query_esw_functions_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
 
-	err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 	if (!err)
 		return out;
 
@@ -1426,7 +1448,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
 {
 	int err;
 
-	err = esw_enable_vport(esw, vport_num, enabled_events);
+	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
 	if (err)
 		return err;
 
@@ -1437,14 +1459,14 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
 	return err;
 
 err_rep:
-	esw_disable_vport(esw, vport_num);
+	mlx5_esw_vport_disable(esw, vport_num);
 	return err;
 }
 
 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	esw_offloads_unload_rep(esw, vport_num);
-	esw_disable_vport(esw, vport_num);
+	mlx5_esw_vport_disable(esw, vport_num);
 }
 
 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1594,6 +1616,15 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
 	kvfree(out);
 }
 
+static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
+{
+	struct mlx5_esw_event_info info = {};
+
+	info.new_mode = mode;
+
+	blocking_notifier_call_chain(&esw->n_head, 0, &info);
+}
+
 /**
  * mlx5_eswitch_enable_locked - Enable eswitch
  * @esw: Pointer to eswitch
@@ -1654,6 +1685,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
 		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
+	mlx5_esw_mode_change_notify(esw, mode);
+
 	return 0;
 
 abort:
@@ -1710,6 +1743,11 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
+	/* Notify eswitch users that the eswitch is leaving its current mode,
+	 * so that they can do any necessary cleanup before it is disabled.
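The chain behind this notification is an ordinary blocking notifier: mlx5_esw_mode_change_notify() (added earlier in this file) publishes a struct mlx5_esw_event_info carrying the mode being entered, or MLX5_ESWITCH_NONE on teardown as here. A hypothetical subscriber, using the register helper this patch also adds:

	static int my_esw_mode_event(struct notifier_block *nb, unsigned long event,
				     void *data)
	{
		struct mlx5_esw_event_info *info = data;

		if (info->new_mode == MLX5_ESWITCH_NONE) {
			/* Tear down anything that depends on the current mode. */
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_esw_nb = {
		.notifier_call = my_esw_mode_event,
	};

	/* ... during setup ... */
	err = mlx5_esw_event_notifier_register(esw, &my_esw_nb);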
+ */ + mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE); + mlx5_eswitch_event_handlers_unregister(esw); if (esw->mode == MLX5_ESWITCH_LEGACY) @@ -1794,6 +1832,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr); atomic64_set(&esw->offloads.num_flows, 0); ida_init(&esw->offloads.vport_metadata_ida); + xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); mutex_init(&esw->state_lock); mutex_init(&esw->mode_lock); @@ -1810,6 +1849,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; dev->priv.eswitch = esw; + BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); return 0; abort: if (esw->work_queue) @@ -1832,6 +1872,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw_offloads_cleanup_reps(esw); mutex_destroy(&esw->mode_lock); mutex_destroy(&esw->state_lock); + WARN_ON(!xa_empty(&esw->offloads.vhca_map)); + xa_destroy(&esw->offloads.vhca_map); ida_destroy(&esw->offloads.vport_metadata_ida); mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); mutex_destroy(&esw->offloads.encap_tbl_lock); @@ -1899,7 +1941,8 @@ static bool is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num) { return vport_num == MLX5_VPORT_PF || - mlx5_eswitch_is_vf_vport(esw, vport_num); + mlx5_eswitch_is_vf_vport(esw, vport_num) || + mlx5_esw_is_sf_vport(esw, vport_num); } int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, @@ -2500,4 +2543,12 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); } +int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&esw->n_head, nb); +} +void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&esw->n_head, nb); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index cf87de94418f..fdf5c8c05c1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -36,6 +36,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> #include <linux/atomic.h> +#include <linux/xarray.h> #include <net/devlink.h> #include <linux/mlx5/device.h> #include <linux/mlx5/eswitch.h> @@ -43,6 +44,7 @@ #include <linux/mlx5/fs.h> #include "lib/mpfs.h" #include "lib/fs_chains.h" +#include "sf/sf.h" #include "en/tc_ct.h" #ifdef CONFIG_MLX5_ESWITCH @@ -159,6 +161,8 @@ struct mlx5_vport { struct devlink_port *dl_port; }; +struct mlx5_esw_indir_table; + struct mlx5_eswitch_fdb { union { struct legacy_fdb { @@ -175,9 +179,11 @@ struct mlx5_eswitch_fdb { struct mlx5_flow_namespace *ns; struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; + struct mlx5_flow_group *send_to_vport_meta_grp; struct mlx5_flow_group *peer_miss_grp; struct mlx5_flow_handle **peer_miss_rules; struct mlx5_flow_group *miss_grp; + struct mlx5_flow_handle **send_to_vport_meta_rules; struct mlx5_flow_handle *miss_rule_uni; struct mlx5_flow_handle *miss_rule_multi; int vlan_push_pop_refcount; @@ -189,6 +195,8 @@ struct mlx5_eswitch_fdb { struct mutex lock; } vports; + struct mlx5_esw_indir_table *indir; + } offloads; }; u32 flags; @@ -211,6 +219,7 @@ struct mlx5_esw_offload { struct mod_hdr_tbl mod_hdr; DECLARE_HASHTABLE(termtbl_tbl, 8); struct mutex termtbl_mutex; /* protects termtbl hash */ + struct xarray vhca_map; const struct mlx5_eswitch_rep_ops 
*rep_ops[NUM_REP_TYPES]; u8 inline_mode; atomic64_t num_flows; @@ -277,6 +286,7 @@ struct mlx5_eswitch { struct { u32 large_group_num; } params; + struct blocking_notifier_head n_head; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -385,12 +395,14 @@ enum mlx5_flow_match_level { enum { MLX5_ESW_DEST_ENCAP = BIT(0), MLX5_ESW_DEST_ENCAP_VALID = BIT(1), + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2), }; enum { MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0), MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1), MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2), + MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3), }; struct mlx5_esw_flow_attr { @@ -411,7 +423,9 @@ struct mlx5_esw_flow_attr { struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_core_dev *mdev; struct mlx5_termtbl_handle *termtbl; + int src_port_rewrite_act_id; } dests[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5_rx_tun_attr *rx_tun_attr; struct mlx5_pkt_reformat *decap_pkt_reformat; }; @@ -499,6 +513,40 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF; } +static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw) +{ + /* PF and VF vports indices start from 0 to max_vfs */ + return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev); +} + +static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw) +{ + return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev); +} + +static inline int +mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num) +{ + return vport_num - mlx5_sf_start_function_id(esw->dev) + + MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev); +} + +static inline u16 +mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx) +{ + return mlx5_sf_start_function_id(esw->dev) + idx - + (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev)); +} + +static inline bool +mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num) +{ + return mlx5_sf_supported(esw->dev) && + vport_num >= mlx5_sf_start_function_id(esw->dev) && + (vport_num < (mlx5_sf_start_function_id(esw->dev) + + mlx5_sf_max_functions(esw->dev))); +} + static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev) { return mlx5_core_is_ecpf_esw_manager(dev); @@ -527,6 +575,10 @@ static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw, if (vport_num == MLX5_VPORT_UPLINK) return mlx5_eswitch_uplink_idx(esw); + if (mlx5_esw_is_sf_vport(esw, vport_num)) + return mlx5_esw_sf_vport_num_to_index(esw, vport_num); + + /* PF and VF vports start from 0 to max_vfs */ return vport_num; } @@ -540,6 +592,12 @@ static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, if (index == mlx5_eswitch_uplink_idx(esw)) return MLX5_VPORT_UPLINK; + /* SF vports indices are after VFs and before ECPF */ + if (mlx5_sf_supported(esw->dev) && + index > mlx5_core_max_vfs(esw->dev)) + return mlx5_esw_sf_vport_index_to_num(esw, index); + + /* PF and VF vports start from 0 to max_vfs */ return index; } @@ -625,6 +683,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); for ((vport) = (nvfs); \ (vport) >= (esw)->first_host_vport; (vport)--) +#define mlx5_esw_for_each_sf_rep(esw, i, rep) \ + for ((i) = mlx5_esw_sf_start_idx(esw); \ + (rep) = &(esw)->offloads.vport_reps[(i)], \ + (i) < mlx5_esw_sf_end_idx(esw); (i++)) + struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink); struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); @@ -638,6 +701,10 
@@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 				 enum mlx5_eswitch_vport_event enabled_events);
 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
 
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+			  enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
 int esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
 					 struct mlx5_vport *vport);
 
@@ -656,6 +723,9 @@ esw_get_max_restore_tag(struct mlx5_eswitch *esw);
 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
 
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
+
 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
 			    enum mlx5_eswitch_vport_event enabled_events);
 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -667,6 +737,30 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
 int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
 struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+				      u16 vport_num, u32 sfnum);
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+				      u16 vport_num, u32 sfnum);
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+
+/**
+ * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
+ *
+ * @new_mode: New mode of eswitch.
+ */
+struct mlx5_esw_event_info {
+	u16 new_mode;
+};
+
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2f6a0ae20650..94cb0217b4f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -38,7 +38,8 @@
 #include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
 #include "eswitch.h"
+#include "esw/indir_table.h"
 #include "esw/acl/ofld.h"
 #include "rdma.h"
 #include "en.h"
 #include "fs_core.h"
@@ -257,7 +259,9 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
 
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 				  struct mlx5_flow_spec *spec,
-				  struct mlx5_esw_flow_attr *attr)
+				  struct mlx5_flow_attr *attr,
+				  struct mlx5_eswitch *src_esw,
+				  u16 vport)
 {
 	void *misc2;
 	void *misc;
@@ -266,10 +270,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
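The vhca_id helpers declared above are backed by the offloads.vhca_map xarray that this patch initializes with XA_FLAGS_ALLOC. One plausible shape for such a small-integer mapping, a sketch using xarray value entries rather than the driver's actual implementation:

	#include <linux/xarray.h>

	/* Map vhca_id -> vport_num with tagged value entries (no allocation). */
	static int vhca_map_set(struct xarray *xa, u16 vhca_id, u16 vport_num)
	{
		return xa_err(xa_store(xa, vhca_id, xa_mk_value(vport_num), GFP_KERNEL));
	}

	static int vhca_map_get(struct xarray *xa, u16 vhca_id, u16 *vport_num)
	{
		void *entry = xa_load(xa, vhca_id);

		if (!entry)
			return -ENOENT;
		*vport_num = xa_to_value(entry);
		return 0;
	}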
*/ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + if (mlx5_esw_indir_table_decap_vport(attr)) + vport = mlx5_esw_indir_table_decap_vport(attr); misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch, - attr->in_rep->vport)); + mlx5_eswitch_get_vport_metadata_for_match(src_esw, + vport)); misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, @@ -278,12 +284,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; } else { misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + MLX5_SET(fte_match_set_misc, misc, source_port, vport); if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, - MLX5_CAP_GEN(attr->in_mdev, vhca_id)); + MLX5_CAP_GEN(src_esw->dev, vhca_id)); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); @@ -295,6 +301,299 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, } } +static int +esw_setup_decap_indir(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec) +{ + struct mlx5_flow_table *ft; + + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE)) + return -EOPNOTSUPP; + + ft = mlx5_esw_indir_table_get(esw, attr, spec, + mlx5_esw_indir_table_decap_vport(attr), true); + return PTR_ERR_OR_ZERO(ft); +} + +static void +esw_cleanup_decap_indir(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr) +{ + if (mlx5_esw_indir_table_decap_vport(attr)) + mlx5_esw_indir_table_put(esw, attr, + mlx5_esw_indir_table_decap_vport(attr), + true); +} + +static int +esw_setup_ft_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + int i) +{ + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = attr->dest_ft; + + if (mlx5_esw_indir_table_decap_vport(attr)) + return esw_setup_decap_indir(esw, attr, spec); + return 0; +} + +static void +esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_fs_chains *chains, + int i) +{ + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = mlx5_chains_get_tc_end_ft(chains); +} + +static int +esw_setup_chain_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_fs_chains *chains, + u32 chain, u32 prio, u32 level, + int i) +{ + struct mlx5_flow_table *ft; + + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + ft = mlx5_chains_get_table(chains, chain, prio, level); + if (IS_ERR(ft)) + return PTR_ERR(ft); + + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = ft; + return 0; +} + +static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, + int from, int to) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_fs_chains *chains = esw_chains(esw); + int i; + + for (i = from; i < to; i++) + if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + mlx5_chains_put_table(chains, 0, 1, 
0); + else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + esw_attr->dests[i].mdev)) + mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport, + false); +} + +static bool +esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr) +{ + int i; + + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + return true; + return false; +} + +static int +esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, + struct mlx5_fs_chains *chains, + struct mlx5_flow_attr *attr, + int *i) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int j, err; + + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE)) + return -EOPNOTSUPP; + + for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { + err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i); + if (err) + goto err_setup_chain; + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat; + } + return 0; + +err_setup_chain: + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j); + return err; +} + +static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count); +} + +static bool +esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int i; + + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + esw_attr->dests[i].mdev)) + return true; + return false; +} + +static int +esw_setup_indir_table(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + bool ignore_flow_lvl, + int *i) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int j, err; + + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE)) + return -EOPNOTSUPP; + + for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { + if (ignore_flow_lvl) + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + + dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec, + esw_attr->dests[j].rep->vport, false); + if (IS_ERR(dest[*i].ft)) { + err = PTR_ERR(dest[*i].ft); + goto err_indir_tbl_get; + } + } + + if (mlx5_esw_indir_table_decap_vport(attr)) { + err = esw_setup_decap_indir(esw, attr, spec); + if (err) + goto err_indir_tbl_get; + } + + return 0; + +err_indir_tbl_get: + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j); + return err; +} + +static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count); + esw_cleanup_decap_indir(esw, attr); +} + +static void +esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level) +{ + mlx5_chains_put_table(chains, chain, prio, level); +} + +static void +esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr 
*esw_attr,
+		     int attr_idx, int dest_idx, bool pkt_reformat)
+{
+	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+	dest[dest_idx].vport.vhca_id =
+		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+		if (pkt_reformat) {
+			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+		}
+		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+	}
+}
+
+static int
+esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+		      int i)
+{
+	int j;
+
+	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
+		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
+	return i;
+}
+
+static int
+esw_setup_dests(struct mlx5_flow_destination *dest,
+		struct mlx5_flow_act *flow_act,
+		struct mlx5_eswitch *esw,
+		struct mlx5_flow_attr *attr,
+		struct mlx5_flow_spec *spec,
+		int *i)
+{
+	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	struct mlx5_fs_chains *chains = esw_chains(esw);
+	int err = 0;
+
+	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
+	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
+	    mlx5_eswitch_vport_match_metadata_enabled(esw))
+		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+
+	if (attr->dest_ft) {
+		err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+		(*i)++;
+	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+		(*i)++;
+	} else if (attr->dest_chain) {
+		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+					   1, 0, *i);
+		(*i)++;
+	} else if (esw_is_indir_table(esw, attr)) {
+		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
+		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
+	} else {
+		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+	}
+
+	return err;
+}
+
+static void
+esw_cleanup_dests(struct mlx5_eswitch *esw,
+		  struct mlx5_flow_attr *attr)
+{
+	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	struct mlx5_fs_chains *chains = esw_chains(esw);
+
+	if (attr->dest_ft) {
+		esw_cleanup_decap_indir(esw, attr);
+	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+		if (attr->dest_chain)
+			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
+		else if (esw_is_indir_table(esw, attr))
+			esw_cleanup_indir_table(esw, attr);
+		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+			esw_cleanup_chain_src_port_rewrite(esw, attr);
+	}
+}
+
 struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
@@ -308,7 +607,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	struct mlx5_vport_tbl_attr fwd_attr;
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_table *fdb;
-	int j, i = 0;
+	int i = 0;
 
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -329,50 +628,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 		}
 	}
 
+	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
+
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		struct
mlx5_flow_table *ft; - - if (attr->dest_ft) { - flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[i].ft = attr->dest_ft; - i++; - } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { - flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[i].ft = mlx5_chains_get_tc_end_ft(chains); - i++; - } else if (attr->dest_chain) { - flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; - ft = mlx5_chains_get_table(chains, attr->dest_chain, - 1, 0); - if (IS_ERR(ft)) { - rule = ERR_CAST(ft); - goto err_create_goto_table; - } - - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[i].ft = ft; - i++; - } else { - for (j = esw_attr->split_count; j < esw_attr->out_count; j++) { - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport.num = esw_attr->dests[j].rep->vport; - dest[i].vport.vhca_id = - MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id); - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) - dest[i].vport.flags |= - MLX5_FLOW_DEST_VPORT_VHCA_ID; - if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) { - flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - flow_act.pkt_reformat = - esw_attr->dests[j].pkt_reformat; - dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; - dest[i].vport.pkt_reformat = - esw_attr->dests[j].pkt_reformat; - } - i++; - } + int err; + + err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i); + if (err) { + rule = ERR_PTR(err); + goto err_create_goto_table; } } @@ -407,15 +671,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, fdb = attr->ft; if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT)) - mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); + mlx5_eswitch_set_rule_source_port(esw, spec, attr, + esw_attr->in_mdev->priv.eswitch, + esw_attr->in_rep->vport); } if (IS_ERR(fdb)) { rule = ERR_CAST(fdb); goto err_esw_get; } - mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr); - if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr, &flow_act, dest, i); @@ -434,8 +698,7 @@ err_add_rule: else if (attr->chain || attr->prio) mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); err_esw_get: - if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain) - mlx5_chains_put_table(chains, attr->dest_chain, 1, 0); + esw_cleanup_dests(esw, attr); err_create_goto_table: return rule; } @@ -453,7 +716,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, struct mlx5_flow_table *fast_fdb; struct mlx5_flow_table *fwd_fdb; struct mlx5_flow_handle *rule; - int i; + int i, err = 0; fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0); if (IS_ERR(fast_fdb)) { @@ -472,22 +735,26 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; for (i = 0; i < esw_attr->split_count; i++) { - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport.num = esw_attr->dests[i].rep->vport; - dest[i].vport.vhca_id = - MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id); - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) - dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; - if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) { - dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; - dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat; + if (esw_is_indir_table(esw, attr)) + err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i); + else if 
(esw_is_chain_src_port_rewrite(esw, esw_attr)) + err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr, + &i); + else + esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); + + if (err) { + rule = ERR_PTR(err); + goto err_chain_src_rewrite; } } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[i].ft = fwd_fdb; i++; - mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); + mlx5_eswitch_set_rule_source_port(esw, spec, attr, + esw_attr->in_mdev->priv.eswitch, + esw_attr->in_rep->vport); if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; @@ -495,13 +762,16 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); - if (IS_ERR(rule)) - goto add_err; + if (IS_ERR(rule)) { + i = esw_attr->split_count; + goto err_chain_src_rewrite; + } atomic64_inc(&esw->offloads.num_flows); return rule; -add_err: +err_chain_src_rewrite: + esw_put_dest_tables_loop(esw, attr, 0, i); esw_vport_tbl_put(esw, &fwd_attr); err_get_fwd: mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); @@ -542,13 +812,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, if (fwd_rule) { esw_vport_tbl_put(esw, &fwd_attr); mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); + esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count); } else { if (split) esw_vport_tbl_put(esw, &fwd_attr); else if (attr->chain || attr->prio) mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); - if (attr->dest_chain) - mlx5_chains_put_table(chains, attr->dest_chain, 1, 0); + esw_cleanup_dests(esw, attr); } } @@ -810,6 +1080,81 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) mlx5_del_flow_rules(rule); } +static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules; + int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num; + + if (!num_vfs || !flows) + return; + + mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) + mlx5_del_flow_rules(flows[i++]); + + kvfree(flows); +} + +static int +mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) +{ + int num_vfs, vport_num, rule_idx = 0, err = 0; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_handle **flows; + struct mlx5_flow_spec *spec; + + num_vfs = esw->esw_funcs.num_vfs; + flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); + if (!flows) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto alloc_err; + } + + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1, + ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + + mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) { + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); + dest.vport.num = vport_num; + + flow_rule = 
mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n", + rule_idx, PTR_ERR(flow_rule)); + goto rule_err; + } + flows[rule_idx++] = flow_rule; + } + + esw->fdb_table.offloads.send_to_vport_meta_rules = flows; + kvfree(spec); + return 0; + +rule_err: + while (--rule_idx >= 0) + mlx5_del_flow_rules(flows[rule_idx]); + kvfree(spec); +alloc_err: + kvfree(flows); + return err; +} + static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw) { return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & @@ -1292,11 +1637,11 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; + int num_vfs, table_size, ix, err = 0; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; u32 flags = 0, *flow_group_in; - int table_size, ix, err = 0; struct mlx5_flow_group *g; void *match_criteria; u8 *dmac; @@ -1322,7 +1667,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) } table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + - MLX5_ESW_MISS_FLOWS + esw->total_vports; + MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs; /* create the slow path fdb with encap set, so further table instances * can be created at run time while VFs are probed if the FW allows that. @@ -1370,6 +1715,38 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) } esw->fdb_table.offloads.send_to_vport_grp = g; + /* meta send to vport */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS_2); + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); + MLX5_SET(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + + num_vfs = esw->esw_funcs.num_vfs; + if (num_vfs) { + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1); + ix += num_vfs; + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n", + err); + goto send_vport_meta_err; + } + esw->fdb_table.offloads.send_to_vport_meta_grp = g; + + err = mlx5_eswitch_add_send_to_vport_meta_rules(esw); + if (err) + goto meta_rule_err; + } + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { /* create peer esw miss group */ memset(flow_group_in, 0, inlen); @@ -1437,6 +1814,11 @@ miss_err: if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); peer_miss_err: + mlx5_eswitch_del_send_to_vport_meta_rules(esw); +meta_rule_err: + if (esw->fdb_table.offloads.send_to_vport_meta_grp) + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); +send_vport_meta_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: esw_chains_destroy(esw, esw_chains(esw)); @@ -1458,7 +1840,10 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); 
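	/* Teardown runs in reverse of creation: the miss rules and the
	 * per-VF send-to-vport meta rules are removed first, then the
	 * flow groups that contained them are destroyed.
	 */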
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); + mlx5_eswitch_del_send_to_vport_meta_rules(esw); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); + if (esw->fdb_table.offloads.send_to_vport_meta_grp) + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); @@ -1800,11 +2185,22 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, esw->offloads.rep_ops[rep_type]->unload(rep); } +static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int i; + + mlx5_esw_for_each_sf_rep(esw, i, rep) + __esw_offloads_unload_rep(esw, rep, rep_type); +} + static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; int i; + __unload_reps_sf_vport(esw, rep_type); + mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs) __esw_offloads_unload_rep(esw, rep, rep_type); @@ -1822,7 +2218,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) __esw_offloads_unload_rep(esw, rep, rep_type); } -static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) +int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_eswitch_rep *rep; int rep_type; @@ -1846,7 +2242,7 @@ err_reps: return err; } -static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) +void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_eswitch_rep *rep; int rep_type; @@ -2171,12 +2567,20 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) static int esw_offloads_steering_init(struct mlx5_eswitch *esw) { + struct mlx5_esw_indir_table *indir; int err; memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.vports.lock); hash_init(esw->fdb_table.offloads.vports.table); + indir = mlx5_esw_indir_table_init(); + if (IS_ERR(indir)) { + err = PTR_ERR(indir); + goto create_indir_err; + } + esw->fdb_table.offloads.indir = indir; + err = esw_create_uplink_offloads_acl_tables(esw); if (err) goto create_acl_err; @@ -2208,6 +2612,8 @@ create_restore_err: create_offloads_err: esw_destroy_uplink_offloads_acl_tables(esw); create_acl_err: + mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); +create_indir_err: mutex_destroy(&esw->fdb_table.offloads.vports.lock); return err; } @@ -2219,6 +2625,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) esw_destroy_restore_table(esw); esw_destroy_offloads_table(esw); esw_destroy_uplink_offloads_acl_tables(esw); + mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); mutex_destroy(&esw->fdb_table.offloads.vports.lock); } @@ -2824,3 +3231,126 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); } EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); + +int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, + u16 vport_num, u32 sfnum) +{ + int err; + + err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE); + if (err) + return err; + + err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum); + if (err) + goto devlink_err; + + err = 
mlx5_esw_offloads_rep_load(esw, vport_num); + if (err) + goto rep_err; + return 0; + +rep_err: + mlx5_esw_devlink_sf_port_unregister(esw, vport_num); +devlink_err: + mlx5_esw_vport_disable(esw, vport_num); + return err; +} + +void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) +{ + mlx5_esw_offloads_rep_unload(esw, vport_num); + mlx5_esw_devlink_sf_port_unregister(esw, vport_num); + mlx5_esw_vport_disable(esw, vport_num); +} + +static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id) +{ + int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *query_ctx; + void *hca_caps; + int err; + + *vhca_id = 0; + if (mlx5_esw_is_manager_vport(esw, vport_num) || + !MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) + return -EPERM; + + query_ctx = kzalloc(query_out_sz, GFP_KERNEL); + if (!query_ctx) + return -ENOMEM; + + err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx); + if (err) + goto out_free; + + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); + *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id); + +out_free: + kfree(query_ctx); + return err; +} + +int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num) +{ + u16 *old_entry, *vhca_map_entry, vhca_id; + int err; + + err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); + if (err) { + esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n", + vport_num, err); + return err; + } + + vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL); + if (!vhca_map_entry) + return -ENOMEM; + + *vhca_map_entry = vport_num; + old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL); + if (xa_is_err(old_entry)) { + kfree(vhca_map_entry); + return xa_err(old_entry); + } + kfree(old_entry); + return 0; +} + +void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num) +{ + u16 *vhca_map_entry, vhca_id; + int err; + + err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); + if (err) + esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n", + vport_num, err); + + vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id); + kfree(vhca_map_entry); +} + +int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num) +{ + u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id); + + if (!res) + return -ENOENT; + + *vport_num = *res; + return 0; +} + +u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, + u16 vport_num) +{ + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); + + if (WARN_ON_ONCE(IS_ERR(vport))) + return 0; + + return vport->metadata; +} +EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 3ce17c3d7a00..d713ae24d6b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -23,7 +23,7 @@ static int temp_warn(struct notifier_block *, unsigned long, void *); static int port_module(struct notifier_block *, unsigned long, void *); static int pcie_core(struct notifier_block *, unsigned long, void *); -/* handler which forwards the event to events->nh, driver notifiers */ +/* handler which forwards the event to events->fw_nh, driver notifiers */ static int forward_event(struct notifier_block *, unsigned long, void *); static struct mlx5_nb events_nbs_ref[] = { @@ -55,12 +55,14 @@ struct mlx5_events { 
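/* In the events.c changes below, the old single notifier head is split
 * in two: fw_nh stays an atomic chain carrying firmware/EQE events,
 * while the new sw_nh is a blocking chain for software events posted
 * through mlx5_blocking_notifier_call_chain().
 */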
 	struct mlx5_core_dev *dev;
 	struct workqueue_struct *wq;
 	struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
-	/* driver notifier chain */
-	struct atomic_notifier_head nh;
+	/* driver notifier chain for fw events */
+	struct atomic_notifier_head fw_nh;
 	/* port module events stats */
 	struct mlx5_pme_stats pme_stats;
 	/*pcie_core*/
 	struct work_struct pcie_core_work;
+	/* driver notifier chain for sw events */
+	struct blocking_notifier_head sw_nh;
 };
 
 static const char *eqe_type_str(u8 type)
@@ -110,6 +112,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_CMD";
 	case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
 		return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
+	case MLX5_EVENT_TYPE_VHCA_STATE_CHANGE:
+		return "MLX5_EVENT_TYPE_VHCA_STATE_CHANGE";
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 	case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -331,7 +335,7 @@ static int forward_event(struct notifier_block *nb, unsigned long event, void *d
 	mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
 		      eqe_type_str(eqe->type), eqe->sub_type);
-	atomic_notifier_call_chain(&events->nh, event, data);
+	atomic_notifier_call_chain(&events->fw_nh, event, data);
 	return NOTIFY_OK;
 }
 
@@ -342,7 +346,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
 	if (!events)
 		return -ENOMEM;
 
-	ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
+	ATOMIC_INIT_NOTIFIER_HEAD(&events->fw_nh);
 	events->dev = dev;
 	dev->priv.events = events;
 	events->wq = create_singlethread_workqueue("mlx5_events");
@@ -351,6 +355,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
 		return -ENOMEM;
 	}
 	INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+	BLOCKING_INIT_NOTIFIER_HEAD(&events->sw_nh);
 	return 0;
 }
 
@@ -383,11 +388,14 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
 	flush_workqueue(events->wq);
 }
 
+/* This API is used only for processing and forwarding firmware
+ * events to mlx5 consumers.
+ */
 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
 {
 	struct mlx5_events *events = dev->priv.events;
 
-	return atomic_notifier_chain_register(&events->nh, nb);
+	return atomic_notifier_chain_register(&events->fw_nh, nb);
 }
 EXPORT_SYMBOL(mlx5_notifier_register);
 
@@ -395,11 +403,41 @@ int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *n
 {
 	struct mlx5_events *events = dev->priv.events;
 
-	return atomic_notifier_chain_unregister(&events->nh, nb);
+	return atomic_notifier_chain_unregister(&events->fw_nh, nb);
 }
 EXPORT_SYMBOL(mlx5_notifier_unregister);
 
 int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
 {
-	return atomic_notifier_call_chain(&events->nh, event, data);
+	return atomic_notifier_call_chain(&events->fw_nh, event, data);
+}
+
+/* This API is used only for processing and forwarding driver-specific
+ * events to mlx5 consumers.
+ */ +int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + struct mlx5_events *events = dev->priv.events; + + return blocking_notifier_chain_register(&events->sw_nh, nb); +} + +int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + struct mlx5_events *events = dev->priv.events; + + return blocking_notifier_chain_unregister(&events->sw_nh, nb); +} + +int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, + void *data) +{ + struct mlx5_events *events = dev->priv.events; + + return blocking_notifier_call_chain(&events->sw_nh, event, data); +} + +void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work) +{ + queue_work(dev->priv.events->wq, work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index cc67366495b0..22bee4990232 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec { struct ida halloc; }; -static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) +bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index db88eb4c49e3..8931b5584477 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); const struct mlx5_flow_cmds * mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); void mlx5_fpga_ipsec_build_fs_cmds(void); +bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev); #else static inline const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) @@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) } static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}; +static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; } #endif /* CONFIG_MLX5_FPGA_IPSEC */ #endif /* __MLX5_FPGA_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index ee4d86c1f436..66ad599bd488 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -105,8 +105,8 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 6 +/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */ +#define KERNEL_NIC_PRIO_NUM_LEVELS 7 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) @@ -572,7 +572,7 @@ static void del_hw_fte(struct fs_node *node) mlx5_core_warn(dev, "flow steering can't delete fte in index %d of flow group id %d\n", fte->index, fg->id); - node->active = 0; + node->active = false; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 54523bed16cd..0c32c485eb58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -190,6 +190,16 
@@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) return true; } +static void enter_error_state(struct mlx5_core_dev *dev, bool force) +{ + if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */ + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + mlx5_cmd_flush(dev); + } + + mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); +} + void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { bool err_detected = false; @@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) goto unlock; } - if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */ - dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - mlx5_cmd_flush(dev); - } - - mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); + enter_error_state(dev, force); unlock: mutex_unlock(&dev->intf_state_mutex); } @@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); - mlx5_enter_error_state(dev, false); + enter_error_state(dev, false); if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { if (mlx5_health_try_recover(dev)) mlx5_core_err(dev, "health recovery failed\n"); @@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t) mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); dev->priv.health.fatal_error = fatal_error; print_health_info(dev); + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mlx5_trigger_health_work(dev); - goto out; + return; } count = ioread32be(health->health_counter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 97b5fcb1f406..1eeca45cfcdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -72,23 +72,15 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, } /* Called directly after IPoIB netdevice was created to initialize SW structs */ -int mlx5i_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - int err; - - err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); - if (err) - return err; + netif_carrier_off(netdev); mlx5e_set_netdev_mtu_boundaries(priv); netdev->mtu = netdev->max_mtu; - mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params, - netdev->mtu); + mlx5e_build_nic_params(priv, NULL, netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); mlx5e_timestamp_init(priv); @@ -112,7 +104,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev, /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ void mlx5i_cleanup(struct mlx5e_priv *priv) { - mlx5e_netdev_cleanup(priv->netdev, priv); + mlx5e_priv_cleanup(priv); } static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) @@ -753,7 +745,14 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num, goto destroy_ht; } - prof->init(mdev, netdev, prof, ipriv); + err = mlx5e_priv_init(epriv, netdev, mdev); + if (err) + goto destroy_mdev_resources; + + epriv->profile = prof; + epriv->ppriv = ipriv; + + prof->init(mdev, netdev); err = mlx5e_attach_netdev(epriv); if (err) @@ -777,6 +776,7 @@ detach: prof->cleanup(epriv); if 
(ipriv->sub_interface) return err; +destroy_mdev_resources: mlx5e_destroy_mdev_resources(mdev); destroy_ht: mlx5i_pkey_qpn_ht_cleanup(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index b79dc1e28c41..99d46fda9f82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -87,10 +87,7 @@ void mlx5i_dev_cleanup(struct net_device *dev); int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Parent profile functions */ -int mlx5i_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv); +int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5i_cleanup(struct mlx5e_priv *priv); int mlx5i_update_nic_rx(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 7163d9f6c4a6..3d0a18a0bed4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -276,14 +276,12 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu) /* Called directly after IPoIB netdevice was created to initialize SW structs */ static int mlx5i_pkey_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) + struct net_device *netdev) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); int err; - err = mlx5i_init(mdev, netdev, profile, ppriv); + err = mlx5i_init(mdev, netdev); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index c70c1f0ca0c1..b0e129d0f6d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -67,39 +67,68 @@ enum { MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), }; -static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, - struct ptp_system_timestamp *sts) +static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev) +{ + return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev)); +} + +static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify); +} + +static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size) +{ + u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {}; + + if (!MLX5_CAP_MCAM_REG(dev, mtutc)) + return -EOPNOTSUPP; + + return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out), + MLX5_REG_MTUTC, 0, 1); +} + +static u64 mlx5_read_time(struct mlx5_core_dev *dev, + struct ptp_system_timestamp *sts, + bool real_time) { u32 timer_h, timer_h1, timer_l; - timer_h = ioread32be(&dev->iseg->internal_timer_h); + timer_h = ioread32be(real_time ? &dev->iseg->real_time_h : + &dev->iseg->internal_timer_h); ptp_read_system_prets(sts); - timer_l = ioread32be(&dev->iseg->internal_timer_l); + timer_l = ioread32be(real_time ? &dev->iseg->real_time_l : + &dev->iseg->internal_timer_l); ptp_read_system_postts(sts); - timer_h1 = ioread32be(&dev->iseg->internal_timer_h); + timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h : + &dev->iseg->internal_timer_h); if (timer_h != timer_h1) { /* wrap around */ ptp_read_system_prets(sts); - timer_l = ioread32be(&dev->iseg->internal_timer_l); + timer_l = ioread32be(real_time ? 
&dev->iseg->real_time_l : + &dev->iseg->internal_timer_l); ptp_read_system_postts(sts); } - return (u64)timer_l | (u64)timer_h1 << 32; + return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) : + (u64)timer_l | (u64)timer_h1 << 32; } static u64 read_internal_timer(const struct cyclecounter *cc) { - struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles); + struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles); + struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer); struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); - return mlx5_read_internal_timer(mdev, NULL) & cc->mask; + return mlx5_read_time(mdev, NULL, false) & cc->mask; } static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) { struct mlx5_ib_clock_info *clock_info = mdev->clock_info; struct mlx5_clock *clock = &mdev->clock; + struct mlx5_timer *timer; u32 sign; if (!clock_info) @@ -109,10 +138,11 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) smp_store_mb(clock_info->sign, sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING); - clock_info->cycles = clock->tc.cycle_last; - clock_info->mult = clock->cycles.mult; - clock_info->nsec = clock->tc.nsec; - clock_info->frac = clock->tc.frac; + timer = &clock->timer; + clock_info->cycles = timer->tc.cycle_last; + clock_info->mult = timer->cycles.mult; + clock_info->nsec = timer->tc.nsec; + clock_info->frac = timer->tc.frac; smp_store_release(&clock_info->sign, sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2); @@ -151,92 +181,184 @@ static void mlx5_timestamp_overflow(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5_core_dev *mdev; + struct mlx5_timer *timer; struct mlx5_clock *clock; unsigned long flags; - clock = container_of(dwork, struct mlx5_clock, overflow_work); + timer = container_of(dwork, struct mlx5_timer, overflow_work); + clock = container_of(timer, struct mlx5_clock, timer); mdev = container_of(clock, struct mlx5_core_dev, clock); + write_seqlock_irqsave(&clock->lock, flags); - timecounter_read(&clock->tc); + timecounter_read(&timer->tc); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); - schedule_delayed_work(&clock->overflow_work, clock->overflow_period); + schedule_delayed_work(&timer->overflow_work, timer->overflow_period); +} + +static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev, + const struct timespec64 *ts) +{ + u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; + + if (!mlx5_modify_mtutc_allowed(mdev)) + return 0; + + if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX || + ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC) + return -EINVAL; + + MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE); + MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec); + MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec); + + return mlx5_set_mtutc(mdev, in, sizeof(in)); } static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); - u64 ns = timespec64_to_ns(ts); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; + int err; mdev = container_of(clock, struct mlx5_core_dev, clock); + err = mlx5_ptp_settime_real_time(mdev, ts); + if (err) + return err; + write_seqlock_irqsave(&clock->lock, flags); - timecounter_init(&clock->tc, &clock->cycles, ns); + timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts)); mlx5_update_clock_info_page(mdev); 
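	/* mlx5_update_clock_info_page() above republishes the timecounter
	 * state to the page shared with mlx5_ib; it runs inside the same
	 * seqlock write section as the timecounter update itself.
	 */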
write_sequnlock_irqrestore(&clock->lock, flags); return 0; } +static +struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev, + struct ptp_system_timestamp *sts) +{ + struct timespec64 ts; + u64 time; + + time = mlx5_read_time(mdev, sts, true); + ts = ns_to_timespec64(time); + return ts; +} + static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; u64 cycles, ns; mdev = container_of(clock, struct mlx5_core_dev, clock); + if (mlx5_real_time_mode(mdev)) { + *ts = mlx5_ptp_gettimex_real_time(mdev, sts); + goto out; + } + write_seqlock_irqsave(&clock->lock, flags); - cycles = mlx5_read_internal_timer(mdev, sts); - ns = timecounter_cyc2time(&clock->tc, cycles); + cycles = mlx5_read_time(mdev, sts, false); + ns = timecounter_cyc2time(&timer->tc, cycles); write_sequnlock_irqrestore(&clock->lock, flags); - *ts = ns_to_timespec64(ns); - +out: return 0; } +static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta) +{ + u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; + + if (!mlx5_modify_mtutc_allowed(mdev)) + return 0; + + /* HW time adjustment range is s16. If out of range, settime instead */ + if (delta < S16_MIN || delta > S16_MAX) { + struct timespec64 ts; + s64 ns; + + ts = mlx5_ptp_gettimex_real_time(mdev, NULL); + ns = timespec64_to_ns(&ts) + delta; + ts = ns_to_timespec64(ns); + return mlx5_ptp_settime_real_time(mdev, &ts); + } + + MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME); + MLX5_SET(mtutc_reg, in, time_adjustment, delta); + + return mlx5_set_mtutc(mdev, in, sizeof(in)); +} + static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; + int err; mdev = container_of(clock, struct mlx5_core_dev, clock); + + err = mlx5_ptp_adjtime_real_time(mdev, delta); + if (err) + return err; write_seqlock_irqsave(&clock->lock, flags); - timecounter_adjtime(&clock->tc, delta); + timecounter_adjtime(&timer->tc, delta); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); return 0; } +static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq) +{ + u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; + + if (!mlx5_modify_mtutc_allowed(mdev)) + return 0; + + MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC); + MLX5_SET(mtutc_reg, in, freq_adjustment, freq); + + return mlx5_set_mtutc(mdev, in, sizeof(in)); +} + static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; int neg_adj = 0; u32 diff; u64 adj; + int err; + mdev = container_of(clock, struct mlx5_core_dev, clock); + err = mlx5_ptp_adjfreq_real_time(mdev, delta); + if (err) + return err; if (delta < 0) { neg_adj = 1; delta = -delta; } - adj = clock->nominal_c_mult; + adj = timer->nominal_c_mult; adj *= delta; diff = div_u64(adj, 1000000000ULL); - mdev = container_of(clock, struct mlx5_core_dev, clock); write_seqlock_irqsave(&clock->lock, flags); - timecounter_read(&clock->tc); - clock->cycles.mult = neg_adj ? 
clock->nominal_c_mult - diff : - clock->nominal_c_mult + diff; + timecounter_read(&timer->tc); + timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff : + timer->nominal_c_mult + diff; mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); @@ -305,6 +427,45 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, MLX5_EVENT_MODE_REPETETIVE & on); } +static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns) +{ + struct mlx5_clock *clock = &mdev->clock; + u64 cycles_now, cycles_delta; + u64 nsec_now, nsec_delta; + struct mlx5_timer *timer; + unsigned long flags; + + timer = &clock->timer; + + cycles_now = mlx5_read_time(mdev, NULL, false); + write_seqlock_irqsave(&clock->lock, flags); + nsec_now = timecounter_cyc2time(&timer->tc, cycles_now); + nsec_delta = target_ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << timer->cycles.shift, + timer->cycles.mult); + write_sequnlock_irqrestore(&clock->lock, flags); + + return cycles_now + cycles_delta; +} + +static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, + s64 sec, u32 nsec) +{ + struct timespec64 ts; + s64 target_ns; + + ts.tv_sec = sec; + ts.tv_nsec = nsec; + target_ns = timespec64_to_ns(&ts); + + return find_target_cycles(mdev, target_ns); +} + +static u64 perout_conf_real_time(s64 sec, u32 nsec) +{ + return (u64)nsec | (u64)sec << 32; +} + static int mlx5_perout_configure(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) @@ -314,11 +475,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - u64 nsec_now, nsec_delta, time_stamp = 0; - u64 cycles_now, cycles_delta; struct timespec64 ts; - unsigned long flags; u32 field_select = 0; + u64 time_stamp = 0; u8 pin_mode = 0; u8 pattern = 0; int pin = -1; @@ -335,12 +494,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, - rq->perout.index); - if (pin < 0) - return -EBUSY; - + field_select = MLX5_MTPPS_FS_ENABLE; if (on) { + bool rt_mode = mlx5_real_time_mode(mdev); + u32 nsec; + s64 sec; + + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); + if (pin < 0) + return -EBUSY; + pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; @@ -350,23 +513,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, if ((ns >> 1) != 500000000LL) return -EINVAL; - ts.tv_sec = rq->perout.start.sec; - ts.tv_nsec = rq->perout.start.nsec; - ns = timespec64_to_ns(&ts); - cycles_now = mlx5_read_internal_timer(mdev, NULL); - write_seqlock_irqsave(&clock->lock, flags); - nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); - nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, - clock->cycles.mult); - write_sequnlock_irqrestore(&clock->lock, flags); - time_stamp = cycles_now + cycles_delta; - field_select = MLX5_MTPPS_FS_PIN_MODE | - MLX5_MTPPS_FS_PATTERN | - MLX5_MTPPS_FS_ENABLE | - MLX5_MTPPS_FS_TIME_STAMP; - } else { - field_select = MLX5_MTPPS_FS_ENABLE; + nsec = rq->perout.start.nsec; + sec = rq->perout.start.sec; + + if (rt_mode && sec > U32_MAX) + return -EINVAL; + + time_stamp = rt_mode ? 
perout_conf_real_time(sec, nsec) : + perout_conf_internal_timer(mdev, sec, nsec); + + field_select |= MLX5_MTPPS_FS_PIN_MODE | + MLX5_MTPPS_FS_PATTERN | + MLX5_MTPPS_FS_TIME_STAMP; } MLX5_SET(mtpps_reg, in, pin, pin); @@ -537,25 +695,50 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev) clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); } +static void ts_next_sec(struct timespec64 *ts) +{ + ts->tv_sec += 1; + ts->tv_nsec = 0; +} + +static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev, + struct mlx5_clock *clock) +{ + bool rt_mode = mlx5_real_time_mode(mdev); + struct timespec64 ts; + s64 target_ns; + + if (rt_mode) + ts = mlx5_ptp_gettimex_real_time(mdev, NULL); + else + mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL); + + ts_next_sec(&ts); + target_ns = timespec64_to_ns(&ts); + + return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) : + find_target_cycles(mdev, target_ns); +} + static int mlx5_pps_event(struct notifier_block *nb, unsigned long type, void *data) { struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb); struct ptp_clock_event ptp_event; - u64 cycles_now, cycles_delta; - u64 nsec_now, nsec_delta, ns; struct mlx5_eqe *eqe = data; int pin = eqe->data.pps.pin; struct mlx5_core_dev *mdev; - struct timespec64 ts; unsigned long flags; + u64 ns; mdev = container_of(clock, struct mlx5_core_dev, clock); switch (clock->ptp_info.pin_config[pin].func) { case PTP_PF_EXTTS: ptp_event.index = pin; - ptp_event.timestamp = + ptp_event.timestamp = mlx5_real_time_mode(mdev) ? + mlx5_real_time_cyc2time(clock, + be64_to_cpu(eqe->data.pps.time_stamp)) : mlx5_timecounter_cyc2time(clock, be64_to_cpu(eqe->data.pps.time_stamp)); if (clock->pps_info.enabled) { @@ -569,17 +752,9 @@ static int mlx5_pps_event(struct notifier_block *nb, ptp_clock_event(clock->ptp, &ptp_event); break; case PTP_PF_PEROUT: - mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL); - cycles_now = mlx5_read_internal_timer(mdev, NULL); - ts.tv_sec += 1; - ts.tv_nsec = 0; - ns = timespec64_to_ns(&ts); + ns = perout_conf_next_event_timer(mdev, clock); write_seqlock_irqsave(&clock->lock, flags); - nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); - nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, - clock->cycles.mult); - clock->pps_info.start[pin] = cycles_now + cycles_delta; + clock->pps_info.start[pin] = ns; write_sequnlock_irqrestore(&clock->lock, flags); schedule_work(&clock->pps_info.out_work); break; @@ -591,29 +766,32 @@ static int mlx5_pps_event(struct notifier_block *nb, return NOTIFY_OK; } -void mlx5_init_clock(struct mlx5_core_dev *mdev) +static void mlx5_timecounter_init(struct mlx5_core_dev *mdev) { struct mlx5_clock *clock = &mdev->clock; - u64 overflow_cycles; - u64 ns; - u64 frac = 0; + struct mlx5_timer *timer = &clock->timer; u32 dev_freq; dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); - if (!dev_freq) { - mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); - return; - } - seqlock_init(&clock->lock); - clock->cycles.read = read_internal_timer; - clock->cycles.shift = MLX5_CYCLES_SHIFT; - clock->cycles.mult = clocksource_khz2mult(dev_freq, - clock->cycles.shift); - clock->nominal_c_mult = clock->cycles.mult; - clock->cycles.mask = CLOCKSOURCE_MASK(41); - - timecounter_init(&clock->tc, &clock->cycles, + timer->cycles.read = read_internal_timer; + timer->cycles.shift = MLX5_CYCLES_SHIFT; + timer->cycles.mult = clocksource_khz2mult(dev_freq, + timer->cycles.shift); + 
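	/* nominal_c_mult, set next, preserves the unscaled multiplier so
	 * mlx5_ptp_adjfreq() can derive adjustments from the calibrated
	 * baseline instead of an already-adjusted cycles.mult.
	 */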
timer->nominal_c_mult = timer->cycles.mult; + timer->cycles.mask = CLOCKSOURCE_MASK(41); + + timecounter_init(&timer->tc, &timer->cycles, ktime_to_ns(ktime_get_real())); +} + +static void mlx5_init_overflow_period(struct mlx5_clock *clock) +{ + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); + struct mlx5_ib_clock_info *clock_info = mdev->clock_info; + struct mlx5_timer *timer = &clock->timer; + u64 overflow_cycles; + u64 frac = 0; + u64 ns; /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least twice every wrap around. @@ -622,32 +800,77 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) * multiplied by clock multiplier where the result doesn't exceed * 64bits. */ - overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); - overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); + overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult); + overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3)); - ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, + ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles, frac, &frac); do_div(ns, NSEC_PER_SEC / HZ); - clock->overflow_period = ns; + timer->overflow_period = ns; - mdev->clock_info = - (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); - if (mdev->clock_info) { - mdev->clock_info->nsec = clock->tc.nsec; - mdev->clock_info->cycles = clock->tc.cycle_last; - mdev->clock_info->mask = clock->cycles.mask; - mdev->clock_info->mult = clock->nominal_c_mult; - mdev->clock_info->shift = clock->cycles.shift; - mdev->clock_info->frac = clock->tc.frac; - mdev->clock_info->overflow_period = clock->overflow_period; + INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow); + if (timer->overflow_period) + schedule_delayed_work(&timer->overflow_work, 0); + else + mlx5_core_warn(mdev, + "invalid overflow period, overflow_work is not scheduled\n"); + + if (clock_info) + clock_info->overflow_period = timer->overflow_period; +} + +static void mlx5_init_clock_info(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + struct mlx5_ib_clock_info *info; + struct mlx5_timer *timer; + + mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); + if (!mdev->clock_info) { + mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n"); + return; + } + + info = mdev->clock_info; + timer = &clock->timer; + + info->nsec = timer->tc.nsec; + info->cycles = timer->tc.cycle_last; + info->mask = timer->cycles.mask; + info->mult = timer->nominal_c_mult; + info->shift = timer->cycles.shift; + info->frac = timer->tc.frac; +} + +static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + + mlx5_timecounter_init(mdev); + mlx5_init_clock_info(mdev); + mlx5_init_overflow_period(clock); + clock->ptp_info = mlx5_ptp_clock_info; + + if (mlx5_real_time_mode(mdev)) { + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + mlx5_ptp_settime(&clock->ptp_info, &ts); + } +} + +void mlx5_init_clock(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { + mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + return; } + seqlock_init(&clock->lock); + mlx5_init_timer_clock(mdev); INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); - INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow); - if (clock->overflow_period) - 
schedule_delayed_work(&clock->overflow_work, 0); - else - mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n"); /* Configure the PHC */ clock->ptp_info = mlx5_ptp_clock_info; @@ -684,7 +907,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) } cancel_work_sync(&clock->pps_info.out_work); - cancel_delayed_work_sync(&clock->overflow_work); + cancel_delayed_work_sync(&clock->timer.overflow_work); if (mdev->clock_info) { free_page((unsigned long)mdev->clock_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index 31600924bdc3..a12c7da618a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -33,6 +33,24 @@ #ifndef __LIB_CLOCK_H__ #define __LIB_CLOCK_H__ +static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev) +{ + u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format); + + return (rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME || + rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME); +} + +static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev) +{ + u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format); + + return (sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME || + sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME); +} + +typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64); + #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void mlx5_init_clock(struct mlx5_core_dev *mdev); void mlx5_cleanup_clock(struct mlx5_core_dev *mdev); @@ -45,17 +63,27 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, u64 timestamp) { + struct mlx5_timer *timer = &clock->timer; unsigned int seq; u64 nsec; do { seq = read_seqbegin(&clock->lock); - nsec = timecounter_cyc2time(&clock->tc, timestamp); + nsec = timecounter_cyc2time(&timer->tc, timestamp); } while (read_seqretry(&clock->lock, seq)); return ns_to_ktime(nsec); } +#define REAL_TIME_TO_NS(hi, low) (((u64)hi) * NSEC_PER_SEC + ((u64)low)) + +static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock, + u64 timestamp) +{ + u64 time = REAL_TIME_TO_NS(timestamp >> 32, timestamp & 0xFFFFFFFF); + + return ns_to_ktime(time); +} #else static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {} static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {} @@ -69,6 +97,12 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, { return 0; } + +static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock, + u64 timestamp) +{ + return 0; +} #endif #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index 947f346bdc2d..381325b4a863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -141,9 +141,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains) u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) { - if (!mlx5_chains_prios_supported(chains)) - return 1; - if (mlx5_chains_ignore_flow_level_supported(chains)) return UINT_MAX; @@ -541,13 +538,13 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_handle *miss_rule = NULL; + struct mlx5_flow_handle *miss_rule; 
struct mlx5_flow_group *miss_group; struct mlx5_flow_table *next_ft; struct mlx5_flow_table *ft; - struct prio *prio_s = NULL; struct fs_chain *chain_s; struct list_head *pos; + struct prio *prio_s; u32 *flow_group_in; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index ca6f2fc39ea0..c568896cfb23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -73,6 +73,9 @@ #include "ecpf.h" #include "lib/hv_vhca.h" #include "diag/rsc_dump.h" +#include "sf/vhca_event.h" +#include "sf/dev/dev.h" +#include "sf/sf.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -82,7 +85,6 @@ unsigned int mlx5_core_debug_mask; module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); -#define MLX5_DEFAULT_PROF 2 static unsigned int prof_sel = MLX5_DEFAULT_PROF; module_param_named(prof_sel, prof_sel, uint, 0444); MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); @@ -235,8 +237,8 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); snprintf(string + strlen(string), remaining_size, "%u.%u.%u", - (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff), - (u16)(LINUX_VERSION_CODE & 0xffff)); + LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, + LINUX_VERSION_SUBLEVEL); /*Send the command*/ MLX5_SET(set_driver_version_in, in, opcode, @@ -567,6 +569,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) if (MLX5_CAP_GEN_MAX(dev, mkey_by_name)) MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1); + mlx5_vhca_state_cap_handle(dev, set_hca_cap); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } @@ -884,6 +888,24 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) goto err_eswitch_cleanup; } + err = mlx5_vhca_event_init(dev); + if (err) { + mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err); + goto err_fpga_cleanup; + } + + err = mlx5_sf_hw_table_init(dev); + if (err) { + mlx5_core_err(dev, "Failed to init SF HW table %d\n", err); + goto err_sf_hw_table_cleanup; + } + + err = mlx5_sf_table_init(dev); + if (err) { + mlx5_core_err(dev, "Failed to init SF table %d\n", err); + goto err_sf_table_cleanup; + } + dev->dm = mlx5_dm_create(dev); if (IS_ERR(dev->dm)) mlx5_core_warn(dev, "Failed to init device memory%d\n", err); @@ -894,6 +916,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) return 0; +err_sf_table_cleanup: + mlx5_sf_hw_table_cleanup(dev); +err_sf_hw_table_cleanup: + mlx5_vhca_event_cleanup(dev); +err_fpga_cleanup: + mlx5_fpga_cleanup(dev); err_eswitch_cleanup: mlx5_eswitch_cleanup(dev->priv.eswitch); err_sriov_cleanup: @@ -925,6 +953,9 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_hv_vhca_destroy(dev->hv_vhca); mlx5_fw_tracer_destroy(dev->tracer); mlx5_dm_cleanup(dev); + mlx5_sf_table_cleanup(dev); + mlx5_sf_hw_table_cleanup(dev); + mlx5_vhca_event_cleanup(dev); mlx5_fpga_cleanup(dev); mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_sriov_cleanup(dev); @@ -1129,6 +1160,14 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_sriov; } + mlx5_vhca_event_start(dev); + + err = mlx5_sf_hw_table_create(dev); + if (err) { + mlx5_core_err(dev, "sf table create failed %d\n", err); + 
goto err_vhca; + } + err = mlx5_ec_init(dev); if (err) { mlx5_core_err(dev, "Failed to init embedded CPU\n"); @@ -1141,11 +1180,16 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_sriov; } + mlx5_sf_dev_table_create(dev); + return 0; err_sriov: mlx5_ec_cleanup(dev); err_ec: + mlx5_sf_hw_table_destroy(dev); +err_vhca: + mlx5_vhca_event_stop(dev); mlx5_cleanup_fs(dev); err_fs: mlx5_accel_tls_cleanup(dev); @@ -1171,8 +1215,11 @@ err_irq_table: static void mlx5_unload(struct mlx5_core_dev *dev) { + mlx5_sf_dev_table_destroy(dev); mlx5_sriov_detach(dev); mlx5_ec_cleanup(dev); + mlx5_sf_hw_table_destroy(dev); + mlx5_vhca_event_stop(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); mlx5_accel_tls_cleanup(dev); @@ -1283,7 +1330,7 @@ out: mutex_unlock(&dev->intf_state_mutex); } -static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) +int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) { struct mlx5_priv *priv = &dev->priv; int err; @@ -1305,6 +1352,8 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) priv->dbg_root = debugfs_create_dir(dev_name(dev->device), mlx5_debugfs_root); + INIT_LIST_HEAD(&priv->traps); + err = mlx5_health_init(dev); if (err) goto err_health_init; @@ -1333,7 +1382,7 @@ err_health_init: return err; } -static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) +void mlx5_mdev_uninit(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; @@ -1396,7 +1445,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); pci_save_state(pdev); - devlink_reload_enable(devlink); + if (!mlx5_core_is_mp_slave(dev)) + devlink_reload_enable(devlink); return 0; err_load_one: @@ -1676,6 +1726,10 @@ static int __init init(void) if (err) goto err_debug; + err = mlx5_sf_driver_register(); + if (err) + goto err_sf; + #ifdef CONFIG_MLX5_CORE_EN err = mlx5e_init(); if (err) { @@ -1686,6 +1740,8 @@ static int __init init(void) return 0; +err_sf: + pci_unregister_driver(&mlx5_core_driver); err_debug: mlx5_unregister_debugfs(); return err; @@ -1696,6 +1752,7 @@ static void __exit cleanup(void) #ifdef CONFIG_MLX5_CORE_EN mlx5e_cleanup(); #endif + mlx5_sf_driver_unregister(); pci_unregister_driver(&mlx5_core_driver); mlx5_unregister_debugfs(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 0a0302ce7144..efe403c7e354 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -117,6 +117,8 @@ enum mlx5_semaphore_space_address { MLX5_SEMAPHORE_SW_RESET = 0x20, }; +#define MLX5_DEFAULT_PROF 2 + int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init(struct mlx5_core_dev *dev); @@ -176,6 +178,7 @@ struct cpumask * mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx); struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table); int mlx5_irq_get_num_comp(struct mlx5_irq_table *table); +struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); int mlx5_events_init(struct mlx5_core_dev *dev); void mlx5_events_cleanup(struct mlx5_core_dev *dev); @@ -257,6 +260,17 @@ enum { u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); +static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev) +{ + return dev->coredev_type == MLX5_COREDEV_SF; +} 
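/* SF core devices ride on their parent PF for interrupts: the
 * mlx5_core_is_sf() checks in pci_irq.c make IRQ table init/create a
 * no-op, and mlx5_irq_table_get() returns the parent's table instead.
 */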
+ +int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx); +void mlx5_mdev_uninit(struct mlx5_core_dev *dev); void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup); int mlx5_load_one(struct mlx5_core_dev *dev, bool boot); + +int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out); + +void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 9eb51f06d3ae..50af84e76fb6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -56,6 +56,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, mkey->size = MLX5_GET64(mkc, mkc, len); mkey->key |= mlx5_idx_to_mkey(mkey_index); mkey->pd = MLX5_GET(mkc, mkc, pd); + init_waitqueue_head(&mkey->wait); mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 6fd974920394..a61e09aff152 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -30,6 +30,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev) { struct mlx5_irq_table *irq_table; + if (mlx5_core_is_sf(dev)) + return 0; + irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL); if (!irq_table) return -ENOMEM; @@ -40,6 +43,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev) void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev) { + if (mlx5_core_is_sf(dev)) + return; + kvfree(dev->priv.irq_table); } @@ -268,6 +274,9 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) int nvec; int err; + if (mlx5_core_is_sf(dev)) + return 0; + nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + MLX5_IRQ_VEC_COMP_BASE; nvec = min_t(int, nvec, num_eqs); @@ -319,6 +328,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) struct mlx5_irq_table *table = dev->priv.irq_table; int i; + if (mlx5_core_is_sf(dev)) + return; + /* free_irq requires that affinity and rmap will be cleared * before calling it. This is why there is asymmetry with set_rmap * which should be called after alloc_irq but before request_irq. @@ -332,3 +344,11 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) kfree(table->irq); } +struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev) +{ +#ifdef CONFIG_MLX5_SF + if (mlx5_core_is_sf(dev)) + return dev->priv.parent_mdev->priv.irq_table; +#endif + return dev->priv.irq_table; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c new file mode 100644 index 000000000000..0777be24a307 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ + +#include "qos.h" + +#define MLX5_QOS_DEFAULT_DWRR_UID 0 + +bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev) +{ + if (!MLX5_CAP_GEN(mdev, qos)) + return false; + if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling)) + return false; + if (!MLX5_CAP_QOS(mdev, nic_bw_share)) + return false; + if (!MLX5_CAP_QOS(mdev, nic_rate_limit)) + return false; + return true; +} + +int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev) +{ + return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group); +} + +int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); + + return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, + sched_ctx, id); +} + +int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + void *attr; + + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); + + attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); + MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR); + + return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, + sched_ctx, id); +} + +int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id) +{ + return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id); +} + +int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 id) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + u32 bitmask = 0; + + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); + + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + + return mlx5_modify_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, + sched_ctx, id, bitmask); +} + +int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id) +{ + return mlx5_destroy_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, id); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h new file mode 100644 index 000000000000..125e4e47e6f7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5_QOS_H +#define __MLX5_QOS_H + +#include "mlx5_core.h" + +#define MLX5_DEBUG_QOS_MASK BIT(4) + +#define qos_err(mdev, fmt, ...) \ + mlx5_core_err(mdev, "QoS: " fmt, ##__VA_ARGS__) +#define qos_warn(mdev, fmt, ...) \ + mlx5_core_warn(mdev, "QoS: " fmt, ##__VA_ARGS__) +#define qos_dbg(mdev, fmt, ...) 
\ + mlx5_core_dbg_mask(mdev, MLX5_DEBUG_QOS_MASK, "QoS: " fmt, ##__VA_ARGS__) + +bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev); +int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); + +int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id); +int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id); +int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id); +int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share, + u32 max_avg_bw, u32 id); +int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c new file mode 100644 index 000000000000..a8d75c2f0275 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#include <linux/mlx5/driver.h> +#include "priv.h" + +int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_sf_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_sf_in)] = {}; + + MLX5_SET(alloc_sf_in, in, opcode, MLX5_CMD_OP_ALLOC_SF); + MLX5_SET(alloc_sf_in, in, function_id, function_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id) +{ + u32 out[MLX5_ST_SZ_DW(dealloc_sf_out)] = {}; + u32 in[MLX5_ST_SZ_DW(dealloc_sf_in)] = {}; + + MLX5_SET(dealloc_sf_in, in, opcode, MLX5_CMD_OP_DEALLOC_SF); + MLX5_SET(dealloc_sf_in, in, function_id, function_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id) +{ + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; + + MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); + MLX5_SET(enable_hca_in, in, function_id, func_id); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id) +{ + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; + + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + MLX5_SET(disable_hca_in, in, function_id, func_id); + MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c new file mode 100644 index 000000000000..b265f27b2166 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> +#include "mlx5_core.h" +#include "dev.h" +#include "sf/vhca_event.h" +#include "sf/sf.h" +#include "sf/mlx5_ifc_vhca_event.h" +#include "ecpf.h" + +struct mlx5_sf_dev_table { + struct xarray devices; + unsigned int max_sfs; + phys_addr_t base_address; + u64 sf_bar_length; + struct notifier_block nb; + struct mlx5_core_dev *dev; +}; + +static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev); +} + +bool
mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev) +{ + struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; + + if (!mlx5_sf_dev_supported(dev)) + return false; + + return !xa_empty(&table->devices); +} + +static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev); + struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum); +} +static DEVICE_ATTR_RO(sfnum); + +static struct attribute *sf_device_attrs[] = { + &dev_attr_sfnum.attr, + NULL, +}; + +static const struct attribute_group sf_attr_group = { + .attrs = sf_device_attrs, +}; + +static const struct attribute_group *sf_attr_groups[2] = { + &sf_attr_group, + NULL +}; + +static void mlx5_sf_dev_release(struct device *device) +{ + struct auxiliary_device *adev = container_of(device, struct auxiliary_device, dev); + struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + + mlx5_adev_idx_free(adev->id); + kfree(sf_dev); +} + +static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev) +{ + auxiliary_device_delete(&sf_dev->adev); + auxiliary_device_uninit(&sf_dev->adev); +} + +static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum) +{ + struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; + struct mlx5_sf_dev *sf_dev; + struct pci_dev *pdev; + int err; + int id; + + id = mlx5_adev_idx_alloc(); + if (id < 0) { + err = id; + goto add_err; + } + + sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL); + if (!sf_dev) { + mlx5_adev_idx_free(id); + err = -ENOMEM; + goto add_err; + } + pdev = dev->pdev; + sf_dev->adev.id = id; + sf_dev->adev.name = MLX5_SF_DEV_ID_NAME; + sf_dev->adev.dev.release = mlx5_sf_dev_release; + sf_dev->adev.dev.parent = &pdev->dev; + sf_dev->adev.dev.groups = sf_attr_groups; + sf_dev->sfnum = sfnum; + sf_dev->parent_mdev = dev; + + if (!table->max_sfs) { + mlx5_adev_idx_free(id); + kfree(sf_dev); + err = -EOPNOTSUPP; + goto add_err; + } + sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length); + + err = auxiliary_device_init(&sf_dev->adev); + if (err) { + mlx5_adev_idx_free(id); + kfree(sf_dev); + goto add_err; + } + + err = auxiliary_device_add(&sf_dev->adev); + if (err) { + put_device(&sf_dev->adev.dev); + goto add_err; + } + + err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL); + if (err) + goto xa_err; + return; + +xa_err: + mlx5_sf_dev_remove(sf_dev); +add_err: + mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n", + sf_index, sfnum, err); +} + +static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev, u16 sf_index) +{ + struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; + + xa_erase(&table->devices, sf_index); + mlx5_sf_dev_remove(sf_dev); +} + +static int +mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data) +{ + struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb); + const struct mlx5_vhca_state_event *event = data; + struct mlx5_sf_dev *sf_dev; + u16 sf_index; + + sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id); + sf_dev = xa_load(&table->devices, sf_index); + switch (event->new_vhca_state) { + case MLX5_VHCA_STATE_ALLOCATED: + if (sf_dev) + mlx5_sf_dev_del(table->dev, sf_dev, sf_index); + break; + case MLX5_VHCA_STATE_TEARDOWN_REQUEST: + if (sf_dev) + mlx5_sf_dev_del(table->dev, sf_dev, 
sf_index); + else + mlx5_core_err(table->dev, + "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n", + sf_index, event->sw_function_id); + break; + case MLX5_VHCA_STATE_ACTIVE: + if (!sf_dev) + mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id); + break; + default: + break; + } + return 0; +} + +static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) +{ + struct mlx5_core_dev *dev = table->dev; + u16 max_functions; + u16 function_id; + int err = 0; + bool ecpu; + int i; + + max_functions = mlx5_sf_max_functions(dev); + function_id = MLX5_CAP_GEN(dev, sf_base_id); + ecpu = mlx5_read_embedded_cpu(dev); + /* Arm the vhca context as the vhca event notifier */ + for (i = 0; i < max_functions; i++) { + err = mlx5_vhca_event_arm(dev, function_id, ecpu); + if (err) + return err; + + function_id++; + } + return 0; +} + +void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_dev_table *table; + unsigned int max_sfs; + int err; + + if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev)) + return; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) { + err = -ENOMEM; + goto table_err; + } + + table->nb.notifier_call = mlx5_sf_dev_state_change_handler; + table->dev = dev; + if (MLX5_CAP_GEN(dev, max_num_sf)) + max_sfs = MLX5_CAP_GEN(dev, max_num_sf); + else + max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf); + table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12); + table->base_address = pci_resource_start(dev->pdev, 2); + table->max_sfs = max_sfs; + xa_init(&table->devices); + dev->priv.sf_dev_table = table; + + err = mlx5_vhca_event_notifier_register(dev, &table->nb); + if (err) + goto vhca_err; + err = mlx5_sf_dev_vhca_arm_all(table); + if (err) + goto arm_err; + mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs); + return; + +arm_err: + mlx5_vhca_event_notifier_unregister(dev, &table->nb); +vhca_err: + table->max_sfs = 0; + kfree(table); + dev->priv.sf_dev_table = NULL; +table_err: + mlx5_core_err(dev, "SF DEV table create err = %d\n", err); +} + +static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table) +{ + struct mlx5_sf_dev *sf_dev; + unsigned long index; + + xa_for_each(&table->devices, index, sf_dev) { + xa_erase(&table->devices, index); + mlx5_sf_dev_remove(sf_dev); + } +} + +void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; + + if (!table) + return; + + mlx5_vhca_event_notifier_unregister(dev, &table->nb); + + /* Now that event handler is not running, it is safe to destroy + * the sf device without race. 
+ */ + mlx5_sf_dev_destroy_all(table); + + WARN_ON(!xa_empty(&table->devices)); + kfree(table); + dev->priv.sf_dev_table = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h new file mode 100644 index 000000000000..4de02902aef1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#ifndef __MLX5_SF_DEV_H__ +#define __MLX5_SF_DEV_H__ + +#ifdef CONFIG_MLX5_SF + +#include <linux/auxiliary_bus.h> + +#define MLX5_SF_DEV_ID_NAME "sf" + +struct mlx5_sf_dev { + struct auxiliary_device adev; + struct mlx5_core_dev *parent_mdev; + struct mlx5_core_dev *mdev; + phys_addr_t bar_base_addr; + u32 sfnum; +}; + +void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev); +void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev); + +int mlx5_sf_driver_register(void); +void mlx5_sf_driver_unregister(void); + +bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev); + +#else + +static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) +{ +} + +static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) +{ +} + +static inline int mlx5_sf_driver_register(void) +{ + return 0; +} + +static inline void mlx5_sf_driver_unregister(void) +{ +} + +static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev) +{ + return false; +} + +#endif + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c new file mode 100644 index 000000000000..c4bf555c25ea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> +#include "mlx5_core.h" +#include "dev.h" +#include "devlink.h" + +static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) +{ + struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + struct mlx5_core_dev *mdev; + struct devlink *devlink; + int err; + + devlink = mlx5_devlink_alloc(); + if (!devlink) + return -ENOMEM; + + mdev = devlink_priv(devlink); + mdev->device = &adev->dev; + mdev->pdev = sf_dev->parent_mdev->pdev; + mdev->bar_addr = sf_dev->bar_base_addr; + mdev->iseg_base = sf_dev->bar_base_addr; + mdev->coredev_type = MLX5_COREDEV_SF; + mdev->priv.parent_mdev = sf_dev->parent_mdev; + mdev->priv.adev_idx = adev->id; + sf_dev->mdev = mdev; + + err = mlx5_mdev_init(mdev, MLX5_DEFAULT_PROF); + if (err) { + mlx5_core_warn(mdev, "mlx5_mdev_init err=%d\n", err); + goto mdev_err; + } + + mdev->iseg = ioremap(mdev->iseg_base, sizeof(*mdev->iseg)); + if (!mdev->iseg) { + mlx5_core_warn(mdev, "remap error\n"); + err = -ENOMEM; + goto remap_err; + } + + err = mlx5_load_one(mdev, true); + if (err) { + mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err); + goto load_one_err; + } + return 0; + +load_one_err: + iounmap(mdev->iseg); +remap_err: + mlx5_mdev_uninit(mdev); +mdev_err: + mlx5_devlink_free(devlink); + return err; +} + +static void mlx5_sf_dev_remove(struct auxiliary_device *adev) +{ + struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + struct devlink *devlink; + + devlink = priv_to_devlink(sf_dev->mdev); + mlx5_unload_one(sf_dev->mdev, true); + iounmap(sf_dev->mdev->iseg); +
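/* Removal mirrors probe in reverse order: the device has been unloaded and its init segment unmapped above; now uninit the mdev and free the devlink instance that probe allocated first. */ +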
mlx5_mdev_uninit(sf_dev->mdev); + mlx5_devlink_free(devlink); +} + +static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev) +{ + struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + + mlx5_unload_one(sf_dev->mdev, false); +} + +static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = { + { .name = MLX5_ADEV_NAME "." MLX5_SF_DEV_ID_NAME, }, + { }, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5_sf_dev_id_table); + +static struct auxiliary_driver mlx5_sf_driver = { + .name = MLX5_SF_DEV_ID_NAME, + .probe = mlx5_sf_dev_probe, + .remove = mlx5_sf_dev_remove, + .shutdown = mlx5_sf_dev_shutdown, + .id_table = mlx5_sf_dev_id_table, +}; + +int mlx5_sf_driver_register(void) +{ + return auxiliary_driver_register(&mlx5_sf_driver); +} + +void mlx5_sf_driver_unregister(void) +{ + auxiliary_driver_unregister(&mlx5_sf_driver); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c new file mode 100644 index 000000000000..c2ba41bb7a70 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -0,0 +1,556 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#include <linux/mlx5/driver.h> +#include "eswitch.h" +#include "priv.h" +#include "sf/dev/dev.h" +#include "mlx5_ifc_vhca_event.h" +#include "vhca_event.h" +#include "ecpf.h" + +struct mlx5_sf { + struct devlink_port dl_port; + unsigned int port_index; + u16 id; + u16 hw_fn_id; + u16 hw_state; +}; + +struct mlx5_sf_table { + struct mlx5_core_dev *dev; /* To refer from notifier context. */ + struct xarray port_indices; /* port index based lookup. */ + refcount_t refcount; + struct completion disable_complete; + struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. 
*/ + struct notifier_block esw_nb; + struct notifier_block vhca_nb; + u8 ecpu: 1; +}; + +static struct mlx5_sf * +mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index) +{ + return xa_load(&table->port_indices, port_index); +} + +static struct mlx5_sf * +mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id) +{ + unsigned long index; + struct mlx5_sf *sf; + + xa_for_each(&table->port_indices, index, sf) { + if (sf->hw_fn_id == fn_id) + return sf; + } + return NULL; +} + +static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL); +} + +static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + xa_erase(&table->port_indices, sf->port_index); +} + +static struct mlx5_sf * +mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack) +{ + unsigned int dl_port_index; + struct mlx5_sf *sf; + u16 hw_fn_id; + int id_err; + int err; + + id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum); + if (id_err < 0) { + err = id_err; + goto id_err; + } + + sf = kzalloc(sizeof(*sf), GFP_KERNEL); + if (!sf) { + err = -ENOMEM; + goto alloc_err; + } + sf->id = id_err; + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id); + dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id); + sf->port_index = dl_port_index; + sf->hw_fn_id = hw_fn_id; + sf->hw_state = MLX5_VHCA_STATE_ALLOCATED; + + err = mlx5_sf_id_insert(table, sf); + if (err) + goto insert_err; + + return sf; + +insert_err: + kfree(sf); +alloc_err: + mlx5_sf_hw_table_sf_free(table->dev, id_err); +id_err: + if (err == -EEXIST) + NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose a different sfnum"); + return ERR_PTR(err); +} + +static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + mlx5_sf_id_erase(table, sf); + mlx5_sf_hw_table_sf_free(table->dev, sf->id); + kfree(sf); +} + +static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table = dev->priv.sf_table; + + if (!table) + return NULL; + + return refcount_inc_not_zero(&table->refcount) ?
table : NULL; +} + +static void mlx5_sf_table_put(struct mlx5_sf_table *table) +{ + if (refcount_dec_and_test(&table->refcount)) + complete(&table->disable_complete); +} + +static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state) +{ + switch (hw_state) { + case MLX5_VHCA_STATE_ACTIVE: + case MLX5_VHCA_STATE_IN_USE: + case MLX5_VHCA_STATE_TEARDOWN_REQUEST: + return DEVLINK_PORT_FN_STATE_ACTIVE; + case MLX5_VHCA_STATE_INVALID: + case MLX5_VHCA_STATE_ALLOCATED: + default: + return DEVLINK_PORT_FN_STATE_INACTIVE; + } +} + +static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state) +{ + switch (hw_state) { + case MLX5_VHCA_STATE_IN_USE: + case MLX5_VHCA_STATE_TEARDOWN_REQUEST: + return DEVLINK_PORT_FN_OPSTATE_ATTACHED; + case MLX5_VHCA_STATE_INVALID: + case MLX5_VHCA_STATE_ALLOCATED: + case MLX5_VHCA_STATE_ACTIVE: + default: + return DEVLINK_PORT_FN_OPSTATE_DETACHED; + } +} + +static bool mlx5_sf_is_active(const struct mlx5_sf *sf) +{ + return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE; +} + +int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + struct mlx5_sf *sf; + int err = 0; + + table = mlx5_sf_table_try_get(dev); + if (!table) + return -EOPNOTSUPP; + + sf = mlx5_sf_lookup_by_index(table, dl_port->index); + if (!sf) { + err = -EOPNOTSUPP; + goto sf_err; + } + mutex_lock(&table->sf_state_lock); + *state = mlx5_sf_to_devlink_state(sf->hw_state); + *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state); + mutex_unlock(&table->sf_state_lock); +sf_err: + mlx5_sf_table_put(table); + return err; +} + +static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf) +{ + int err; + + if (mlx5_sf_is_active(sf)) + return 0; + if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) + return -EINVAL; + + err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id); + if (err) + return err; + + sf->hw_state = MLX5_VHCA_STATE_ACTIVE; + return 0; +} + +static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf) +{ + int err; + + if (!mlx5_sf_is_active(sf)) + return 0; + + err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id); + if (err) + return err; + + sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST; + return 0; +} + +static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, + struct mlx5_sf *sf, + enum devlink_port_fn_state state) +{ + int err = 0; + + mutex_lock(&table->sf_state_lock); + if (state == mlx5_sf_to_devlink_state(sf->hw_state)) + goto out; + if (state == DEVLINK_PORT_FN_STATE_ACTIVE) + err = mlx5_sf_activate(dev, sf); + else if (state == DEVLINK_PORT_FN_STATE_INACTIVE) + err = mlx5_sf_deactivate(dev, sf); + else + err = -EINVAL; +out: + mutex_unlock(&table->sf_state_lock); + return err; +} + +int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + struct mlx5_sf *sf; + int err; + + table = mlx5_sf_table_try_get(dev); + if (!table) { + NL_SET_ERR_MSG_MOD(extack, + "Port state set is only supported in eswitch switchdev mode or SF ports are disabled."); + return -EOPNOTSUPP; + } + sf = mlx5_sf_lookup_by_index(table, dl_port->index); + if (!sf) { + err = -ENODEV; + goto 
out; + } + + err = mlx5_sf_state_set(dev, table, sf, state); +out: + mlx5_sf_table_put(table); + return err; +} + +static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + unsigned int *new_port_index) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_sf *sf; + u16 hw_fn_id; + int err; + + sf = mlx5_sf_alloc(table, new_attr->sfnum, extack); + if (IS_ERR(sf)) + return PTR_ERR(sf); + + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id); + err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum); + if (err) + goto esw_err; + *new_port_index = sf->port_index; + return 0; + +esw_err: + mlx5_sf_free(table, sf); + return err; +} + +static int +mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack) +{ + if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) { + NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition"); + return -EOPNOTSUPP; + } + if (new_attr->port_index_valid) { + NL_SET_ERR_MSG_MOD(extack, + "Driver does not support user defined port index assignment"); + return -EOPNOTSUPP; + } + if (!new_attr->sfnum_valid) { + NL_SET_ERR_MSG_MOD(extack, + "User must provide unique sfnum. Driver does not support auto assignment"); + return -EOPNOTSUPP; + } + if (new_attr->controller_valid && new_attr->controller) { + NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported"); + return -EOPNOTSUPP; + } + if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied"); + return -EOPNOTSUPP; + } + return 0; +} + +int mlx5_devlink_sf_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + unsigned int *new_port_index) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + int err; + + err = mlx5_sf_new_check_attr(dev, new_attr, extack); + if (err) + return err; + + table = mlx5_sf_table_try_get(dev); + if (!table) { + NL_SET_ERR_MSG_MOD(extack, + "Port add is only supported in eswitch switchdev mode or SF ports are disabled."); + return -EOPNOTSUPP; + } + err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index); + mlx5_sf_table_put(table); + return err; +} + +static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) { + mlx5_sf_free(table, sf); + } else if (mlx5_sf_is_active(sf)) { + /* Even if it's active, it is treated as in_use because, by the time + * it is disabled here, it may be getting used. So it is safe to + * always look for the event to ensure that it is recycled only after + * firmware gives confirmation that it is detached by the driver.
+ */ + mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id); + mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); + kfree(sf); + } else { + mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); + kfree(sf); + } +} + +int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_sf_table *table; + struct mlx5_sf *sf; + int err = 0; + + table = mlx5_sf_table_try_get(dev); + if (!table) { + NL_SET_ERR_MSG_MOD(extack, + "Port del is only supported in eswitch switchdev mode or SF ports are disabled."); + return -EOPNOTSUPP; + } + sf = mlx5_sf_lookup_by_index(table, port_index); + if (!sf) { + err = -ENODEV; + goto sf_err; + } + + mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id); + mlx5_sf_id_erase(table, sf); + + mutex_lock(&table->sf_state_lock); + mlx5_sf_dealloc(table, sf); + mutex_unlock(&table->sf_state_lock); +sf_err: + mlx5_sf_table_put(table); + return err; +} + +static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state) +{ + if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE) + return true; + + if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE) + return true; + + if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST && + new_state == MLX5_VHCA_STATE_ALLOCATED) + return true; + + return false; +} + +static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) +{ + struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb); + const struct mlx5_vhca_state_event *event = data; + bool update = false; + struct mlx5_sf *sf; + + table = mlx5_sf_table_try_get(table->dev); + if (!table) + return 0; + + mutex_lock(&table->sf_state_lock); + sf = mlx5_sf_lookup_by_function_id(table, event->function_id); + if (!sf) + goto sf_err; + + /* When a driver is attached to or detached from a function, an event + * notifies such a state change. + */ + update = mlx5_sf_state_update_check(sf, event->new_vhca_state); + if (update) + sf->hw_state = event->new_vhca_state; +sf_err: + mutex_unlock(&table->sf_state_lock); + mlx5_sf_table_put(table); + return 0; +} + +static void mlx5_sf_table_enable(struct mlx5_sf_table *table) +{ + if (!mlx5_sf_max_functions(table->dev)) + return; + + init_completion(&table->disable_complete); + refcount_set(&table->refcount, 1); +} + +static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) +{ + struct mlx5_eswitch *esw = table->dev->priv.eswitch; + unsigned long index; + struct mlx5_sf *sf; + + /* At this point, no new user commands can start and no vhca event can + * arrive. It is safe to destroy all user created SFs. + */ + xa_for_each(&table->port_indices, index, sf) { + mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id); + mlx5_sf_id_erase(table, sf); + mlx5_sf_dealloc(table, sf); + } +} + +static void mlx5_sf_table_disable(struct mlx5_sf_table *table) +{ + if (!mlx5_sf_max_functions(table->dev)) + return; + + if (!refcount_read(&table->refcount)) + return; + + /* Balances with refcount_set; drop the reference so that new user cmd cannot start + * and new vhca event handler cannot run.
+ */ + mlx5_sf_table_put(table); + wait_for_completion(&table->disable_complete); + + mlx5_sf_deactivate_all(table); +} + +static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb); + const struct mlx5_esw_event_info *mode = data; + + switch (mode->new_mode) { + case MLX5_ESWITCH_OFFLOADS: + mlx5_sf_table_enable(table); + break; + case MLX5_ESWITCH_NONE: + mlx5_sf_table_disable(table); + break; + default: + break; + } + + return 0; +} + +static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) +{ + return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev); +} + +int mlx5_sf_table_init(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table; + int err; + + if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev)) + return 0; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + mutex_init(&table->sf_state_lock); + table->dev = dev; + xa_init(&table->port_indices); + dev->priv.sf_table = table; + refcount_set(&table->refcount, 0); + table->esw_nb.notifier_call = mlx5_sf_esw_event; + err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb); + if (err) + goto reg_err; + + table->vhca_nb.notifier_call = mlx5_sf_vhca_event; + err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); + if (err) + goto vhca_err; + + return 0; + +vhca_err: + mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); +reg_err: + mutex_destroy(&table->sf_state_lock); + kfree(table); + dev->priv.sf_table = NULL; + return err; +} + +void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table = dev->priv.sf_table; + + if (!table) + return; + + mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); + mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); + WARN_ON(refcount_read(&table->refcount)); + mutex_destroy(&table->sf_state_lock); + WARN_ON(!xa_empty(&table->port_indices)); + kfree(table); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c new file mode 100644 index 000000000000..58b6be0b03d7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd */ +#include <linux/mlx5/driver.h> +#include "vhca_event.h" +#include "priv.h" +#include "sf.h" +#include "mlx5_ifc_vhca_event.h" +#include "ecpf.h" + +struct mlx5_sf_hw { + u32 usr_sfnum; + u8 allocated: 1; + u8 pending_delete: 1; +}; + +struct mlx5_sf_hw_table { + struct mlx5_core_dev *dev; + struct mlx5_sf_hw *sfs; + int max_local_functions; + u8 ecpu: 1; + struct mutex table_lock; /* Serializes sf deletion and vhca state change handler.
*/ + struct notifier_block vhca_nb; +}; + +u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id) +{ + return sw_id + mlx5_sf_start_function_id(dev); +} + +static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id) +{ + return hw_id - mlx5_sf_start_function_id(dev); +} + +int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + int sw_id = -ENOSPC; + u16 hw_fn_id; + int err; + int i; + + if (!table->max_local_functions) + return -EOPNOTSUPP; + + mutex_lock(&table->table_lock); + /* Check if sf with same sfnum already exists or not. */ + for (i = 0; i < table->max_local_functions; i++) { + if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) { + err = -EEXIST; + goto exist_err; + } + } + + /* Find the free entry and allocate the entry from the array */ + for (i = 0; i < table->max_local_functions; i++) { + if (!table->sfs[i].allocated) { + table->sfs[i].usr_sfnum = usr_sfnum; + table->sfs[i].allocated = true; + sw_id = i; + break; + } + } + if (sw_id == -ENOSPC) { + err = -ENOSPC; + goto exist_err; + } + + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); + err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id); + if (err) + goto err; + + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); + if (err) + goto vhca_err; + + mutex_unlock(&table->table_lock); + return sw_id; + +vhca_err: + mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); +err: + table->sfs[i].allocated = false; +exist_err: + mutex_unlock(&table->table_lock); + return err; +} + +static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + u16 hw_fn_id; + + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id); + mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + table->sfs[id].allocated = false; + table->sfs[id].pending_delete = false; +} + +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + mutex_lock(&table->table_lock); + _mlx5_sf_hw_id_free(dev, id); + mutex_unlock(&table->table_lock); +} + +void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; + u16 hw_fn_id; + u8 state; + int err; + + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); + mutex_lock(&table->table_lock); + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); + if (err) + goto err; + state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); + if (state == MLX5_VHCA_STATE_ALLOCATED) { + mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + table->sfs[id].allocated = false; + } else { + table->sfs[id].pending_delete = true; + } +err: + mutex_unlock(&table->table_lock); +} + +static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table) +{ + int i; + + for (i = 0; i < table->max_local_functions; i++) { + if (table->sfs[i].allocated) + _mlx5_sf_hw_id_free(table->dev, i); + } +} + +int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table; + struct mlx5_sf_hw *sfs; + int max_functions; + + if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev)) + return 0; + + max_functions = mlx5_sf_max_functions(dev); + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL); + if (!sfs) + goto table_err; + + mutex_init(&table->table_lock); + table->dev
= dev; + table->sfs = sfs; + table->max_local_functions = max_functions; + table->ecpu = mlx5_read_embedded_cpu(dev); + dev->priv.sf_hw_table = table; + mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); + return 0; + +table_err: + kfree(table); + return -ENOMEM; +} + +void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + if (!table) + return; + + mutex_destroy(&table->table_lock); + kfree(table->sfs); + kfree(table); +} + +static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) +{ + struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb); + const struct mlx5_vhca_state_event *event = data; + struct mlx5_sf_hw *sf_hw; + u16 sw_id; + + if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED) + return 0; + + sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id); + sf_hw = &table->sfs[sw_id]; + + mutex_lock(&table->table_lock); + /* SF driver notified through firmware that SF is finally detached. + * Hence recycle the sf hardware id for reuse. + */ + if (sf_hw->allocated && sf_hw->pending_delete) + _mlx5_sf_hw_id_free(table->dev, sw_id); + mutex_unlock(&table->table_lock); + return 0; +} + +int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + if (!table) + return 0; + + table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event; + return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); +} + +void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + if (!table) + return; + + mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); + /* Dealloc SFs whose firmware event has been missed. 
*/ + mlx5_sf_hw_dealloc_all(table); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h new file mode 100644 index 000000000000..1daf5a122ba3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#ifndef __MLX5_IFC_VHCA_EVENT_H__ +#define __MLX5_IFC_VHCA_EVENT_H__ + +enum mlx5_ifc_vhca_state { + MLX5_VHCA_STATE_INVALID = 0x0, + MLX5_VHCA_STATE_ALLOCATED = 0x1, + MLX5_VHCA_STATE_ACTIVE = 0x2, + MLX5_VHCA_STATE_IN_USE = 0x3, + MLX5_VHCA_STATE_TEARDOWN_REQUEST = 0x4, +}; + +struct mlx5_ifc_vhca_state_context_bits { + u8 arm_change_event[0x1]; + u8 reserved_at_1[0xb]; + u8 vhca_state[0x4]; + u8 reserved_at_10[0x10]; + + u8 sw_function_id[0x20]; + + u8 reserved_at_40[0x80]; +}; + +struct mlx5_ifc_query_vhca_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_vhca_state_context_bits vhca_state_context; +}; + +struct mlx5_ifc_query_vhca_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 embedded_cpu_function[0x1]; + u8 reserved_at_41[0xf]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_vhca_state_field_select_bits { + u8 reserved_at_0[0x1e]; + u8 sw_function_id[0x1]; + u8 arm_change_event[0x1]; +}; + +struct mlx5_ifc_modify_vhca_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_vhca_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 embedded_cpu_function[0x1]; + u8 reserved_at_41[0xf]; + u8 function_id[0x10]; + + struct mlx5_ifc_vhca_state_field_select_bits vhca_state_field_select; + + struct mlx5_ifc_vhca_state_context_bits vhca_state_context; +}; + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h new file mode 100644 index 000000000000..cb02a51d0986 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#ifndef __MLX5_SF_PRIV_H__ +#define __MLX5_SF_PRIV_H__ + +#include <linux/mlx5/driver.h> + +int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id); +int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id); + +int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id); + +u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id); + +int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum); +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id); +void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h new file mode 100644 index 000000000000..0b6aea1e6a94 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#ifndef __MLX5_SF_H__ +#define __MLX5_SF_H__ + +#include <linux/mlx5/driver.h> + +static inline u16 
mlx5_sf_start_function_id(const struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, sf_base_id); +} + +#ifdef CONFIG_MLX5_SF + +static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, sf); +} + +static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev) +{ + if (!mlx5_sf_supported(dev)) + return 0; + if (MLX5_CAP_GEN(dev, max_num_sf)) + return MLX5_CAP_GEN(dev, max_num_sf); + else + return 1 << MLX5_CAP_GEN(dev, log_max_sf); +} + +#else + +static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev) +{ + return false; +} + +static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev) +{ + return 0; +} + +#endif + +#ifdef CONFIG_MLX5_SF_MANAGER + +int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev); +void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev); + +int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev); +void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev); + +int mlx5_sf_table_init(struct mlx5_core_dev *dev); +void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); + +int mlx5_devlink_sf_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *add_attr, + struct netlink_ext_ack *extack, + unsigned int *new_port_index); +int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack); +int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack); +int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack); +#else + +static inline int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) +{ +} + +static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) +{ +} + +static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) +{ +} + +#endif + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c new file mode 100644 index 000000000000..af2f2dd9db25 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#include <linux/mlx5/driver.h> +#include "mlx5_ifc_vhca_event.h" +#include "mlx5_core.h" +#include "vhca_event.h" +#include "ecpf.h" + +struct mlx5_vhca_state_notifier { + struct mlx5_core_dev *dev; + struct mlx5_nb nb; + struct blocking_notifier_head n_head; +}; + +struct mlx5_vhca_event_work { + struct work_struct work; + struct mlx5_vhca_state_notifier *notifier; + struct mlx5_vhca_state_event event; +}; + +int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, + bool ecpu, u32 *out, u32 outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; + + MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE); + MLX5_SET(query_vhca_state_in, in, function_id, function_id); + MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); +} + +static int mlx5_cmd_modify_vhca_state(struct 
mlx5_core_dev *dev, u16 function_id, + bool ecpu, u32 *in, u32 inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; + + MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); + MLX5_SET(modify_vhca_state_in, in, function_id, function_id); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); +} + +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id) +{ + u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; + u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; + + MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); + MLX5_SET(modify_vhca_state_in, in, function_id, function_id); + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); + MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1); + MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id); + + return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out); +} + +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu) +{ + u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; + + MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1); + MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1); + + return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in)); +} + +static void +mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event) +{ + u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; + int err; + + err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out)); + if (err) + return; + + event->sw_function_id = MLX5_GET(query_vhca_state_out, out, + vhca_state_context.sw_function_id); + event->new_vhca_state = MLX5_GET(query_vhca_state_out, out, + vhca_state_context.vhca_state); + + mlx5_vhca_event_arm(dev, event->function_id, event->ecpu); + + blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); +} + +static void mlx5_vhca_state_work_handler(struct work_struct *_work) +{ + struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work); + struct mlx5_vhca_state_notifier *notifier = work->notifier; + struct mlx5_core_dev *dev = notifier->dev; + + mlx5_vhca_event_notify(dev, &work->event); +} + +static int +mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data) +{ + struct mlx5_vhca_state_notifier *notifier = + mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb); + struct mlx5_vhca_event_work *work; + struct mlx5_eqe *eqe = data; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return NOTIFY_DONE; + INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); + work->notifier = notifier; + work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); + work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function); + mlx5_events_work_enqueue(notifier->dev, &work->work); + return NOTIFY_OK; +} + +void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) +{ + if (!mlx5_vhca_event_supported(dev)) + return; + + MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1); +} + +int 
mlx5_vhca_event_init(struct mlx5_core_dev *dev) +{ + struct mlx5_vhca_state_notifier *notifier; + + if (!mlx5_vhca_event_supported(dev)) + return 0; + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) + return -ENOMEM; + + dev->priv.vhca_state_notifier = notifier; + notifier->dev = dev; + BLOCKING_INIT_NOTIFIER_HEAD(¬ifier->n_head); + MLX5_NB_INIT(¬ifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE); + return 0; +} + +void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_vhca_event_supported(dev)) + return; + + kfree(dev->priv.vhca_state_notifier); + dev->priv.vhca_state_notifier = NULL; +} + +void mlx5_vhca_event_start(struct mlx5_core_dev *dev) +{ + struct mlx5_vhca_state_notifier *notifier; + + if (!dev->priv.vhca_state_notifier) + return; + + notifier = dev->priv.vhca_state_notifier; + mlx5_eq_notifier_register(dev, ¬ifier->nb); +} + +void mlx5_vhca_event_stop(struct mlx5_core_dev *dev) +{ + struct mlx5_vhca_state_notifier *notifier; + + if (!dev->priv.vhca_state_notifier) + return; + + notifier = dev->priv.vhca_state_notifier; + mlx5_eq_notifier_unregister(dev, ¬ifier->nb); +} + +int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + if (!dev->priv.vhca_state_notifier) + return -EOPNOTSUPP; + return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb); +} + +void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h new file mode 100644 index 000000000000..1fe1ec6f4d4b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd */ + +#ifndef __MLX5_VHCA_EVENT_H__ +#define __MLX5_VHCA_EVENT_H__ + +#ifdef CONFIG_MLX5_SF + +struct mlx5_vhca_state_event { + u16 function_id; + u16 sw_function_id; + u8 new_vhca_state; + bool ecpu; +}; + +static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN_MAX(dev, vhca_state); +} + +void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap); +int mlx5_vhca_event_init(struct mlx5_core_dev *dev); +void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev); +void mlx5_vhca_event_start(struct mlx5_core_dev *dev); +void mlx5_vhca_event_stop(struct mlx5_core_dev *dev); +int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id); +int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu); +int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, + bool ecpu, u32 *out, u32 outlen); +#else + +static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) +{ +} + +static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev) +{ +} + +static inline void mlx5_vhca_event_start(struct mlx5_core_dev *dev) +{ +} + +static inline void mlx5_vhca_event_stop(struct mlx5_core_dev *dev) +{ +} + +#endif + +#endif diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index df1363a34a42..28a7971cac6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] }, }; -struct dr_action_modify_field_conv { - u16 hw_field; - u8 start; - u8 end; - u8 l3_type; - u8 l4_type; -}; - -static const struct dr_action_modify_field_conv dr_action_conv_arr[] = { - [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47, - }, - [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15, - }, - [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47, - }, - [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47, - }, - [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15, - }, - [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56, - .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15, - .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31, - .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15, - .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP, - }, - [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31, - .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start 
= 32, .end = 63, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63, - .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { - .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15, - }, -}; - -#define MAX_VLANS 2 -struct dr_action_vlan_info { - int count; - u32 headers[MAX_VLANS]; -}; - -struct dr_action_apply_attr { - u32 modify_index; - u16 modify_actions; - u32 decap_index; - u16 decap_actions; - u8 decap_with_vlan:1; - u64 final_icm_addr; - u32 flow_tag; - u32 ctr_id; - u16 gvmi; - u16 hit_gvmi; - u32 reformat_id; - u32 reformat_size; - struct dr_action_vlan_info vlans; -}; - static int dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type, enum mlx5dr_action_type *action_type) @@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type return 0; } -static void dr_actions_init_next_ste(u8 **last_ste, - u32 *added_stes, - enum mlx5dr_ste_entry_type entry_type, - u16 gvmi) -{ - (*added_stes)++; - *last_ste += DR_STE_SIZE; - mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi); -} - -static void dr_actions_apply_tx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u8 *last_ste, - struct dr_action_apply_attr *attr, - u32 *added_stes) -{ - bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] || - action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]; - - /* We want to make sure the modify header comes before L2 - * encapsulation. 
The reason for that is that we support - * modify headers for outer headers only - */ - if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); - mlx5dr_ste_set_rewrite_actions(last_ste, - attr->modify_actions, - attr->modify_index); - } - - if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { - int i; - - for (i = 0; i < attr->vlans.count; i++) { - if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR]) - dr_actions_init_next_ste(&last_ste, - added_stes, - MLX5DR_STE_TYPE_TX, - attr->gvmi); - - mlx5dr_ste_set_tx_push_vlan(last_ste, - attr->vlans.headers[i], - encap); - } - } - - if (encap) { - /* Modify header and encapsulation require a different STEs. - * Since modify header STE format doesn't support encapsulation - * tunneling_action. - */ - if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] || - action_type_set[DR_ACTION_TYP_PUSH_VLAN]) - dr_actions_init_next_ste(&last_ste, - added_stes, - MLX5DR_STE_TYPE_TX, - attr->gvmi); - - mlx5dr_ste_set_tx_encap(last_ste, - attr->reformat_id, - attr->reformat_size, - action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]); - /* Whenever prio_tag_required enabled, we can be sure that the - * previous table (ACL) already push vlan to our packet, - * And due to HW limitation we need to set this bit, otherwise - * push vlan + reformat will not work. - */ - if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required)) - mlx5dr_ste_set_go_back_bit(last_ste); - } - - if (action_type_set[DR_ACTION_TYP_CTR]) - mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id); -} - -static void dr_actions_apply_rx(u8 *action_type_set, - u8 *last_ste, - struct dr_action_apply_attr *attr, - u32 *added_stes) -{ - if (action_type_set[DR_ACTION_TYP_CTR]) - mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id); - - if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { - mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); - mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan); - mlx5dr_ste_set_rewrite_actions(last_ste, - attr->decap_actions, - attr->decap_index); - } - - if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) - mlx5dr_ste_set_rx_decap(last_ste); - - if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { - int i; - - for (i = 0; i < attr->vlans.count; i++) { - if (i || - action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] || - action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) - dr_actions_init_next_ste(&last_ste, - added_stes, - MLX5DR_STE_TYPE_RX, - attr->gvmi); - - mlx5dr_ste_set_rx_pop_vlan(last_ste); - } - } - - if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) - dr_actions_init_next_ste(&last_ste, - added_stes, - MLX5DR_STE_TYPE_MODIFY_PKT, - attr->gvmi); - else - mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); - - mlx5dr_ste_set_rewrite_actions(last_ste, - attr->modify_actions, - attr->modify_index); - } - - if (action_type_set[DR_ACTION_TYP_TAG]) { - if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) - dr_actions_init_next_ste(&last_ste, - added_stes, - MLX5DR_STE_TYPE_RX, - attr->gvmi); - - mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag); - } -} - /* Apply the actions on the rule STE array starting from the last_ste. * Actions might require more than one STE, new_num_stes will return * the new size of the STEs array, rule with actions. 
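The hunks that follow stop open-coding the hardware STE layout inside dr_actions_apply() and instead route every format-specific operation through a steering-format context (dmn->ste_ctx), selected once per domain from the device's sw_format_ver. Below is a minimal compilable sketch of that dispatch pattern only; every name in it is an illustrative stand-in, not the driver's actual definition, and the real mlx5dr_ste_ctx carries many more callbacks (set_actions_tx, set_hit_addr, set_byte_mask, ste_init, an optional prepare_for_postsend, ...).

#include <stdint.h>

struct sketch_actions_attr {
        uint32_t ctr_id;        /* stand-in for mlx5dr_ste_actions_attr */
};

/* Per-format ops table, analogous to struct mlx5dr_ste_ctx. */
struct sketch_ste_ctx {
        void (*set_actions_rx)(uint8_t *last_ste,
                               struct sketch_actions_attr *attr,
                               uint32_t *added_stes);
        void (*set_miss_addr)(uint8_t *hw_ste_p, uint64_t miss_addr);
};

static void sketch_v0_set_actions_rx(uint8_t *last_ste,
                                     struct sketch_actions_attr *attr,
                                     uint32_t *added_stes)
{
        /* A real v0 implementation would encode the legacy STE layout
         * that the deleted dr_actions_apply_rx() above wrote directly. */
        (void)last_ste; (void)attr; (void)added_stes;
}

static void sketch_v0_set_miss_addr(uint8_t *hw_ste_p, uint64_t miss_addr)
{
        (void)hw_ste_p; (void)miss_addr;
}

static const struct sketch_ste_ctx sketch_ste_ctx_v0 = {
        .set_actions_rx = sketch_v0_set_actions_rx,
        .set_miss_addr  = sketch_v0_set_miss_addr,
};

/* Chosen once per domain, like mlx5dr_ste_get_ctx(); a NULL return is
 * what dr_domain_init_resources() below turns into -EOPNOTSUPP. */
static const struct sketch_ste_ctx *sketch_ste_get_ctx(int sw_format_ver)
{
        return sw_format_ver == 0 ? &sketch_ste_ctx_v0 : NULL;
}

Keeping the ops table const and per-format lets one driver binary serve both the existing STE format and newer ones without branching at every call site; callers such as dr_actions_apply() shrink to thin wrappers, as the hunk below shows.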
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, enum mlx5dr_ste_entry_type ste_type, u8 *action_type_set, u8 *last_ste, - struct dr_action_apply_attr *attr, + struct mlx5dr_ste_actions_attr *attr, u32 *new_num_stes) { + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; u32 added_stes = 0; if (ste_type == MLX5DR_STE_TYPE_RX) - dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes); + mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set, + last_ste, attr, &added_stes); else - dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes); + mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set, + last_ste, attr, &added_stes); - last_ste += added_stes * DR_STE_SIZE; *new_num_stes += added_stes; - - mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi); - mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1); } static enum dr_action_domain @@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 action_type_set[DR_ACTION_TYP_MAX] = {}; + struct mlx5dr_ste_actions_attr attr = {}; struct mlx5dr_action *dest_action = NULL; u32 state = DR_ACTION_STATE_NO_ACTION; - struct dr_action_apply_attr attr = {}; enum dr_action_domain action_domain; bool recalc_cs_required = false; u8 *last_ste; @@ -735,7 +447,8 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, case DR_ACTION_TYP_MODIFY_HDR: attr.modify_index = action->rewrite.index; attr.modify_actions = action->rewrite.num_of_actions; - recalc_cs_required = action->rewrite.modify_ttl; + recalc_cs_required = action->rewrite.modify_ttl && + !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps); break; case DR_ACTION_TYP_L2_TO_TNL_L2: case DR_ACTION_TYP_L2_TO_TNL_L3: @@ -756,12 +469,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } break; case DR_ACTION_TYP_POP_VLAN: - max_actions_type = MAX_VLANS; + max_actions_type = MLX5DR_MAX_VLANS; attr.vlans.count++; break; case DR_ACTION_TYP_PUSH_VLAN: - max_actions_type = MAX_VLANS; - if (attr.vlans.count == MAX_VLANS) + max_actions_type = MLX5DR_MAX_VLANS; + if (attr.vlans.count == MLX5DR_MAX_VLANS) return -EINVAL; attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr; @@ -789,9 +502,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, *new_hw_ste_arr_sz = nic_matcher->num_of_builders; last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1); - /* Due to a HW bug, modifying TTL on RX flows will cause an incorrect - * checksum calculation. In this case we will use a FW table to - * recalculate. + /* Due to a HW bug in some devices, modifying TTL on RX flows will + * cause an incorrect checksum calculation. In this case we will + * use a FW table to recalculate. 
*/ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB && rx_rule && recalc_cs_required && dest_action) { @@ -817,132 +530,6 @@ out_invalid_arg: return -EINVAL; } -#define CVLAN_ETHERTYPE 0x8100 -#define SVLAN_ETHERTYPE 0x88a8 -#define HDR_LEN_L2_ONLY 14 -#define HDR_LEN_L2_VLAN 18 -#define REWRITE_HW_ACTION_NUM 6 - -static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn, - struct mlx5dr_action *action, - void *data, size_t data_sz) -{ - struct mlx5_ifc_l2_hdr_bits *l2_hdr = data; - u64 ops[REWRITE_HW_ACTION_NUM] = {}; - u32 hdr_fld_4b; - u16 hdr_fld_2b; - u16 vlan_type; - bool vlan; - int i = 0; - int ret; - - vlan = (data_sz != HDR_LEN_L2_ONLY); - - /* dmac_47_16 */ - MLX5_SET(dr_action_hw_set, ops + i, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - MLX5_SET(dr_action_hw_set, ops + i, - destination_length, 0); - MLX5_SET(dr_action_hw_set, ops + i, - destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0); - MLX5_SET(dr_action_hw_set, ops + i, - destination_left_shifter, 16); - hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16); - MLX5_SET(dr_action_hw_set, ops + i, - inline_data, hdr_fld_4b); - i++; - - /* smac_47_16 */ - MLX5_SET(dr_action_hw_set, ops + i, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - MLX5_SET(dr_action_hw_set, ops + i, - destination_length, 0); - MLX5_SET(dr_action_hw_set, ops + i, - destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1); - MLX5_SET(dr_action_hw_set, ops + i, - destination_left_shifter, 16); - hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 | - MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16); - MLX5_SET(dr_action_hw_set, ops + i, - inline_data, hdr_fld_4b); - i++; - - /* dmac_15_0 */ - MLX5_SET(dr_action_hw_set, ops + i, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - MLX5_SET(dr_action_hw_set, ops + i, - destination_length, 16); - MLX5_SET(dr_action_hw_set, ops + i, - destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0); - MLX5_SET(dr_action_hw_set, ops + i, - destination_left_shifter, 0); - hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0); - MLX5_SET(dr_action_hw_set, ops + i, - inline_data, hdr_fld_2b); - i++; - - /* ethertype + (optional) vlan */ - MLX5_SET(dr_action_hw_set, ops + i, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - MLX5_SET(dr_action_hw_set, ops + i, - destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2); - MLX5_SET(dr_action_hw_set, ops + i, - destination_left_shifter, 32); - if (!vlan) { - hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype); - MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b); - MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16); - } else { - hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype); - vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? 
DR_STE_SVLAN : DR_STE_CVLAN; - hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan); - hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b; - MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b); - MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18); - } - i++; - - /* smac_15_0 */ - MLX5_SET(dr_action_hw_set, ops + i, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - MLX5_SET(dr_action_hw_set, ops + i, - destination_length, 16); - MLX5_SET(dr_action_hw_set, ops + i, - destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1); - MLX5_SET(dr_action_hw_set, ops + i, - destination_left_shifter, 0); - hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0); - MLX5_SET(dr_action_hw_set, ops + i, - inline_data, hdr_fld_2b); - i++; - - if (vlan) { - MLX5_SET(dr_action_hw_set, ops + i, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type); - MLX5_SET(dr_action_hw_set, ops + i, - inline_data, hdr_fld_2b); - MLX5_SET(dr_action_hw_set, ops + i, - destination_length, 16); - MLX5_SET(dr_action_hw_set, ops + i, - destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2); - MLX5_SET(dr_action_hw_set, ops + i, - destination_left_shifter, 0); - i++; - } - - action->rewrite.data = (void *)ops; - action->rewrite.num_of_actions = i; - - ret = mlx5dr_send_postsend_action(dmn, action); - if (ret) { - mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n"); - return ret; - } - - return 0; -} - static struct mlx5dr_action * dr_action_create_generic(enum mlx5dr_action_type action_type) { @@ -1217,21 +804,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, } case DR_ACTION_TYP_TNL_L3_TO_L2: { - /* Only Ethernet frame is supported, with VLAN (18) or without (14) */ - if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN) - return -EINVAL; + u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {}; + int ret; + + ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx, + data, data_sz, + hw_actions, + ACTION_CACHE_LINE_SIZE, + &action->rewrite.num_of_actions); + if (ret) { + mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n"); + return ret; + } action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_8); - if (!action->rewrite.chunk) + if (!action->rewrite.chunk) { + mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n"); return -ENOMEM; + } + action->rewrite.data = (void *)hw_actions; action->rewrite.index = (action->rewrite.chunk->icm_addr - dmn->info.caps.hdr_modify_icm_addr) / ACTION_CACHE_LINE_SIZE; - ret = dr_actions_l2_rewrite(dmn, action, data, data_sz); + ret = mlx5dr_send_postsend_action(dmn, action); if (ret) { + mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n"); mlx5dr_icm_free_chunk(action->rewrite.chunk); return ret; } @@ -1243,6 +843,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, } } +#define CVLAN_ETHERTYPE 0x8100 +#define SVLAN_ETHERTYPE 0x88a8 + struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void) { return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN); @@ -1315,31 +918,13 @@ dec_ref: return NULL; } -static const struct dr_action_modify_field_conv * -dr_action_modify_get_hw_info(u16 sw_field) -{ - const struct dr_action_modify_field_conv *hw_action_info; - - if (sw_field >= ARRAY_SIZE(dr_action_conv_arr)) - goto not_found; - - hw_action_info = &dr_action_conv_arr[sw_field]; - if (!hw_action_info->end && !hw_action_info->start) - goto not_found; - - return hw_action_info; - -not_found: - return NULL; -} - static int dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn, __be64 *sw_action, __be64 
*hw_action, - const struct dr_action_modify_field_conv **ret_hw_info) + const struct mlx5dr_ste_action_modify_field **ret_hw_info) { - const struct dr_action_modify_field_conv *hw_action_info; + const struct mlx5dr_ste_action_modify_field *hw_action_info; u8 max_length; u16 sw_field; u32 data; @@ -1349,7 +934,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn, data = MLX5_GET(set_action_in, sw_action, data); /* Convert SW data to HW modify action format */ - hw_action_info = dr_action_modify_get_hw_info(sw_field); + hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field); if (!hw_action_info) { mlx5dr_dbg(dmn, "Modify add action invalid field given\n"); return -EINVAL; @@ -1357,20 +942,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn, max_length = hw_action_info->end - hw_action_info->start + 1; - MLX5_SET(dr_action_hw_set, hw_action, - opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD); - - MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, - hw_action_info->hw_field); - - MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, - hw_action_info->start); - - /* PRM defines that length zero specific length of 32bits */ - MLX5_SET(dr_action_hw_set, hw_action, destination_length, - max_length == 32 ? 0 : max_length); - - MLX5_SET(dr_action_hw_set, hw_action, inline_data, data); + mlx5dr_ste_set_action_add(dmn->ste_ctx, + hw_action, + hw_action_info->hw_field, + hw_action_info->start, + max_length, + data); *ret_hw_info = hw_action_info; @@ -1381,9 +958,9 @@ static int dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn, __be64 *sw_action, __be64 *hw_action, - const struct dr_action_modify_field_conv **ret_hw_info) + const struct mlx5dr_ste_action_modify_field **ret_hw_info) { - const struct dr_action_modify_field_conv *hw_action_info; + const struct mlx5dr_ste_action_modify_field *hw_action_info; u8 offset, length, max_length; u16 sw_field; u32 data; @@ -1395,7 +972,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn, data = MLX5_GET(set_action_in, sw_action, data); /* Convert SW data to HW modify action format */ - hw_action_info = dr_action_modify_get_hw_info(sw_field); + hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field); if (!hw_action_info) { mlx5dr_dbg(dmn, "Modify set action invalid field given\n"); return -EINVAL; @@ -1411,19 +988,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn, return -EINVAL; } - MLX5_SET(dr_action_hw_set, hw_action, - opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); - - MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, - hw_action_info->hw_field); - - MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, - hw_action_info->start + offset); - - MLX5_SET(dr_action_hw_set, hw_action, destination_length, - length == 32 ? 
0 : length); - - MLX5_SET(dr_action_hw_set, hw_action, inline_data, data); + mlx5dr_ste_set_action_set(dmn->ste_ctx, + hw_action, + hw_action_info->hw_field, + hw_action_info->start + offset, + length, + data); *ret_hw_info = hw_action_info; @@ -1434,12 +1004,12 @@ static int dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn, __be64 *sw_action, __be64 *hw_action, - const struct dr_action_modify_field_conv **ret_dst_hw_info, - const struct dr_action_modify_field_conv **ret_src_hw_info) + const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info, + const struct mlx5dr_ste_action_modify_field **ret_src_hw_info) { u8 src_offset, dst_offset, src_max_length, dst_max_length, length; - const struct dr_action_modify_field_conv *hw_dst_action_info; - const struct dr_action_modify_field_conv *hw_src_action_info; + const struct mlx5dr_ste_action_modify_field *hw_dst_action_info; + const struct mlx5dr_ste_action_modify_field *hw_src_action_info; u16 src_field, dst_field; /* Get SW modify action data */ @@ -1450,8 +1020,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn, length = MLX5_GET(copy_action_in, sw_action, length); /* Convert SW data to HW modify action format */ - hw_src_action_info = dr_action_modify_get_hw_info(src_field); - hw_dst_action_info = dr_action_modify_get_hw_info(dst_field); + hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field); + hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field); if (!hw_src_action_info || !hw_dst_action_info) { mlx5dr_dbg(dmn, "Modify copy action invalid field given\n"); return -EINVAL; @@ -1471,23 +1041,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn, return -EINVAL; } - MLX5_SET(dr_action_hw_copy, hw_action, - opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY); - - MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, - hw_dst_action_info->hw_field); - - MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, - hw_dst_action_info->start + dst_offset); - - MLX5_SET(dr_action_hw_copy, hw_action, destination_length, - length == 32 ? 
0 : length); - - MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, - hw_src_action_info->hw_field); - - MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, - hw_src_action_info->start + dst_offset); + mlx5dr_ste_set_action_copy(dmn->ste_ctx, + hw_action, + hw_dst_action_info->hw_field, + hw_dst_action_info->start + dst_offset, + length, + hw_src_action_info->hw_field, + hw_src_action_info->start + src_offset); *ret_dst_hw_info = hw_dst_action_info; *ret_src_hw_info = hw_src_action_info; @@ -1499,8 +1059,8 @@ static int dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, __be64 *sw_action, __be64 *hw_action, - const struct dr_action_modify_field_conv **ret_dst_hw_info, - const struct dr_action_modify_field_conv **ret_src_hw_info) + const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info, + const struct mlx5dr_ste_action_modify_field **ret_src_hw_info) { u8 action; int ret; @@ -1677,15 +1237,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, u32 *num_hw_actions, bool *modify_ttl) { - const struct dr_action_modify_field_conv *hw_dst_action_info; - const struct dr_action_modify_field_conv *hw_src_action_info; - u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED; - u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE; - u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE; + const struct mlx5dr_ste_action_modify_field *hw_dst_action_info; + const struct mlx5dr_ste_action_modify_field *hw_src_action_info; struct mlx5dr_domain *dmn = action->rewrite.dmn; int ret, i, hw_idx = 0; __be64 *sw_action; __be64 hw_action; + u16 hw_field = 0; + u32 l3_type = 0; + u32 l4_type = 0; *modify_ttl = false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index ba65ec406cfa..30b0136b5bc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -78,9 +78,9 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev, caps->uplink_icm_address_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev, sw_steering_uplink_icm_address_tx); - caps->sw_owner = - MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, - sw_owner); + caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2); + if (!caps->sw_owner_v2) + caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner); return 0; } @@ -113,10 +113,15 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->nic_tx_allow_address = MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address); - caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner); - caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level); + caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2); + caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2); + + if (!caps->rx_sw_owner_v2) + caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner); + if (!caps->tx_sw_owner_v2) + caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner); - caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner); + caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level); caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size); caps->hdr_modify_icm_addr = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index aa2c2d6c44e6..7091b1be84ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 
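The dr_cmd.c hunks above and the dr_domain.c hunks below implement a two-level capability check: the query prefers the new sw_owner_v2 bit and reads the legacy sw_owner bit only when v2 is clear, and domain init then accepts either, provided the device's steering format is one the driver recognizes (up to MLX5_STEERING_FORMAT_CONNECTX_6DX in this series). A minimal standalone sketch of that gating, with invented type names rather than the driver's:

#include <stdbool.h>

/* Illustrative stand-ins for mlx5dr_cmd_caps and the format enum. */
enum sketch_steering_format {
        SKETCH_STEERING_FORMAT_CONNECTX_5   = 0,
        SKETCH_STEERING_FORMAT_CONNECTX_6DX = 1,
};

struct sketch_caps {
        bool rx_sw_owner;       /* legacy ownership bit */
        bool rx_sw_owner_v2;    /* advertised by newer devices instead */
        enum sketch_steering_format sw_format_ver;
};

/* Mirrors DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx): either the legacy
 * bit, or the v2 bit on a format version this driver can drive. */
static bool sketch_rx_sw_steering_supported(const struct sketch_caps *caps)
{
        return caps->rx_sw_owner ||
               (caps->rx_sw_owner_v2 &&
                caps->sw_format_ver <= SKETCH_STEERING_FORMAT_CONNECTX_6DX);
}

The "<=" comparison makes a future format bump fail closed: a device advertising sw_owner_v2 with a format newer than the driver knows is reported as unsupported rather than programmed with the wrong STE layout.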
@@ -4,6 +4,11 @@ #include <linux/mlx5/eswitch.h> #include "dr_types.h" +#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ + ((dmn)->info.caps.dmn_type##_sw_owner || \ + ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ + (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) + static int dr_domain_init_cache(struct mlx5dr_domain *dmn) { /* Per vport cached FW FT for checksum recalculation, this @@ -57,6 +62,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn) { int ret; + dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver); + if (!dmn->ste_ctx) { + mlx5dr_err(dmn, "SW Steering on this device is unsupported\n"); + return -EOPNOTSUPP; + } + ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn); if (ret) { mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret); @@ -181,6 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, return ret; dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner; + dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2; dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx; dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx; @@ -223,18 +235,13 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, if (ret) return ret; - if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) { - mlx5dr_err(dmn, "SW steering is not supported on this device\n"); - return -EOPNOTSUPP; - } - ret = dr_domain_query_fdb_caps(mdev, dmn); if (ret) return ret; switch (dmn->type) { case MLX5DR_DOMAIN_TYPE_NIC_RX: - if (!dmn->info.caps.rx_sw_owner) + if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx)) return -ENOTSUPP; dmn->info.supp_sw_steering = true; @@ -243,7 +250,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address; break; case MLX5DR_DOMAIN_TYPE_NIC_TX: - if (!dmn->info.caps.tx_sw_owner) + if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx)) return -ENOTSUPP; dmn->info.supp_sw_steering = true; @@ -255,7 +262,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, if (!dmn->info.caps.eswitch_manager) return -ENOTSUPP; - if (!dmn->info.caps.fdb_sw_owner) + if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb)) return -ENOTSUPP; dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 6527eb4df153..15673cd10039 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -113,7 +113,8 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) static bool dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; + return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED); } static bool @@ -135,7 +136,8 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc) static bool dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED; + return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED); } static bool @@ -148,12 +150,14 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask, static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols 
& MLX5_FLEX_PARSER_ICMP_V4_ENABLED; + return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED); } static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED; + return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED); } static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3) @@ -221,6 +225,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_match_param mask = {}; struct mlx5dr_ste_build *sb; bool inner, rx; @@ -259,80 +264,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner = false; if (dr_mask_is_wqe_metadata_set(&mask.misc2)) - mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_reg_c_0_3_set(&mask.misc2)) - mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_reg_c_4_7_set(&mask.misc2)) - mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) && (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) { - mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask, - dmn, inner, rx); + mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++], + &mask, dmn, inner, rx); } if (dr_mask_is_smac_set(&mask.outer) && dr_mask_is_dmac_set(&mask.outer)) { - mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); } if (dr_mask_is_smac_set(&mask.outer)) - mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer)) - mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); if (outer_ipv == DR_RULE_IPV6) { if (dr_mask_is_dst_addr_set(&mask.outer)) - mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_src_addr_set(&mask.outer)) - mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer)) - mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++], + &mask, inner, rx); } else { if (dr_mask_is_ipv4_5_tuple_set(&mask.outer)) - mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_ttl_set(&mask.outer)) - mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], + &mask, inner, rx); } if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn)) - mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++], + &mask, inner, rx); else if 
(dr_mask_is_tnl_geneve(&mask, dmn)) - mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) - mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer)) - mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) - mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_icmp(&mask, dmn)) { - ret = mlx5dr_ste_build_icmp(&sb[idx++], + ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++], &mask, &dmn->info.caps, inner, rx); if (ret) return ret; } if (dr_mask_is_tnl_gre_set(&mask.misc)) - mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++], + &mask, inner, rx); } /* Inner */ @@ -343,50 +357,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner = true; if (dr_mask_is_eth_l2_tnl_set(&mask.misc)) - mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_smac_set(&mask.inner) && dr_mask_is_dmac_set(&mask.inner)) { - mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], + mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++], &mask, inner, rx); } if (dr_mask_is_smac_set(&mask.inner)) - mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner)) - mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); if (inner_ipv == DR_RULE_IPV6) { if (dr_mask_is_dst_addr_set(&mask.inner)) - mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_src_addr_set(&mask.inner)) - mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner)) - mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++], + &mask, inner, rx); } else { if (dr_mask_is_ipv4_5_tuple_set(&mask.inner)) - mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], + &mask, inner, rx); if (dr_mask_is_ttl_set(&mask.inner)) - mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], + &mask, inner, rx); } if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner)) - mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner)) - mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++], + &mask, inner, rx); if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) - mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx); + mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++], + &mask, inner, rx); } /* Empty matcher, takes all */ if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 6d73719db1f4..b337d6626bff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member { struct list_head list; }; -static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste, +static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste *new_last_ste, struct list_head *miss_list, struct list_head *send_list) { @@ -25,11 +26,11 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste, if (!ste_info_last) return -ENOMEM; - mlx5dr_ste_set_miss_addr(last_ste->hw_ste, + mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste, mlx5dr_ste_get_icm_addr(new_last_ste)); list_add_tail(&new_last_ste->miss_list_node, miss_list); - mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED, + mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL, 0, last_ste->hw_ste, ste_info_last, send_list, true); @@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher, u8 *hw_ste) { struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_htbl *new_htbl; struct mlx5dr_ste *ste; @@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher, /* One and only entry, never grows */ ste = new_htbl->ste_arr; - mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr); + mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, + nic_matcher->e_anchor->chunk->icm_addr); mlx5dr_htbl_get(new_htbl); return ste; @@ -103,14 +106,19 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info, int ret; list_del(&ste_info->send_list); + + /* Copy data to ste, only reduced size or control, the last 16B (mask) + * is already written to the hw. + */ + if (ste_info->size == DR_STE_SIZE_CTRL) + memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL); + else + memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED); + ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data, ste_info->size, ste_info->offset); if (ret) goto out; - /* Copy data to ste, only reduced size, the last 16B (mask) - * is already written to the hw. 
- */ - memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED); out: kfree(ste_info); @@ -169,6 +177,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher, struct mlx5dr_ste *col_ste, u8 *hw_ste) { + struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste *new_ste; int ret; @@ -180,11 +189,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher, new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste); /* Update the previous from the list */ - ret = dr_rule_append_to_miss_list(new_ste, + ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste, mlx5dr_ste_get_miss_list(col_ste), update_list); if (ret) { - mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n"); + mlx5dr_dbg(dmn, "Failed update dup entry\n"); goto err_exit; } @@ -224,6 +233,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher, struct mlx5dr_ste_htbl *new_htbl, struct list_head *update_list) { + struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_send_info *ste_info; bool use_update_list = false; u8 hw_ste[DR_STE_SIZE] = {}; @@ -237,7 +247,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher, /* Copy STE control and tag */ memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED); - mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr); + mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, + nic_matcher->e_anchor->chunk->icm_addr); new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl); new_ste = &new_htbl->ste_arr[new_idx]; @@ -253,7 +264,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher, new_ste, hw_ste); if (!new_ste) { - mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n", + mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n", new_idx); return NULL; } @@ -391,7 +402,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule, /* Write new table to HW */ info.type = CONNECT_MISS; info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; - mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi, + mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, + dmn->info.caps.gvmi, nic_dmn, new_htbl, formatted_ste, @@ -436,18 +448,20 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule, /* It is safe to operate dr_ste_set_hit_addr on the hw_ste here * (48B len) which works only on first 32B */ - mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste, + mlx5dr_ste_set_hit_addr(dmn->ste_ctx, + prev_htbl->ste_arr[0].hw_ste, new_htbl->chunk->icm_addr, new_htbl->chunk->num_of_entries); ste_to_update = &prev_htbl->ste_arr[0]; } else { - mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste, + mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx, + cur_htbl->pointing_ste->hw_ste, new_htbl); ste_to_update = cur_htbl->pointing_ste; } - mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED, + mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL, 0, ste_to_update->hw_ste, ste_info, update_list, false); @@ -496,6 +510,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher, struct list_head *miss_list, struct list_head *send_list) { + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_send_info *ste_info; struct mlx5dr_ste *new_ste; @@ -507,8 +523,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher, if (!new_ste) goto free_send_info; - if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) { - mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n"); + if (dr_rule_append_to_miss_list(ste_ctx, 
new_ste, + miss_list, send_list)) { + mlx5dr_dbg(dmn, "Failed to update prev miss_list\n"); goto err_exit; } @@ -659,6 +676,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES]; u8 num_of_builders = nic_matcher->num_of_builders; struct mlx5dr_matcher *matcher = rule->matcher; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 *curr_hw_ste, *prev_hw_ste; struct mlx5dr_ste *action_ste; int i, k, ret; @@ -692,10 +710,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, goto err_exit; /* Point current ste to the new action */ - mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl); + mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx, + prev_hw_ste, + action_ste->htbl); ret = dr_rule_add_member(nic_rule, action_ste); if (ret) { - mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n"); + mlx5dr_dbg(dmn, "Failed adding rule member\n"); goto free_ste_info; } mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0, @@ -722,6 +742,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher, struct list_head *miss_list, struct list_head *send_list) { + struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_send_info *ste_info; /* Take ref on table, only on first time this ste is used */ @@ -730,7 +751,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher, /* new entry -> new branch */ list_add_tail(&ste->miss_list_node, miss_list); - mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr); + mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, + nic_matcher->e_anchor->chunk->icm_addr); ste->ste_chain_location = ste_location; @@ -743,7 +765,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher, ste, hw_ste, DR_CHUNK_SIZE_1)) { - mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n"); + mlx5dr_dbg(dmn, "Failed allocating table\n"); goto clean_ste_info; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 24dede1b0a20..83c4c877d558 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -431,6 +431,8 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste, { struct postsend_info send_info = {}; + mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size); + send_info.write.addr = (uintptr_t)data; send_info.write.length = size; send_info.write.lkey = 0; @@ -457,6 +459,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn, if (ret) return ret; + mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE); + /* Send the data iteration times */ for (i = 0; i < iterations; i++) { u32 ste_index = i * (byte_size / DR_STE_SIZE); @@ -480,6 +484,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn, /* Copy bit_mask */ memcpy(data + ste_off + DR_STE_SIZE_REDUCED, mask, DR_STE_SIZE_MASK); + /* Only when we have mask we need to re-arrange the STE */ + mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, + data + (j * DR_STE_SIZE), + DR_STE_SIZE); } } @@ -509,6 +517,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, u32 byte_size = htbl->chunk->byte_size; int iterations; int num_stes; + u8 *copy_dst; u8 *data; int ret; int i; @@ -518,20 +527,22 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, if (ret) return ret; - for (i = 0; i < num_stes; i++) { - u8 
*copy_dst; - - /* Copy the same ste on the data buffer */ - copy_dst = data + i * DR_STE_SIZE; - memcpy(copy_dst, ste_init_data, DR_STE_SIZE); - - if (update_hw_ste) { - /* Copy the reduced ste to hash table ste_arr */ + if (update_hw_ste) { + /* Copy the reduced STE to hash table ste_arr */ + for (i = 0; i < num_stes; i++) { copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED); } } + mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE); + + /* Copy the same STE on the data buffer */ + for (i = 0; i < num_stes; i++) { + copy_dst = data + i * DR_STE_SIZE; + memcpy(copy_dst, ste_init_data, DR_STE_SIZE); + } + /* Send the data iteration times */ for (i = 0; i < iterations; i++) { u8 ste_index = i * (byte_size / DR_STE_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index d275823bff2f..f49abc7a4b9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -3,104 +3,7 @@ #include <linux/types.h> #include <linux/crc32.h> -#include "dr_types.h" - -#define DR_STE_CRC_POLY 0xEDB88320L -#define STE_IPV4 0x1 -#define STE_IPV6 0x2 -#define STE_TCP 0x1 -#define STE_UDP 0x2 -#define STE_SPI 0x3 -#define IP_VERSION_IPV4 0x4 -#define IP_VERSION_IPV6 0x6 -#define STE_SVLAN 0x1 -#define STE_CVLAN 0x2 - -#define DR_STE_ENABLE_FLOW_TAG BIT(31) - -/* Set to STE a specific value using DR_STE_SET */ -#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \ - if ((spec)->s_fname) { \ - MLX5_SET(ste_##lookup_type, tag, t_fname, value); \ - (spec)->s_fname = 0; \ - } \ -} while (0) - -/* Set to STE spec->s_fname to tag->t_fname */ -#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \ - DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname) - -/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */ -#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \ - DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1) - -/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */ -#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \ - DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname) - -#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \ - MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \ - MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \ -} while (0) - -#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \ - DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \ - in_out##_first_mpls_label);\ - DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \ - in_out##_first_mpls_s_bos); \ - DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \ - in_out##_first_mpls_exp); 
\ - DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \ - in_out##_first_mpls_ttl); \ -} while (0) - -#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \ - DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \ - in_out##_first_mpls_label);\ - DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \ - in_out##_first_mpls_s_bos); \ - DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \ - in_out##_first_mpls_exp); \ - DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \ - in_out##_first_mpls_ttl); \ -} while (0) - -#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\ - (_misc)->outer_first_mpls_over_gre_label || \ - (_misc)->outer_first_mpls_over_gre_exp || \ - (_misc)->outer_first_mpls_over_gre_s_bos || \ - (_misc)->outer_first_mpls_over_gre_ttl) -#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\ - (_misc)->outer_first_mpls_over_udp_label || \ - (_misc)->outer_first_mpls_over_udp_exp || \ - (_misc)->outer_first_mpls_over_udp_s_bos || \ - (_misc)->outer_first_mpls_over_udp_ttl) - -#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \ - ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \ - (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \ - MLX5DR_STE_LU_TYPE_##lookup_type##_O) - -enum dr_ste_tunl_action { - DR_STE_TUNL_ACTION_NONE = 0, - DR_STE_TUNL_ACTION_ENABLE = 1, - DR_STE_TUNL_ACTION_DECAP = 2, - DR_STE_TUNL_ACTION_L3_DECAP = 3, - DR_STE_TUNL_ACTION_POP_VLAN = 4, -}; - -enum dr_ste_action_type { - DR_STE_ACTION_TYPE_PUSH_VLAN = 1, - DR_STE_ACTION_TYPE_ENCAP_L3 = 3, - DR_STE_ACTION_TYPE_ENCAP = 4, -}; +#include "dr_ste.h" struct dr_hw_ste_format { u8 ctrl[DR_STE_SIZE_CTRL]; @@ -115,6 +18,11 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length) return (__force u32)htonl(crc); } +bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps) +{ + return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5; +} + u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl) { struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; @@ -142,7 +50,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl) return index; } -static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask) +u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask) { u16 byte_mask = 0; int i; @@ -155,7 +63,7 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask) return byte_mask; } -static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p) +static u8 *dr_ste_get_tag(u8 *hw_ste_p) { struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; @@ -169,104 +77,6 @@ void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask) memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK); } -void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag) -{ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer, - DR_STE_ENABLE_FLOW_TAG | flow_tag); -} - -void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id) -{ - /* This can be used for both rx_steering_mult and for sx_transmit */ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id); - MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16); -} - -void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p) -{ - MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1); -} - -void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr, - bool go_back) -{ - MLX5_SET(ste_sx_transmit, hw_ste_p, action_type, - DR_STE_ACTION_TYPE_PUSH_VLAN); - MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr); - /* Due to HW limitation we need to set this bit, otherwise reforamt + - * push vlan will not work. 
- */ - if (go_back) - mlx5dr_ste_set_go_back_bit(hw_ste_p); -} - -void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3) -{ - MLX5_SET(ste_sx_transmit, hw_ste_p, action_type, - encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP); - /* The hardware expects here size in words (2 byte) */ - MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2); - MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id); -} - -void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p) -{ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, - DR_STE_TUNL_ACTION_DECAP); -} - -void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p) -{ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, - DR_STE_TUNL_ACTION_POP_VLAN); -} - -void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan) -{ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, - DR_STE_TUNL_ACTION_L3_DECAP); - MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0); -} - -void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type) -{ - MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type); -} - -u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p) -{ - return MLX5_GET(ste_general, hw_ste_p, entry_type); -} - -void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, - u32 re_write_index) -{ - MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions, - num_of_actions); - MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer, - re_write_index); -} - -void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi) -{ - MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi); -} - -void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, - u16 gvmi) -{ - MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type); - MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type); - MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE); - - /* Set GVMI once, this is the same for RX/TX - * bits 63_48 of next table base / miss address encode the next GVMI - */ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi); - MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi); - MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi); -} - static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste) { memset(&hw_ste->tag, 0, sizeof(hw_ste->tag)); @@ -279,21 +89,26 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste) hw_ste->mask[0] = 0; } -u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste) +void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste_p, u64 miss_addr) { - u64 index = - (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) | - MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26); - - return index << 6; + ste_ctx->set_miss_addr(hw_ste_p, miss_addr); } -void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size) +static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste *ste, u64 miss_addr) { - u64 index = (icm_addr >> 5) | ht_size; + u8 *hw_ste_p = ste->hw_ste; + + ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); + ste_ctx->set_miss_addr(hw_ste_p, miss_addr); + dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste); +} - MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27); - MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index); +void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste, u64 icm_addr, u32 ht_size) +{ + ste_ctx->set_hit_addr(hw_ste, 
icm_addr, ht_size); } u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste) @@ -317,15 +132,16 @@ struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste) return &ste->htbl->miss_list[index]; } -static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste, +static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste *ste, struct mlx5dr_ste_htbl *next_htbl) { struct mlx5dr_icm_chunk *chunk = next_htbl->chunk; u8 *hw_ste = ste->hw_ste; - MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask); - MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type); - mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); + ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask); + ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type); + ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste); } @@ -363,7 +179,8 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) /* Free ste which is the head and the only one in miss_list */ static void -dr_ste_remove_head_ste(struct mlx5dr_ste *ste, +dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste *ste, struct mlx5dr_matcher_rx_tx *nic_matcher, struct mlx5dr_ste_send_info *ste_info_head, struct list_head *send_ste_list, @@ -380,7 +197,7 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste, */ memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); miss_addr = nic_matcher->e_anchor->chunk->icm_addr; - mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr); + dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr); memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED); list_del_init(&ste->miss_list_node); @@ -399,13 +216,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste, * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0 */ static void -dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste, +dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste *ste, + struct mlx5dr_ste *next_ste, struct mlx5dr_ste_send_info *ste_info_head, struct list_head *send_ste_list, struct mlx5dr_ste_htbl *stats_tbl) { struct mlx5dr_ste_htbl *next_miss_htbl; + u8 hw_ste[DR_STE_SIZE] = {}; + int sb_idx; next_miss_htbl = next_ste->htbl; @@ -418,13 +239,19 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste, /* Move data from next into ste */ dr_ste_replace(ste, next_ste); + /* Copy all 64 hw_ste bytes */ + memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); + sb_idx = ste->ste_chain_location - 1; + mlx5dr_ste_set_bit_mask(hw_ste, + nic_matcher->ste_builder[sb_idx].bit_mask); + /* Del the htbl that contains the next_ste. * The origin htbl stay with the same number of entries. 
*/ mlx5dr_htbl_put(next_miss_htbl); - mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED, - 0, ste->hw_ste, + mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, + 0, hw_ste, ste_info_head, send_ste_list, true /* Copy data */); @@ -436,7 +263,8 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste, /* Free ste that is located in the middle of the miss list: * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_| */ -static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste, +static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste *ste, struct mlx5dr_ste_send_info *ste_info, struct list_head *send_ste_list, struct mlx5dr_ste_htbl *stats_tbl) @@ -448,10 +276,10 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste, if (WARN_ON(!prev_ste)) return; - miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste); - mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr); + miss_addr = ste_ctx->get_miss_addr(ste->hw_ste); + ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr); - mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0, + mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0, prev_ste->hw_ste, ste_info, send_ste_list, true /* Copy data*/); @@ -467,6 +295,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste, { struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info; struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_send_info ste_info_head; struct mlx5dr_ste *next_ste, *first_ste; bool put_on_origin_table = true; @@ -495,18 +324,22 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste, if (!next_ste) { /* One and only entry in the list */ - dr_ste_remove_head_ste(ste, nic_matcher, + dr_ste_remove_head_ste(ste_ctx, ste, + nic_matcher, &ste_info_head, &send_ste_list, stats_tbl); } else { /* First but not only entry in the list */ - dr_ste_replace_head_ste(ste, next_ste, &ste_info_head, + dr_ste_replace_head_ste(nic_matcher, ste, + next_ste, &ste_info_head, &send_ste_list, stats_tbl); put_on_origin_table = false; } } else { /* Ste in the middle of the list */ - dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl); + dr_ste_remove_middle_ste(ste_ctx, ste, + &ste_info_head, &send_ste_list, + stats_tbl); } /* Update HW */ @@ -530,34 +363,25 @@ bool mlx5dr_ste_equal_tag(void *src, void *dst) return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG); } -void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, +void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste, struct mlx5dr_ste_htbl *next_htbl) { struct mlx5dr_icm_chunk *chunk = next_htbl->chunk; - mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); + ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); } -void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) +void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste_p, u32 ste_size) { - u64 index = miss_addr >> 6; - - /* Miss address for TX and RX STEs located in the same offsets */ - MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26); - MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index); -} - -void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr) -{ - u8 *hw_ste = ste->hw_ste; - - MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE); - mlx5dr_ste_set_miss_addr(hw_ste, miss_addr); - dr_ste_set_always_miss((struct 
dr_hw_ste_format *)ste->hw_ste); + if (ste_ctx->prepare_for_postsend) + ste_ctx->prepare_for_postsend(hw_ste_p, ste_size); } /* Init one ste as a pattern for ste data array */ -void mlx5dr_ste_set_formatted_ste(u16 gvmi, +void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, + u16 gvmi, struct mlx5dr_domain_rx_tx *nic_dmn, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, @@ -565,13 +389,13 @@ void mlx5dr_ste_set_formatted_ste(u16 gvmi, { struct mlx5dr_ste ste = {}; - mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi); + ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi); ste.hw_ste = formatted_ste; if (connect_info->type == CONNECT_HIT) - dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl); + dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl); else - mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr); + dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr); } int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, @@ -582,7 +406,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, { u8 formatted_ste[DR_STE_SIZE] = {}; - mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi, + mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, + dmn->info.caps.gvmi, nic_dmn, htbl, formatted_ste, @@ -597,18 +422,18 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher, u8 *cur_hw_ste, enum mlx5dr_icm_chunk_size log_table_size) { - struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste; struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_htbl_connect_info info; struct mlx5dr_ste_htbl *next_htbl; if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) { - u8 next_lu_type; + u16 next_lu_type; u16 byte_mask; - next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type); - byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask); + next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste); + byte_mask = ste_ctx->get_byte_mask(cur_hw_ste); next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, log_table_size, @@ -628,7 +453,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher, goto free_table; } - mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl); + mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx, + cur_hw_ste, next_htbl); ste->next_htbl = next_htbl; next_htbl->pointing_ste = ste; } @@ -657,7 +483,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl) struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size, - u8 lu_type, u16 byte_mask) + u16 lu_type, u16 byte_mask) { struct mlx5dr_icm_chunk *chunk; struct mlx5dr_ste_htbl *htbl; @@ -709,6 +535,92 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) return 0; } +void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *hw_ste_arr, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) +{ + ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr, + attr, added_stes); +} + +void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *hw_ste_arr, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) +{ + ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr, + attr, added_stes); +} + +const struct mlx5dr_ste_action_modify_field * +mlx5dr_ste_conv_modify_hdr_sw_field(struct 
mlx5dr_ste_ctx *ste_ctx, u16 sw_field) +{ + const struct mlx5dr_ste_action_modify_field *hw_field; + + if (sw_field >= ste_ctx->modify_field_arr_sz) + return NULL; + + hw_field = &ste_ctx->modify_field_arr[sw_field]; + if (!hw_field->end && !hw_field->start) + return NULL; + + return hw_field; +} + +void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx, + __be64 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) +{ + ste_ctx->set_action_set((u8 *)hw_action, + hw_field, shifter, length, data); +} + +void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx, + __be64 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) +{ + ste_ctx->set_action_add((u8 *)hw_action, + hw_field, shifter, length, data); +} + +void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx, + __be64 *hw_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter) +{ + ste_ctx->set_action_copy((u8 *)hw_action, + dst_hw_field, dst_shifter, dst_len, + src_hw_field, src_shifter); +} + +int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, + void *data, u32 data_sz, + u8 *hw_action, u32 hw_action_sz, + u16 *used_hw_action_num) +{ + /* Only Ethernet frame is supported, with VLAN (18) or without (14) */ + if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN) + return -EINVAL; + + return ste_ctx->set_action_decap_l3_list(data, data_sz, + hw_action, hw_action_sz, + used_hw_action_num); +} + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, u8 match_criteria, struct mlx5dr_match_param *mask, @@ -738,6 +650,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_build *sb; int ret, i; @@ -748,14 +661,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, sb = nic_matcher->ste_builder; for (i = 0; i < nic_matcher->num_of_builders; i++) { - mlx5dr_ste_init(ste_arr, - sb->lu_type, - nic_dmn->ste_type, - dmn->info.caps.gvmi); + ste_ctx->ste_init(ste_arr, + sb->lu_type, + nic_dmn->ste_type, + dmn->info.caps.gvmi); mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask); - ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr)); + ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr)); if (ret) return ret; @@ -765,45 +678,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, * not relevant for the last ste in the chain. */ sb++; - MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type); - MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask); + ste_ctx->set_next_lu_type(ste_arr, sb->lu_type); + ste_ctx->set_byte_mask(ste_arr, sb->byte_mask); } ste_arr += DR_STE_SIZE; } return 0; } -static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16); - DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0); - - if (mask->smac_47_16 || mask->smac_15_0) { - MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32, - mask->smac_47_16 >> 16); - MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0, - mask->smac_47_16 << 16 | mask->smac_15_0); - mask->smac_47_16 = 0; - mask->smac_15_0 = 0; - } - - DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid); - DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi); - DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio); - DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version); - - if (mask->cvlan_tag) { - MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1); - mask->cvlan_tag = 0; - } else if (mask->svlan_tag) { - MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1); - mask->svlan_tag = 0; - } -} - static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec) { spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present); @@ -1045,566 +927,93 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } } -static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16); - DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0); - - if (spec->smac_47_16 || spec->smac_15_0) { - MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32, - spec->smac_47_16 >> 16); - MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0, - spec->smac_47_16 << 16 | spec->smac_15_0); - spec->smac_47_16 = 0; - spec->smac_15_0 = 0; - } - - if (spec->ip_version) { - if (spec->ip_version == IP_VERSION_IPV4) { - MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4); - spec->ip_version = 0; - } else if (spec->ip_version == IP_VERSION_IPV6) { - MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6); - spec->ip_version = 0; - } else { - pr_info("Unsupported ip_version value\n"); - return -EINVAL; - } - } - - DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid); - DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi); - DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio); - - if (spec->cvlan_tag) { - MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN); - spec->cvlan_tag = 0; - } else if (spec->svlan_tag) { - MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN); - spec->svlan_tag = 0; - } - return 0; -} - -void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag; -} - -static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96); - DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64); - DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32); - DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0); + ste_ctx->build_eth_l2_src_dst_init(sb, mask); } -static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96); - DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64); - DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32); - DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0); - - return 0; -} - -void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag; -} - -static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96); - DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64); - DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32); - DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0); -} - -static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96); - DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64); - DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32); - DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0); - - return 0; + ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask); } -void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag; -} - -static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value, - bool inner, - u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - destination_address, mask, dst_ip_31_0); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - source_address, mask, src_ip_31_0); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - destination_port, mask, tcp_dport); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - destination_port, mask, udp_dport); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - source_port, mask, tcp_sport); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - source_port, mask, udp_sport); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - protocol, mask, ip_protocol); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - fragmented, mask, frag); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - dscp, mask, ip_dscp); - DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask, - ecn, mask, ip_ecn); - - if (mask->tcp_flags) { - DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask); - mask->tcp_flags = 0; - } -} - -static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp); - DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn); - - if (spec->tcp_flags) { - DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec); - spec->tcp_flags = 0; - } - - return 0; + ste_ctx->build_eth_l3_ipv6_src_init(sb, mask); } -void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag; -} - -static void -dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; - struct mlx5dr_match_misc *misc_mask = &value->misc; - - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype); - DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version); - - if (mask->svlan_tag || mask->cvlan_tag) { - MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1); - mask->cvlan_tag = 0; - mask->svlan_tag = 0; - } - - if (inner) { - if (misc_mask->inner_second_cvlan_tag || - misc_mask->inner_second_svlan_tag) { - MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1); - misc_mask->inner_second_cvlan_tag = 0; - misc_mask->inner_second_svlan_tag = 0; - } - - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, - second_vlan_id, misc_mask, inner_second_vid); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, - second_cfi, misc_mask, inner_second_cfi); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, - second_priority, misc_mask, inner_second_prio); - } else { - if (misc_mask->outer_second_cvlan_tag || - misc_mask->outer_second_svlan_tag) { - MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1); - misc_mask->outer_second_cvlan_tag = 0; - misc_mask->outer_second_svlan_tag = 0; - } - - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, - second_vlan_id, misc_mask, outer_second_vid); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, - second_cfi, misc_mask, outer_second_cfi); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, - second_priority, misc_mask, outer_second_prio); - } + ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask); } -static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value, - bool inner, u8 *tag) -{ - struct mlx5dr_match_spec *spec = inner ? 
&value->inner : &value->outer; - struct mlx5dr_match_misc *misc_spec = &value->misc; - - DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid); - DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi); - DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio); - DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag); - DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype); - - if (spec->ip_version) { - if (spec->ip_version == IP_VERSION_IPV4) { - MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4); - spec->ip_version = 0; - } else if (spec->ip_version == IP_VERSION_IPV6) { - MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6); - spec->ip_version = 0; - } else { - pr_info("Unsupported ip_version value\n"); - return -EINVAL; - } - } - - if (spec->cvlan_tag) { - MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN); - spec->cvlan_tag = 0; - } else if (spec->svlan_tag) { - MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN); - spec->svlan_tag = 0; - } - - if (inner) { - if (misc_spec->inner_second_cvlan_tag) { - MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN); - misc_spec->inner_second_cvlan_tag = 0; - } else if (misc_spec->inner_second_svlan_tag) { - MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN); - misc_spec->inner_second_svlan_tag = 0; - } - - DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid); - DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi); - DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio); - } else { - if (misc_spec->outer_second_cvlan_tag) { - MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN); - misc_spec->outer_second_cvlan_tag = 0; - } else if (misc_spec->outer_second_svlan_tag) { - MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN); - misc_spec->outer_second_svlan_tag = 0; - } - DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid); - DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi); - DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio); - } - - return 0; -} - -static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16); - DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0); - - dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask); -} - -static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16); - DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0); - - return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); -} - -void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask); sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag; -} - -static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16); - DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0); - - dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask); -} - -static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16); - DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0); - - return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); + ste_ctx->build_eth_l2_src_init(sb, mask); } -void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag; -} - -static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; - struct mlx5dr_match_misc *misc = &value->misc; - - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16); - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0); - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid); - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi); - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio); - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag); - DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype); - DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version); - - if (misc->vxlan_vni) { - MLX5_SET(ste_eth_l2_tnl, bit_mask, - l2_tunneling_network_id, (misc->vxlan_vni << 8)); - misc->vxlan_vni = 0; - } - - if (mask->svlan_tag || mask->cvlan_tag) { - MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1); - mask->cvlan_tag = 0; - mask->svlan_tag = 0; - } + ste_ctx->build_eth_l2_dst_init(sb, mask); } -static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; - struct mlx5dr_match_misc *misc = &value->misc; - - DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16); - DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0); - DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid); - DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi); - DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag); - DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio); - DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype); - - if (misc->vxlan_vni) { - MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id, - (misc->vxlan_vni << 8)); - misc->vxlan_vni = 0; - } - - if (spec->cvlan_tag) { - MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN); - spec->cvlan_tag = 0; - } else if (spec->svlan_tag) { - MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN); - spec->svlan_tag = 0; - } - - if (spec->ip_version) { - if (spec->ip_version == IP_VERSION_IPV4) { - MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4); - spec->ip_version = 0; - } else if (spec->ip_version == IP_VERSION_IPV6) { - MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6); - spec->ip_version = 0; - } else { - return -EINVAL; - } - } - - return 0; -} - -void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag; -} - -static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit); + ste_ctx->build_eth_l2_tnl_init(sb, mask); } -static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit); - - return 0; -} - -void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag; -} - -static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; - - DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport); - DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport); - DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport); - DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport); - DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol); - DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag); - DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp); - DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn); - DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit); - - if (mask->tcp_flags) { - DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask); - mask->tcp_flags = 0; - } + ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask); } -static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; - - DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport); - DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport); - DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport); - DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport); - DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol); - DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag); - DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp); - DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn); - DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit); - - if (spec->tcp_flags) { - DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec); - spec->tcp_flags = 0; - } - - return 0; -} - -void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag; + ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask); } static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value, @@ -1622,653 +1031,110 @@ void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx) sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag; } -static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_misc2 *misc2_mask = &value->misc2; - - if (inner) - DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask); - else - DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask); -} - -static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc2 *misc2_mask = &value->misc2; - - if (sb->inner) - DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag); - else - DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag); - - return 0; -} - -void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_mpls_tag; -} - -static void dr_ste_build_gre_bit_mask(struct 
mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_misc *misc_mask = &value->misc; - - DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol); - DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present); - DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h); - DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l); - - DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present); - DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present); + ste_ctx->build_mpls_init(sb, mask); } -static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) +void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { - struct mlx5dr_match_misc *misc = &value->misc; - - DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol); - - DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present); - DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h); - DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l); - - DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present); - - DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present); - - return 0; -} - -void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, bool inner, bool rx) -{ - dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_GRE; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_gre_tag; -} - -static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2; - - if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) { - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label, - misc_2_mask, outer_first_mpls_over_gre_label); - - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp, - misc_2_mask, outer_first_mpls_over_gre_exp); - - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos, - misc_2_mask, outer_first_mpls_over_gre_s_bos); - - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl, - misc_2_mask, outer_first_mpls_over_gre_ttl); - } else { - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label, - misc_2_mask, outer_first_mpls_over_udp_label); - - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp, - misc_2_mask, outer_first_mpls_over_udp_exp); - - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos, - misc_2_mask, outer_first_mpls_over_udp_s_bos); - - DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl, - misc_2_mask, outer_first_mpls_over_udp_ttl); - } -} - -static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2; - - if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) { - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, - misc_2_mask, outer_first_mpls_over_gre_label); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, - misc_2_mask, outer_first_mpls_over_gre_exp); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, - misc_2_mask, outer_first_mpls_over_gre_s_bos); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl, - misc_2_mask, outer_first_mpls_over_gre_ttl); - } else { - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, - misc_2_mask, outer_first_mpls_over_udp_label); 
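	/* This else branch mirrors the GRE branch above with the
	 * MPLS-over-UDP fields: flex parser 3 carries whichever outer
	 * MPLS header is matched, and DR_STE_IS_OUTER_MPLS_OVER_GRE_SET()
	 * picks the source misc2 fields, defaulting to the UDP variant.
	 */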
- - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, - misc_2_mask, outer_first_mpls_over_udp_exp); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, - misc_2_mask, outer_first_mpls_over_udp_s_bos); - - DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl, - misc_2_mask, outer_first_mpls_over_udp_ttl); - } - return 0; + ste_ctx->build_tnl_gre_init(sb, mask); } -void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag; -} - -#define ICMP_TYPE_OFFSET_FIRST_DW 24 -#define ICMP_CODE_OFFSET_FIRST_DW 16 -#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0 - -static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - u8 *bit_mask) -{ - bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3); - struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3; - u32 icmp_header_data_mask; - u32 icmp_type_mask; - u32 icmp_code_mask; - int dw0_location; - int dw1_location; - - if (is_ipv4_mask) { - icmp_header_data_mask = misc_3_mask->icmpv4_header_data; - icmp_type_mask = misc_3_mask->icmpv4_type; - icmp_code_mask = misc_3_mask->icmpv4_code; - dw0_location = caps->flex_parser_id_icmp_dw0; - dw1_location = caps->flex_parser_id_icmp_dw1; - } else { - icmp_header_data_mask = misc_3_mask->icmpv6_header_data; - icmp_type_mask = misc_3_mask->icmpv6_type; - icmp_code_mask = misc_3_mask->icmpv6_code; - dw0_location = caps->flex_parser_id_icmpv6_dw0; - dw1_location = caps->flex_parser_id_icmpv6_dw1; - } - - switch (dw0_location) { - case 4: - if (icmp_type_mask) { - MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4, - (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW)); - if (is_ipv4_mask) - misc_3_mask->icmpv4_type = 0; - else - misc_3_mask->icmpv6_type = 0; - } - if (icmp_code_mask) { - u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask, - flex_parser_4); - MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4, - cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW)); - if (is_ipv4_mask) - misc_3_mask->icmpv4_code = 0; - else - misc_3_mask->icmpv6_code = 0; - } - break; - default: - return -EINVAL; - } - - switch (dw1_location) { - case 5: - if (icmp_header_data_mask) { - MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5, - (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW)); - if (is_ipv4_mask) - misc_3_mask->icmpv4_header_data = 0; - else - misc_3_mask->icmpv6_header_data = 0; - } - break; - default: - return -EINVAL; - } - - return 0; -} - -static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc3 *misc_3 = &value->misc3; - u32 icmp_header_data; - int dw0_location; - int dw1_location; - u32 icmp_type; - u32 icmp_code; - bool is_ipv4; - - is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3); - if (is_ipv4) { - icmp_header_data = misc_3->icmpv4_header_data; - icmp_type = misc_3->icmpv4_type; - icmp_code = misc_3->icmpv4_code; - dw0_location = sb->caps->flex_parser_id_icmp_dw0; - dw1_location = sb->caps->flex_parser_id_icmp_dw1; - } else { - icmp_header_data = misc_3->icmpv6_header_data; - icmp_type = misc_3->icmpv6_type; - icmp_code = misc_3->icmpv6_code; - 
dw0_location = sb->caps->flex_parser_id_icmpv6_dw0; - dw1_location = sb->caps->flex_parser_id_icmpv6_dw1; - } - - switch (dw0_location) { - case 4: - if (icmp_type) { - MLX5_SET(ste_flex_parser_1, tag, flex_parser_4, - (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW)); - if (is_ipv4) - misc_3->icmpv4_type = 0; - else - misc_3->icmpv6_type = 0; - } - - if (icmp_code) { - u32 cur_val = MLX5_GET(ste_flex_parser_1, tag, - flex_parser_4); - MLX5_SET(ste_flex_parser_1, tag, flex_parser_4, - cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW)); - if (is_ipv4) - misc_3->icmpv4_code = 0; - else - misc_3->icmpv6_code = 0; - } - break; - default: - return -EINVAL; - } - - switch (dw1_location) { - case 5: - if (icmp_header_data) { - MLX5_SET(ste_flex_parser_1, tag, flex_parser_5, - (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW)); - if (is_ipv4) - misc_3->icmpv4_header_data = 0; - else - misc_3->icmpv6_header_data = 0; - } - break; - default: - return -EINVAL; - } - - return 0; + ste_ctx->build_tnl_mpls_init(sb, mask); } -int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb, +int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, struct mlx5dr_cmd_caps *caps, bool inner, bool rx) { - int ret; - - ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask); - if (ret) - return ret; - sb->rx = rx; sb->inner = inner; sb->caps = caps; - sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag; - - return 0; -} - -static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2; - - DR_STE_SET_MASK_V(general_purpose, bit_mask, - general_purpose_lookup_field, misc_2_mask, - metadata_reg_a); -} - -static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2; - - DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field, - misc_2_mask, metadata_reg_a); - - return 0; + return ste_ctx->build_icmp_init(sb, mask); } -void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag; -} - -static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3; - - if (inner) { - DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask, - inner_tcp_seq_num); - DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask, - inner_tcp_ack_num); - } else { - DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask, - outer_tcp_seq_num); - DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask, - outer_tcp_ack_num); - } -} - -static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc3 *misc3 = &value->misc3; - - if (sb->inner) { - DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, 
inner_tcp_seq_num); - DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num); - } else { - DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num); - DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num); - } - - return 0; + ste_ctx->build_general_purpose_init(sb, mask); } -void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner); - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag; + ste_ctx->build_eth_l4_misc_init(sb, mask); } -static void -dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value, - bool inner, u8 *bit_mask) -{ - struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3; - - DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask, - outer_vxlan_gpe_flags, - misc_3_mask, outer_vxlan_gpe_flags); - DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask, - outer_vxlan_gpe_next_protocol, - misc_3_mask, outer_vxlan_gpe_next_protocol); - DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask, - outer_vxlan_gpe_vni, - misc_3_mask, outer_vxlan_gpe_vni); -} - -static int -dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc3 *misc3 = &value->misc3; - - DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, - outer_vxlan_gpe_flags, misc3, - outer_vxlan_gpe_flags); - DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, - outer_vxlan_gpe_next_protocol, misc3, - outer_vxlan_gpe_next_protocol); - DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, - outer_vxlan_gpe_vni, misc3, - outer_vxlan_gpe_vni); - - return 0; -} - -void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner, - sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag; + ste_ctx->build_tnl_vxlan_gpe_init(sb, mask); } -static void -dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value, - u8 *bit_mask) -{ - struct mlx5dr_match_misc *misc_mask = &value->misc; - - DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, - geneve_protocol_type, - misc_mask, geneve_protocol_type); - DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, - geneve_oam, - misc_mask, geneve_oam); - DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, - geneve_opt_len, - misc_mask, geneve_opt_len); - DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, - geneve_vni, - misc_mask, geneve_vni); -} - -static int -dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc *misc = &value->misc; - - DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, - geneve_protocol_type, misc, geneve_protocol_type); - DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, - geneve_oam, misc, geneve_oam); - DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, - geneve_opt_len, misc, 
geneve_opt_len); - DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, - geneve_vni, misc, geneve_vni); - - return 0; -} - -void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask); sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag; + ste_ctx->build_tnl_geneve_init(sb, mask); } -static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value, - u8 *bit_mask) -{ - struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2; - - DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h, - misc_2_mask, metadata_reg_c_0); - DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l, - misc_2_mask, metadata_reg_c_1); - DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h, - misc_2_mask, metadata_reg_c_2); - DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l, - misc_2_mask, metadata_reg_c_3); -} - -static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc2 *misc2 = &value->misc2; - - DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0); - DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1); - DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2); - DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3); - - return 0; -} - -void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_register_0_bit_mask(mask, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_register_0_tag; + ste_ctx->build_register_0_init(sb, mask); } -static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value, - u8 *bit_mask) -{ - struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2; - - DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h, - misc_2_mask, metadata_reg_c_4); - DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l, - misc_2_mask, metadata_reg_c_5); - DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h, - misc_2_mask, metadata_reg_c_6); - DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l, - misc_2_mask, metadata_reg_c_7); -} - -static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc2 *misc2 = &value->misc2; - - DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4); - DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5); - DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6); - DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7); - - return 0; -} - -void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { - dr_ste_build_register_1_bit_mask(mask, sb->bit_mask); - sb->rx = rx; sb->inner = inner; - sb->lu_type = 
MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_register_1_tag; + ste_ctx->build_register_1_init(sb, mask); } -static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value, - u8 *bit_mask) -{ - struct mlx5dr_match_misc *misc_mask = &value->misc; - - DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port); - DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn); - misc_mask->source_eswitch_owner_vhca_id = 0; -} - -static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, - struct mlx5dr_ste_build *sb, - u8 *tag) -{ - struct mlx5dr_match_misc *misc = &value->misc; - struct mlx5dr_cmd_vport_cap *vport_cap; - struct mlx5dr_domain *dmn = sb->dmn; - struct mlx5dr_cmd_caps *caps; - u8 *bit_mask = sb->bit_mask; - bool source_gvmi_set; - - DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); - - if (sb->vhca_id_valid) { - /* Find port GVMI based on the eswitch_owner_vhca_id */ - if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) - caps = &dmn->info.caps; - else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == - dmn->peer_dmn->info.caps.gvmi)) - caps = &dmn->peer_dmn->info.caps; - else - return -EINVAL; - } else { - caps = &dmn->info.caps; - } - - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); - if (!vport_cap) - return -EINVAL; - - source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); - if (vport_cap->vport_gvmi && source_gvmi_set) - MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi); - - misc->source_eswitch_owner_vhca_id = 0; - misc->source_port = 0; - - return 0; -} - -void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, struct mlx5dr_domain *dmn, bool inner, bool rx) @@ -2276,12 +1142,21 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb, /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */ sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id; - dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); - sb->rx = rx; sb->dmn = dmn; sb->inner = inner; - sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP; - sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); - sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag; + ste_ctx->build_src_gvmi_qpn_init(sb, mask); +} + +static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { + [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, + [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, +}; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version) +{ + if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX) + return NULL; + + return mlx5dr_ste_ctx_arr[version]; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h new file mode 100644 index 000000000000..06bcb0ee8f96 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef _DR_STE_ +#define _DR_STE_ + +#include "dr_types.h" + +#define STE_IPV4 0x1 +#define STE_IPV6 0x2 +#define STE_TCP 0x1 +#define STE_UDP 0x2 +#define STE_SPI 0x3 +#define IP_VERSION_IPV4 0x4 +#define IP_VERSION_IPV6 0x6 +#define STE_SVLAN 0x1 +#define STE_CVLAN 0x2 +#define HDR_LEN_L2_MACS 0xC +#define HDR_LEN_L2_VLAN 0x4 +#define HDR_LEN_L2_ETHER 0x2 +#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER) +#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN) + +/* Set to STE a specific value using DR_STE_SET */ +#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \ + if ((spec)->s_fname) { \ + MLX5_SET(ste_##lookup_type, tag, t_fname, value); \ + (spec)->s_fname = 0; \ + } \ +} while (0) + +/* Set to STE spec->s_fname to tag->t_fname set spec->s_fname as used */ +#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \ + DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname) + +/* Set to STE -1 to tag->t_fname and set spec->s_fname as used */ +#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \ + DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1) + +#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \ + MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \ +} while (0) + +#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \ + struct mlx5dr_match_misc2 *_mask = mask; \ + u8 *_tag = tag; \ + DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \ + in_out##_first_mpls_label);\ + DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \ + in_out##_first_mpls_s_bos); \ + DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \ + in_out##_first_mpls_exp); \ + DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \ + in_out##_first_mpls_ttl); \ +} while (0) + +#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\ + (_misc)->outer_first_mpls_over_gre_label || \ + (_misc)->outer_first_mpls_over_gre_exp || \ + (_misc)->outer_first_mpls_over_gre_s_bos || \ + (_misc)->outer_first_mpls_over_gre_ttl) + +#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\ + (_misc)->outer_first_mpls_over_udp_label || \ + (_misc)->outer_first_mpls_over_udp_exp || \ + (_misc)->outer_first_mpls_over_udp_s_bos || \ + (_misc)->outer_first_mpls_over_udp_ttl) + +enum dr_ste_action_modify_type_l3 { + DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0, + DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1, + DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2, +}; + +enum dr_ste_action_modify_type_l4 { + DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0, + DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1, + DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2, +}; + +u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask); + +#define DR_STE_CTX_BUILDER(fname) \ + ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \ + struct mlx5dr_match_param *mask)) + +struct mlx5dr_ste_ctx { + /* Builders */ + void DR_STE_CTX_BUILDER(eth_l2_src_dst); + void 
DR_STE_CTX_BUILDER(eth_l3_ipv6_src); + void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst); + void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple); + void DR_STE_CTX_BUILDER(eth_l2_src); + void DR_STE_CTX_BUILDER(eth_l2_dst); + void DR_STE_CTX_BUILDER(eth_l2_tnl); + void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc); + void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4); + void DR_STE_CTX_BUILDER(mpls); + void DR_STE_CTX_BUILDER(tnl_gre); + void DR_STE_CTX_BUILDER(tnl_mpls); + int DR_STE_CTX_BUILDER(icmp); + void DR_STE_CTX_BUILDER(general_purpose); + void DR_STE_CTX_BUILDER(eth_l4_misc); + void DR_STE_CTX_BUILDER(tnl_vxlan_gpe); + void DR_STE_CTX_BUILDER(tnl_geneve); + void DR_STE_CTX_BUILDER(register_0); + void DR_STE_CTX_BUILDER(register_1); + void DR_STE_CTX_BUILDER(src_gvmi_qpn); + + /* Getters and Setters */ + void (*ste_init)(u8 *hw_ste_p, u16 lu_type, + u8 entry_type, u16 gvmi); + void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type); + u16 (*get_next_lu_type)(u8 *hw_ste_p); + void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr); + u64 (*get_miss_addr)(u8 *hw_ste_p); + void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size); + void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask); + u16 (*get_byte_mask)(u8 *hw_ste_p); + + /* Actions */ + void (*set_actions_rx)(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *hw_ste_arr, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes); + void (*set_actions_tx)(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *hw_ste_arr, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes); + u32 modify_field_arr_sz; + const struct mlx5dr_ste_action_modify_field *modify_field_arr; + void (*set_action_set)(u8 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data); + void (*set_action_add)(u8 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data); + void (*set_action_copy)(u8 *hw_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter); + int (*set_action_decap_l3_list)(void *data, + u32 data_sz, + u8 *hw_action, + u32 hw_action_sz, + u16 *used_hw_action_num); + + /* Send */ + void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); +}; + +extern struct mlx5dr_ste_ctx ste_ctx_v0; +extern struct mlx5dr_ste_ctx ste_ctx_v1; + +#endif /* _DR_STE_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c new file mode 100644 index 000000000000..9ec079247c4b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -0,0 +1,1640 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */ + +#include <linux/types.h> +#include <linux/crc32.h> +#include "dr_ste.h" + +#define SVLAN_ETHERTYPE 0x88a8 +#define DR_STE_ENABLE_FLOW_TAG BIT(31) + +enum dr_ste_v0_action_tunl { + DR_STE_TUNL_ACTION_NONE = 0, + DR_STE_TUNL_ACTION_ENABLE = 1, + DR_STE_TUNL_ACTION_DECAP = 2, + DR_STE_TUNL_ACTION_L3_DECAP = 3, + DR_STE_TUNL_ACTION_POP_VLAN = 4, +}; + +enum dr_ste_v0_action_type { + DR_STE_ACTION_TYPE_PUSH_VLAN = 1, + DR_STE_ACTION_TYPE_ENCAP_L3 = 3, + DR_STE_ACTION_TYPE_ENCAP = 4, +}; + +enum dr_ste_v0_action_mdfy_op { + DR_STE_ACTION_MDFY_OP_COPY = 0x1, + DR_STE_ACTION_MDFY_OP_SET = 0x2, + DR_STE_ACTION_MDFY_OP_ADD = 0x3, +}; + +#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \ + ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \ + (rx) ? 
DR_STE_V0_LU_TYPE_##lookup_type##_D : \ + DR_STE_V0_LU_TYPE_##lookup_type##_O) + +enum { + DR_STE_V0_LU_TYPE_NOP = 0x00, + DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP = 0x05, + DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I = 0x0a, + DR_STE_V0_LU_TYPE_ETHL2_DST_O = 0x06, + DR_STE_V0_LU_TYPE_ETHL2_DST_I = 0x07, + DR_STE_V0_LU_TYPE_ETHL2_DST_D = 0x1b, + DR_STE_V0_LU_TYPE_ETHL2_SRC_O = 0x08, + DR_STE_V0_LU_TYPE_ETHL2_SRC_I = 0x09, + DR_STE_V0_LU_TYPE_ETHL2_SRC_D = 0x1c, + DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O = 0x36, + DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I = 0x37, + DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D = 0x38, + DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d, + DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e, + DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e, + DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f, + DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10, + DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f, + DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11, + DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12, + DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20, + DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29, + DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a, + DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b, + DR_STE_V0_LU_TYPE_ETHL4_O = 0x13, + DR_STE_V0_LU_TYPE_ETHL4_I = 0x14, + DR_STE_V0_LU_TYPE_ETHL4_D = 0x21, + DR_STE_V0_LU_TYPE_ETHL4_MISC_O = 0x2c, + DR_STE_V0_LU_TYPE_ETHL4_MISC_I = 0x2d, + DR_STE_V0_LU_TYPE_ETHL4_MISC_D = 0x2e, + DR_STE_V0_LU_TYPE_MPLS_FIRST_O = 0x15, + DR_STE_V0_LU_TYPE_MPLS_FIRST_I = 0x24, + DR_STE_V0_LU_TYPE_MPLS_FIRST_D = 0x25, + DR_STE_V0_LU_TYPE_GRE = 0x16, + DR_STE_V0_LU_TYPE_FLEX_PARSER_0 = 0x22, + DR_STE_V0_LU_TYPE_FLEX_PARSER_1 = 0x23, + DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19, + DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18, + DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f, + DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30, + DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, +}; + +enum { + DR_STE_V0_ACTION_MDFY_FLD_L2_0 = 0, + DR_STE_V0_ACTION_MDFY_FLD_L2_1 = 1, + DR_STE_V0_ACTION_MDFY_FLD_L2_2 = 2, + DR_STE_V0_ACTION_MDFY_FLD_L3_0 = 3, + DR_STE_V0_ACTION_MDFY_FLD_L3_1 = 4, + DR_STE_V0_ACTION_MDFY_FLD_L3_2 = 5, + DR_STE_V0_ACTION_MDFY_FLD_L3_3 = 6, + DR_STE_V0_ACTION_MDFY_FLD_L3_4 = 7, + DR_STE_V0_ACTION_MDFY_FLD_L4_0 = 8, + DR_STE_V0_ACTION_MDFY_FLD_L4_1 = 9, + DR_STE_V0_ACTION_MDFY_FLD_MPLS = 10, + DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0 = 11, + DR_STE_V0_ACTION_MDFY_FLD_REG_0 = 12, + DR_STE_V0_ACTION_MDFY_FLD_REG_1 = 13, + DR_STE_V0_ACTION_MDFY_FLD_REG_2 = 14, + DR_STE_V0_ACTION_MDFY_FLD_REG_3 = 15, + DR_STE_V0_ACTION_MDFY_FLD_L4_2 = 16, + DR_STE_V0_ACTION_MDFY_FLD_FLEX_0 = 17, + DR_STE_V0_ACTION_MDFY_FLD_FLEX_1 = 18, + DR_STE_V0_ACTION_MDFY_FLD_FLEX_2 = 19, + DR_STE_V0_ACTION_MDFY_FLD_FLEX_3 = 20, + DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1 = 21, + DR_STE_V0_ACTION_MDFY_FLD_METADATA = 22, + DR_STE_V0_ACTION_MDFY_FLD_RESERVED = 23, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15, + }, + 
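/* Example (hypothetical values, for illustration): a modify-header + * SET on MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 resolves through this + * table to hw_field DR_STE_V0_ACTION_MDFY_FLD_L2_1, bits 16..47, so + * the action would be emitted roughly as + * ste_ctx->set_action_set(hw_action, DR_STE_V0_ACTION_MDFY_FLD_L2_1, + * 16, 32, smac_hi); + * with shifter 16 (the start bit), length 32, and smac_hi a + * hypothetical 32-bit value. + */ + 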
[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + 
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15, + }, +}; + +static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type) +{ + MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type); +} + +static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p) +{ + return MLX5_GET(ste_general, hw_ste_p, entry_type); +} + +static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) +{ + u64 index = miss_addr >> 6; + + /* The miss address for TX and RX STEs is located at the same offsets */ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26); + MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index); +} + +static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p) +{ + u64 index = + ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) | + ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26); + + return index << 6; +} + +static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) +{ + MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask); +} + +static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p) +{ + return MLX5_GET(ste_general, hw_ste_p, byte_mask); +} + +static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type) +{ + MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type); +} + +static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) +{ + MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type); +} + +static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p) +{ + return MLX5_GET(ste_general, hw_ste_p, next_lu_type); +} + +static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi) +{ + MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi); +} + +static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) +{ + u64 index = (icm_addr >> 5) | ht_size; + + MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27); + MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index); +} + +static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, + u8 entry_type, u16 gvmi) +{ + dr_ste_v0_set_entry_type(hw_ste_p, entry_type); + dr_ste_v0_set_lu_type(hw_ste_p, lu_type); + dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); + + /* Set GVMI once, this is the same for RX/TX + * bits 63_48 of next table base / miss address encode the next GVMI + */ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi); + MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi); + MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi); +} + +static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag) +{ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer, + DR_STE_ENABLE_FLOW_TAG | flow_tag); +} + +static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id) +{ + /* This can be used for both rx_steering_mult and sx_transmit */ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id); + MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 
16); +} + +static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p) +{ + MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1); +} + +static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr, + bool go_back) +{ + MLX5_SET(ste_sx_transmit, hw_ste_p, action_type, + DR_STE_ACTION_TYPE_PUSH_VLAN); + MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr); + /* Due to a HW limitation we need to set this bit, otherwise reformat + + * push vlan will not work. + */ + if (go_back) + dr_ste_v0_set_go_back_bit(hw_ste_p); +} + +static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id, + int size, bool encap_l3) +{ + MLX5_SET(ste_sx_transmit, hw_ste_p, action_type, + encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP); + /* The hardware expects the size here in 2-byte words */ + MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2); + MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id); +} + +static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p) +{ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, + DR_STE_TUNL_ACTION_DECAP); +} + +static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p) +{ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, + DR_STE_TUNL_ACTION_POP_VLAN); +} + +static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan) +{ + MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action, + DR_STE_TUNL_ACTION_L3_DECAP); + MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0); +} + +static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, + u32 re_write_index) +{ + MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions, + num_of_actions); + MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer, + re_write_index); +} + +static void dr_ste_v0_arr_init_next(u8 **last_ste, + u32 *added_stes, + enum mlx5dr_ste_entry_type entry_type, + u16 gvmi) +{ + (*added_stes)++; + *last_ste += DR_STE_SIZE; + dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, + entry_type, gvmi); +} + +static void +dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) +{ + bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] || + action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]; + + /* We want to make sure the modify header comes before L2 + * encapsulation. The reason is that we support + * modify headers for outer headers only. + */ + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { + dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_rewrite_actions(last_ste, + attr->modify_actions, + attr->modify_index); + } + + if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR]) + dr_ste_v0_arr_init_next(&last_ste, + added_stes, + MLX5DR_STE_TYPE_TX, + attr->gvmi); + + dr_ste_v0_set_tx_push_vlan(last_ste, + attr->vlans.headers[i], + encap); + } + } + + if (encap) { + /* Modify header and encapsulation require different STEs, + * since the modify header STE format doesn't support the + * encapsulation tunneling_action. 
+ */ + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] || + action_type_set[DR_ACTION_TYP_PUSH_VLAN]) + dr_ste_v0_arr_init_next(&last_ste, + added_stes, + MLX5DR_STE_TYPE_TX, + attr->gvmi); + + dr_ste_v0_set_tx_encap(last_ste, + attr->reformat_id, + attr->reformat_size, + action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]); + /* Whenever prio_tag_required is enabled, we can be sure that the + * previous table (ACL) has already pushed a vlan to our packet, + * and due to a HW limitation we need to set this bit, otherwise + * push vlan + reformat will not work. + */ + if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required)) + dr_ste_v0_set_go_back_bit(last_ste); + } + + if (action_type_set[DR_ACTION_TYP_CTR]) + dr_ste_v0_set_counter_id(last_ste, attr->ctr_id); + + dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi); + dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1); +} + +static void +dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) +{ + if (action_type_set[DR_ACTION_TYP_CTR]) + dr_ste_v0_set_counter_id(last_ste, attr->ctr_id); + + if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { + dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan); + dr_ste_v0_set_rewrite_actions(last_ste, + attr->decap_actions, + attr->decap_index); + } + + if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) + dr_ste_v0_set_rx_decap(last_ste); + + if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (i || + action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] || + action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) + dr_ste_v0_arr_init_next(&last_ste, + added_stes, + MLX5DR_STE_TYPE_RX, + attr->gvmi); + + dr_ste_v0_set_rx_pop_vlan(last_ste); + } + } + + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { + if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + dr_ste_v0_arr_init_next(&last_ste, + added_stes, + MLX5DR_STE_TYPE_MODIFY_PKT, + attr->gvmi); + else + dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + + dr_ste_v0_set_rewrite_actions(last_ste, + attr->modify_actions, + attr->modify_index); + } + + if (action_type_set[DR_ACTION_TYP_TAG]) { + if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + dr_ste_v0_arr_init_next(&last_ste, + added_stes, + MLX5DR_STE_TYPE_RX, + attr->gvmi); + + dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag); + } + + dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi); + dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1); +} + +static void dr_ste_v0_set_action_set(u8 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) +{ + length = (length == 32) ? 0 : length; + MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET); + MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field); + MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter); + MLX5_SET(dr_action_hw_set, hw_action, destination_length, length); + MLX5_SET(dr_action_hw_set, hw_action, inline_data, data); +} + +static void dr_ste_v0_set_action_add(u8 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) +{ + length = (length == 32) ? 
0 : length; + MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD); + MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field); + MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter); + MLX5_SET(dr_action_hw_set, hw_action, destination_length, length); + MLX5_SET(dr_action_hw_set, hw_action, inline_data, data); +} + +static void dr_ste_v0_set_action_copy(u8 *hw_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter) +{ + MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY); + MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field); + MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter); + MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len); + MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field); + MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter); +} + +#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5 + +static int +dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz, + u8 *hw_action, u32 hw_action_sz, + u16 *used_hw_action_num) +{ + struct mlx5_ifc_l2_hdr_bits *l2_hdr = data; + u32 hw_action_num; + int required_actions; + u32 hdr_fld_4b; + u16 hdr_fld_2b; + u16 vlan_type; + bool vlan; + + vlan = (data_sz != HDR_LEN_L2); + hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set); + required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan; + + if (hw_action_num < required_actions) + return -ENOMEM; + + /* dmac_47_16 */ + MLX5_SET(dr_action_hw_set, hw_action, + opcode, DR_STE_ACTION_MDFY_OP_SET); + MLX5_SET(dr_action_hw_set, hw_action, + destination_length, 0); + MLX5_SET(dr_action_hw_set, hw_action, + destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0); + MLX5_SET(dr_action_hw_set, hw_action, + destination_left_shifter, 16); + hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16); + MLX5_SET(dr_action_hw_set, hw_action, + inline_data, hdr_fld_4b); + hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set); + + /* smac_47_16 */ + MLX5_SET(dr_action_hw_set, hw_action, + opcode, DR_STE_ACTION_MDFY_OP_SET); + MLX5_SET(dr_action_hw_set, hw_action, + destination_length, 0); + MLX5_SET(dr_action_hw_set, hw_action, + destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1); + MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16); + hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 | + MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16); + MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b); + hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set); + + /* dmac_15_0 */ + MLX5_SET(dr_action_hw_set, hw_action, + opcode, DR_STE_ACTION_MDFY_OP_SET); + MLX5_SET(dr_action_hw_set, hw_action, + destination_length, 16); + MLX5_SET(dr_action_hw_set, hw_action, + destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0); + MLX5_SET(dr_action_hw_set, hw_action, + destination_left_shifter, 0); + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0); + MLX5_SET(dr_action_hw_set, hw_action, + inline_data, hdr_fld_2b); + hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set); + + /* ethertype + (optional) vlan */ + MLX5_SET(dr_action_hw_set, hw_action, + opcode, DR_STE_ACTION_MDFY_OP_SET); + MLX5_SET(dr_action_hw_set, hw_action, + destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2); + MLX5_SET(dr_action_hw_set, hw_action, + destination_left_shifter, 32); + if (!vlan) { + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype); + MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b); + 
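/* Untagged frame: only the 16-bit ethertype is rewritten; the + * tagged branch below writes 18 bits (a 2-bit vlan qualifier plus + * the 16-bit vlan). + */ + 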
MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16); + } else { + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype); + vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN; + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan); + hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b; + MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b); + MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18); + } + hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set); + + /* smac_15_0 */ + MLX5_SET(dr_action_hw_set, hw_action, + opcode, DR_STE_ACTION_MDFY_OP_SET); + MLX5_SET(dr_action_hw_set, hw_action, + destination_length, 16); + MLX5_SET(dr_action_hw_set, hw_action, + destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1); + MLX5_SET(dr_action_hw_set, hw_action, + destination_left_shifter, 0); + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0); + MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b); + hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set); + + if (vlan) { + MLX5_SET(dr_action_hw_set, hw_action, + opcode, DR_STE_ACTION_MDFY_OP_SET); + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type); + MLX5_SET(dr_action_hw_set, hw_action, + inline_data, hdr_fld_2b); + MLX5_SET(dr_action_hw_set, hw_action, + destination_length, 16); + MLX5_SET(dr_action_hw_set, hw_action, + destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2); + MLX5_SET(dr_action_hw_set, hw_action, + destination_left_shifter, 0); + } + + *used_hw_action_num = required_actions; + + return 0; +} + +static void +dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16); + DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0); + + if (mask->smac_47_16 || mask->smac_15_0) { + MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32, + mask->smac_47_16 >> 16); + MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0, + mask->smac_47_16 << 16 | mask->smac_15_0); + mask->smac_47_16 = 0; + mask->smac_15_0 = 0; + } + + DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid); + DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi); + DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio); + DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version); + + if (mask->cvlan_tag) { + MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1); + mask->cvlan_tag = 0; + } else if (mask->svlan_tag) { + MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1); + mask->svlan_tag = 0; + } +} + +static int +dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16); + DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0); + + if (spec->smac_47_16 || spec->smac_15_0) { + MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32, + spec->smac_47_16 >> 16); + MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0, + spec->smac_47_16 << 16 | spec->smac_15_0); + spec->smac_47_16 = 0; + spec->smac_15_0 = 0; + } + + if (spec->ip_version) { + if (spec->ip_version == IP_VERSION_IPV4) { + MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4); + spec->ip_version = 0; + } else if (spec->ip_version == IP_VERSION_IPV6) { + MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6); + spec->ip_version = 0; + } else { + return -EINVAL; + } + } + + DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid); + DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi); + DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio); + + if (spec->cvlan_tag) { + MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN); + spec->cvlan_tag = 0; + } else if (spec->svlan_tag) { + MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN); + spec->svlan_tag = 0; + } + return 0; +} + +static void +dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag; +} + +static int +dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96); + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64); + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32); + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0); + + return 0; +} + +static void +dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag; +} + +static int +dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96); + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64); + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32); + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0); + + return 0; +} + +static void +dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag; +} + +static int +dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn); + + if (spec->tcp_flags) { + DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec); + spec->tcp_flags = 0; + } + + return 0; +} + +static void +dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag; +} + +static void +dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc_mask = &value->misc; + + DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid); + DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi); + DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio); + DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag); + DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype); + DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version); + + if (mask->svlan_tag || mask->cvlan_tag) { + MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1); + mask->cvlan_tag = 0; + mask->svlan_tag = 0; + } + + if (inner) { + if (misc_mask->inner_second_cvlan_tag || + misc_mask->inner_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1); + misc_mask->inner_second_cvlan_tag = 0; + misc_mask->inner_second_svlan_tag = 0; + } + + DR_STE_SET_TAG(eth_l2_src, bit_mask, + second_vlan_id, misc_mask, inner_second_vid); + DR_STE_SET_TAG(eth_l2_src, bit_mask, + second_cfi, misc_mask, inner_second_cfi); + DR_STE_SET_TAG(eth_l2_src, bit_mask, + second_priority, misc_mask, inner_second_prio); + } else { + if (misc_mask->outer_second_cvlan_tag || + misc_mask->outer_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1); + misc_mask->outer_second_cvlan_tag = 0; + misc_mask->outer_second_svlan_tag = 0; + } + + DR_STE_SET_TAG(eth_l2_src, bit_mask, + second_vlan_id, misc_mask, outer_second_vid); + DR_STE_SET_TAG(eth_l2_src, bit_mask, + second_cfi, misc_mask, outer_second_cfi); + DR_STE_SET_TAG(eth_l2_src, bit_mask, + second_priority, misc_mask, outer_second_prio); + } +} + +static int +dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value, + bool inner, u8 *tag) +{ + struct mlx5dr_match_spec *spec = inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc_spec = &value->misc; + + DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid); + DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi); + DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio); + DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag); + DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype); + + if (spec->ip_version) { + if (spec->ip_version == IP_VERSION_IPV4) { + MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4); + spec->ip_version = 0; + } else if (spec->ip_version == IP_VERSION_IPV6) { + MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6); + spec->ip_version = 0; + } else { + return -EINVAL; + } + } + + if (spec->cvlan_tag) { + MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN); + spec->cvlan_tag = 0; + } else if (spec->svlan_tag) { + MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN); + spec->svlan_tag = 0; + } + + if (inner) { + if (misc_spec->inner_second_cvlan_tag) { + MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN); + misc_spec->inner_second_cvlan_tag = 0; + } else if (misc_spec->inner_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN); + misc_spec->inner_second_svlan_tag = 0; + } + + DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid); + DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi); + DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio); + } else { + if (misc_spec->outer_second_cvlan_tag) { + MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN); + misc_spec->outer_second_cvlan_tag = 0; + } else if (misc_spec->outer_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN); + misc_spec->outer_second_svlan_tag = 0; + } + DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid); + DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi); + DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio); + } + + return 0; +} + +static void +dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16); + DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0); + + dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask); +} + +static int +dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16); + DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0); + + return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); +} + +static void +dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask); + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag; +} + +static void +dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = sb->inner ? 
&value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16); + DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0); + + dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask); +} + +static int +dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16); + DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0); + + return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); +} + +static void +dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag; +} + +static void +dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16); + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0); + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid); + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi); + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio); + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag); + DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype); + DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version); + + if (misc->vxlan_vni) { + MLX5_SET(ste_eth_l2_tnl, bit_mask, + l2_tunneling_network_id, (misc->vxlan_vni << 8)); + misc->vxlan_vni = 0; + } + + if (mask->svlan_tag || mask->cvlan_tag) { + MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1); + mask->cvlan_tag = 0; + mask->svlan_tag = 0; + } +} + +static int +dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16); + DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0); + DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid); + DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi); + DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag); + DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio); + DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype); + + if (misc->vxlan_vni) { + MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id, + (misc->vxlan_vni << 8)); + misc->vxlan_vni = 0; + } + + if (spec->cvlan_tag) { + MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN); + spec->cvlan_tag = 0; + } else if (spec->svlan_tag) { + MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN); + spec->svlan_tag = 0; + } + + if (spec->ip_version) { + if (spec->ip_version == IP_VERSION_IPV4) { + MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4); + spec->ip_version = 0; + } else if (spec->ip_version == IP_VERSION_IPV6) { + MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6); + spec->ip_version = 0; + } else { + return -EINVAL; + } + } + + return 0; +} + +static void +dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag; +} + +static int +dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit); + + return 0; +} + +static void +dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag; +} + +static int +dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport); + DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport); + DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport); + DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport); + DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol); + DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag); + DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp); + DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn); + DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit); + + if (spec->tcp_flags) { + DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec); + spec->tcp_flags = 0; + } + + return 0; +} + +static void +dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag; +} + +static int +dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + if (sb->inner) + DR_STE_SET_MPLS(mpls, misc2, inner, tag); + else + DR_STE_SET_MPLS(mpls, misc2, outer, tag); + + return 0; +} + +static void +dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag; +} + +static int +dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol); + + DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present); + DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h); + DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l); + + DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present); + + DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present); + + return 0; +} + +static void +dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_GRE; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag; +} + +static int +dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc_2 = &value->misc2; + + if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) { + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, + misc_2, outer_first_mpls_over_gre_label); + + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, + misc_2, outer_first_mpls_over_gre_exp); + + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, + misc_2, outer_first_mpls_over_gre_s_bos); + + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl, + misc_2, outer_first_mpls_over_gre_ttl); + } else { + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, + misc_2, outer_first_mpls_over_udp_label); + + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, + misc_2, outer_first_mpls_over_udp_exp); + + DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, + misc_2, outer_first_mpls_over_udp_s_bos); + + DR_STE_SET_TAG(flex_parser_0, tag, 
parser_3_ttl, + misc_2, outer_first_mpls_over_udp_ttl); + } + return 0; +} + +static void +dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag; +} + +#define ICMP_TYPE_OFFSET_FIRST_DW 24 +#define ICMP_CODE_OFFSET_FIRST_DW 16 + +static int +dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc_3 = &value->misc3; + u32 *icmp_header_data; + int dw0_location; + int dw1_location; + u8 *icmp_type; + u8 *icmp_code; + bool is_ipv4; + + is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3); + if (is_ipv4) { + icmp_header_data = &misc_3->icmpv4_header_data; + icmp_type = &misc_3->icmpv4_type; + icmp_code = &misc_3->icmpv4_code; + dw0_location = sb->caps->flex_parser_id_icmp_dw0; + dw1_location = sb->caps->flex_parser_id_icmp_dw1; + } else { + icmp_header_data = &misc_3->icmpv6_header_data; + icmp_type = &misc_3->icmpv6_type; + icmp_code = &misc_3->icmpv6_code; + dw0_location = sb->caps->flex_parser_id_icmpv6_dw0; + dw1_location = sb->caps->flex_parser_id_icmpv6_dw1; + } + + switch (dw0_location) { + case 4: + MLX5_SET(ste_flex_parser_1, tag, flex_parser_4, + (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) | + (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW)); + + *icmp_type = 0; + *icmp_code = 0; + break; + default: + return -EINVAL; + } + + switch (dw1_location) { + case 5: + MLX5_SET(ste_flex_parser_1, tag, flex_parser_5, + *icmp_header_data); + *icmp_header_data = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + int ret; + + ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask); + if (ret) + return ret; + + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag; + + return 0; +} + +static int +dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc_2 = &value->misc2; + + DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field, + misc_2, metadata_reg_a); + + return 0; +} + +static void +dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag; +} + +static int +dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + + if (sb->inner) { + DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num); + DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num); + } else { + DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num); + DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num); + } + + return 0; +} + +static void +dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask); + + sb->lu_type = 
DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag; +} + +static int +dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + + DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, + outer_vxlan_gpe_flags, misc3, + outer_vxlan_gpe_flags); + DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, + outer_vxlan_gpe_next_protocol, misc3, + outer_vxlan_gpe_next_protocol); + DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, + outer_vxlan_gpe_vni, misc3, + outer_vxlan_gpe_vni); + + return 0; +} + +static void +dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask); + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag; +} + +static int +dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_protocol_type, misc, geneve_protocol_type); + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_oam, misc, geneve_oam); + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_opt_len, misc, geneve_opt_len); + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_vni, misc, geneve_vni); + + return 0; +} + +static void +dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask); + sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag; +} + +static int +dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0); + DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1); + DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2); + DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3); + + return 0; +} + +static void +dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag; +} + +static int +dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4); + DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5); + DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6); + DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7); + + return 0; +} + +static void +dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + 
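/* Illustrative note: metadata registers C4..C7 are matched through + * the STEERING_REGISTERS_1 lookup, which the tag builder above splits + * into register_2_h/l and register_3_h/l. + */ + 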
dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag; +} + +static void +dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value, + u8 *bit_mask) +{ + struct mlx5dr_match_misc *misc_mask = &value->misc; + + DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port); + DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn); + misc_mask->source_eswitch_owner_vhca_id = 0; +} + +static int +dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc *misc = &value->misc; + struct mlx5dr_cmd_vport_cap *vport_cap; + struct mlx5dr_domain *dmn = sb->dmn; + struct mlx5dr_cmd_caps *caps; + u8 *bit_mask = sb->bit_mask; + bool source_gvmi_set; + + DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); + + if (sb->vhca_id_valid) { + /* Find port GVMI based on the eswitch_owner_vhca_id */ + if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) + caps = &dmn->info.caps; + else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == + dmn->peer_dmn->info.caps.gvmi)) + caps = &dmn->peer_dmn->info.caps; + else + return -EINVAL; + + misc->source_eswitch_owner_vhca_id = 0; + } else { + caps = &dmn->info.caps; + } + + source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); + if (source_gvmi_set) { + vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); + if (!vport_cap) { + mlx5dr_err(dmn, "Vport 0x%x is invalid\n", + misc->source_port); + return -EINVAL; + } + + if (vport_cap->vport_gvmi) + MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi); + + misc->source_port = 0; + } + + return 0; +} + +static void +dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); + + sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag; +} + +struct mlx5dr_ste_ctx ste_ctx_v0 = { + /* Builders */ + .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, + .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init, + .build_eth_l3_ipv6_dst_init = &dr_ste_v0_build_eth_l3_ipv6_dst_init, + .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init, + .build_eth_l2_src_init = &dr_ste_v0_build_eth_l2_src_init, + .build_eth_l2_dst_init = &dr_ste_v0_build_eth_l2_dst_init, + .build_eth_l2_tnl_init = &dr_ste_v0_build_eth_l2_tnl_init, + .build_eth_l3_ipv4_misc_init = &dr_ste_v0_build_eth_l3_ipv4_misc_init, + .build_eth_ipv6_l3_l4_init = &dr_ste_v0_build_eth_ipv6_l3_l4_init, + .build_mpls_init = &dr_ste_v0_build_mpls_init, + .build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init, + .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init, + .build_icmp_init = &dr_ste_v0_build_icmp_init, + .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init, + .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init, + .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init, + .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init, + .build_register_0_init = &dr_ste_v0_build_register_0_init, + .build_register_1_init = &dr_ste_v0_build_register_1_init, + .build_src_gvmi_qpn_init = 
&dr_ste_v0_build_src_gvmi_qpn_init, + + /* Getters and Setters */ + .ste_init = &dr_ste_v0_init, + .set_next_lu_type = &dr_ste_v0_set_next_lu_type, + .get_next_lu_type = &dr_ste_v0_get_next_lu_type, + .set_miss_addr = &dr_ste_v0_set_miss_addr, + .get_miss_addr = &dr_ste_v0_get_miss_addr, + .set_hit_addr = &dr_ste_v0_set_hit_addr, + .set_byte_mask = &dr_ste_v0_set_byte_mask, + .get_byte_mask = &dr_ste_v0_get_byte_mask, + + /* Actions */ + .set_actions_rx = &dr_ste_v0_set_actions_rx, + .set_actions_tx = &dr_ste_v0_set_actions_tx, + .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr), + .modify_field_arr = dr_ste_v0_action_modify_field_arr, + .set_action_set = &dr_ste_v0_set_action_set, + .set_action_add = &dr_ste_v0_set_action_add, + .set_action_copy = &dr_ste_v0_set_action_copy, + .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c new file mode 100644 index 000000000000..4088d6e51508 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -0,0 +1,1633 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */ + +#include <linux/types.h> +#include "mlx5_ifc_dr_ste_v1.h" +#include "dr_ste.h" + +#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ + ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \ + DR_STE_V1_LU_TYPE_##lookup_type##_O) + +enum dr_ste_v1_entry_format { + DR_STE_V1_TYPE_BWC_BYTE = 0x0, + DR_STE_V1_TYPE_BWC_DW = 0x1, + DR_STE_V1_TYPE_MATCH = 0x2, +}; + +/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */ +enum { + DR_STE_V1_LU_TYPE_NOP = 0x0000, + DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002, + DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102, + DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003, + DR_STE_V1_LU_TYPE_IBL4 = 0x0103, + DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004, + DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104, + DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005, + DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105, + DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006, + DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007, + DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008, + DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108, + DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009, + DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109, + DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a, + DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a, + DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b, + DR_STE_V1_LU_TYPE_MPLS_O = 0x010b, + DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c, + DR_STE_V1_LU_TYPE_MPLS_I = 0x010c, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d, + DR_STE_V1_LU_TYPE_GRE = 0x010d, + DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e, + DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, + DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, + DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, + DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, + DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, + DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, + DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114, + DR_STE_V1_LU_TYPE_INVALID = 0x00ff, + DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, +}; + +enum dr_ste_v1_header_anchors { + DR_STE_HEADER_ANCHOR_START_OUTER = 0x00, + DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02, + DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07, + DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13, + DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, +}; + +enum 
dr_ste_v1_action_size { + DR_STE_ACTION_SINGLE_SZ = 4, + DR_STE_ACTION_DOUBLE_SZ = 8, + DR_STE_ACTION_TRIPLE_SZ = 12, +}; + +enum dr_ste_v1_action_insert_ptr_attr { + DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */ + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */ + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */ +}; + +enum dr_ste_v1_action_id { + DR_STE_V1_ACTION_ID_NOP = 0x00, + DR_STE_V1_ACTION_ID_COPY = 0x05, + DR_STE_V1_ACTION_ID_SET = 0x06, + DR_STE_V1_ACTION_ID_ADD = 0x07, + DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09, + DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a, + DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b, + DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c, + DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d, + DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e, + DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f, + DR_STE_V1_ACTION_ID_TRAILER = 0x13, + DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14, + DR_STE_V1_ACTION_ID_MAX = 0x21, + /* use for special cases */ + DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22, +}; + +enum { + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = 
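/*
 * dr_ste_v1_action_modify_field_arr (continuing below) translates the
 * device-independent MLX5_ACTION_IN_FIELD_* ids used by modify-header
 * actions into v1 hardware coordinates: a destination dword (hw_field)
 * plus a bit range [start, end] within it. Entries that are only valid
 * for one protocol carry an l3_type/l4_type guard -- e.g. OUT_IP_TTL
 * and OUT_IPV6_HOPLIMIT share the same dword and bits but are told
 * apart by l3_type, and the TCP/UDP port entries by l4_type.
 */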
DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31, + 
}, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, + }, +}; + +static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type); +} + +static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) +{ + u64 index = miss_addr >> 6; + + MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26); + MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index); +} + +static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) +{ + u64 index = + (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | + MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26); + + return index << 6; +} + +static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask); +} + +static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) +{ + return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask); +} + +static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8); + MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF); +} + +static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8); + MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF); +} + +static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) +{ + u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format); + u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx); + + return (mode << 8 | index); +} + +static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi); +} + +static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) +{ + u64 index = (icm_addr >> 5) | ht_size; + + MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27); + MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index); +} + +static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, + u8 entry_type, u16 gvmi) +{ + dr_ste_v1_set_lu_type(hw_ste_p, lu_type); + dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); + + MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi); + MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi); + MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi); +} + +static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, + u32 ste_size) +{ + u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL; + u8 *mask = tag + DR_STE_SIZE_TAG; + u8 tmp_tag[DR_STE_SIZE_TAG] = {}; + + if (ste_size == DR_STE_SIZE_CTRL) + return; + + WARN_ON(ste_size != DR_STE_SIZE); + + /* Backup tag */ + memcpy(tmp_tag, tag, DR_STE_SIZE_TAG); + + /* Swap mask and tag both are the same size */ + memcpy(tag, mask, DR_STE_SIZE_MASK); + memcpy(mask, tmp_tag, DR_STE_SIZE_TAG); +} + +static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag) +{ + MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id, + DR_STE_V1_ACTION_ID_FLOW_TAG); + MLX5_SET(ste_single_action_flow_tag_v1, s_action, 
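/*
 * Two details in the helpers above are easy to miss. Miss and hit
 * addresses are stored as aligned indices, not raw ICM addresses:
 * dr_ste_v1_set_miss_addr() keeps (miss_addr >> 6) split across
 * miss_address_39_32/miss_address_31_6, so get_miss_addr()'s
 * (index << 6) round-trips any 64-byte-aligned address; the hit
 * address is 32-byte aligned (>> 5) with the next table size OR-ed
 * into the low bits. And dr_ste_v1_prepare_for_postsend() swaps the
 * tag and byte-mask halves just before the STE is posted -- the device
 * expects them in the opposite order from how the driver assembles
 * them in memory, hence the WARN_ON(ste_size != DR_STE_SIZE) guard.
 */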
flow_tag, flow_tag); +} + +static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id); +} + +static void dr_ste_v1_set_reparse(u8 *hw_ste_p) +{ + MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1); +} + +static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action, + u32 reformat_id, int size) +{ + MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_POINTER); + /* The hardware expects here size in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2); + MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id); + MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes, + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP); + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, + u32 vlan_hdr) +{ + MLX5_SET(ste_double_action_insert_with_inline_v1, d_action, + action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE); + /* The hardware expects offset to vlan header in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_inline_v1, d_action, + start_offset, HDR_LEN_L2_MACS >> 1); + MLX5_SET(ste_double_action_insert_with_inline_v1, d_action, + inline_data, vlan_hdr); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) +{ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, + action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, + start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN); + /* The hardware expects here size in words (2 byte) */ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, + remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p, + u8 *frst_s_action, + u8 *scnd_d_action, + u32 reformat_id, + int size) +{ + /* Remove L2 headers */ + MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); + MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor, + DR_STE_HEADER_ANCHOR_IPV6_IPV4); + + /* Encapsulate with given reformat ID */ + MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_POINTER); + /* The hardware expects here size in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2); + MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id); + MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes, + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) +{ + MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); + MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1); + MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1); + MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor, + DR_STE_HEADER_ANCHOR_INNER_MAC); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p, + u8 *s_action, + u16 decap_actions, + u32 decap_index) +{ + MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id, + DR_STE_V1_ACTION_ID_MODIFY_LIST); + MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions, + 
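/*
 * A convention shared by the action setters above: every size and
 * offset handed to the hardware is counted in 2-byte words, so an
 * encap blob of `size` bytes is programmed as size / 2, a VLAN push
 * starts at HDR_LEN_L2_MACS >> 1, and popping vlans_num VLANs removes
 * (HDR_LEN_L2_VLAN >> 1) * vlans_num words. Each setter also raises
 * the STE's reparse bit so the device re-parses the packet after the
 * header edit and subsequent lookups see the rewritten headers.
 */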
decap_actions); + MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr, + decap_index); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, + u8 *s_action, + u16 num_of_actions, + u32 re_write_index) +{ + MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id, + DR_STE_V1_ACTION_ID_MODIFY_LIST); + MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions, + num_of_actions); + MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr, + re_write_index); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_arr_init_next_match(u8 **last_ste, + u32 *added_stes, + u16 gvmi) +{ + u8 *action; + + (*added_stes)++; + *last_ste += DR_STE_SIZE; + dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi); + dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH); + + action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action); + memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action)); +} + +static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) +{ + u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); + u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; + bool allow_encap = true; + + if (action_type_set[DR_ACTION_TYP_CTR]) + dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); + + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_rewrite_actions(last_ste, action, + attr->modify_actions, + attr->modify_index); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + allow_encap = false; + } + + if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_encap = true; + } + dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + } + + if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) { + if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_encap = true; + } + dr_ste_v1_set_tx_encap(last_ste, action, + attr->reformat_id, + attr->reformat_size); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) { + u8 *d_action; + + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + d_action = action + DR_STE_ACTION_SINGLE_SZ; + + dr_ste_v1_set_tx_encap_l3(last_ste, + action, d_action, + attr->reformat_id, + attr->reformat_size); + action_sz -= DR_STE_ACTION_TRIPLE_SZ; + action += DR_STE_ACTION_TRIPLE_SZ; + } + + dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); + dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); +} + +static void 
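/*
 * dr_ste_v1_set_actions_tx() above works against a small per-STE
 * action budget: the initial match STE offers DR_STE_ACTION_DOUBLE_SZ
 * bytes of action space, and each extension STE chained in by
 * dr_ste_v1_arr_init_next_match() offers DR_STE_ACTION_TRIPLE_SZ. The
 * bookkeeping for one double (8-byte) action is always:
 *
 *   if (action_sz < DR_STE_ACTION_DOUBLE_SZ)    // out of room
 *           chain a new MATCH STE; action_sz = DR_STE_ACTION_TRIPLE_SZ;
 *   emit the action;
 *   action_sz -= DR_STE_ACTION_DOUBLE_SZ;
 *   action    += DR_STE_ACTION_DOUBLE_SZ;
 *
 * allow_encap adds an ordering rule on top: once a modify-header
 * action is emitted, push-vlan/encap must land in a later STE.
 */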
dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) +{ + u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); + u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; + bool allow_modify_hdr = true; + bool allow_ctr = true; + + if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { + dr_ste_v1_set_rx_decap_l3(last_ste, action, + attr->decap_actions, + attr->decap_index); + dr_ste_v1_set_rewrite_actions(last_ste, action, + attr->decap_actions, + attr->decap_index); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + allow_modify_hdr = false; + allow_ctr = false; + } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) { + dr_ste_v1_set_rx_decap(last_ste, action); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; + allow_modify_hdr = false; + allow_ctr = false; + } + + if (action_type_set[DR_ACTION_TYP_TAG]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = true; + allow_ctr = true; + } + dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; + } + + if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ || + !allow_modify_hdr) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = false; + allow_ctr = false; + } + + dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; + } + + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { + /* Modify header and decapsulation must use different STEs */ + if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = true; + allow_ctr = true; + } + dr_ste_v1_set_rewrite_actions(last_ste, action, + attr->modify_actions, + attr->modify_index); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + + if (action_type_set[DR_ACTION_TYP_CTR]) { + /* Counter action set after decap to exclude decaped header */ + if (!allow_ctr) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = true; + allow_ctr = false; + } + dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); + } + + dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); + dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); +} + +static void dr_ste_v1_set_action_set(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) +{ + shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; + MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET); + MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field); + MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter); + MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length); + MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data); +} + +static void 
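/*
 * The RX flavor above encodes hardware ordering rules via its allow_*
 * flags, not just via space accounting: decapsulation and
 * modify-header must not share an STE (the DR_ACTION_TYP_MODIFY_HDR
 * branch forces a fresh STE after either decap), and a counter meant
 * to exclude the decapsulated bytes has to live in an STE after the
 * decap, which is why both decap branches clear allow_ctr.
 */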
dr_ste_v1_set_action_add(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) +{ + shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; + MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD); + MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field); + MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter); + MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length); + MLX5_SET(ste_double_action_add_v1, d_action, add_value, data); +} + +static void dr_ste_v1_set_action_copy(u8 *d_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter) +{ + dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; + src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; + MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY); + MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field); + MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter); + MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len); + MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field); + MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter); +} + +#define DR_STE_DECAP_L3_ACTION_NUM 8 +#define DR_STE_L2_HDR_MAX_SZ 20 + +static int dr_ste_v1_set_action_decap_l3_list(void *data, + u32 data_sz, + u8 *hw_action, + u32 hw_action_sz, + u16 *used_hw_action_num) +{ + u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {}; + void *data_ptr = padded_data; + u16 used_actions = 0; + u32 inline_data_sz; + u32 i; + + if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM) + return -EINVAL; + + memcpy(padded_data, data, data_sz); + + /* Remove L2L3 outer headers */ + MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); + MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1); + MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1); + MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor, + DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4); + hw_action += DR_STE_ACTION_DOUBLE_SZ; + used_actions++; /* Remove and NOP are a single double action */ + + inline_data_sz = + MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data); + + /* Add the new header inline + 2 extra bytes */ + for (i = 0; i < data_sz / inline_data_sz + 1; i++) { + void *addr_inline; + + MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id, + DR_STE_V1_ACTION_ID_INSERT_INLINE); + /* The hardware expects here offset to words (2 bytes) */ + MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, + i * 2); + + /* Copy bytes one by one to avoid endianness problem */ + addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1, + hw_action, inline_data); + memcpy(addr_inline, data_ptr, inline_data_sz); + hw_action += DR_STE_ACTION_DOUBLE_SZ; + data_ptr += inline_data_sz; + used_actions++; + } + + /* Remove 2 extra bytes */ + MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id, + DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2); + /* The hardware expects here size in words (2 bytes) */ + MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1); + used_actions++; + + *used_hw_action_num = used_actions; + + return 0; +} + +static void 
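/*
 * Worked example for dr_ste_v1_set_action_decap_l3_list() with a plain
 * 14-byte L2 header (data_sz = 14) and, assuming the inline_data field
 * is one dword, inline_data_sz = 4:
 *
 *   1 action:             remove headers up to the inner IP (decap)
 *   14/4 + 1 = 4 actions: insert 4 * 4 = 16 bytes inline at word
 *                         offsets 0, 2, 4, 6 -- 2 bytes more than the
 *                         real header
 *   1 action:             remove_size = 1 word at start_offset
 *                         data_sz / 2 = 7, trimming the 2 surplus bytes
 *
 * for 6 used actions total, within the DR_STE_DECAP_L3_ACTION_NUM = 8
 * bound that the hw_action_sz check at the top enforces.
 */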
dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16); + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0); + + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16); + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0); + + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid); + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi); + DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio); + DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version); + + if (mask->cvlan_tag) { + MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1); + mask->cvlan_tag = 0; + } else if (mask->svlan_tag) { + MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1); + mask->svlan_tag = 0; + } +} + +static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16); + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0); + + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16); + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0); + + if (spec->ip_version == IP_VERSION_IPV4) { + MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4); + spec->ip_version = 0; + } else if (spec->ip_version == IP_VERSION_IPV6) { + MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6); + spec->ip_version = 0; + } else if (spec->ip_version) { + return -EINVAL; + } + + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid); + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi); + DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio); + + if (spec->cvlan_tag) { + MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN); + spec->cvlan_tag = 0; + } else if (spec->svlan_tag) { + MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN); + spec->svlan_tag = 0; + } + return 0; +} + +static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag; +} + +static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
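/*
 * A pattern that holds for all the build_*_bit_mask()/_tag() helpers:
 * software match fields are consumed as they are translated --
 * DR_STE_SET_TAG() zeroes its source field after copying it, and the
 * hand-written cases (spec->ip_version = 0, mask->cvlan_tag = 0 above)
 * do the same explicitly. Since several builders may run over one
 * match_param, this keeps a field that more than one lookup could
 * express from being programmed twice.
 */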
&value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96); + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64); + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32); + DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0); + + return 0; +} + +static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag; +} + +static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96); + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64); + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32); + DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0); + + return 0; +} + +static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag; +} + +static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp); + DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn); + + if (spec->tcp_flags) { + DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec); + spec->tcp_flags = 0; + } + + return 0; +} + +static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag; +} + +static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? 
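/*
 * In the IPv4 5-tuple builder above, tcp_dport and udp_dport both map
 * onto destination_port (and likewise the source ports): the lookup
 * layout has a single L4 port pair, and the protocol field is what
 * distinguishes which transport the rule actually matches.
 */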
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc_mask = &value->misc; + + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ? + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ? + DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version); + + if (mask->svlan_tag || mask->cvlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1); + mask->cvlan_tag = 0; + mask->svlan_tag = 0; + } + + if (inner) { + if (misc_mask->inner_second_cvlan_tag || + misc_mask->inner_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1); + misc_mask->inner_second_cvlan_tag = 0; + misc_mask->inner_second_svlan_tag = 0; + } + + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, + second_vlan_id, misc_mask, inner_second_vid); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, + second_cfi, misc_mask, inner_second_cfi); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, + second_priority, misc_mask, inner_second_prio); + } else { + if (misc_mask->outer_second_cvlan_tag || + misc_mask->outer_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1); + misc_mask->outer_second_cvlan_tag = 0; + misc_mask->outer_second_svlan_tag = 0; + } + + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, + second_vlan_id, misc_mask, outer_second_vid); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, + second_cfi, misc_mask, outer_second_cfi); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, + second_priority, misc_mask, outer_second_prio); + } +} + +static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value, + bool inner, u8 *tag) +{ + struct mlx5dr_match_spec *spec = inner ? 
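/*
 * The VLAN qualifier handling above is worth spelling out: cvlan_tag
 * and svlan_tag are boolean selectors in the software spec, so the
 * bit-mask builder can only record "a qualifier is being matched" by
 * writing -1 (all ones) into first/second_vlan_qualifier, while the
 * tag builder below writes the concrete DR_STE_CVLAN or DR_STE_SVLAN
 * value.
 */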
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc_spec = &value->misc; + + DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid); + DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi); + DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio); + DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag); + DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype); + + if (spec->ip_version == IP_VERSION_IPV4) { + MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4); + spec->ip_version = 0; + } else if (spec->ip_version == IP_VERSION_IPV6) { + MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6); + spec->ip_version = 0; + } else if (spec->ip_version) { + return -EINVAL; + } + + if (spec->cvlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN); + spec->cvlan_tag = 0; + } else if (spec->svlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN); + spec->svlan_tag = 0; + } + + if (inner) { + if (misc_spec->inner_second_cvlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN); + misc_spec->inner_second_cvlan_tag = 0; + } else if (misc_spec->inner_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN); + misc_spec->inner_second_svlan_tag = 0; + } + + DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid); + DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi); + DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio); + } else { + if (misc_spec->outer_second_cvlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN); + misc_spec->outer_second_cvlan_tag = 0; + } else if (misc_spec->outer_second_svlan_tag) { + MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN); + misc_spec->outer_second_svlan_tag = 0; + } + DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid); + DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi); + DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio); + } + + return 0; +} + +static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0); + + dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask); +} + +static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16); + DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0); + + return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); +} + +static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag; +} + +static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? 
&value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16); + DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0); + + dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask); +} + +static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16); + DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0); + + return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); +} + +static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag; +} + +static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value, + bool inner, u8 *bit_mask) +{ + struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16); + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0); + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid); + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi); + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio); + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag); + DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype); + DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version); + + if (misc->vxlan_vni) { + MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, + l2_tunneling_network_id, (misc->vxlan_vni << 8)); + misc->vxlan_vni = 0; + } + + if (mask->svlan_tag || mask->cvlan_tag) { + MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1); + mask->cvlan_tag = 0; + mask->svlan_tag = 0; + } +} + +static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16); + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0); + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid); + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi); + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag); + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio); + DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype); + + if (misc->vxlan_vni) { + MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id, + (misc->vxlan_vni << 8)); + misc->vxlan_vni = 0; + } + + if (spec->cvlan_tag) { + MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN); + spec->cvlan_tag = 0; + } else if (spec->svlan_tag) { + MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN); + spec->svlan_tag = 0; + } + + if (spec->ip_version == IP_VERSION_IPV4) { + MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4); + spec->ip_version = 0; + } else if (spec->ip_version == IP_VERSION_IPV6) { + MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6); + spec->ip_version = 0; + } else if (spec->ip_version) { + return -EINVAL; + } + + return 0; +} + +static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag; +} + +static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; + + DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit); + + return 0; +} + +static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag; +} + +static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport); + DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport); + DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport); + DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport); + DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol); + DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag); + DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp); + DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn); + DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit); + + if (sb->inner) + DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label); + else + DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label); + + if (spec->tcp_flags) { + DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec); + spec->tcp_flags = 0; + } + + return 0; +} + +static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag; +} + +static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + if (sb->inner) + DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag); + else + DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag); + + return 0; +} + +static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag; +} + +static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol); + DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present); + DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h); + DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l); + + DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present); + DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present); + + return 0; +} + +static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_GRE; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag; +} + +static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) { + DR_STE_SET_TAG(mpls_v1, tag, mpls0_label, + misc2, outer_first_mpls_over_gre_label); + + DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp, + misc2, outer_first_mpls_over_gre_exp); + + DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos, + misc2, outer_first_mpls_over_gre_s_bos); + + DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl, + misc2, outer_first_mpls_over_gre_ttl); + } else { + DR_STE_SET_TAG(mpls_v1, tag, mpls0_label, + misc2, outer_first_mpls_over_udp_label); + + DR_STE_SET_TAG(mpls_v1, tag, 
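/*
 * dr_ste_v1_build_tnl_mpls_tag() (continuing below) fills the same
 * mpls_v1 fields -- mpls0_label/exp/s_bos/ttl -- from either the
 * outer_first_mpls_over_gre_* or the outer_first_mpls_over_udp_*
 * software values, with DR_STE_IS_OUTER_MPLS_OVER_GRE_SET() deciding
 * which tunnel flavor the mask selected.
 */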
mpls0_exp, + misc2, outer_first_mpls_over_udp_exp); + + DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos, + misc2, outer_first_mpls_over_udp_s_bos); + + DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl, + misc2, outer_first_mpls_over_udp_ttl); + } + + return 0; +} + +static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag; +} + +static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3); + u32 *icmp_header_data; + u8 *icmp_type; + u8 *icmp_code; + + if (is_ipv4) { + icmp_header_data = &misc3->icmpv4_header_data; + icmp_type = &misc3->icmpv4_type; + icmp_code = &misc3->icmpv4_code; + } else { + icmp_header_data = &misc3->icmpv6_header_data; + icmp_type = &misc3->icmpv6_type; + icmp_code = &misc3->icmpv6_code; + } + + MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data); + MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type); + MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code); + + *icmp_header_data = 0; + *icmp_type = 0; + *icmp_code = 0; + + return 0; +} + +static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag; + + return 0; +} + +static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field, + misc2, metadata_reg_a); + + return 0; +} + +static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag; +} + +static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + + if (sb->inner) { + DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num); + DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num); + } else { + DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num); + DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num); + } + + return 0; +} + +static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag; +} + +static int +dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + + DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, + outer_vxlan_gpe_flags, misc3, 
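/*
 * dr_ste_v1_build_icmp_tag() above folds ICMPv4 and ICMPv6 into the
 * single ste_icmp_v1 layout: DR_MASK_IS_ICMPV4_SET() picks the v4 or
 * v6 triplet of software fields via pointers, header_data/type/code
 * are written once, and the chosen source fields are zeroed as
 * consumed. Unlike most builders its _init() returns int, presumably
 * to keep the signature shared with the v0 version, where ICMP
 * matching can fail depending on flex-parser capabilities; here it
 * always returns 0.
 */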
+ outer_vxlan_gpe_flags); + DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, + outer_vxlan_gpe_next_protocol, misc3, + outer_vxlan_gpe_next_protocol); + DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, + outer_vxlan_gpe_vni, misc3, + outer_vxlan_gpe_vni); + + return 0; +} + +static void +dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag; +} + +static int +dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc *misc = &value->misc; + + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_protocol_type, misc, geneve_protocol_type); + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_oam, misc, geneve_oam); + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_opt_len, misc, geneve_opt_len); + DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, + geneve_vni, misc, geneve_vni); + + return 0; +} + +static void +dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag; +} + +static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0); + DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1); + DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2); + DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3); + + return 0; +} + +static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag; +} + +static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc2 *misc2 = &value->misc2; + + DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4); + DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5); + DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6); + DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7); + + return 0; +} + +static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag; +} + +static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value, + u8 *bit_mask) +{ + struct mlx5dr_match_misc *misc_mask = &value->misc; + + DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, 
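/*
 * The two register builders above expose the metadata registers to
 * steering: reg_c_0..reg_c_3 land in the high/low halves of the
 * STEERING_REGISTERS_0 lookup (register_0_h/_l, register_1_h/_l) and
 * reg_c_4..reg_c_7 in STEERING_REGISTERS_1, i.e. two 32-bit
 * C-registers per register pair.
 */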
source_port); + DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn); +} + +static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + u8 *tag) +{ + struct mlx5dr_match_misc *misc = &value->misc; + struct mlx5dr_cmd_vport_cap *vport_cap; + struct mlx5dr_domain *dmn = sb->dmn; + struct mlx5dr_cmd_caps *caps; + u8 *bit_mask = sb->bit_mask; + + DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn); + + if (sb->vhca_id_valid) { + /* Find port GVMI based on the eswitch_owner_vhca_id */ + if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) + caps = &dmn->info.caps; + else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == + dmn->peer_dmn->info.caps.gvmi)) + caps = &dmn->peer_dmn->info.caps; + else + return -EINVAL; + + misc->source_eswitch_owner_vhca_id = 0; + } else { + caps = &dmn->info.caps; + } + + if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi)) + return 0; + + vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); + if (!vport_cap) { + mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", + misc->source_port); + return -EINVAL; + } + + if (vport_cap->vport_gvmi) + MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi); + + misc->source_port = 0; + return 0; +} + +static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); + + sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI; + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag; +} + +struct mlx5dr_ste_ctx ste_ctx_v1 = { + /* Builders */ + .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, + .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, + .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init, + .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init, + .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init, + .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init, + .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init, + .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init, + .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init, + .build_mpls_init = &dr_ste_v1_build_mpls_init, + .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, + .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, + .build_icmp_init = &dr_ste_v1_build_icmp_init, + .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, + .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, + .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, + .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, + .build_register_0_init = &dr_ste_v1_build_register_0_init, + .build_register_1_init = &dr_ste_v1_build_register_1_init, + .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, + /* Getters and Setters */ + .ste_init = &dr_ste_v1_init, + .set_next_lu_type = &dr_ste_v1_set_next_lu_type, + .get_next_lu_type = &dr_ste_v1_get_next_lu_type, + .set_miss_addr = &dr_ste_v1_set_miss_addr, + .get_miss_addr = &dr_ste_v1_get_miss_addr, + .set_hit_addr = &dr_ste_v1_set_hit_addr, + .set_byte_mask = &dr_ste_v1_set_byte_mask, + .get_byte_mask = &dr_ste_v1_get_byte_mask, + /* Actions */ + .set_actions_rx = &dr_ste_v1_set_actions_rx, + .set_actions_tx = &dr_ste_v1_set_actions_tx, + .modify_field_arr_sz = 
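/*
 * ste_ctx_v1 mirrors ste_ctx_v0 earlier in this diff: one vtable of
 * builders, getters/setters and action emitters per STE format. The
 * generic dr_ste code presumably selects a context per device steering
 * format via mlx5dr_ste_get_ctx() (declared in dr_types.h below), so
 * nothing outside these files references dr_ste_v0_/v1_ symbols
 * directly. The one v1-only hook is prepare_for_postsend; v0 leaves it
 * unset, as v0 STEs apparently need no fix-up before posting.
 */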
ARRAY_SIZE(dr_ste_v1_action_modify_field_arr), + .modify_field_arr = dr_ste_v1_action_modify_field_arr, + .set_action_set = &dr_ste_v1_set_action_set, + .set_action_add = &dr_ste_v1_set_action_add, + .set_action_copy = &dr_ste_v1_set_action_copy, + .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, + /* Send */ + .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 51880df26724..4af0e4e6a13c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl; struct mlx5dr_match_param; struct mlx5dr_cmd_caps; struct mlx5dr_matcher_rx_tx; +struct mlx5dr_ste_ctx; struct mlx5dr_ste { u8 *hw_ste; @@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl { }; struct mlx5dr_ste_htbl { - u8 lu_type; + u16 lu_type; u16 byte_mask; u32 refcount; struct mlx5dr_icm_chunk *chunk; @@ -190,7 +191,7 @@ struct mlx5dr_ste_build { u8 vhca_id_valid:1; struct mlx5dr_domain *dmn; struct mlx5dr_cmd_caps *caps; - u8 lu_type; + u16 lu_type; u16 byte_mask; u8 bit_mask[DR_STE_SIZE_MASK]; int (*ste_build_tag_func)(struct mlx5dr_match_param *spec, @@ -201,7 +202,7 @@ struct mlx5dr_ste_build { struct mlx5dr_ste_htbl * mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size, - u8 lu_type, u16 byte_mask); + u16 lu_type, u16 byte_mask); int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl); @@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) /* STE utils */ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl); -void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi); -void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste, - struct mlx5dr_ste_htbl *next_htbl); -void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr); -u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste); -void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi); -void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size); -void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr); +void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste, u64 miss_addr); +void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste, u64 icm_addr, u32 ht_size); +void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste, + struct mlx5dr_ste_htbl *next_htbl); void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask); bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher, u8 ste_location); -void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag); -void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id); -void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, - int size, bool encap_l3); -void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p); -void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan); -void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p); -void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid, - bool go_back); -void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type); -u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p); -void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, - u32 re_write_index); -void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p); u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste); u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste); struct list_head 
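/*
 * The dr_types.h hunks above are the type plumbing for the new format:
 * lu_type widens from u8 to u16 in mlx5dr_ste_htbl, mlx5dr_ste_build
 * and mlx5dr_ste_htbl_alloc() because a v1 lookup type carries a
 * definer-mode byte on top of the index byte, and the old mlx5dr_ste_*
 * helpers that wrote hw_ste fields directly are dropped in favor of
 * wrappers that take an explicit struct mlx5dr_ste_ctx.
 */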
*mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste); +#define MLX5DR_MAX_VLANS 2 + +struct mlx5dr_ste_actions_attr { + u32 modify_index; + u16 modify_actions; + u32 decap_index; + u16 decap_actions; + u8 decap_with_vlan:1; + u64 final_icm_addr; + u32 flow_tag; + u32 ctr_id; + u16 gvmi; + u16 hit_gvmi; + u32 reformat_id; + u32 reformat_size; + struct { + int count; + u32 headers[MLX5DR_MAX_VLANS]; + } vlans; +}; + +void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes); +void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes); + +void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx, + __be64 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data); +void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx, + __be64 *hw_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data); +void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx, + __be64 *hw_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter); +int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, + void *data, + u32 data_sz, + u8 *hw_action, + u32 hw_action_sz, + u16 *used_hw_action_num); + +const struct mlx5dr_ste_action_modify_field * +mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field); + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version); void mlx5dr_ste_free(struct mlx5dr_ste *ste, struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher); @@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste) return !ste->refcount; } -void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, - struct mlx5dr_ste_htbl *next_htbl); bool mlx5dr_ste_equal_tag(void *src, void *dst); int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, @@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, struct mlx5dr_match_param *value, u8 *ste_arr); -void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder, +void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *builder, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx, + struct 
mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb, +int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, struct mlx5dr_cmd_caps *caps, bool inner, bool rx); -void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, struct mlx5dr_domain *dmn, bool inner, bool rx); @@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 { u32 outer_vxlan_gpe_next_protocol:8; u32 icmpv4_header_data; u32 icmpv6_header_data; - u32 icmpv6_code:8; - u32 icmpv6_type:8; - u32 icmpv4_code:8; - u32 icmpv4_type:8; + u8 icmpv6_code; + u8 icmpv6_type; + u8 icmpv4_code; + u8 icmpv4_type; u8 reserved_auto3[0x1c]; }; @@ -598,7 +666,8 @@ struct mlx5dr_esw_caps { u64 drop_icm_address_tx; u64 uplink_icm_address_rx; 
u64 uplink_icm_address_tx; - bool sw_owner; + u8 sw_owner:1; + u8 sw_owner_v2:1; }; struct mlx5dr_cmd_vport_cap { @@ -631,6 +700,9 @@ struct mlx5dr_cmd_caps { bool rx_sw_owner; bool tx_sw_owner; bool fdb_sw_owner; + u8 rx_sw_owner_v2:1; + u8 tx_sw_owner_v2:1; + u8 fdb_sw_owner_v2:1; u32 num_vports; struct mlx5dr_esw_caps esw_caps; struct mlx5dr_cmd_vport_cap *vports_caps; @@ -671,6 +743,7 @@ struct mlx5dr_domain { struct mlx5dr_send_ring *send_ring; struct mlx5dr_domain_info info; struct mlx5dr_domain_cache cache; + struct mlx5dr_ste_ctx *ste_ctx; }; struct mlx5dr_table_rx_tx { @@ -725,6 +798,14 @@ struct mlx5dr_rule_member { struct list_head use_ste_list; }; +struct mlx5dr_ste_action_modify_field { + u16 hw_field; + u8 start; + u8 end; + u8 l3_type; + u8 l4_type; +}; + struct mlx5dr_action { enum mlx5dr_action_type action_type; refcount_t refcount; @@ -995,12 +1076,16 @@ struct mlx5dr_icm_chunk * mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size); void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk); + +void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx, + u8 *hw_ste_p, u32 ste_size); int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, struct mlx5dr_domain_rx_tx *nic_dmn, struct mlx5dr_ste_htbl *htbl, struct mlx5dr_htbl_connect_info *connect_info, bool update_hw_ste); -void mlx5dr_ste_set_formatted_ste(u16 gvmi, +void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, + u16 gvmi, struct mlx5dr_domain_rx_tx *nic_dmn, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, @@ -1126,6 +1211,8 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, u32 group_id, struct mlx5dr_cmd_fte_info *fte); +bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps); + struct mlx5dr_fw_recalc_cs_ft { u64 rx_icm_addr; u32 table_id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index e01c3766c7de..83df6df6b459 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -5,91 +5,6 @@ #define MLX5_IFC_DR_H enum { - MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0, - MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1, - MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2, - MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3, - MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4, - MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5, - MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6, - MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7, - MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8, - MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9, - MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10, - MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11, - MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12, - MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13, - MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14, - MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15, - MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16, - MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17, - MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18, - MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19, - MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20, - MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21, - MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22, - MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23, -}; - -enum { - MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1, - MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2, - MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3, -}; - -enum { - MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0, - MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1, - MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2, -}; - -enum { - MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0, - MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1, - MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP 
= 0x2, -}; - -enum { - MLX5DR_STE_LU_TYPE_NOP = 0x00, - MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05, - MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a, - MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06, - MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07, - MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b, - MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08, - MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09, - MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c, - MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36, - MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37, - MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38, - MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d, - MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e, - MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e, - MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f, - MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10, - MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f, - MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11, - MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12, - MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20, - MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29, - MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a, - MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b, - MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13, - MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14, - MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21, - MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c, - MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d, - MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e, - MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15, - MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24, - MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25, - MLX5DR_STE_LU_TYPE_GRE = 0x16, - MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22, - MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23, - MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19, - MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18, - MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f, - MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30, MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h new file mode 100644 index 000000000000..34c2bd17a8b4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h @@ -0,0 +1,434 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef MLX5_IFC_DR_STE_V1_H +#define MLX5_IFC_DR_STE_V1_H + +enum mlx5_ifc_ste_v1_modify_hdr_offset { + MLX5_MODIFY_HEADER_V1_QW_OFFSET = 0x20, +}; + +struct mlx5_ifc_ste_single_action_flow_tag_v1_bits { + u8 action_id[0x8]; + u8 flow_tag[0x18]; +}; + +struct mlx5_ifc_ste_single_action_modify_list_v1_bits { + u8 action_id[0x8]; + u8 num_of_modify_actions[0x8]; + u8 modify_actions_ptr[0x10]; +}; + +struct mlx5_ifc_ste_single_action_remove_header_v1_bits { + u8 action_id[0x8]; + u8 reserved_at_8[0x2]; + u8 start_anchor[0x6]; + u8 reserved_at_10[0x2]; + u8 end_anchor[0x6]; + u8 reserved_at_18[0x4]; + u8 decap[0x1]; + u8 vni_to_cqe[0x1]; + u8 qos_profile[0x2]; +}; + +struct mlx5_ifc_ste_single_action_remove_header_size_v1_bits { + u8 action_id[0x8]; + u8 reserved_at_8[0x2]; + u8 start_anchor[0x6]; + u8 outer_l4_remove[0x1]; + u8 reserved_at_11[0x1]; + u8 start_offset[0x7]; + u8 reserved_at_18[0x1]; + u8 remove_size[0x6]; +}; + +struct mlx5_ifc_ste_double_action_copy_v1_bits { + u8 action_id[0x8]; + u8 destination_dw_offset[0x8]; + u8 reserved_at_10[0x2]; + u8 destination_left_shifter[0x6]; + u8 reserved_at_17[0x2]; + u8 destination_length[0x6]; + + u8 reserved_at_20[0x8]; + u8 source_dw_offset[0x8]; + u8 reserved_at_30[0x2]; + u8 source_right_shifter[0x6]; + u8 reserved_at_38[0x8]; +}; + +struct mlx5_ifc_ste_double_action_set_v1_bits { + u8 action_id[0x8]; + u8 destination_dw_offset[0x8]; + u8 reserved_at_10[0x2]; + u8 destination_left_shifter[0x6]; + u8 reserved_at_18[0x2]; + u8 destination_length[0x6]; + + u8 inline_data[0x20]; +}; + +struct mlx5_ifc_ste_double_action_add_v1_bits { + u8 action_id[0x8]; + u8 destination_dw_offset[0x8]; + u8 reserved_at_10[0x2]; + u8 destination_left_shifter[0x6]; + u8 reserved_at_18[0x2]; + u8 destination_length[0x6]; + + u8 add_value[0x20]; +}; + +struct mlx5_ifc_ste_double_action_insert_with_inline_v1_bits { + u8 action_id[0x8]; + u8 reserved_at_8[0x2]; + u8 start_anchor[0x6]; + u8 start_offset[0x7]; + u8 reserved_at_17[0x9]; + + u8 inline_data[0x20]; +}; + +struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits { + u8 action_id[0x8]; + u8 reserved_at_8[0x2]; + u8 start_anchor[0x6]; + u8 start_offset[0x7]; + u8 size[0x6]; + u8 attributes[0x3]; + + u8 pointer[0x20]; +}; + +struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits { + u8 action_id[0x8]; + u8 modify_actions_pattern_pointer[0x18]; + + u8 number_of_modify_actions[0x8]; + u8 modify_actions_argument_pointer[0x18]; +}; + +struct mlx5_ifc_ste_match_bwc_v1_bits { + u8 entry_format[0x8]; + u8 counter_id[0x18]; + + u8 miss_address_63_48[0x10]; + u8 match_definer_ctx_idx[0x8]; + u8 miss_address_39_32[0x8]; + + u8 miss_address_31_6[0x1a]; + u8 reserved_at_5a[0x1]; + u8 match_polarity[0x1]; + u8 reparse[0x1]; + u8 reserved_at_5d[0x3]; + + u8 next_table_base_63_48[0x10]; + u8 hash_definer_ctx_idx[0x8]; + u8 next_table_base_39_32_size[0x8]; + + u8 next_table_base_31_5_size[0x1b]; + u8 hash_type[0x2]; + u8 hash_after_actions[0x1]; + u8 reserved_at_9e[0x2]; + + u8 byte_mask[0x10]; + u8 next_entry_format[0x1]; + u8 mask_mode[0x1]; + u8 gvmi[0xe]; + + u8 action[0x40]; +}; + +struct mlx5_ifc_ste_mask_and_match_v1_bits { + u8 entry_format[0x8]; + u8 counter_id[0x18]; + + u8 miss_address_63_48[0x10]; + u8 match_definer_ctx_idx[0x8]; + u8 miss_address_39_32[0x8]; + + u8 miss_address_31_6[0x1a]; + u8 reserved_at_5a[0x1]; + u8 match_polarity[0x1]; + u8 reparse[0x1]; + u8 reserved_at_5d[0x3]; + + u8 next_table_base_63_48[0x10]; + u8 hash_definer_ctx_idx[0x8]; + u8 next_table_base_39_32_size[0x8]; + + u8 
next_table_base_31_5_size[0x1b]; + u8 hash_type[0x2]; + u8 hash_after_actions[0x1]; + u8 reserved_at_9e[0x2]; + + u8 action[0x60]; +}; + +struct mlx5_ifc_ste_eth_l2_src_v1_bits { + u8 reserved_at_0[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_loopback[0x1]; + u8 ip_fragmented[0x1]; + u8 qp_type[0x2]; + u8 encapsulation_type[0x2]; + u8 port[0x2]; + u8 l3_type[0x2]; + u8 l4_type[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_id[0xc]; + + u8 smac_47_16[0x20]; + + u8 smac_15_0[0x10]; + u8 l3_ethertype[0x10]; + + u8 reserved_at_60[0x6]; + u8 tcp_syn[0x1]; + u8 reserved_at_67[0x3]; + u8 force_loopback[0x1]; + u8 l2_ok[0x1]; + u8 l3_ok[0x1]; + u8 l4_ok[0x1]; + u8 second_vlan_qualifier[0x2]; + + u8 second_priority[0x3]; + u8 second_cfi[0x1]; + u8 second_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l2_dst_v1_bits { + u8 reserved_at_0[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 ip_fragmented[0x1]; + u8 qp_type[0x2]; + u8 encapsulation_type[0x2]; + u8 port[0x2]; + u8 l3_type[0x2]; + u8 l4_type[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_id[0xc]; + + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 l3_ethertype[0x10]; + + u8 reserved_at_60[0x6]; + u8 tcp_syn[0x1]; + u8 reserved_at_67[0x3]; + u8 force_lb[0x1]; + u8 l2_ok[0x1]; + u8 l3_ok[0x1]; + u8 l4_ok[0x1]; + u8 second_vlan_qualifier[0x2]; + u8 second_priority[0x3]; + u8 second_cfi[0x1]; + u8 second_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l2_src_dst_v1_bits { + u8 dmac_47_16[0x20]; + + u8 smac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 reserved_at_50[0x2]; + u8 functional_lb[0x1]; + u8 reserved_at_53[0x5]; + u8 port[0x2]; + u8 l3_type[0x2]; + u8 reserved_at_5c[0x2]; + u8 first_vlan_qualifier[0x2]; + + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_id[0xc]; + u8 smac_15_0[0x10]; +}; + +struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_v1_bits { + u8 source_address[0x20]; + + u8 destination_address[0x20]; + + u8 source_port[0x10]; + u8 destination_port[0x10]; + + u8 reserved_at_60[0x4]; + u8 l4_ok[0x1]; + u8 l3_ok[0x1]; + u8 fragmented[0x1]; + u8 tcp_ns[0x1]; + u8 tcp_cwr[0x1]; + u8 tcp_ece[0x1]; + u8 tcp_urg[0x1]; + u8 tcp_ack[0x1]; + u8 tcp_psh[0x1]; + u8 tcp_rst[0x1]; + u8 tcp_syn[0x1]; + u8 tcp_fin[0x1]; + u8 dscp[0x6]; + u8 ecn[0x2]; + u8 protocol[0x8]; +}; + +struct mlx5_ifc_ste_eth_l2_tnl_v1_bits { + u8 l2_tunneling_network_id[0x20]; + + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 l3_ethertype[0x10]; + + u8 reserved_at_60[0x3]; + u8 ip_fragmented[0x1]; + u8 reserved_at_64[0x2]; + u8 encp_type[0x2]; + u8 reserved_at_68[0x2]; + u8 l3_type[0x2]; + u8 l4_type[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l3_ipv4_misc_v1_bits { + u8 identification[0x10]; + u8 flags[0x3]; + u8 fragment_offset[0xd]; + + u8 total_length[0x10]; + u8 checksum[0x10]; + + u8 version[0x4]; + u8 ihl[0x4]; + u8 time_to_live[0x8]; + u8 reserved_at_50[0x10]; + + u8 reserved_at_60[0x1c]; + u8 voq_internal_prio[0x4]; +}; + +struct mlx5_ifc_ste_eth_l4_v1_bits { + u8 ipv6_version[0x4]; + u8 reserved_at_4[0x4]; + u8 dscp[0x6]; + u8 ecn[0x2]; + u8 ipv6_hop_limit[0x8]; + u8 protocol[0x8]; + + u8 src_port[0x10]; + u8 dst_port[0x10]; + + u8 first_fragment[0x1]; + u8 reserved_at_41[0xb]; + u8 flow_label[0x14]; + + u8 tcp_data_offset[0x4]; + u8 l4_ok[0x1]; + u8 l3_ok[0x1]; + u8 fragmented[0x1]; + u8 tcp_ns[0x1]; + u8 tcp_cwr[0x1]; + u8 tcp_ece[0x1]; + u8 
tcp_urg[0x1]; + u8 tcp_ack[0x1]; + u8 tcp_psh[0x1]; + u8 tcp_rst[0x1]; + u8 tcp_syn[0x1]; + u8 tcp_fin[0x1]; + u8 ipv6_paylen[0x10]; +}; + +struct mlx5_ifc_ste_eth_l4_misc_v1_bits { + u8 window_size[0x10]; + u8 urgent_pointer[0x10]; + + u8 ack_num[0x20]; + + u8 seq_num[0x20]; + + u8 length[0x10]; + u8 checksum[0x10]; +}; + +struct mlx5_ifc_ste_mpls_v1_bits { + u8 reserved_at_0[0x15]; + u8 mpls_ok[0x1]; + u8 mpls4_s_bit[0x1]; + u8 mpls4_qualifier[0x1]; + u8 mpls3_s_bit[0x1]; + u8 mpls3_qualifier[0x1]; + u8 mpls2_s_bit[0x1]; + u8 mpls2_qualifier[0x1]; + u8 mpls1_s_bit[0x1]; + u8 mpls1_qualifier[0x1]; + u8 mpls0_s_bit[0x1]; + u8 mpls0_qualifier[0x1]; + + u8 mpls0_label[0x14]; + u8 mpls0_exp[0x3]; + u8 mpls0_s_bos[0x1]; + u8 mpls0_ttl[0x8]; + + u8 mpls1_label[0x20]; + + u8 mpls2_label[0x20]; +}; + +struct mlx5_ifc_ste_gre_v1_bits { + u8 gre_c_present[0x1]; + u8 reserved_at_1[0x1]; + u8 gre_k_present[0x1]; + u8 gre_s_present[0x1]; + u8 strict_src_route[0x1]; + u8 recur[0x3]; + u8 flags[0x5]; + u8 version[0x3]; + u8 gre_protocol[0x10]; + + u8 reserved_at_20[0x20]; + + u8 gre_key_h[0x18]; + u8 gre_key_l[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_ste_src_gvmi_qp_v1_bits { + u8 loopback_synd[0x8]; + u8 reserved_at_8[0x7]; + u8 functional_lb[0x1]; + u8 source_gvmi[0x10]; + + u8 force_lb[0x1]; + u8 reserved_at_21[0x1]; + u8 source_is_requestor[0x1]; + u8 reserved_at_23[0x5]; + u8 source_qp[0x18]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_ste_icmp_v1_bits { + u8 icmp_payload_data[0x20]; + + u8 icmp_header_data[0x20]; + + u8 icmp_type[0x8]; + u8 icmp_code[0x8]; + u8 reserved_at_50[0x10]; + + u8 reserved_at_60[0x20]; +}; + +#endif /* MLX5_IFC_DR_STE_V1_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 4177786b8eaf..612b0ac31db2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -124,7 +124,10 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action); static inline bool mlx5dr_is_supported(struct mlx5_core_dev *dev) { - return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner); + return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || + (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && + (MLX5_CAP_GEN(dev, steering_format_version) <= + MLX5_STEERING_FORMAT_CONNECTX_6DX)); } /* buddy functions & structure */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index bdafc85fd874..e05c5c0f3ae1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -36,6 +36,7 @@ #include <linux/mlx5/vport.h> #include <linux/mlx5/eswitch.h> #include "mlx5_core.h" +#include "sf/sf.h" /* Mutex to hold while enabling or disabling RoCE */ static DEFINE_MUTEX(mlx5_roce_en_lock); @@ -1160,6 +1161,18 @@ EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); */ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) { - return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev); + return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev); } EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); + +int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out) +{ + u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {}; + + MLX5_SET(query_hca_cap_in, in, opcode, 
MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, opmod); + MLX5_SET(query_hca_cap_in, in, function_id, function_id); + MLX5_SET(query_hca_cap_in, in, other_function, true); + return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); +}
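
The dr_types.h hunks above thread a new struct mlx5dr_ste_ctx * argument through every STE builder, getter/setter, and action helper, so the ConnectX-5 (format v0) and ConnectX-6 Dx (format v1) STE layouts can coexist behind one dispatch table; ste_ctx_v1 above is the v1 instance of that table, and struct mlx5dr_domain gains an ste_ctx member to cache the selected context. What follows is a minimal sketch of how that per-version dispatch could be wired up, assuming an ste_ctx_v0 counterpart to the ste_ctx_v1 shown above and the MLX5_STEERING_FORMAT_CONNECTX_5/MLX5_STEERING_FORMAT_CONNECTX_6DX constants; the actual wiring lives in steering/dr_ste.c, which this excerpt does not show:

	/* Sketch only: per-format STE context lookup and one thin wrapper.
	 * ste_ctx_v0 is assumed to exist alongside the ste_ctx_v1 defined above.
	 */
	extern struct mlx5dr_ste_ctx ste_ctx_v0;
	extern struct mlx5dr_ste_ctx ste_ctx_v1;

	static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
		[MLX5_STEERING_FORMAT_CONNECTX_5]   = &ste_ctx_v0,
		[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
	};

	struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
	{
		if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
			return NULL;

		return mlx5dr_ste_ctx_arr[version];
	}

	/* Callers indirect through the context cached in mlx5dr_domain->ste_ctx
	 * instead of touching the STE layout directly, e.g.:
	 */
	void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				      u8 *hw_ste_p, u64 miss_addr)
	{
		ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
	}

Likewise, mlx5_vport_get_other_func_cap() at the end of the vport.c hunk lets SF code read another function's maximal HCA caps over the QUERY_HCA_CAP command. A hypothetical caller might look like the sketch below; the function name query_sf_caps and the function id fid are illustrative, while the MLX5_ST_SZ_BYTES sizing and kvzalloc/kvfree pattern follow the driver's usual command-buffer handling:

	/* Hypothetical caller: fetch the maximal general caps of function 'fid'. */
	static int query_sf_caps(struct mlx5_core_dev *dev, u16 fid)
	{
		u8 *out;
		int err;

		out = kvzalloc(MLX5_ST_SZ_BYTES(query_hca_cap_out), GFP_KERNEL);
		if (!out)
			return -ENOMEM;

		err = mlx5_vport_get_other_func_cap(dev, fid, out);
		/* ... on success, parse with MLX5_GET(query_hca_cap_out, out, ...) ... */
		kvfree(out);
		return err;
	}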