33 files changed, 1152 insertions, 572 deletions
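Most of the churn in the mlx4 portions of this diff comes from converting the driver's bespoke mlx4_interface registration to the kernel's auxiliary bus, with events delivered through notifier blocks instead of a ->event() callback. As a minimal sketch of the auxiliary-driver side of that pattern (the "demo" device and driver names are illustrative, not the mlx4 ones):

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int demo_probe(struct auxiliary_device *adev,
		      const struct auxiliary_device_id *id)
{
	/* Recover the core driver's state via container_of() on adev,
	 * allocate our own state, and stash it for remove(). */
	auxiliary_set_drvdata(adev, NULL /* our state */);
	return 0;
}

static void demo_remove(struct auxiliary_device *adev)
{
	void *state = auxiliary_get_drvdata(adev);

	/* tear down in reverse order of probe */
	(void)state;
}

/* Matches devices the core driver registers as "<core modname>.<suffix>". */
static const struct auxiliary_device_id demo_id_table[] = {
	{ .name = "demo_core.demo" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, demo_id_table);

static struct auxiliary_driver demo_adrv = {
	.name = "demo",
	.probe = demo_probe,
	.remove = demo_remove,
	.id_table = demo_id_table,
};
module_auxiliary_driver(demo_adrv);
MODULE_LICENSE("GPL");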
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b18e9f2adc82..1f8d0d2c5f17 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -82,6 +82,8 @@ static const char mlx4_ib_version[] = static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num); +static int mlx4_ib_event(struct notifier_block *this, unsigned long event, + void *param); static struct workqueue_struct *wq; @@ -125,12 +127,14 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u32 port_num) { struct mlx4_ib_dev *ibdev = to_mdev(device); - struct net_device *dev; + struct net_device *dev, *ret = NULL; rcu_read_lock(); - dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); + for_each_netdev_rcu(&init_net, dev) { + if (dev->dev.parent != ibdev->ib_dev.dev.parent || + dev->dev_port + 1 != port_num) + continue; - if (dev) { if (mlx4_is_bonded(ibdev->dev)) { struct net_device *upper = NULL; @@ -143,11 +147,14 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, dev = active; } } + + dev_hold(dev); + ret = dev; + break; } - dev_hold(dev); rcu_read_unlock(); - return dev; + return ret; } static int mlx4_ib_update_gids_v1(struct gid_entry *gids, @@ -2319,61 +2326,53 @@ unlock: mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); } -static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, - struct net_device *dev, - unsigned long event) +static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + unsigned long event) { - struct mlx4_ib_iboe *iboe; - int update_qps_port = -1; - int port; + struct mlx4_ib_iboe *iboe = &ibdev->iboe; ASSERT_RTNL(); - iboe = &ibdev->iboe; + if (dev->dev.parent != ibdev->ib_dev.dev.parent) + return; spin_lock_bh(&iboe->lock); - mlx4_foreach_ib_transport_port(port, ibdev->dev) { - - iboe->netdevs[port - 1] = - mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); - if (dev == iboe->netdevs[port - 1] && - (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || - event == NETDEV_UP || event == NETDEV_CHANGE)) - update_qps_port = port; + iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL; - if (dev == iboe->netdevs[port - 1] && - (event == NETDEV_UP || event == NETDEV_DOWN)) { - enum ib_port_state port_state; - struct ib_event ibev = { }; + if (event == NETDEV_UP || event == NETDEV_DOWN) { + enum ib_port_state port_state; + struct ib_event ibev = { }; - if (ib_get_cached_port_state(&ibdev->ib_dev, port, - &port_state)) - continue; + if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1, + &port_state)) + goto iboe_out; - if (event == NETDEV_UP && - (port_state != IB_PORT_ACTIVE || - iboe->last_port_state[port - 1] != IB_PORT_DOWN)) - continue; - if (event == NETDEV_DOWN && - (port_state != IB_PORT_DOWN || - iboe->last_port_state[port - 1] != IB_PORT_ACTIVE)) - continue; - iboe->last_port_state[port - 1] = port_state; - - ibev.device = &ibdev->ib_dev; - ibev.element.port_num = port; - ibev.event = event == NETDEV_UP ? 
IB_EVENT_PORT_ACTIVE : - IB_EVENT_PORT_ERR; - ib_dispatch_event(&ibev); - } + if (event == NETDEV_UP && + (port_state != IB_PORT_ACTIVE || + iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN)) + goto iboe_out; + if (event == NETDEV_DOWN && + (port_state != IB_PORT_DOWN || + iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE)) + goto iboe_out; + iboe->last_port_state[dev->dev_port] = port_state; + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = dev->dev_port + 1; + ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE : + IB_EVENT_PORT_ERR; + ib_dispatch_event(&ibev); } + +iboe_out: spin_unlock_bh(&iboe->lock); - if (update_qps_port > 0) - mlx4_ib_update_qps(ibdev, dev, update_qps_port); + if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || + event == NETDEV_UP || event == NETDEV_CHANGE) + mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1); } static int mlx4_ib_netdev_event(struct notifier_block *this, @@ -2386,7 +2385,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, return NOTIFY_DONE; ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); - mlx4_ib_scan_netdevs(ibdev, dev, event); + mlx4_ib_scan_netdev(ibdev, dev, event); return NOTIFY_DONE; } @@ -2610,8 +2609,11 @@ static const struct ib_device_ops mlx4_ib_dev_fs_ops = { .destroy_flow = mlx4_ib_destroy_flow, }; -static void *mlx4_ib_add(struct mlx4_dev *dev) +static int mlx4_ib_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; struct mlx4_ib_dev *ibdev; int num_ports = 0; int i, j; @@ -2631,27 +2633,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) /* No point in registering a device with no ports... */ if (num_ports == 0) - return NULL; + return -ENODEV; ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev); if (!ibdev) { dev_err(&dev->persist->pdev->dev, "Device struct alloc failed\n"); - return NULL; + return -ENOMEM; } iboe = &ibdev->iboe; - if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) + err = mlx4_pd_alloc(dev, &ibdev->priv_pdn); + if (err) goto err_dealloc; - if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) + err = mlx4_uar_alloc(dev, &ibdev->priv_uar); + if (err) goto err_pd; ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); - if (!ibdev->uar_map) + if (!ibdev->uar_map) { + err = -ENOMEM; goto err_uar; + } MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); ibdev->dev = dev; @@ -2695,7 +2701,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) spin_lock_init(&iboe->lock); - if (init_node_data(ibdev)) + err = init_node_data(ibdev); + if (err) goto err_map; mlx4_init_sl2vl_tbl(ibdev); @@ -2727,6 +2734,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); if (!new_counter_index) { + err = -ENOMEM; if (allocated) mlx4_counter_free(ibdev->dev, counter_index); goto err_counter; @@ -2744,8 +2752,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); - if (!new_counter_index) + if (!new_counter_index) { + err = -ENOMEM; goto err_counter; + } new_counter_index->index = counter_index; new_counter_index->allocated = 0; list_add_tail(&new_counter_index->list, @@ -2774,8 +2784,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, GFP_KERNEL); - if (!ibdev->ib_uc_qpns_bitmap) + if (!ibdev->ib_uc_qpns_bitmap) { + err = -ENOMEM; goto err_steer_qp_release; + } 
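The mlx4_ib_get_netdev() rewrite at the top of this file replaces mlx4_get_protocol_dev() with an RCU walk over init_net, matching netdevs against the IB device by shared parent and by dev_port. The lookup idiom, reduced to a self-contained sketch (find_port_netdev is a hypothetical name):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Find the netdev sharing @parent that backs IB port @port_num
 * (netdev dev_port is 0-based, IB ports are 1-based).  Returns a
 * referenced netdev; the caller must dev_put() it. */
static struct net_device *find_port_netdev(struct device *parent,
					   u32 port_num)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (dev->dev.parent != parent ||
		    dev->dev_port + 1 != port_num)
			continue;
		dev_hold(dev);	/* take a reference while RCU keeps @dev valid */
		ret = dev;
		break;
	}
	rcu_read_unlock();
	return ret;
}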
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { bitmap_zero(ibdev->ib_uc_qpns_bitmap, @@ -2795,17 +2807,21 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) for (j = 1; j <= ibdev->dev->caps.num_ports; j++) atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); - if (mlx4_ib_alloc_diag_counters(ibdev)) + err = mlx4_ib_alloc_diag_counters(ibdev); + if (err) goto err_steer_free_bitmap; - if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", - &dev->persist->pdev->dev)) + err = ib_register_device(&ibdev->ib_dev, "mlx4_%d", + &dev->persist->pdev->dev); + if (err) goto err_diag_counters; - if (mlx4_ib_mad_init(ibdev)) + err = mlx4_ib_mad_init(ibdev); + if (err) goto err_reg; - if (mlx4_ib_init_sriov(ibdev)) + err = mlx4_ib_init_sriov(ibdev); + if (err) goto err_mad; if (!iboe->nb.notifier_call) { @@ -2839,7 +2855,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) do_slave_init(ibdev, j, 1); } } - return ibdev; + + /* register mlx4 core notifier */ + ibdev->mlx_nb.notifier_call = mlx4_ib_event; + err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb); + WARN(err, "failed to register mlx4 event notifier (%d)", err); + + auxiliary_set_drvdata(adev, ibdev); + return 0; err_notif: if (ibdev->iboe.nb.notifier_call) { @@ -2883,7 +2906,7 @@ err_pd: err_dealloc: ib_dealloc_device(&ibdev->ib_dev); - return NULL; + return err; } int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) @@ -2950,12 +2973,16 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, return err; } -static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) +static void mlx4_ib_remove(struct auxiliary_device *adev) { - struct mlx4_ib_dev *ibdev = ibdev_ptr; + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; + struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev); int p; int i; + mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb); + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); ibdev->ib_active = false; @@ -3176,11 +3203,13 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, } } -static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, - enum mlx4_dev_event event, unsigned long param) +static int mlx4_ib_event(struct notifier_block *this, unsigned long event, + void *param) { + struct mlx4_ib_dev *ibdev = + container_of(this, struct mlx4_ib_dev, mlx_nb); + struct mlx4_dev *dev = ibdev->dev; struct ib_event ibev; - struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); struct mlx4_eqe *eqe = NULL; struct ib_event_work *ew; int p = 0; @@ -3190,22 +3219,28 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, (event == MLX4_DEV_EVENT_PORT_DOWN))) { ew = kmalloc(sizeof(*ew), GFP_ATOMIC); if (!ew) - return; + return NOTIFY_DONE; INIT_WORK(&ew->work, handle_bonded_port_state_event); ew->ib_dev = ibdev; queue_work(wq, &ew->work); - return; + return NOTIFY_DONE; } - if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) + switch (event) { + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + break; + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: eqe = (struct mlx4_eqe *)param; - else - p = (int) param; + break; + default: + p = *(int *)param; + break; + } switch (event) { case MLX4_DEV_EVENT_PORT_UP: if (p > ibdev->num_ports) - return; + return NOTIFY_DONE; if (!mlx4_is_slave(dev) && rdma_port_get_link_layer(&ibdev->ib_dev, p) == IB_LINK_LAYER_INFINIBAND) { @@ -3220,7 +3255,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 
case MLX4_DEV_EVENT_PORT_DOWN: if (p > ibdev->num_ports) - return; + return NOTIFY_DONE; ibev.event = IB_EVENT_PORT_ERR; break; @@ -3233,7 +3268,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - return; + return NOTIFY_DONE; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); @@ -3243,7 +3278,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, queue_work(wq, &ew->work); else handle_port_mgmt_change_event(&ew->work); - return; + return NOTIFY_DONE; case MLX4_DEV_EVENT_SLAVE_INIT: /* here, p is the slave id */ @@ -3259,7 +3294,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 1); } } - return; + return NOTIFY_DONE; case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: if (mlx4_is_master(dev)) { @@ -3275,22 +3310,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, } /* here, p is the slave id */ do_slave_init(ibdev, p, 0); - return; + return NOTIFY_DONE; default: - return; + return NOTIFY_DONE; } - ibev.device = ibdev_ptr; + ibev.device = &ibdev->ib_dev; ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; ib_dispatch_event(&ibev); + return NOTIFY_DONE; } -static struct mlx4_interface mlx4_ib_interface = { - .add = mlx4_ib_add, - .remove = mlx4_ib_remove, - .event = mlx4_ib_event, +static const struct auxiliary_device_id mlx4_ib_id_table[] = { + { .name = MLX4_ADEV_NAME ".ib" }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table); + +static struct mlx4_adrv mlx4_ib_adrv = { + .adrv = { + .name = "ib", + .probe = mlx4_ib_probe, + .remove = mlx4_ib_remove, + .id_table = mlx4_ib_id_table, + }, .protocol = MLX4_PROT_IB_IPV6, .flags = MLX4_INTFF_BONDING }; @@ -3315,7 +3361,7 @@ static int __init mlx4_ib_init(void) if (err) goto clean_cm; - err = mlx4_register_interface(&mlx4_ib_interface); + err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv); if (err) goto clean_mcg; @@ -3337,7 +3383,7 @@ clean_qp_event: static void __exit mlx4_ib_cleanup(void) { - mlx4_unregister_interface(&mlx4_ib_interface); + mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv); mlx4_ib_mcg_destroy(); mlx4_ib_cm_destroy(); mlx4_ib_qp_event_cleanup(); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 17fee1e73a45..41ca1114a995 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -38,6 +38,7 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/idr.h> +#include <linux/notifier.h> #include <rdma/ib_verbs.h> #include <rdma/ib_umem.h> @@ -644,6 +645,7 @@ struct mlx4_ib_dev { spinlock_t reset_flow_resource_lock; struct list_head qp_list; struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES]; + struct notifier_block mlx_nb; }; struct ib_event_work { diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 9167e83fbceb..2eda10b33f2e 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -735,10 +735,6 @@ static int a5psw_port_vlan_add(struct dsa_switch *ds, int port, u16 vid = vlan->vid; int vlan_res_id; - dev_dbg(a5psw->dev, "Add VLAN %d on port %d, %s, %s\n", - vid, port, tagged ? "tagged" : "untagged", - pvid ? 
"PVID" : "no PVID"); - vlan_res_id = a5psw_find_vlan_entry(a5psw, vid); if (vlan_res_id < 0) { vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid); @@ -767,8 +763,6 @@ static int a5psw_port_vlan_del(struct dsa_switch *ds, int port, u16 vid = vlan->vid; int vlan_res_id; - dev_dbg(a5psw->dev, "Removing VLAN %d on port %d\n", vid, port); - vlan_res_id = a5psw_find_vlan_entry(a5psw, vid); if (vlan_res_id < 0) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5d6ea2782c2f..5cc0dbe12132 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -994,7 +994,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, dma_addr -= bp->rx_dma_offset; dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, bp->rx_dir); - skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); + skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); if (!skb) { page_pool_recycle_direct(rxr->page_pool, page); return NULL; @@ -1069,7 +1069,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return NULL; } - skb = build_skb(data, bp->rx_buf_size); + skb = napi_build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (!skb) { @@ -1677,7 +1677,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->data_ptr = new_data + bp->rx_offset; tpa_info->mapping = new_mapping; - skb = build_skb(data, bp->rx_buf_size); + skb = napi_build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, mapping, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c index 2caaf1e9f42f..bcfe52c11804 100644 --- a/drivers/net/ethernet/davicom/dm9051.c +++ b/drivers/net/ethernet/davicom/dm9051.c @@ -1158,9 +1158,7 @@ static int dm9051_phy_connect(struct board_info *db) db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change, PHY_INTERFACE_MODE_MII); - if (IS_ERR(db->phydev)) - return PTR_ERR_OR_ZERO(db->phydev); - return 0; + return PTR_ERR_OR_ZERO(db->phydev); } static int dm9051_probe(struct spi_device *spi) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e23a55977183..8909899e9a31 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1583,25 +1583,18 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, case XDP_REDIRECT: rxq->stats[RX_XDP_REDIRECT]++; err = xdp_do_redirect(fep->netdev, xdp, prog); - if (!err) { - ret = FEC_ENET_XDP_REDIR; - } else { - ret = FEC_ENET_XDP_CONSUMED; - page = virt_to_head_page(xdp->data); - page_pool_put_page(rxq->page_pool, page, sync, true); - } + if (unlikely(err)) + goto xdp_err; + + ret = FEC_ENET_XDP_REDIR; break; case XDP_TX: err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); - if (unlikely(err)) { - ret = FEC_ENET_XDP_CONSUMED; - page = virt_to_head_page(xdp->data); - page_pool_put_page(rxq->page_pool, page, sync, true); - trace_xdp_exception(fep->netdev, prog, act); - } else { - ret = FEC_ENET_XDP_TX; - } + if (unlikely(err)) + goto xdp_err; + + ret = FEC_ENET_XDP_TX; break; default: @@ -1613,9 +1606,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, case XDP_DROP: rxq->stats[RX_XDP_DROP]++; +xdp_err: ret = FEC_ENET_XDP_CONSUMED; page = virt_to_head_page(xdp->data); 
page_pool_put_page(rxq->page_pool, page, sync, true); + if (act != XDP_DROP) + trace_xdp_exception(fep->netdev, prog, act); break; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index e2f2b2179eef..6b5b06c2b4e9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -136,6 +136,7 @@ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ +M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ -1437,6 +1438,12 @@ struct npc_get_kex_cfg_rsp { u8 mkex_pfl_name[MKEX_NAME_LEN]; }; +struct ptp_get_cap_rsp { + struct mbox_msghdr hdr; +#define PTP_CAP_HW_ATOMIC_UPDATE BIT_ULL(0) + u64 cap; +}; + struct flow_msg { unsigned char dmac[6]; unsigned char smac[6]; @@ -1568,6 +1575,8 @@ enum ptp_op { PTP_OP_GET_TSTMP = 2, PTP_OP_SET_THRESH = 3, PTP_OP_EXTTS_ON = 4, + PTP_OP_ADJTIME = 5, + PTP_OP_SET_CLOCK = 6, }; struct ptp_req { @@ -1576,11 +1585,14 @@ struct ptp_req { s64 scaled_ppm; u64 thresh; int extts_on; + s64 delta; + u64 clk; }; struct ptp_rsp { struct mbox_msghdr hdr; u64 clk; + u64 tsc; }; struct npc_get_field_status_req { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index c55c2c441a1a..ffbd22797163 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -12,8 +12,8 @@ #include <linux/hrtimer.h> #include <linux/ktime.h> -#include "ptp.h" #include "mbox.h" +#include "ptp.h" #include "rvu.h" #define DRV_NAME "Marvell PTP Driver" @@ -40,6 +40,7 @@ #define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9) #define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8) #define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10) +#define PTP_CLOCK_CFG_ATOMIC_OP_MASK GENMASK_ULL(28, 26) #define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30) #define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31) @@ -53,36 +54,62 @@ #define PTP_TIMESTAMP 0xF20ULL #define PTP_CLOCK_SEC 0xFD0ULL #define PTP_SEC_ROLLOVER 0xFD8ULL +/* Atomic update related CSRs */ +#define PTP_FRNS_TIMESTAMP 0xFE0ULL +#define PTP_NXT_ROLLOVER_SET 0xFE8ULL +#define PTP_CURR_ROLLOVER_SET 0xFF0ULL +#define PTP_NANO_TIMESTAMP 0xFF8ULL +#define PTP_SEC_TIMESTAMP 0x1000ULL #define CYCLE_MULT 1000 +#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0) +#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1) + +/* PTP atomic update operation type */ +enum atomic_opcode { + ATOMIC_SET = 1, + ATOMIC_INC = 3, + ATOMIC_DEC = 4 +}; + static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; -static bool is_ptp_dev_cnf10kb(struct ptp *ptp) +static bool is_ptp_dev_cnf10ka(struct ptp *ptp) { - return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP; + return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP; } -static bool is_ptp_dev_cn10k(struct ptp *ptp) +static bool is_ptp_dev_cn10ka(struct ptp *ptp) { - return ptp->pdev->device == PCI_DEVID_CN10K_PTP; + return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP; } static bool cn10k_ptp_errata(struct ptp *ptp) { - if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP || - ptp->pdev->subsystem_device == 
PCI_SUBSYS_DEVID_CNF10K_A_PTP) + if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) && + (is_rev_A0(ptp) || is_rev_A1(ptp))) return true; + return false; } -static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp) +static bool is_tstmp_atomic_update_supported(struct rvu *rvu) { - if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP || - ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP) - return true; - return false; + struct ptp *ptp = rvu->ptp; + + if (is_rvu_otx2(rvu)) + return false; + + /* On older silicon variants of CN10K, atomic update feature + * is not available. + */ + if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) && + (is_rev_A0(ptp) || is_rev_A1(ptp))) + return false; + + return true; } static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer) @@ -222,6 +249,65 @@ void ptp_put(struct ptp *ptp) pci_dev_put(ptp->pdev); } +static void ptp_atomic_update(struct ptp *ptp, u64 timestamp) +{ + u64 regval, curr_rollover_set, nxt_rollover_set; + + /* First setup NSECs and SECs */ + writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); + writeq(timestamp / NSEC_PER_SEC, + ptp->reg_base + PTP_SEC_TIMESTAMP); + + nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC); + curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC; + writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET); + writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET); + + /* Now, initiate atomic update */ + regval = readq(ptp->reg_base + PTP_CLOCK_CFG); + regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; + regval |= (ATOMIC_SET << 26); + writeq(regval, ptp->reg_base + PTP_CLOCK_CFG); +} + +static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta) +{ + bool neg_adj = false, atomic_inc_dec = false; + u64 regval, ptp_clock_hi; + + if (delta < 0) { + delta = -delta; + neg_adj = true; + } + + /* use atomic inc/dec when delta < 1 second */ + if (delta < NSEC_PER_SEC) + atomic_inc_dec = true; + + if (!atomic_inc_dec) { + ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); + if (neg_adj) { + if (ptp_clock_hi > delta) + ptp_clock_hi -= delta; + else + ptp_clock_hi = delta - ptp_clock_hi; + } else { + ptp_clock_hi += delta; + } + ptp_atomic_update(ptp, ptp_clock_hi); + } else { + writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); + + /* initiate atomic inc/dec */ + regval = readq(ptp->reg_base + PTP_CLOCK_CFG); + regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; + regval |= neg_adj ? 
(ATOMIC_DEC << 26) : (ATOMIC_INC << 26); + writeq(regval, ptp->reg_base + PTP_CLOCK_CFG); + } +} + static int ptp_adjfine(struct ptp *ptp, long scaled_ppm) { bool neg_adj = false; @@ -277,8 +363,9 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk) return 0; } -void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) +void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts) { + struct ptp *ptp = rvu->ptp; struct pci_dev *pdev; u64 clock_comp; u64 clock_cfg; @@ -297,8 +384,14 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) ptp->clock_rate = sclk * 1000000; /* Program the seconds rollover value to 1 second */ - if (is_ptp_dev_cnf10kb(ptp)) + if (is_tstmp_atomic_update_supported(rvu)) { + writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET); + writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET); writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER); + } /* Enable PTP clock */ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); @@ -320,6 +413,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) clock_cfg |= PTP_CLOCK_CFG_PTP_EN; clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; + clock_cfg |= (ATOMIC_SET << 26); + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); /* Set 50% duty cycle for 1Hz output */ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); @@ -350,7 +447,7 @@ static int ptp_get_tstmp(struct ptp *ptp, u64 *clk) { u64 timestamp; - if (is_ptp_dev_cn10k(ptp)) { + if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) { timestamp = readq(ptp->reg_base + PTP_TIMESTAMP); *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF); } else { @@ -414,14 +511,12 @@ static int ptp_probe(struct pci_dev *pdev, first_ptp_block = ptp; spin_lock_init(&ptp->ptp_lock); - if (is_ptp_tsfmt_sec_nsec(ptp)) - ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; - else - ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; - if (cn10k_ptp_errata(ptp)) { + ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ptp->hrtimer.function = ptp_reset_thresh; + } else { + ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; } return 0; @@ -521,6 +616,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, case PTP_OP_EXTTS_ON: err = ptp_extts_on(rvu->ptp, req->extts_on); break; + case PTP_OP_ADJTIME: + ptp_atomic_adjtime(rvu->ptp, req->delta); + break; + case PTP_OP_SET_CLOCK: + ptp_atomic_update(rvu->ptp, (u64)req->clk); + break; default: err = -EINVAL; break; @@ -528,3 +629,17 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, return err; } + +int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req, + struct ptp_get_cap_rsp *rsp) +{ + if (!rvu->ptp) + return -ENODEV; + + if (is_tstmp_atomic_update_supported(rvu)) + rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE; + else + rsp->cap &= ~BIT_ULL_MASK(0); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index b9d92abc3844..1229344c7279 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -23,9 +23,10 @@ struct ptp { u32 clock_period; }; +struct rvu; struct ptp 
*ptp_get(void); void ptp_put(struct ptp *ptp); -void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts); +void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts); extern struct pci_driver ptp_driver; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 73df2d564545..22c395c7d040 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -3322,7 +3322,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&rvu->rswitch.switch_lock); if (rvu->fwdata) - ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, + ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, rvu->fwdata->ptp_ext_tstamp); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index e8e65fd7888d..c4d999ef5ab4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -17,6 +17,7 @@ #include "mbox.h" #include "npc.h" #include "rvu_reg.h" +#include "ptp.h" /* PCI device IDs */ #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 @@ -26,6 +27,7 @@ #define PCI_SUBSYS_DEVID_98XX 0xB100 #define PCI_SUBSYS_DEVID_96XX 0xB200 #define PCI_SUBSYS_DEVID_CN10K_A 0xB900 +#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00 #define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 #define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 @@ -634,6 +636,16 @@ static inline bool is_rvu_otx2(struct rvu *rvu) midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } +static inline bool is_cnf10ka_a0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A && + (pdev->revision & 0x0F) == 0x0) + return true; + return false; +} + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) { u64 npc_const3; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 5fd05d94de7c..4c6032ee7800 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -326,6 +326,7 @@ struct otx2_ptp { struct ptp_pin_desc extts_config; u64 (*convert_rx_ptp_tstmp)(u64 timestamp); u64 (*convert_tx_ptp_tstmp)(u64 timestamp); + u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp); struct delayed_work synctstamp_work; u64 tstamp; u32 base_ns; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 896b2f9bac34..3a72b0793d4a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -10,6 +10,65 @@ #include "otx2_common.h" #include "otx2_ptp.h" +static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp) +{ + struct ptp_get_cap_rsp *rsp; + struct msg_req *req; + int err; + + if (!ptp->nic) + return false; + + mutex_lock(&ptp->nic->mbox.lock); + req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox); + if (!req) { + mutex_unlock(&ptp->nic->mbox.lock); + return false; + } + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) { + mutex_unlock(&ptp->nic->mbox.lock); + return false; + } + rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + mutex_unlock(&ptp->nic->mbox.lock); + + if (IS_ERR(rsp)) + return false; + + if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE) + return 
true; + + return false; +} + +static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + struct ptp_req *req; + int rc; + + if (!ptp->nic) + return -ENODEV; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + req->op = PTP_OP_ADJTIME; + req->delta = delta; + rc = otx2_sync_mbox_msg(&ptp->nic->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return rc; +} + static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp) { struct ptp_req *req; @@ -37,6 +96,49 @@ static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp) return rsp->clk; } +static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + u64 tstamp; + + tstamp = otx2_ptp_get_clock(ptp); + + *ts = ns_to_timespec64(tstamp); + return 0; +} + +static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + struct ptp_req *req; + u64 nsec; + int rc; + + if (!ptp->nic) + return -ENODEV; + + nsec = timespec64_to_ns(ts); + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->op = PTP_OP_SET_CLOCK; + req->clk = nsec; + rc = otx2_sync_mbox_msg(&ptp->nic->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return rc; +} + static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -124,16 +226,7 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp) return rsp->clk; } -static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp) -{ - struct otx2_nic *pfvf = ptp->nic; - - mutex_lock(&pfvf->mbox.lock); - *tstamp = timecounter_read(&ptp->time_counter); - mutex_unlock(&pfvf->mbox.lock); -} - -static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); @@ -146,32 +239,33 @@ static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) return 0; } -static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info, - struct timespec64 *ts) +static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); u64 tstamp; - otx2_get_ptpclock(ptp, &tstamp); + mutex_lock(&ptp->nic->mbox.lock); + tstamp = timecounter_read(&ptp->time_counter); + mutex_unlock(&ptp->nic->mbox.lock); *ts = ns_to_timespec64(tstamp); return 0; } -static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, - const struct timespec64 *ts) +static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); - struct otx2_nic *pfvf = ptp->nic; u64 nsec; nsec = timespec64_to_ns(ts); - mutex_lock(&pfvf->mbox.lock); + mutex_lock(&ptp->nic->mbox.lock); timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec); - mutex_unlock(&pfvf->mbox.lock); + mutex_unlock(&ptp->nic->mbox.lock); return 0; } @@ -190,6 +284,12 @@ static int otx2_ptp_verify_pin(struct 
ptp_clock_info *ptp, unsigned int pin, return 0; } +static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp) +{ + /* On HW which supports atomic updates, timecounter is not initialized */ + return tstamp; +} + static void otx2_ptp_extts_check(struct work_struct *work) { struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, @@ -204,7 +304,7 @@ static void otx2_ptp_extts_check(struct work_struct *work) if (tstmp != ptp->last_extts) { event.type = PTP_CLOCK_EXTTS; event.index = 0; - event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp); + event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp); ptp_clock_event(ptp->ptp_clock, &event); new_thresh = tstmp % 500000000; if (ptp->thresh != new_thresh) { @@ -229,7 +329,7 @@ static void otx2_sync_tstamp(struct work_struct *work) tstamp = otx2_ptp_get_clock(ptp); mutex_unlock(&pfvf->mbox.lock); - ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp); ptp->base_ns = tstamp % NSEC_PER_SEC; schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250)); @@ -302,15 +402,6 @@ int otx2_ptp_init(struct otx2_nic *pfvf) ptp_ptr->nic = pfvf; - cc = &ptp_ptr->cycle_counter; - cc->read = ptp_cc_read; - cc->mask = CYCLECOUNTER_MASK(64); - cc->mult = 1; - cc->shift = 0; - - timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, - ktime_to_ns(ktime_get_real())); - snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP"); ptp_ptr->extts_config.index = 0; ptp_ptr->extts_config.func = PTP_PF_NONE; @@ -324,13 +415,33 @@ int otx2_ptp_init(struct otx2_nic *pfvf) .pps = 0, .pin_config = &ptp_ptr->extts_config, .adjfine = otx2_ptp_adjfine, - .adjtime = otx2_ptp_adjtime, - .gettime64 = otx2_ptp_gettime, - .settime64 = otx2_ptp_settime, .enable = otx2_ptp_enable, .verify = otx2_ptp_verify_pin, }; + /* Check whether hardware supports atomic updates to timestamp */ + if (is_tstmp_atomic_update_supported(ptp_ptr)) { + ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime; + ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime; + ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime; + + ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time; + } else { + ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime; + ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime; + ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime; + + cc = &ptp_ptr->cycle_counter; + cc->read = ptp_cc_read; + cc->mask = CYCLECOUNTER_MASK(64); + cc->mult = 1; + cc->shift = 0; + ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time; + + timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, + ktime_to_ns(ktime_get_real())); + } + INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check); ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); @@ -387,7 +498,7 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) if (!pfvf->ptp) return -ENODEV; - *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + *tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp); return 0; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index fe05c9020269..6ad42e3b488f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1135,10 +1135,13 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * soc->txrx.txd_size, - 
&eth->phy_scratch_ring, - GFP_KERNEL); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) + eth->scratch_ring = eth->sram_base; + else + eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, + cnt * soc->txrx.txd_size, + &eth->phy_scratch_ring, + GFP_KERNEL); if (unlikely(!eth->scratch_ring)) return -ENOMEM; @@ -1325,6 +1328,10 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, data = TX_DMA_PLEN0(info->size); if (info->last) data |= TX_DMA_LS0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + data |= TX_DMA_PREP_ADDR64(info->addr); + WRITE_ONCE(desc->txd3, data); /* set forward port */ @@ -1994,6 +2001,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, bool xdp_flush = false; int idx; struct sk_buff *skb; + u64 addr64 = 0; u8 *data, *new_data; struct mtk_rx_dma_v2 *rxd, trxd; int done = 0, bytes = 0; @@ -2109,7 +2117,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; } - dma_unmap_single(eth->dma_dev, trxd.rxd1, + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + addr64 = RX_DMA_GET_ADDR64(trxd.rxd2); + + dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), ring->buf_size, DMA_FROM_DEVICE); skb = build_skb(data, ring->frag_size); @@ -2175,6 +2186,9 @@ release_desc: else rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); + ring->calc_idx = idx; done++; } @@ -2446,8 +2460,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, - &ring->phys, GFP_KERNEL); + if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { + ring->dma = eth->sram_base + ring_size * sz; + ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; + } else { + ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, + &ring->phys, GFP_KERNEL); + } + if (!ring->dma) goto no_tx_mem; @@ -2546,8 +2566,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) kfree(ring->buf); ring->buf = NULL; } - - if (ring->dma) { + if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) { dma_free_coherent(eth->dma_dev, ring->dma_size * soc->txrx.txd_size, ring->dma, ring->phys); @@ -2566,9 +2585,14 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct mtk_rx_ring *ring; - int rx_data_len, rx_dma_size; + int rx_data_len, rx_dma_size, tx_ring_size; int i; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + tx_ring_size = MTK_QDMA_RING_SIZE; + else + tx_ring_size = MTK_DMA_SIZE; + if (rx_flag == MTK_RX_FLAGS_QDMA) { if (ring_no) return -EINVAL; @@ -2603,9 +2627,20 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) ring->page_pool = pp; } - ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * eth->soc->txrx.rxd_size, - &ring->phys, GFP_KERNEL); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || + rx_flag != MTK_RX_FLAGS_NORMAL) { + ring->dma = dma_alloc_coherent(eth->dma_dev, + rx_dma_size * eth->soc->txrx.rxd_size, + &ring->phys, GFP_KERNEL); + } else { + struct mtk_tx_ring *tx_ring = &eth->tx_ring; + + ring->dma = tx_ring->dma + tx_ring_size * + eth->soc->txrx.txd_size * (ring_no + 1); + ring->phys = tx_ring->phys + tx_ring_size * + eth->soc->txrx.txd_size * (ring_no + 1); + } + if (!ring->dma) return -ENOMEM; @@ -2646,6 +2681,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) else rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
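The mtk_eth hunks above let descriptor rings live in a dedicated SRAM window instead of coherent DRAM: allocation becomes pointer arithmetic into that window (one CPU view, one device view), and teardown must skip dma_free_coherent() for SRAM-backed rings. The split, reduced to a self-contained sketch (struct ring_mem and both helpers are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct ring_mem {
	void *cpu;		/* CPU-visible descriptor memory */
	dma_addr_t phys;	/* address programmed into the NIC */
};

/* Back a ring either by a slice of a pre-mapped SRAM window (no
 * allocation, just offsets) or by DMA-coherent DRAM. */
static int ring_mem_alloc(struct device *dev, struct ring_mem *rm,
			  void *sram_cpu, dma_addr_t sram_phys,
			  size_t off, size_t size, bool use_sram)
{
	if (use_sram) {
		rm->cpu = sram_cpu + off;
		rm->phys = sram_phys + off;
		return 0;
	}
	rm->cpu = dma_alloc_coherent(dev, size, &rm->phys, GFP_KERNEL);
	return rm->cpu ? 0 : -ENOMEM;
}

static void ring_mem_free(struct device *dev, struct ring_mem *rm,
			  size_t size, bool use_sram)
{
	if (!use_sram && rm->cpu)	/* SRAM slices are never freed */
		dma_free_coherent(dev, size, rm->cpu, rm->phys);
	rm->cpu = NULL;
}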
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); + rxd->rxd3 = 0; rxd->rxd4 = 0; if (mtk_is_netsys_v2_or_greater(eth)) { @@ -2690,8 +2728,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return 0; } -static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) { + u64 addr64 = 0; int i; if (ring->data && ring->dma) { @@ -2705,7 +2744,10 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) if (!rxd->rxd1) continue; - dma_unmap_single(eth->dma_dev, rxd->rxd1, + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + addr64 = RX_DMA_GET_ADDR64(rxd->rxd2); + + dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), ring->buf_size, DMA_FROM_DEVICE); mtk_rx_put_buff(ring, ring->data[i], false); } @@ -2713,7 +2755,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) ring->data = NULL; } - if (ring->dma) { + if (!in_sram && ring->dma) { dma_free_coherent(eth->dma_dev, ring->dma_size * eth->soc->txrx.rxd_size, ring->dma, ring->phys); @@ -3073,7 +3115,7 @@ static void mtk_dma_free(struct mtk_eth *eth) for (i = 0; i < MTK_MAX_DEVS; i++) if (eth->netdev[i]) netdev_reset_queue(eth->netdev[i]); - if (eth->scratch_ring) { + if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { dma_free_coherent(eth->dma_dev, MTK_QDMA_RING_SIZE * soc->txrx.txd_size, eth->scratch_ring, eth->phy_scratch_ring); @@ -3081,13 +3123,13 @@ static void mtk_dma_free(struct mtk_eth *eth) eth->phy_scratch_ring = 0; } mtk_tx_clean(eth); - mtk_rx_clean(eth, &eth->rx_ring[0]); - mtk_rx_clean(eth, &eth->rx_ring_qdma); + mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); + mtk_rx_clean(eth, &eth->rx_ring_qdma, false); if (eth->hwlro) { mtk_hwlro_rx_uninit(eth); for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) - mtk_rx_clean(eth, &eth->rx_ring[i]); + mtk_rx_clean(eth, &eth->rx_ring[i], false); } kfree(eth->scratch_head); @@ -3613,19 +3655,34 @@ static void mtk_hw_reset(struct mtk_eth *eth) { u32 val; - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v2_or_greater(eth)) regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); + + if (mtk_is_netsys_v3_or_greater(eth)) { + val = RSTCTRL_PPE0_V3; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1_V3; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + val |= RSTCTRL_PPE2; + + val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2; + } else if (mtk_is_netsys_v2_or_greater(eth)) { val = RSTCTRL_PPE0_V2; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1; } else { val = RSTCTRL_PPE0; } - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val |= RSTCTRL_PPE1; - ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); - if (mtk_is_netsys_v2_or_greater(eth)) + if (mtk_is_netsys_v3_or_greater(eth)) + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, + 0x6f8ff); + else if (mtk_is_netsys_v2_or_greater(eth)) regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff); } @@ -3651,13 +3708,21 @@ static void mtk_hw_warm_reset(struct mtk_eth *eth) return; } - if (mtk_is_netsys_v2_or_greater(eth)) + if (mtk_is_netsys_v3_or_greater(eth)) { + rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + rst_mask |= RSTCTRL_PPE1_V3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + rst_mask |= RSTCTRL_PPE2; + + rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
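The RX_DMA_PREP_ADDR64()/RX_DMA_GET_ADDR64() helpers used above implement MT7988's 36-bit addressing: the low 32 bits of a buffer address go in rxd1 and bits 35:32 are packed into a 4-bit field of rxd2 with FIELD_PREP()/FIELD_GET() (on !CONFIG_64BIT the header compiles both macros down to 0). A minimal demonstration of the same packing, with an illustrative mask name:

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_ADDR64_MASK	GENMASK(3, 0)	/* descriptor bits holding addr[35:32] */

/* Pack the high bits of @addr into the 4-bit descriptor field. */
static inline u32 demo_prep_addr64(dma_addr_t addr)
{
	return FIELD_PREP(DEMO_ADDR64_MASK, upper_32_bits(addr));
}

/* Rebuild the full DMA address from the two descriptor words. */
static inline u64 demo_get_addr64(u32 desc_lo, u32 desc_word2)
{
	return ((u64)FIELD_GET(DEMO_ADDR64_MASK, desc_word2) << 32) | desc_lo;
}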
+ } else if (mtk_is_netsys_v2_or_greater(eth)) { rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2; - else + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + rst_mask |= RSTCTRL_PPE1; + } else { rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - rst_mask |= RSTCTRL_PPE1; + } regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); @@ -4009,11 +4074,17 @@ static void mtk_prepare_for_reset(struct mtk_eth *eth) u32 val; int i; - /* disabe FE P3 and P4 */ - val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val |= MTK_FE_LINK_DOWN_P4; - mtk_w32(eth, val, MTK_FE_GLO_CFG); + /* set FE PPE ports link down */ + for (i = MTK_GMAC1_ID; + i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); + i += 2) { + val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT); + mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); + } /* adjust PPE configurations to prepare for reset */ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) @@ -4074,11 +4145,18 @@ static void mtk_pending_work(struct work_struct *work) } } - /* enabe FE P3 and P4 */ - val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val &= ~MTK_FE_LINK_DOWN_P4; - mtk_w32(eth, val, MTK_FE_GLO_CFG); + /* set FE PPE ports link up */ + for (i = MTK_GMAC1_ID; + i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); + i += 2) { + val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT); + + mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); + } clear_bit(MTK_RESETTING, &eth->state); @@ -4640,7 +4718,7 @@ static int mtk_sgmii_init(struct mtk_eth *eth) static int mtk_probe(struct platform_device *pdev) { - struct resource *res = NULL; + struct resource *res = NULL, *res_sram; struct device_node *mac_np; struct mtk_eth *eth; int err, i; @@ -4660,6 +4738,28 @@ static int mtk_probe(struct platform_device *pdev) if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) eth->ip_align = NET_IP_ALIGN; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { + /* SRAM is actual memory and supports transparent access just like DRAM. * Hence we don't require __iomem being set and don't need to use accessor * functions to read from or write to SRAM. + */
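The comment just above is the justification for the __force cast that follows in mtk_probe(): the SRAM window behaves like ordinary RAM, so the driver intentionally strips the __iomem annotation from the ioremap cookie. The shape of that cast, as a sketch (map_plain_sram is a hypothetical helper):

#include <linux/platform_device.h>

/* Map an SRAM region that behaves like normal memory and deliberately
 * drop the __iomem annotation, as the driver does for eth->sram_base.
 * May return an ERR_PTR(); callers check with IS_ERR(). */
static void *map_plain_sram(struct platform_device *pdev, int index)
{
	return (void __force *)devm_platform_ioremap_resource(pdev, index);
}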
+ if (mtk_is_netsys_v3_or_greater(eth)) { + eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(eth->sram_base)) + return PTR_ERR(eth->sram_base); + } else { + eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + if (err) { + dev_err(&pdev->dev, "Wrong DMA config\n"); + return -EINVAL; + } + } + spin_lock_init(&eth->page_lock); spin_lock_init(&eth->tx_irq_lock); spin_lock_init(&eth->rx_irq_lock); @@ -4723,6 +4823,18 @@ static int mtk_probe(struct platform_device *pdev) err = -EINVAL; goto err_destroy_sgmii; } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { + if (mtk_is_netsys_v3_or_greater(eth)) { + res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_sram) { + err = -EINVAL; + goto err_destroy_sgmii; + } + eth->phy_scratch_ring = res_sram->start; + } else { + eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; + } + } } if (eth->soc->offload_version) { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 4a2470fbad2c..403219d987ef 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -76,9 +76,8 @@ #define MTK_HW_LRO_SDL_REMAIN_ROOM 1522 /* Frame Engine Global Configuration */ -#define MTK_FE_GLO_CFG 0x00 -#define MTK_FE_LINK_DOWN_P3 BIT(11) -#define MTK_FE_LINK_DOWN_P4 BIT(12) +#define MTK_FE_GLO_CFG(x) (((x) == MTK_GMAC3_ID) ? 0x24 : 0x00) +#define MTK_FE_LINK_DOWN_P(x) BIT(((x) + 8) % 16) /* Frame Engine Global Reset Register */ #define MTK_RST_GL 0x04 @@ -133,10 +132,15 @@ #define MTK_GDMA_XGDM_SEL BIT(31) /* Unicast Filter MAC Address Register - Low */ -#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) +#define MTK_GDMA_MAC_ADRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \ + 0x548 : 0x508 + (_x * 0x1000); }) /* Unicast Filter MAC Address Register - High */ -#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? 
\ + 0x54C : 0x50C + (_x * 0x1000); }) + +/* Internal SRAM offset */ +#define MTK_ETH_SRAM_OFFSET 0x40000 /* FE global misc reg*/ #define MTK_FE_GLO_MISC 0x124 @@ -327,6 +331,14 @@ #define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) #define TX_DMA_SWC BIT(14) #define TX_DMA_PQID GENMASK(3, 0) +#define TX_DMA_ADDR64_MASK GENMASK(3, 0) +#if IS_ENABLED(CONFIG_64BIT) +# define TX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32) +# define TX_DMA_PREP_ADDR64(x) FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32)) +#else +# define TX_DMA_GET_ADDR64(x) (0) +# define TX_DMA_PREP_ADDR64(x) (0) +#endif /* PDMA on MT7628 */ #define TX_DMA_DONE BIT(31) @@ -339,6 +351,14 @@ #define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) #define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) #define RX_DMA_VTAG BIT(15) +#define RX_DMA_ADDR64_MASK GENMASK(3, 0) +#if IS_ENABLED(CONFIG_64BIT) +# define RX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32) +# define RX_DMA_PREP_ADDR64(x) FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32)) +#else +# define RX_DMA_GET_ADDR64(x) (0) +# define RX_DMA_PREP_ADDR64(x) (0) +#endif /* QDMA descriptor rxd3 */ #define RX_DMA_VID(x) ((x) & VLAN_VID_MASK) @@ -503,7 +523,7 @@ #define ETHSYS_SYSCFG0 0x14 #define SYSCFG0_GE_MASK 0x3 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) -#define SYSCFG0_SGMII_MASK GENMASK(9, 8) +#define SYSCFG0_SGMII_MASK GENMASK(9, 7) #define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK) #define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK) #define SYSCFG0_SGMII_GMAC1_V2 BIT(9) @@ -520,9 +540,15 @@ /* ethernet reset control register */ #define ETHSYS_RSTCTRL 0x34 #define RSTCTRL_FE BIT(6) +#define RSTCTRL_WDMA0 BIT(24) +#define RSTCTRL_WDMA1 BIT(25) +#define RSTCTRL_WDMA2 BIT(26) #define RSTCTRL_PPE0 BIT(31) #define RSTCTRL_PPE0_V2 BIT(30) #define RSTCTRL_PPE1 BIT(31) +#define RSTCTRL_PPE0_V3 BIT(29) +#define RSTCTRL_PPE1_V3 BIT(30) +#define RSTCTRL_PPE2 BIT(31) #define RSTCTRL_ETH BIT(23) /* ethernet reset check idle register */ @@ -929,7 +955,10 @@ enum mkt_eth_capabilities { MTK_QDMA_BIT, MTK_SOC_MT7628_BIT, MTK_RSTCTRL_PPE1_BIT, + MTK_RSTCTRL_PPE2_BIT, MTK_U3_COPHY_V2_BIT, + MTK_SRAM_BIT, + MTK_36BIT_DMA_BIT, /* MUX BITS*/ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, @@ -963,7 +992,10 @@ enum mkt_eth_capabilities { #define MTK_QDMA BIT_ULL(MTK_QDMA_BIT) #define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT) #define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT) +#define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT) #define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT) +#define MTK_SRAM BIT_ULL(MTK_SRAM_BIT) +#define MTK_36BIT_DMA BIT_ULL(MTK_36BIT_DMA_BIT) #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) @@ -1039,13 +1071,14 @@ enum mkt_eth_capabilities { #define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \ - MTK_RSTCTRL_PPE1) + MTK_RSTCTRL_PPE1 | MTK_SRAM) #define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ - MTK_RSTCTRL_PPE1) + MTK_RSTCTRL_PPE1 | MTK_SRAM) -#define MT7988_CAPS (MTK_GDM1_ESW | MTK_QDMA | MTK_RSTCTRL_PPE1) +#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \ + MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM) struct mtk_tx_dma_desc_info { dma_addr_t addr; @@ -1205,6 +1238,7 @@ struct mtk_eth { struct device *dev; 
struct device *dma_dev; void __iomem *base; + void *sram_base; spinlock_t page_lock; spinlock_t tx_irq_lock; spinlock_t rx_irq_lock; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 1b4b1f642317..825e05fb8607 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -27,6 +27,7 @@ config MLX4_EN_DCB config MLX4_CORE tristate depends on PCI + select AUXILIARY_BUS select NET_DEVLINK default n diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 0eb7b83637d8..0d8a362c2673 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -194,7 +194,7 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) mutex_unlock(&persist->device_state_mutex); /* At that step HW was already reset, now notify clients */ - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, NULL); mlx4_cmd_wake_completions(dev); return; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index c56d2194cbfc..f5b1f8c7834f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2113,7 +2113,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) goto inform_slave_state; - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, &slave); /* write the version in the event field */ reply |= mlx4_comm_get_version(); @@ -2152,7 +2152,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (mlx4_master_activate_admin_state(priv, slave)) goto reset_slave; slave_state[slave].active = true; - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, &slave); break; case MLX4_COMM_CMD_VHCR_POST: if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index f1259bdb1a29..d8f4d00ad26b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -183,24 +183,31 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) } } -static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) +static int mlx4_en_event(struct notifier_block *this, unsigned long event, + void *param) { - struct mlx4_en_dev *endev = ctx; - - return endev->pndev[port]; -} - -static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, - enum mlx4_dev_event event, unsigned long port) -{ - struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; + struct mlx4_en_dev *mdev = + container_of(this, struct mlx4_en_dev, mlx_nb); + struct mlx4_dev *dev = mdev->dev; struct mlx4_en_priv *priv; + int port; + + switch (event) { + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: + case MLX4_DEV_EVENT_SLAVE_INIT: + case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: + break; + default: + port = *(int *)param; + break; + } switch (event) { case MLX4_DEV_EVENT_PORT_UP: case MLX4_DEV_EVENT_PORT_DOWN: if (!mdev->pndev[port]) - return; + return NOTIFY_DONE; priv = netdev_priv(mdev->pndev[port]); /* To prevent races, we poll the link state in a separate task rather than changing it here */ @@ -212,23 
+219,30 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, mlx4_err(mdev, "Internal error detected, restarting device\n"); break; + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: case MLX4_DEV_EVENT_SLAVE_INIT: case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: break; default: if (port < 1 || port > dev->caps.num_ports || !mdev->pndev[port]) - return; - mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, - (int) port); + return NOTIFY_DONE; + mlx4_warn(mdev, "Unhandled event %d for port %d\n", (int)event, + port); } + + return NOTIFY_DONE; } -static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) +static void mlx4_en_remove(struct auxiliary_device *adev) { - struct mlx4_en_dev *mdev = endev_ptr; + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; + struct mlx4_en_dev *mdev = auxiliary_get_drvdata(adev); int i; + mlx4_unregister_event_notifier(dev, &mdev->mlx_nb); + mutex_lock(&mdev->state_lock); mdev->device_up = false; mutex_unlock(&mdev->state_lock); @@ -242,52 +256,41 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) iounmap(mdev->uar_map); mlx4_uar_free(dev, &mdev->priv_uar); mlx4_pd_free(dev, mdev->priv_pdn); - if (mdev->nb.notifier_call) - unregister_netdevice_notifier(&mdev->nb); + if (mdev->netdev_nb.notifier_call) + unregister_netdevice_notifier(&mdev->netdev_nb); kfree(mdev); } -static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx) -{ - int i; - struct mlx4_en_dev *mdev = ctx; - - /* Create a netdev for each port */ - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - mlx4_info(mdev, "Activating port:%d\n", i); - if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) - mdev->pndev[i] = NULL; - } - - /* register notifier */ - mdev->nb.notifier_call = mlx4_en_netdev_event; - if (register_netdevice_notifier(&mdev->nb)) { - mdev->nb.notifier_call = NULL; - mlx4_err(mdev, "Failed to create notifier\n"); - } -} - -static void *mlx4_en_add(struct mlx4_dev *dev) +static int mlx4_en_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; struct mlx4_en_dev *mdev; - int i; + int err, i; printk_once(KERN_INFO "%s", mlx4_en_version); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); - if (!mdev) + if (!mdev) { + err = -ENOMEM; goto err_free_res; + } - if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) + err = mlx4_pd_alloc(dev, &mdev->priv_pdn); + if (err) goto err_free_dev; - if (mlx4_uar_alloc(dev, &mdev->priv_uar)) + err = mlx4_uar_alloc(dev, &mdev->priv_uar); + if (err) goto err_pd; mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); - if (!mdev->uar_map) + if (!mdev->uar_map) { + err = -ENOMEM; goto err_uar; + } spin_lock_init(&mdev->uar_lock); mdev->dev = dev; @@ -299,13 +302,15 @@ static void *mlx4_en_add(struct mlx4_dev *dev) if (!mdev->LSO_support) mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n"); - if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, - MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, - 0, 0, &mdev->mr)) { + err = mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, + MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, 0, 0, + &mdev->mr); + if (err) { mlx4_err(mdev, "Failed allocating memory region\n"); goto err_map; } - if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { + err = mlx4_mr_enable(mdev->dev, &mdev->mr); + if (err) { mlx4_err(mdev, "Failed enabling memory region\n"); goto err_mr; } @@ 
@@ -325,15 +330,39 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	 * Note: we cannot use the shared workqueue because of deadlocks caused
 	 *       by the rtnl lock */
 	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
-	if (!mdev->workqueue)
+	if (!mdev->workqueue) {
+		err = -ENOMEM;
 		goto err_mr;
+	}
 
 	/* At this stage all non-port specific tasks are complete:
 	 * mark the card state as up */
 	mutex_init(&mdev->state_lock);
 	mdev->device_up = true;
 
-	return mdev;
+	/* register mlx4 core notifier */
+	mdev->mlx_nb.notifier_call = mlx4_en_event;
+	err = mlx4_register_event_notifier(dev, &mdev->mlx_nb);
+	WARN(err, "failed to register mlx4 event notifier (%d)", err);
+
+	/* Setup ports */
+
+	/* Create a netdev for each port */
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		mlx4_info(mdev, "Activating port:%d\n", i);
+		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+			mdev->pndev[i] = NULL;
+	}
+
+	/* register netdev notifier */
+	mdev->netdev_nb.notifier_call = mlx4_en_netdev_event;
+	if (register_netdevice_notifier(&mdev->netdev_nb)) {
+		mdev->netdev_nb.notifier_call = NULL;
+		mlx4_err(mdev, "Failed to create netdev notifier\n");
+	}
+
+	auxiliary_set_drvdata(adev, mdev);
+	return 0;
 
 err_mr:
 	(void) mlx4_mr_free(dev, &mdev->mr);
@@ -347,16 +376,24 @@ err_pd:
 err_free_dev:
 	kfree(mdev);
 err_free_res:
-	return NULL;
+	return err;
 }
 
-static struct mlx4_interface mlx4_en_interface = {
-	.add		= mlx4_en_add,
-	.remove		= mlx4_en_remove,
-	.event		= mlx4_en_event,
-	.get_dev	= mlx4_en_get_netdev,
+static const struct auxiliary_device_id mlx4_en_id_table[] = {
+	{ .name = MLX4_ADEV_NAME ".eth" },
+	{},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx4_en_id_table);
+
+static struct mlx4_adrv mlx4_en_adrv = {
+	.adrv = {
+		.name = "eth",
+		.probe = mlx4_en_probe,
+		.remove = mlx4_en_remove,
+		.id_table = mlx4_en_id_table,
+	},
 	.protocol	= MLX4_PROT_ETH,
-	.activate	= mlx4_en_activate,
 };
 
 static void mlx4_en_verify_params(void)
@@ -385,12 +422,12 @@ static int __init mlx4_en_init(void)
 	mlx4_en_verify_params();
 	mlx4_en_init_ptys2ethtool_map();
 
-	return mlx4_register_interface(&mlx4_en_interface);
+	return mlx4_register_auxiliary_driver(&mlx4_en_adrv);
 }
 
 static void __exit mlx4_en_cleanup(void)
 {
-	mlx4_unregister_interface(&mlx4_en_interface);
+	mlx4_unregister_auxiliary_driver(&mlx4_en_adrv);
 }
 
 module_init(mlx4_en_init);
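The en_main.c conversion above is the standard auxiliary bus client pattern: the core creates devices named "<module>.<suffix>" ("mlx4_core.eth" here), and a client driver binds by listing that string in its id_table. A stripped-down skeleton of such a client, with hypothetical example_* names (only the "mlx4_core.eth" match string is taken from the patch):

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	/* adev->dev.parent is the device the core parented us to; state
	 * is typically attached with auxiliary_set_drvdata(adev, ...). */
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
	/* tear down whatever probe set up */
}

static const struct auxiliary_device_id example_id_table[] = {
	{ .name = "mlx4_core.eth" },	/* "<modname>.<devname>" */
	{}
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct auxiliary_driver example_adrv = {
	.name = "eth",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_id_table,
};
module_auxiliary_driver(example_adrv);

MODULE_LICENSE("GPL");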
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 403604ceebc8..33bbcced8105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2894,63 +2894,6 @@ static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
 	.xmo_rx_hash	= mlx4_en_xdp_rx_hash,
 };
 
-struct mlx4_en_bond {
-	struct work_struct work;
-	struct mlx4_en_priv *priv;
-	int is_bonded;
-	struct mlx4_port_map port_map;
-};
-
-static void mlx4_en_bond_work(struct work_struct *work)
-{
-	struct mlx4_en_bond *bond = container_of(work,
-						 struct mlx4_en_bond,
-						 work);
-	int err = 0;
-	struct mlx4_dev *dev = bond->priv->mdev->dev;
-
-	if (bond->is_bonded) {
-		if (!mlx4_is_bonded(dev)) {
-			err = mlx4_bond(dev);
-			if (err)
-				en_err(bond->priv, "Fail to bond device\n");
-		}
-		if (!err) {
-			err = mlx4_port_map_set(dev, &bond->port_map);
-			if (err)
-				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
-				       bond->port_map.port1,
-				       bond->port_map.port2,
-				       err);
-		}
-	} else if (mlx4_is_bonded(dev)) {
-		err = mlx4_unbond(dev);
-		if (err)
-			en_err(bond->priv, "Fail to unbond device\n");
-	}
-	dev_put(bond->priv->dev);
-	kfree(bond);
-}
-
-static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
-				   u8 v2p_p1, u8 v2p_p2)
-{
-	struct mlx4_en_bond *bond;
-
-	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
-	if (!bond)
-		return -ENOMEM;
-
-	INIT_WORK(&bond->work, mlx4_en_bond_work);
-	bond->priv = priv;
-	bond->is_bonded = is_bonded;
-	bond->port_map.port1 = v2p_p1;
-	bond->port_map.port2 = v2p_p2;
-	dev_hold(priv->dev);
-	queue_work(priv->mdev->workqueue, &bond->work);
-	return 0;
-}
-
 int mlx4_en_netdev_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
@@ -2960,14 +2903,13 @@ int mlx4_en_netdev_event(struct notifier_block *this,
 	struct mlx4_dev *dev;
 	int i, num_eth_ports = 0;
 	bool do_bond = true;
-	struct mlx4_en_priv *priv;
 	u8 v2p_port1 = 0;
 	u8 v2p_port2 = 0;
 
 	if (!net_eq(dev_net(ndev), &init_net))
 		return NOTIFY_DONE;
 
-	mdev = container_of(this, struct mlx4_en_dev, nb);
+	mdev = container_of(this, struct mlx4_en_dev, netdev_nb);
 	dev = mdev->dev;
 
 	/* Go into this mode only when two network devices set on two ports
@@ -2995,7 +2937,6 @@ int mlx4_en_netdev_event(struct notifier_block *this,
 	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
 		return NOTIFY_DONE;
 
-	priv = netdev_priv(ndev);
 	if (do_bond) {
 		struct netdev_notifier_bonding_info *notifier_info = ptr;
 		struct netdev_bonding_info *bonding_info =
@@ -3062,8 +3003,7 @@ int mlx4_en_netdev_event(struct notifier_block *this,
 		}
 	}
 
-	mlx4_en_queue_bond_work(priv, do_bond,
-				v2p_port1, v2p_port2);
+	mlx4_queue_bond_work(dev, do_bond, v2p_port1, v2p_port2);
 
 	return NOTIFY_DONE;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 414e390e6b48..6598b10a9ff4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -501,7 +501,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	int port;
 	int slave = 0;
 	int ret;
-	u32 flr_slave;
+	int flr_slave;
 	u8 update_slave_state;
 	int i;
 	enum slave_port_gen_event gen_event;
@@ -606,8 +606,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
 			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
 			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
-						    port);
+				mlx4_dispatch_event(
+					dev, MLX4_DEV_EVENT_PORT_DOWN, &port);
 				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
 				if (!mlx4_is_master(dev))
 					break;
@@ -647,7 +647,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 					}
 				}
 			} else {
-				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
+				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    &port);
 
 				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
@@ -758,7 +759,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			}
 			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
 			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
-					    flr_slave);
+					    &flr_slave);
 			queue_work(priv->mfunc.master.comm_wq,
 				   &priv->mfunc.master.slave_flr_event_work);
 			break;
@@ -787,8 +788,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		break;
 
 	case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
-		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
-				    (unsigned long) eqe);
+		mlx4_dispatch_event(
			dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe);
 		break;
 
 	case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
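mlx4_en_netdev_event() runs in netdev notifier context (under RTNL), so the actual bond/unbond, which sleeps, is handed off to a workqueue; note the GFP_ATOMIC allocation and that the notifier only snapshots its inputs. The shape of that hand-off in isolation, with hypothetical names (the real version is mlx4_queue_bond_work()/mlx4_bond_work() in main.c further down):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_cfg {
	struct work_struct work;
	int value;			/* snapshot taken in the notifier */
};

static void deferred_cfg_work(struct work_struct *work)
{
	struct deferred_cfg *cfg = container_of(work, struct deferred_cfg, work);

	/* sleeping reconfiguration happens here, outside the notifier */
	kfree(cfg);
}

/* Callable from notifier context: atomic allocation, no blocking. */
static int queue_deferred_cfg(struct workqueue_struct *wq, int value)
{
	struct deferred_cfg *cfg = kzalloc(sizeof(*cfg), GFP_ATOMIC);

	if (!cfg)
		return -ENOMEM;

	INIT_WORK(&cfg->work, deferred_cfg_work);
	cfg->value = value;
	queue_work(wq, &cfg->work);
	return 0;
}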
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 65482f004e50..a371b970ac1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -38,102 +38,131 @@
 
 #include "mlx4.h"
 
-struct mlx4_device_context {
-	struct list_head	list;
-	struct list_head	bond_list;
-	struct mlx4_interface  *intf;
-	void		       *context;
-};
-
-static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(intf_mutex);
+static DEFINE_IDA(mlx4_adev_ida);
 
-static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+static bool is_eth_supported(struct mlx4_dev *dev)
 {
-	struct mlx4_device_context *dev_ctx;
+	for (int port = 1; port <= dev->caps.num_ports; port++)
+		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+			return true;
 
-	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
-	if (!dev_ctx)
-		return;
+	return false;
+}
 
-	dev_ctx->intf    = intf;
-	dev_ctx->context = intf->add(&priv->dev);
+static bool is_ib_supported(struct mlx4_dev *dev)
+{
+	for (int port = 1; port <= dev->caps.num_ports; port++)
+		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+			return true;
 
-	if (dev_ctx->context) {
-		spin_lock_irq(&priv->ctx_lock);
-		list_add_tail(&dev_ctx->list, &priv->ctx_list);
-		spin_unlock_irq(&priv->ctx_lock);
-		if (intf->activate)
-			intf->activate(&priv->dev, dev_ctx->context);
-	} else
-		kfree(dev_ctx);
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
+		return true;
 
+	return false;
 }
 
-static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+static const struct mlx4_adev_device {
+	const char *suffix;
+	bool (*is_supported)(struct mlx4_dev *dev);
+} mlx4_adev_devices[] = {
+	{ "eth", is_eth_supported },
+	{ "ib", is_ib_supported },
+};
+
+int mlx4_adev_init(struct mlx4_dev *dev)
 {
-	struct mlx4_device_context *dev_ctx;
+	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf == intf) {
-			spin_lock_irq(&priv->ctx_lock);
-			list_del(&dev_ctx->list);
-			spin_unlock_irq(&priv->ctx_lock);
+	priv->adev_idx = ida_alloc(&mlx4_adev_ida, GFP_KERNEL);
+	if (priv->adev_idx < 0)
+		return priv->adev_idx;
 
-			intf->remove(&priv->dev, dev_ctx->context);
-			kfree(dev_ctx);
-			return;
-		}
+	priv->adev = kcalloc(ARRAY_SIZE(mlx4_adev_devices),
			     sizeof(struct mlx4_adev *), GFP_KERNEL);
+	if (!priv->adev) {
+		ida_free(&mlx4_adev_ida, priv->adev_idx);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
-int mlx4_register_interface(struct mlx4_interface *intf)
+void mlx4_adev_cleanup(struct mlx4_dev *dev)
 {
-	struct mlx4_priv *priv;
-
-	if (!intf->add || !intf->remove)
-		return -EINVAL;
-
-	mutex_lock(&intf_mutex);
+	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	list_add_tail(&intf->list, &intf_list);
-	list_for_each_entry(priv, &dev_list, dev_list) {
-		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
-			mlx4_dbg(&priv->dev,
-				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
-			intf->flags &= ~MLX4_INTFF_BONDING;
-		}
-		mlx4_add_device(intf, priv);
-	}
+	kfree(priv->adev);
+	ida_free(&mlx4_adev_ida, priv->adev_idx);
+}
 
-	mutex_unlock(&intf_mutex);
+static void adev_release(struct device *dev)
+{
+	struct mlx4_adev *mlx4_adev =
		container_of(dev, struct mlx4_adev, adev.dev);
+	struct mlx4_priv *priv = mlx4_priv(mlx4_adev->mdev);
+	int idx = mlx4_adev->idx;
 
-	return 0;
+	kfree(mlx4_adev);
+	priv->adev[idx] = NULL;
 }
-EXPORT_SYMBOL_GPL(mlx4_register_interface);
 
-void mlx4_unregister_interface(struct mlx4_interface *intf)
+static struct mlx4_adev *add_adev(struct mlx4_dev *dev, int idx)
 {
-	struct mlx4_priv *priv;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	const char *suffix = mlx4_adev_devices[idx].suffix;
+	struct auxiliary_device *adev;
+	struct mlx4_adev *madev;
+	int ret;
 
-	mutex_lock(&intf_mutex);
+	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
+	if (!madev)
+		return ERR_PTR(-ENOMEM);
 
-	list_for_each_entry(priv, &dev_list, dev_list)
-		mlx4_remove_device(intf, priv);
+	adev = &madev->adev;
+	adev->id = priv->adev_idx;
+	adev->name = suffix;
+	adev->dev.parent = &dev->persist->pdev->dev;
+	adev->dev.release = adev_release;
+	madev->mdev = dev;
+	madev->idx = idx;
 
-	list_del(&intf->list);
+	ret = auxiliary_device_init(adev);
+	if (ret) {
+		kfree(madev);
+		return ERR_PTR(ret);
+	}
 
-	mutex_unlock(&intf_mutex);
+	ret = auxiliary_device_add(adev);
+	if (ret) {
+		auxiliary_device_uninit(adev);
+		return ERR_PTR(ret);
+	}
+
+	return madev;
+}
+
+static void del_adev(struct auxiliary_device *adev)
+{
+	auxiliary_device_delete(adev);
+	auxiliary_device_uninit(adev);
+}
+
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv)
+{
+	return auxiliary_driver_register(&madrv->adrv);
+}
+EXPORT_SYMBOL_GPL(mlx4_register_auxiliary_driver);
+
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv)
+{
+	auxiliary_driver_unregister(&madrv->adrv);
 }
-EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
+EXPORT_SYMBOL_GPL(mlx4_unregister_auxiliary_driver);
 
 int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
-	unsigned long flags;
-	int ret;
-	LIST_HEAD(bond_list);
+	int i, ret;
 
 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
 		return -EOPNOTSUPP;
@@ -155,69 +184,178 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
 		dev->flags &= ~MLX4_FLAG_BONDED;
 	}
 
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
-		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
-			list_add_tail(&dev_ctx->bond_list, &bond_list);
-			list_del(&dev_ctx->list);
+	mutex_lock(&intf_mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
+		struct mlx4_adev *madev = priv->adev[i];
+		struct mlx4_adrv *madrv;
+		enum mlx4_protocol protocol;
+
+		if (!madev)
+			continue;
+
+		device_lock(&madev->adev.dev);
+		if (!madev->adev.dev.driver) {
+			device_unlock(&madev->adev.dev);
+			continue;
+		}
+
+		madrv = container_of(madev->adev.dev.driver, struct mlx4_adrv,
				     adrv.driver);
+		if (!(madrv->flags & MLX4_INTFF_BONDING)) {
+			device_unlock(&madev->adev.dev);
+			continue;
+		}
+
+		if (mlx4_is_mfunc(dev)) {
+			mlx4_dbg(dev,
				 "SRIOV, disabled HA mode for intf proto %d\n",
				 madrv->protocol);
+			device_unlock(&madev->adev.dev);
+			continue;
 		}
-	}
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
 
-	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
-		dev_ctx->intf->remove(dev, dev_ctx->context);
-		dev_ctx->context =  dev_ctx->intf->add(dev);
+		protocol = madrv->protocol;
+		device_unlock(&madev->adev.dev);
 
-		spin_lock_irqsave(&priv->ctx_lock, flags);
-		list_add_tail(&dev_ctx->list, &priv->ctx_list);
-		spin_unlock_irqrestore(&priv->ctx_lock, flags);
+		del_adev(&madev->adev);
+		priv->adev[i] = add_adev(dev, i);
+		if (IS_ERR(priv->adev[i])) {
+			mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
				  mlx4_adev_devices[i].suffix);
+			priv->adev[i] = NULL;
+			continue;
+		}
 
-		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
-			 dev_ctx->intf->protocol, enable ?
- "enabled" : "disabled"); + mlx4_dbg(dev, + "Interface for protocol %d restarted with bonded mode %s\n", + protocol, enable ? "enabled" : "disabled"); } + + mutex_unlock(&intf_mutex); + return 0; } void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, - unsigned long param) + void *param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + atomic_notifier_call_chain(&priv->event_nh, type, param); +} + +int mlx4_register_event_notifier(struct mlx4_dev *dev, + struct notifier_block *nb) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_device_context *dev_ctx; - unsigned long flags; - spin_lock_irqsave(&priv->ctx_lock, flags); + return atomic_notifier_chain_register(&priv->event_nh, nb); +} +EXPORT_SYMBOL(mlx4_register_event_notifier); - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf->event) - dev_ctx->intf->event(dev, dev_ctx->context, type, param); +int mlx4_unregister_event_notifier(struct mlx4_dev *dev, + struct notifier_block *nb) +{ + struct mlx4_priv *priv = mlx4_priv(dev); - spin_unlock_irqrestore(&priv->ctx_lock, flags); + return atomic_notifier_chain_unregister(&priv->event_nh, nb); } +EXPORT_SYMBOL(mlx4_unregister_event_notifier); -int mlx4_register_device(struct mlx4_dev *dev) +static int add_drivers(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) { + bool is_supported = false; + + if (priv->adev[i]) + continue; + + if (mlx4_adev_devices[i].is_supported) + is_supported = mlx4_adev_devices[i].is_supported(dev); + + if (!is_supported) + continue; + + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i, + mlx4_adev_devices[i].suffix); + /* We continue to rescan drivers and leave to the caller + * to make decision if to release everything or + * continue. */ + ret = PTR_ERR(priv->adev[i]); + priv->adev[i] = NULL; + } + } + return ret; +} + +static void delete_drivers(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_interface *intf; + bool delete_all; + int i; + + delete_all = !(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP); + + for (i = ARRAY_SIZE(mlx4_adev_devices) - 1; i >= 0; i--) { + bool is_supported = false; + + if (!priv->adev[i]) + continue; + + if (mlx4_adev_devices[i].is_supported && !delete_all) + is_supported = mlx4_adev_devices[i].is_supported(dev); + + if (is_supported) + continue; + + del_adev(&priv->adev[i]->adev); + priv->adev[i] = NULL; + } +} + +/* This function is used after mlx4_dev is reconfigured. 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 8a5409b00530..2581226836b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,7 +42,6 @@
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
-#include <linux/kmod.h>
 #include <linux/etherdevice.h>
 #include <net/devlink.h>
 
@@ -1091,27 +1090,6 @@ free_mem:
 	return err;
 }
 
-static void mlx4_request_modules(struct mlx4_dev *dev)
-{
-	int port;
-	int has_ib_port = false;
-	int has_eth_port = false;
-#define EN_DRV_NAME	"mlx4_en"
-#define IB_DRV_NAME	"mlx4_ib"
-
-	for (port = 1; port <= dev->caps.num_ports; port++) {
-		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
-			has_ib_port = true;
-		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
-			has_eth_port = true;
-	}
-
-	if (has_eth_port)
-		request_module_nowait(EN_DRV_NAME);
-	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-		request_module_nowait(IB_DRV_NAME);
-}
-
 /*
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
@@ -1147,7 +1125,6 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
 			mlx4_err(dev, "Failed to register device\n");
 			goto out;
 		}
-		mlx4_request_modules(dev);
 	}
 
 out:
@@ -1441,7 +1418,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev)
 	return ret;
 }
 
-int mlx4_bond(struct mlx4_dev *dev)
+static int mlx4_bond(struct mlx4_dev *dev)
 {
 	int ret = 0;
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1467,9 +1444,8 @@ int mlx4_bond(struct mlx4_dev *dev)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(mlx4_bond);
 
-int mlx4_unbond(struct mlx4_dev *dev)
+static int mlx4_unbond(struct mlx4_dev *dev)
 {
 	int ret = 0;
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1496,10 +1472,8 @@ int mlx4_unbond(struct mlx4_dev *dev)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(mlx4_unbond);
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
+static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
 {
 	u8 port1 = v2p->port1;
 	u8 port2 = v2p->port2;
@@ -1541,7 +1515,61 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
 	mutex_unlock(&priv->bond_mutex);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx4_port_map_set);
+
+struct mlx4_bond {
+	struct work_struct work;
+	struct mlx4_dev *dev;
+	int is_bonded;
+	struct mlx4_port_map port_map;
+};
+
+static void mlx4_bond_work(struct work_struct *work)
+{
+	struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work);
+	int err = 0;
+
+	if (bond->is_bonded) {
+		if (!mlx4_is_bonded(bond->dev)) {
+			err = mlx4_bond(bond->dev);
+			if (err)
+				mlx4_err(bond->dev, "Fail to bond device\n");
+		}
+		if (!err) {
+			err = mlx4_port_map_set(bond->dev, &bond->port_map);
+			if (err)
+				mlx4_err(bond->dev,
					 "Fail to set port map [%d][%d]: %d\n",
					 bond->port_map.port1,
					 bond->port_map.port2, err);
+		}
+	} else if (mlx4_is_bonded(bond->dev)) {
+		err = mlx4_unbond(bond->dev);
+		if (err)
+			mlx4_err(bond->dev, "Fail to unbond device\n");
+	}
+
+	put_device(&bond->dev->persist->pdev->dev);
+	kfree(bond);
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
			 u8 v2p_p2)
+{
+	struct mlx4_bond *bond;
+
+	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
+	if (!bond)
+		return -ENOMEM;
+
+	INIT_WORK(&bond->work, mlx4_bond_work);
+	get_device(&dev->persist->pdev->dev);
+	bond->dev = dev;
+	bond->is_bonded = is_bonded;
+	bond->port_map.port1 = v2p_p1;
+	bond->port_map.port2 = v2p_p2;
+	queue_work(mlx4_wq, &bond->work);
+	return 0;
+}
+EXPORT_SYMBOL(mlx4_queue_bond_work);
 
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
@@ -3375,8 +3403,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	devl_assert_locked(devlink);
 	dev = &priv->dev;
 
-	INIT_LIST_HEAD(&priv->ctx_list);
-	spin_lock_init(&priv->ctx_lock);
+	err = mlx4_adev_init(dev);
+	if (err)
+		return err;
+
+	ATOMIC_INIT_NOTIFIER_HEAD(&priv->event_nh);
 
 	mutex_init(&priv->port_mutex);
 	mutex_init(&priv->bond_mutex);
@@ -3402,10 +3433,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	err = mlx4_get_ownership(dev);
 	if (err) {
 		if (err < 0)
-			return err;
+			goto err_adev;
 		else {
 			mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_adev;
 		}
 	}
 
@@ -3674,8 +3706,6 @@ slave_start:
 		if (err)
 			goto err_port;
 
-	mlx4_request_modules(dev);
-
 	mlx4_sense_init(dev);
 	mlx4_start_sense(dev);
 
@@ -3753,6 +3783,9 @@ err_sriov:
 	mlx4_free_ownership(dev);
 
 	kfree(dev_cap);
+
+err_adev:
+	mlx4_adev_cleanup(dev);
 	return err;
 }
 
@@ -4133,6 +4166,8 @@ static void mlx4_unload_one(struct pci_dev *pdev)
 	mlx4_slave_destroy_special_qp_cap(dev);
 	kfree(dev->dev_vfs);
 
+	mlx4_adev_cleanup(dev);
+
 	mlx4_clean_dev(dev);
 	priv->pci_dev_data = pci_dev_data;
 	priv->removed = 1;
@@ -4520,6 +4555,9 @@ static int __init mlx4_init(void)
 {
 	int ret;
 
+	WARN_ONCE(strcmp(MLX4_ADEV_NAME, KBUILD_MODNAME),
		  "mlx4_core name not in sync with kernel module name");
+
 	if (mlx4_verify_params())
 		return -EINVAL;
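The exported mlx4_queue_bond_work() above pins the PCI device with get_device() before queueing and drops the reference in mlx4_bond_work(), so the device cannot go away while the work is pending. A hypothetical caller, assuming only this patch's API (the port mapping values are made up):

#include <linux/printk.h>
#include <linux/mlx4/device.h>

static void example_request_bond(struct mlx4_dev *dev)
{
	u8 v2p_port1 = 1, v2p_port2 = 2;	/* illustrative mapping */

	/* may be called from notifier context; allocation is GFP_ATOMIC */
	if (mlx4_queue_bond_work(dev, 1, v2p_port1, v2p_port2))
		pr_warn("bond request dropped: out of memory\n");
}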
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6ccf340660d9..d7d856d1758a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -47,6 +47,8 @@
 #include <linux/spinlock.h>
 #include <net/devlink.h>
 #include <linux/rwsem.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
@@ -862,6 +864,11 @@ struct mlx4_steer {
 	struct list_head steer_entries[MLX4_NUM_STEERS];
 };
 
+struct mlx4_port_map {
+	u8 port1;
+	u8 port2;
+};
+
 enum {
 	MLX4_PCI_DEV_IS_VF		= 1 << 0,
 	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
@@ -875,9 +882,9 @@ enum {
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
-	struct list_head	dev_list;
-	struct list_head	ctx_list;
-	spinlock_t		ctx_lock;
+	struct mlx4_adev	**adev;
+	int			adev_idx;
+	struct atomic_notifier_head	event_nh;
 
 	int			pci_dev_data;
 	int			removed;
@@ -1045,10 +1052,13 @@ void mlx4_catas_end(struct mlx4_dev *dev);
 int mlx4_crdump_init(struct mlx4_dev *dev);
 void mlx4_crdump_end(struct mlx4_dev *dev);
 int mlx4_restart_one(struct pci_dev *pdev);
+
+int mlx4_adev_init(struct mlx4_dev *dev);
+void mlx4_adev_cleanup(struct mlx4_dev *dev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
-			 unsigned long param);
+			 void *param);
 
 struct mlx4_dev_cap;
 struct mlx4_init_hca_param;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 321f801c1d7c..efe3f97b874f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -49,6 +49,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/irq.h>
 #include <net/xdp.h>
+#include <linux/notifier.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -432,7 +433,8 @@ struct mlx4_en_dev {
 	unsigned long		last_overflow_check;
 	struct ptp_clock	*ptp_clock;
 	struct ptp_clock_info	ptp_clock_info;
-	struct notifier_block	nb;
+	struct notifier_block	netdev_nb;
+	struct notifier_block	mlx_nb;
 };
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d613095b78e0..1d719726f72b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -909,7 +909,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
 
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_device_num_engines(struct qed_dev *cdev);
 void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 4d83ceebdc49..042a75f34060 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -556,9 +556,6 @@ void qede_config_rx_mode(struct net_device *ndev);
 void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update);
 
-void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
-void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
-
 int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 
 #ifdef CONFIG_DCB
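struct mlx4_en_dev now embeds two notifier blocks, one for netdev events and one for mlx4 core events; each callback must recover the owning structure via container_of() on the member it was registered through, or the pointer arithmetic silently lands in the wrong place. In miniature, with hypothetical names:

#include <linux/notifier.h>

struct example_dev {
	struct notifier_block netdev_nb;	/* netdevice notifier */
	struct notifier_block mlx_nb;		/* core event notifier */
};

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	/* registered through netdev_nb, so recover through netdev_nb */
	struct example_dev *edev =
		container_of(nb, struct example_dev, netdev_nb);

	(void)edev;
	return NOTIFY_DONE;
}

static int example_mlx_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct example_dev *edev =
		container_of(nb, struct example_dev, mlx_nb);

	(void)edev;
	return NOTIFY_DONE;
}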
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ef8b14135133..2657be7cc049 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -631,7 +631,6 @@ static void recalibrate(struct dp83640_clock *clock)
 	s64 now, diff;
 	struct phy_txts event_ts;
 	struct timespec64 ts;
-	struct list_head *this;
 	struct dp83640_private *tmp;
 	struct phy_device *master = clock->chosen->phydev;
 	u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
@@ -648,8 +647,7 @@ static void recalibrate(struct dp83640_clock *clock)
 	/*
 	 * enable broadcast, disable status frames, enable ptp clock
 	 */
-	list_for_each(this, &clock->phylist) {
-		tmp = list_entry(this, struct dp83640_private, list);
+	list_for_each_entry(tmp, &clock->phylist, list) {
 		enable_broadcast(tmp->phydev, clock->page, 1);
 		tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
 		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
@@ -667,10 +665,8 @@ static void recalibrate(struct dp83640_clock *clock)
 	evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
 	evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
 
-	list_for_each(this, &clock->phylist) {
-		tmp = list_entry(this, struct dp83640_private, list);
+	list_for_each_entry(tmp, &clock->phylist, list)
 		ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
-	}
 	ext_write(0, master, PAGE5, PTP_EVNT, evnt);
 
 	/*
@@ -709,8 +705,7 @@ static void recalibrate(struct dp83640_clock *clock)
 	event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
 	now = phy2txts(&event_ts);
 
-	list_for_each(this, &clock->phylist) {
-		tmp = list_entry(this, struct dp83640_private, list);
+	list_for_each_entry(tmp, &clock->phylist, list) {
 		val = ext_read(tmp->phydev, PAGE4, PTP_STS);
 		phydev_info(tmp->phydev, "slave PTP_STS 0x%04hx\n", val);
 		val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
@@ -730,10 +725,8 @@ static void recalibrate(struct dp83640_clock *clock)
 	/*
 	 * restore status frames
 	 */
-	list_for_each(this, &clock->phylist) {
-		tmp = list_entry(this, struct dp83640_private, list);
+	list_for_each_entry(tmp, &clock->phylist, list)
 		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
-	}
 	ext_write(0, master, PAGE5, PSF_CFG0, cfg0);
 
 	mutex_unlock(&clock->extreg_lock);
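The dp83640 hunks replace the open-coded list_for_each() plus list_entry() pair with list_for_each_entry(), which types the cursor directly and drops the scratch struct list_head pointer. The same conversion in miniature, with a hypothetical item type:

#include <linux/list.h>

struct item {
	int val;
	struct list_head list;
};

static int sum_items(struct list_head *head)
{
	struct item *it;
	int sum = 0;

	/* Before:
	 *	struct list_head *pos;
	 *	list_for_each(pos, head) {
	 *		it = list_entry(pos, struct item, list);
	 *		...
	 *	}
	 */
	list_for_each_entry(it, head, list)
		sum += it->val;

	return sum;
}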
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 494242bb9cf6..231ad91a919d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -312,6 +312,14 @@ struct padded_vnet_hdr {
 	char padding[12];
 };
 
+struct virtio_net_common_hdr {
+	union {
+		struct virtio_net_hdr hdr;
+		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
+		struct virtio_net_hdr_v1_hash hash_v1_hdr;
+	};
+};
+
 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 
@@ -353,9 +361,10 @@ static int rxq2vq(int rxq)
 	return rxq * 2;
 }
 
-static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_common_hdr *
+skb_vnet_common_hdr(struct sk_buff *skb)
 {
-	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
+	return (struct virtio_net_common_hdr *)skb->cb;
 }
 
 /*
@@ -478,7 +487,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   unsigned int headroom)
 {
 	struct sk_buff *skb;
-	struct virtio_net_hdr_mrg_rxbuf *hdr;
+	struct virtio_net_common_hdr *hdr;
 	unsigned int copy, hdr_len, hdr_padded_len;
 	struct page *page_to_free = NULL;
 	int tailroom, shinfo_size;
@@ -563,7 +572,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 		give_pages(rq, page);
 
 ok:
-	hdr = skb_vnet_hdr(skb);
+	hdr = skb_vnet_common_hdr(skb);
 	memcpy(hdr, hdr_p, hdr_len);
 	if (page_to_free)
 		put_page(page_to_free);
@@ -975,7 +984,7 @@ static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
 		return NULL;
 
 	buf += header_offset;
-	memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
+	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
 
 	return skb;
 }
@@ -1586,7 +1595,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 {
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
-	struct virtio_net_hdr_mrg_rxbuf *hdr;
+	struct virtio_net_common_hdr *hdr;
 
 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1606,9 +1615,9 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	if (unlikely(!skb))
 		return;
 
-	hdr = skb_vnet_hdr(skb);
+	hdr = skb_vnet_common_hdr(skb);
 	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
-		virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
+		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
 
 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2114,7 +2123,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	if (can_push)
 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
 	else
-		hdr = skb_vnet_hdr(skb);
+		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
 
 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
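The virtio_net change keeps one union of every header variant in skb->cb and picks the member at each use site, instead of casting struct virtio_net_hdr_mrg_rxbuf * to other header types. skb->cb is only 48 bytes, so a compile-time size check is cheap insurance; a sketch of the same idea with a hypothetical name (the union members mirror the patch):

#include <linux/build_bug.h>
#include <linux/skbuff.h>
#include <linux/virtio_net.h>

struct example_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static inline struct example_common_hdr *example_cb_hdr(struct sk_buff *skb)
{
	/* the whole union must still fit in the skb control buffer */
	BUILD_BUG_ON(sizeof(struct example_common_hdr) >
		     sizeof_field(struct sk_buff, cb));

	return (struct example_common_hdr *)skb->cb;
}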
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6646634a0b9d..27f42f713c89 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
 #ifndef MLX4_DEVICE_H
 #define MLX4_DEVICE_H
 
+#include <linux/auxiliary_bus.h>
 #include <linux/if_ether.h>
 #include <linux/pci.h>
 #include <linux/completion.h>
@@ -889,6 +890,12 @@ struct mlx4_dev {
 	u8  uar_page_shift;
 };
 
+struct mlx4_adev {
+	struct auxiliary_device adev;
+	struct mlx4_dev *mdev;
+	int idx;
+};
+
 struct mlx4_clock_params {
 	u64 offset;
 	u8 bar;
@@ -1087,6 +1094,19 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 		(offset & (PAGE_SIZE - 1));
 }
 
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+	return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+	return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
			 u8 v2p_p2);
+
 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
 int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 1834c8fad12e..69825223081f 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -34,8 +34,12 @@
 #define MLX4_DRIVER_H
 
 #include <net/devlink.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
 #include <linux/mlx4/device.h>
 
+#define MLX4_ADEV_NAME "mlx4_core"
+
 struct mlx4_dev;
 
 #define MLX4_MAC_MASK	   0xffffffffffffULL
@@ -54,41 +58,19 @@ enum {
 	MLX4_INTFF_BONDING	= 1 << 0
 };
 
-struct mlx4_interface {
-	void *			(*add)	 (struct mlx4_dev *dev);
-	void			(*remove)(struct mlx4_dev *dev, void *context);
-	void			(*event) (struct mlx4_dev *dev, void *context,
-					  enum mlx4_dev_event event, unsigned long param);
-	void *			(*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
-	void			(*activate)(struct mlx4_dev *dev, void *context);
-	struct list_head	list;
+struct mlx4_adrv {
+	struct auxiliary_driver	adrv;
 	enum mlx4_protocol	protocol;
 	int			flags;
 };
 
-int mlx4_register_interface(struct mlx4_interface *intf);
-void mlx4_unregister_interface(struct mlx4_interface *intf);
-
-int mlx4_bond(struct mlx4_dev *dev);
-int mlx4_unbond(struct mlx4_dev *dev);
-static inline int mlx4_is_bonded(struct mlx4_dev *dev)
-{
-	return !!(dev->flags & MLX4_FLAG_BONDED);
-}
-
-static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
-{
-	return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
-}
-
-struct mlx4_port_map {
-	u8	port1;
-	u8	port2;
-};
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv);
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv);
 
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+int mlx4_register_event_notifier(struct mlx4_dev *dev,
				 struct notifier_block *nb);
+int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
				   struct notifier_block *nb);
 
 struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 90e3045b2dcb..0d3b6ed21628 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -67,9 +67,6 @@ struct qed_fcoe_cb_ops {
 	u32 (*get_login_failures)(void *cookie);
 };
 
-void qed_fcoe_set_pf_params(struct qed_dev *cdev,
-			    struct qed_fcoe_pf_params *params);
-
 /**
  * struct qed_fcoe_ops - qed FCoE operations.
  * @common:		common operations pointer
diff --git a/net/socket.c b/net/socket.c
index fdb5233bf560..848116d06b51 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3567,7 +3567,12 @@ EXPORT_SYMBOL(kernel_accept);
 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
 		   int flags)
 {
-	return READ_ONCE(sock->ops)->connect(sock, addr, addrlen, flags);
+	struct sockaddr_storage address;
+
+	memcpy(&address, addr, addrlen);
+
+	return READ_ONCE(sock->ops)->connect(sock, (struct sockaddr *)&address,
					     addrlen, flags);
 }
 EXPORT_SYMBOL(kernel_connect);
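The net/socket.c hunk makes kernel_connect() operate on a private copy of the caller's address: ops->connect() (and any BPF hook behind it) may modify or re-read the sockaddr it is given, and copying into a full struct sockaddr_storage keeps that from reaching, or racing with, the caller's buffer. The same defensive-copy shape in isolation, with a hypothetical callee and an illustrative bound check that the real function leaves to its callers:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/string.h>

static int example_use_addr(struct sockaddr *addr, int addrlen)
{
	/* stand-in for ops->connect(); free to scribble on addr */
	return 0;
}

static int example_connect_copy(struct sockaddr *addr, int addrlen)
{
	struct sockaddr_storage address;

	if (addrlen < 0 || (size_t)addrlen > sizeof(address))
		return -EINVAL;		/* illustrative only */

	memcpy(&address, addr, addrlen);	/* snapshot the caller's data */

	return example_use_addr((struct sockaddr *)&address, addrlen);
}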