Diffstat (limited to 'drivers/net/dsa/ocelot')
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig           |    2
-rw-r--r--  drivers/net/dsa/ocelot/felix.c           | 1422
-rw-r--r--  drivers/net/dsa/ocelot/felix.h           |   50
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c   | 1761
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c |  518
5 files changed, 2627 insertions(+), 1126 deletions(-)
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 9948544ba1c4..08db9cf76818 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -6,6 +6,7 @@ config NET_DSA_MSCC_FELIX depends on NET_VENDOR_FREESCALE depends on HAS_IOMEM depends on PTP_1588_CLOCK_OPTIONAL + depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT_8021Q select NET_DSA_TAG_OCELOT @@ -21,6 +22,7 @@ config NET_DSA_MSCC_SEVILLE depends on NET_VENDOR_MICROSEMI depends on HAS_IOMEM depends on PTP_1588_CLOCK_OPTIONAL + select MDIO_MSCC_MIIM select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT_8021Q select NET_DSA_TAG_OCELOT diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index f1a05e7dc818..dd3a18cc89dd 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -21,37 +21,69 @@ #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> -#include <linux/pcs-lynx.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include "felix.h" -static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +/* Translate the DSA database API into the ocelot switch library API, + * which uses VID 0 for all ports that aren't part of a bridge, + * and expects the bridge_dev to be NULL in that case. + */ +static struct net_device *felix_classify_db(struct dsa_db db) { - struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int key_length, upstream, err; + switch (db.type) { + case DSA_DB_PORT: + case DSA_DB_LAG: + return NULL; + case DSA_DB_BRIDGE: + return db.bridge.dev; + default: + return ERR_PTR(-EOPNOTSUPP); + } +} - /* We don't need to install the rxvlan into the other ports' filtering - * tables, because we're just pushing the rxvlan when sending towards - * the CPU - */ - if (!pvid) - return 0; +static int felix_cpu_port_for_master(struct dsa_switch *ds, + struct net_device *master) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; + int lag; + + if (netif_is_lag_master(master)) { + mutex_lock(&ocelot->fwd_domain_lock); + lag = ocelot_bond_get_id(ocelot, master); + mutex_unlock(&ocelot->fwd_domain_lock); + + return lag; + } + + cpu_dp = master->dsa_ptr; + return cpu_dp->index; +} + +/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that + * the tagger can perform RX source port identification. 
+ */ +static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) +{ + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int key_length, err; key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; - upstream = dsa_upstream_port(ds, port); outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!outer_tagging_rule) return -ENOMEM; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; outer_tagging_rule->prio = 1; - outer_tagging_rule->id.cookie = port; + outer_tagging_rule->id.cookie = cookie; outer_tagging_rule->id.tc_offload = false; outer_tagging_rule->block_id = VCAP_ES0; outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -72,20 +104,36 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, return err; } -static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) { - struct ocelot_vcap_filter *untagging_rule, *redirect_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int upstream, err; + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot_vcap_block *block_vcap_es0; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; - /* tag_8021q.c assumes we are implementing this via port VLAN - * membership, which we aren't. So we don't need to add any VCAP filter - * for the CPU port. - */ - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; + block_vcap_es0 = &ocelot->block[VCAP_ES0]; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + + outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, + cookie, false); + if (!outer_tagging_rule) + return -ENOENT; + + return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); +} + +/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 + * rules for steering those tagged packets towards the correct destination port + */ +static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port, + u16 vid) +{ + struct ocelot_vcap_filter *untagging_rule, *redirect_rule; + unsigned long cpu_ports = dsa_cpu_ports(ds); + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int err; untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) @@ -97,14 +145,14 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return -ENOMEM; } - upstream = dsa_upstream_port(ds, port); + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule->key_type = OCELOT_VCAP_KEY_ANY; - untagging_rule->ingress_port_mask = BIT(upstream); + untagging_rule->ingress_port_mask = cpu_ports; untagging_rule->vlan.vid.value = vid; untagging_rule->vlan.vid.mask = VLAN_VID_MASK; untagging_rule->prio = 1; - untagging_rule->id.cookie = port; + untagging_rule->id.cookie = cookie; untagging_rule->id.tc_offload = false; untagging_rule->block_id = VCAP_IS1; untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -121,11 +169,13 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return err; } + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); + redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; - redirect_rule->ingress_port_mask = BIT(upstream); + redirect_rule->ingress_port_mask = cpu_ports; 
redirect_rule->pag = port; redirect_rule->prio = 1; - redirect_rule->id.cookie = port; + redirect_rule->id.cookie = cookie; redirect_rule->id.tc_offload = false; redirect_rule->block_id = VCAP_IS2; redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -143,265 +193,320 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return 0; } -static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, - u16 flags) -{ - bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = flags & BRIDGE_VLAN_INFO_PVID; - struct ocelot *ocelot = ds->priv; - - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - return 0; -} - -static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid) -{ - struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot_vcap_block *block_vcap_es0; - struct ocelot *ocelot = &felix->ocelot; - - block_vcap_es0 = &ocelot->block[VCAP_ES0]; - - outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, - port, false); - /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid - * installing outer tagging ES0 rules where they weren't needed. - * But in rxvlan_del, the API doesn't give us the "flags" anymore, - * so that forces us to be slightly sloppy here, and just assume that - * if we didn't find an outer_tagging_rule it means that there was - * none in the first place, i.e. rxvlan_del is called on a non-pvid - * port. This is most probably true though. - */ - if (!outer_tagging_rule) - return 0; - - return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); -} - -static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot_vcap_block *block_vcap_is1; struct ocelot_vcap_block *block_vcap_is2; - struct ocelot *ocelot = &felix->ocelot; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; int err; - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; - block_vcap_is1 = &ocelot->block[VCAP_IS1]; block_vcap_is2 = &ocelot->block[VCAP_IS2]; + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, - port, false); + cookie, false); if (!untagging_rule) - return 0; + return -ENOENT; err = ocelot_vcap_filter_del(ocelot, untagging_rule); if (err) return err; + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, - port, false); + cookie, false); if (!redirect_rule) - return 0; + return -ENOENT; return ocelot_vcap_filter_del(ocelot, redirect_rule); } -static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) +static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, + u16 flags) { - struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; + int err; - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot), - port, vid); + /* tag_8021q.c assumes we are implementing this via port VLAN + * membership, which we aren't. So we don't need to add any VCAP filter + * for the CPU port. 
+ */ + if (!dsa_is_user_port(ds, port)) + return 0; - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot), - port, vid); + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; + } + + err = felix_tag_8021q_vlan_add_tx(ds, port, vid); + if (err) + goto add_tx_failed; return 0; + +add_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + + return err; } -/* Alternatively to using the NPI functionality, that same hardware MAC - * connected internally to the enetc or fman DSA master can be configured to - * use the software-defined tag_8021q frame format. As far as the hardware is - * concerned, it thinks it is a "dumb switch" - the queues of the CPU port - * module are now disconnected from it, but can still be accessed through - * register-based MMIO. - */ -static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) +static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { - ocelot->ports[port]->is_dsa_8021q_cpu = true; - ocelot->npi = -1; + struct dsa_port *cpu_dp; + int err; - /* Overwrite PGID_CPU with the non-tagging port */ - ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU); + if (!dsa_is_user_port(ds, port)) + return 0; - ocelot_apply_bridge_fwd_mask(ocelot); + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; + } + + err = felix_tag_8021q_vlan_del_tx(ds, port, vid); + if (err) + goto del_tx_failed; + + return 0; + +del_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid); + + return err; } -static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) +static int felix_trap_get_cpu_port(struct dsa_switch *ds, + const struct ocelot_vcap_filter *trap) { - ocelot->ports[port]->is_dsa_8021q_cpu = false; + struct dsa_port *dp; + int first_port; + + if (WARN_ON(!trap->ingress_port_mask)) + return -1; - /* Restore PGID_CPU */ - ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID, - PGID_CPU); + first_port = __ffs(trap->ingress_port_mask); + dp = dsa_to_port(ds, first_port); - ocelot_apply_bridge_fwd_mask(ocelot); + return dp->cpu_dp->index; } -/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module. - * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the - * tag_8021q CPU port. +/* On switches with no extraction IRQ wired, trapped packets need to be + * replicated over Ethernet as well, otherwise we'd get no notification of + * their arrival when using the ocelot-8021q tagging protocol. 
*/ -static int felix_setup_mmio_filtering(struct felix *felix) +static int felix_update_trapping_destinations(struct dsa_switch *ds, + bool using_tag_8021q) { - unsigned long user_ports = dsa_user_ports(felix->ds); - struct ocelot_vcap_filter *redirect_rule; - struct ocelot_vcap_filter *tagging_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int cpu = -1, port, ret; + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + enum ocelot_mask_mode mask_mode; + unsigned long port_mask; + bool cpu_copy_ena; + int err; - tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); - if (!tagging_rule) - return -ENOMEM; + if (!felix->info->quirk_no_xtr_irq) + return 0; - redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); - if (!redirect_rule) { - kfree(tagging_rule); - return -ENOMEM; - } + /* We are sure that "cpu" was found, otherwise + * dsa_tree_setup_default_cpu() would have failed earlier. + */ + block_vcap_is2 = &ocelot->block[VCAP_IS2]; - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_cpu_port(ds, port)) { - cpu = port; - break; + /* Make sure all traps are set up for that destination */ + list_for_each_entry(trap, &block_vcap_is2->rules, list) { + if (!trap->is_trap) + continue; + + /* Figure out the current trapping destination */ + if (using_tag_8021q) { + /* Redirect to the tag_8021q CPU port. If timestamps + * are necessary, also copy trapped packets to the CPU + * port module. + */ + mask_mode = OCELOT_MASK_MODE_REDIRECT; + port_mask = BIT(felix_trap_get_cpu_port(ds, trap)); + cpu_copy_ena = !!trap->take_ts; + } else { + /* Trap packets only to the CPU port module, which is + * redirected to the NPI port (the DSA CPU port) + */ + mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + port_mask = 0; + cpu_copy_ena = true; } - } - if (cpu < 0) { - kfree(tagging_rule); - kfree(redirect_rule); - return -EINVAL; - } + if (trap->action.mask_mode == mask_mode && + trap->action.port_mask == port_mask && + trap->action.cpu_copy_ena == cpu_copy_ena) + continue; - tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; - *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); - *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff); - tagging_rule->ingress_port_mask = user_ports; - tagging_rule->prio = 1; - tagging_rule->id.cookie = ocelot->num_phys_ports; - tagging_rule->id.tc_offload = false; - tagging_rule->block_id = VCAP_IS1; - tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; - tagging_rule->lookup = 0; - tagging_rule->action.pag_override_mask = 0xff; - tagging_rule->action.pag_val = ocelot->num_phys_ports; - - ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL); - if (ret) { - kfree(tagging_rule); - kfree(redirect_rule); - return ret; - } + trap->action.mask_mode = mask_mode; + trap->action.port_mask = port_mask; + trap->action.cpu_copy_ena = cpu_copy_ena; - redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; - redirect_rule->ingress_port_mask = user_ports; - redirect_rule->pag = ocelot->num_phys_ports; - redirect_rule->prio = 1; - redirect_rule->id.cookie = ocelot->num_phys_ports; - redirect_rule->id.tc_offload = false; - redirect_rule->block_id = VCAP_IS2; - redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; - redirect_rule->lookup = 0; - redirect_rule->action.cpu_copy_ena = true; - if (felix->info->quirk_no_xtr_irq) { - /* Redirect to the tag_8021q CPU but also copy PTP packets to - * the 
CPU port module - */ - redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; - redirect_rule->action.port_mask = BIT(cpu); - } else { - /* Trap PTP packets only to the CPU port module (which is - * redirected to the NPI port) - */ - redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; - redirect_rule->action.port_mask = 0; + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) + return err; } - ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL); - if (ret) { - ocelot_vcap_filter_del(ocelot, tagging_rule); - kfree(redirect_rule); - return ret; + return 0; +} + +/* The CPU port module is connected to the Node Processor Interface (NPI). This + * is the mode through which frames can be injected from and extracted to an + * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU + * running Linux, and this forms a DSA setup together with the enetc or fman + * DSA master. + */ +static void felix_npi_port_init(struct ocelot *ocelot, int port) +{ + ocelot->npi = port; + + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | + QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), + QSYS_EXT_CPU_CFG); + + /* NPI port Injection/Extraction configuration */ + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, + ocelot->npi_xtr_prefix); + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, + ocelot->npi_inj_prefix); + + /* Disable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); +} + +static void felix_npi_port_deinit(struct ocelot *ocelot, int port) +{ + /* Restore hardware defaults */ + int unused_port = ocelot->num_phys_ports + 2; + + ocelot->npi = -1; + + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), + QSYS_EXT_CPU_CFG); + + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, + OCELOT_TAG_PREFIX_DISABLED); + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, + OCELOT_TAG_PREFIX_DISABLED); + + /* Enable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); +} + +static int felix_tag_npi_setup(struct dsa_switch *ds) +{ + struct dsa_port *dp, *first_cpu_dp = NULL; + struct ocelot *ocelot = ds->priv; + + dsa_switch_for_each_user_port(dp, ds) { + if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { + dev_err(ds->dev, "Multiple NPI ports not supported\n"); + return -EINVAL; + } + + first_cpu_dp = dp->cpu_dp; } - /* The ownership of the CPU port module's queues might have just been - * transferred to the tag_8021q tagger from the NPI-based tagger. - * So there might still be all sorts of crap in the queues. On the - * other hand, the MMIO-based matching of PTP frames is very brittle, - * so we need to be careful that there are no extra frames to be - * dequeued over MMIO, since we would never know to discard them. 
- */ - ocelot_drain_cpu_queue(ocelot, 0); + if (!first_cpu_dp) + return -EINVAL; + + felix_npi_port_init(ocelot, first_cpu_dp->index); return 0; } -static int felix_teardown_mmio_filtering(struct felix *felix) +static void felix_tag_npi_teardown(struct dsa_switch *ds) { - struct ocelot_vcap_filter *tagging_rule, *redirect_rule; - struct ocelot_vcap_block *block_vcap_is1; - struct ocelot_vcap_block *block_vcap_is2; - struct ocelot *ocelot = &felix->ocelot; - int err; + struct ocelot *ocelot = ds->priv; - block_vcap_is1 = &ocelot->block[VCAP_IS1]; - block_vcap_is2 = &ocelot->block[VCAP_IS2]; + felix_npi_port_deinit(ocelot, ocelot->npi); +} - tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, - ocelot->num_phys_ports, - false); - if (!tagging_rule) - return -ENOENT; +static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; - err = ocelot_vcap_filter_del(ocelot, tagging_rule); - if (err) - return err; + return BIT(ocelot->num_phys_ports); +} - redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, - ocelot->num_phys_ports, - false); - if (!redirect_rule) - return -ENOENT; +static int felix_tag_npi_change_master(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; + struct ocelot *ocelot = ds->priv; - return ocelot_vcap_filter_del(ocelot, redirect_rule); + if (netif_is_lag_master(master)) { + NL_SET_ERR_MSG_MOD(extack, + "LAG DSA master only supported using ocelot-8021q"); + return -EOPNOTSUPP; + } + + /* Changing the NPI port breaks user ports still assigned to the old + * one, so only allow it while they're down, and don't allow them to + * come back up until they're all changed to the new one. + */ + dsa_switch_for_each_user_port(other_dp, ds) { + struct net_device *slave = other_dp->slave; + + if (other_dp != dp && (slave->flags & IFF_UP) && + dsa_port_to_master(other_dp) != master) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change while old master still has users"); + return -EOPNOTSUPP; + } + } + + felix_npi_port_deinit(ocelot, ocelot->npi); + felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master)); + + return 0; } -static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) +/* Alternatively to using the NPI functionality, that same hardware MAC + * connected internally to the enetc or fman DSA master can be configured to + * use the software-defined tag_8021q frame format. As far as the hardware is + * concerned, it thinks it is a "dumb switch" - the queues of the CPU port + * module are now disconnected from it, but can still be accessed through + * register-based MMIO. 
+ */ +static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { + .setup = felix_tag_npi_setup, + .teardown = felix_tag_npi_teardown, + .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, + .change_master = felix_tag_npi_change_master, +}; + +static int felix_tag_8021q_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - unsigned long cpu_flood; - int port, err; + struct dsa_port *dp; + int err; + + err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); + if (err) + return err; - felix_8021q_cpu_port_init(ocelot, cpu); + dsa_switch_for_each_cpu_port(dp, ds) + ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index); - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index, + dp->cpu_dp->index); + dsa_switch_for_each_available_port(dp, ds) /* This overwrites ocelot_init(): * Do not forward BPDU frames to the CPU port module, * for 2 reasons: @@ -414,202 +519,194 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) */ ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), - ANA_PORT_CPU_FWD_BPDU_CFG, port); - } + ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); - /* In tag_8021q mode, the CPU port module is unused, except for PTP - * frames. So we want to disable flooding of any kind to the CPU port - * module, since packets going there will end in a black hole. + /* The ownership of the CPU port module's queues might have just been + * transferred to the tag_8021q tagger from the NPI-based tagger. + * So there might still be all sorts of crap in the queues. On the + * other hand, the MMIO-based matching of PTP frames is very brittle, + * so we need to be careful that there are no extra frames to be + * dequeued over MMIO, since we would never know to discard them. */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC); - - err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); - if (err) - return err; - - err = felix_setup_mmio_filtering(felix); - if (err) - goto out_tag_8021q_unregister; + ocelot_drain_cpu_queue(ocelot, 0); return 0; - -out_tag_8021q_unregister: - dsa_tag_8021q_unregister(ds); - return err; } -static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu) +static void felix_tag_8021q_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - int err, port; - - err = felix_teardown_mmio_filtering(felix); - if (err) - dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d", - err); - - dsa_tag_8021q_unregister(ds); - - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; + struct dsa_port *dp; + dsa_switch_for_each_available_port(dp, ds) /* Restore the logic from ocelot_init: * do not forward BPDU frames to the front ports. 
*/ ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), ANA_PORT_CPU_FWD_BPDU_CFG, - port); - } + dp->index); - felix_8021q_cpu_port_deinit(ocelot, cpu); + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index); + + dsa_switch_for_each_cpu_port(dp, ds) + ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index); + + dsa_tag_8021q_unregister(ds); } -/* The CPU port module is connected to the Node Processor Interface (NPI). This - * is the mode through which frames can be injected from and extracted to an - * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU - * running Linux, and this forms a DSA setup together with the enetc or fman - * DSA master. - */ -static void felix_npi_port_init(struct ocelot *ocelot, int port) +static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) { - ocelot->npi = port; + return dsa_cpu_ports(ds); +} - ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | - QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), - QSYS_EXT_CPU_CFG); +static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack) +{ + int cpu = felix_cpu_port_for_master(ds, master); + struct ocelot *ocelot = ds->priv; - /* NPI port Injection/Extraction configuration */ - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, - ocelot->npi_xtr_prefix); - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, - ocelot->npi_inj_prefix); + ocelot_port_unassign_dsa_8021q_cpu(ocelot, port); + ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu); - /* Disable transmission of pause frames */ - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); + return felix_update_trapping_destinations(ds, true); } -static void felix_npi_port_deinit(struct ocelot *ocelot, int port) -{ - /* Restore hardware defaults */ - int unused_port = ocelot->num_phys_ports + 2; +static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { + .setup = felix_tag_8021q_setup, + .teardown = felix_tag_8021q_teardown, + .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, + .change_master = felix_tag_8021q_change_master, +}; - ocelot->npi = -1; +static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, + bool uc, bool mc, bool bc) +{ + struct ocelot *ocelot = ds->priv; + unsigned long val; - ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), - QSYS_EXT_CPU_CFG); + val = uc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, - OCELOT_TAG_PREFIX_DISABLED); - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, - OCELOT_TAG_PREFIX_DISABLED); + val = mc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); - /* Enable transmission of pause frames */ - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); + val = bc ? 
mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC); } -static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu) +static void +felix_migrate_host_flood(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) { struct ocelot *ocelot = ds->priv; - unsigned long cpu_flood; - - felix_npi_port_init(ocelot, cpu); + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; - /* Include the CPU port module (and indirectly, the NPI port) - * in the forwarding mask for unknown unicast - the hardware - * default value for ANA_FLOODING_FLD_UNICAST excludes - * BIT(ocelot->num_phys_ports), and so does ocelot_init, - * since Ocelot relies on whitelisting MAC addresses towards - * PGID_CPU. - * We do this because DSA does not yet perform RX filtering, - * and the NPI port does not perform source address learning, - * so traffic sent to Linux is effectively unknown from the - * switch's perspective. - */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC); + if (old_proto_ops) { + mask = old_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, false, false, false); + } - return 0; + mask = proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); } -static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu) +static int felix_migrate_mdbs(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) { struct ocelot *ocelot = ds->priv; + unsigned long from, to; + + if (!old_proto_ops) + return 0; - felix_npi_port_deinit(ocelot, cpu); + from = old_proto_ops->get_host_fwd_mask(ds); + to = proto_ops->get_host_fwd_mask(ds); + + return ocelot_migrate_mdbs(ocelot, from, to); } -static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu, - enum dsa_tag_protocol proto) +/* Configure the shared hardware resources for a transition between + * @old_proto_ops and @proto_ops. + * Manual migration is needed because as far as DSA is concerned, no change of + * the CPU port is taking place here, just of the tagging protocol. 
+ */ +static int +felix_tag_proto_setup_shared(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) { + bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); int err; - switch (proto) { - case DSA_TAG_PROTO_SEVILLE: - case DSA_TAG_PROTO_OCELOT: - err = felix_setup_tag_npi(ds, cpu); - break; - case DSA_TAG_PROTO_OCELOT_8021Q: - err = felix_setup_tag_8021q(ds, cpu); - break; - default: - err = -EPROTONOSUPPORT; - } + err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); + if (err) + return err; - return err; -} + felix_update_trapping_destinations(ds, using_tag_8021q); -static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu, - enum dsa_tag_protocol proto) -{ - switch (proto) { - case DSA_TAG_PROTO_SEVILLE: - case DSA_TAG_PROTO_OCELOT: - felix_teardown_tag_npi(ds, cpu); - break; - case DSA_TAG_PROTO_OCELOT_8021Q: - felix_teardown_tag_8021q(ds, cpu); - break; - default: - break; - } + felix_migrate_host_flood(ds, proto_ops, old_proto_ops); + + return 0; } /* This always leaves the switch in a consistent state, because although the * tag_8021q setup can fail, the NPI setup can't. So either the change is made, * or the restoration is guaranteed to work. */ -static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, +static int felix_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { + const struct felix_tag_proto_ops *old_proto_ops, *proto_ops; struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - enum dsa_tag_protocol old_proto = felix->tag_proto; int err; - if (proto != DSA_TAG_PROTO_SEVILLE && - proto != DSA_TAG_PROTO_OCELOT && - proto != DSA_TAG_PROTO_OCELOT_8021Q) + switch (proto) { + case DSA_TAG_PROTO_SEVILLE: + case DSA_TAG_PROTO_OCELOT: + proto_ops = &felix_tag_npi_proto_ops; + break; + case DSA_TAG_PROTO_OCELOT_8021Q: + proto_ops = &felix_tag_8021q_proto_ops; + break; + default: return -EPROTONOSUPPORT; + } - felix_del_tag_protocol(ds, cpu, old_proto); + old_proto_ops = felix->tag_proto_ops; - err = felix_set_tag_protocol(ds, cpu, proto); - if (err) { - felix_set_tag_protocol(ds, cpu, old_proto); - return err; - } + if (proto_ops == old_proto_ops) + return 0; + + err = proto_ops->setup(ds); + if (err) + goto setup_failed; + + err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); + if (err) + goto setup_shared_failed; + + if (old_proto_ops) + old_proto_ops->teardown(ds); + felix->tag_proto_ops = proto_ops; felix->tag_proto = proto; return 0; + +setup_shared_failed: + proto_ops->teardown(ds); +setup_failed: + return err; } static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, @@ -622,6 +719,38 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, return felix->tag_proto; } +static void felix_port_set_host_flood(struct dsa_switch *ds, int port, + bool uc, bool mc) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; + + if (uc) + felix->host_flood_uc_mask |= BIT(port); + else + felix->host_flood_uc_mask &= ~BIT(port); + + if (mc) + felix->host_flood_mc_mask |= BIT(port); + else + felix->host_flood_mc_mask &= ~BIT(port); + + mask = felix->tag_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); +} + +static int felix_port_change_master(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack) +{ + struct 
ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + return felix->tag_proto_ops->change_master(ds, port, master, extack); +} + static int felix_set_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { @@ -632,6 +761,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds, return 0; } +static void felix_port_fast_age(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_mact_flush(ocelot, port); + if (err) + dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n", + port, ERR_PTR(err)); +} + static int felix_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -641,35 +781,111 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port, } static int felix_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; - return ocelot_fdb_add(ocelot, port, addr, vid); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_port_is_cpu(dp) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + + return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); } static int felix_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; - return ocelot_fdb_del(ocelot, port, addr, vid); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_port_is_cpu(dp) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + + return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev); } static int felix_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_port_mdb_add(ocelot, port, mdb); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); } static int felix_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { + struct net_device 
*bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_port_mdb_del(ocelot, port, mdb); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); } static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, @@ -695,46 +911,63 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + ocelot_port_bridge_flags(ocelot, port, val); return 0; } static int felix_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_join(ocelot, port, br); - - return 0; + return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num, + extack); } static void felix_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_leave(ocelot, port, br); + ocelot_port_bridge_leave(ocelot, port, bridge.dev); } static int felix_lag_join(struct dsa_switch *ds, int port, - struct net_device *bond, - struct netdev_lag_upper_info *info) + struct dsa_lag lag, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack); + if (err) + return err; - return ocelot_port_lag_join(ocelot, port, bond, info); + /* Update the logical LAG port that serves as tag_8021q CPU port */ + if (!dsa_is_cpu_port(ds, port)) + return 0; + + return felix_port_change_master(ds, port, lag.dev, extack); } static int felix_lag_leave(struct dsa_switch *ds, int port, - struct net_device *bond) + struct dsa_lag lag) { struct ocelot *ocelot = ds->priv; - ocelot_port_lag_leave(ocelot, port, bond); + ocelot_port_lag_leave(ocelot, port, lag.dev); - return 0; + /* Update the logical LAG port that serves as tag_8021q CPU port */ + if (!dsa_is_cpu_port(ds, port)) + return 0; + + return felix_port_change_master(ds, port, lag.dev, NULL); } static int felix_lag_change(struct dsa_switch *ds, int port) @@ -804,6 +1037,21 @@ static int felix_vlan_del(struct dsa_switch *ds, int port, return ocelot_vlan_del(ocelot, port, vlan->vid); } +static void felix_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct ocelot *ocelot = ds->priv; + + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. 
+ */ + config->legacy_pre_march2020 = false; + + __set_bit(ocelot->ports[port]->phy_mode, + config->supported_interfaces); +} + static void felix_phylink_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) @@ -815,16 +1063,18 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port, felix->info->phylink_validate(ocelot, port, supported, state); } -static void felix_phylink_mac_config(struct dsa_switch *ds, int port, - unsigned int link_an_mode, - const struct phylink_link_state *state) +static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds, + int port, + phy_interface_t iface) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - struct dsa_port *dp = dsa_to_port(ds, port); + struct phylink_pcs *pcs = NULL; + + if (felix->pcs && felix->pcs[port]) + pcs = felix->pcs[port]; - if (felix->pcs[port]) - phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs); + return pcs; } static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, @@ -855,6 +1105,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, felix->info->port_sched_speed_set(ocelot, port, speed); } +static int felix_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + + if (!dsa_port_is_user(dp)) + return 0; + + if (ocelot->npi >= 0) { + struct net_device *master = dsa_port_to_master(dp); + + if (felix_cpu_port_for_master(ds, master) != ocelot->npi) { + dev_err(ds->dev, "Multiple masters are not allowed\n"); + return -EINVAL; + } + } + + return 0; +} + static void felix_port_qos_map_init(struct ocelot *ocelot, int port) { int i; @@ -876,6 +1147,55 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port) } } +static void felix_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_stats64(ocelot, port, stats); +} + +static void felix_get_pause_stats(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_pause_stats(ocelot, port, pause_stats); +} + +static void felix_get_rmon_stats(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges); +} + +static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats); +} + +static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats); +} + +static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats); +} + static void felix_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data) { @@ -906,11 +1226,29 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port, return ocelot_get_ts_info(ocelot, port, info); } +static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { + [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL, + 
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, + [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, + [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, + [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, + [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, +}; + +static int felix_validate_phy_mode(struct felix *felix, int port, + phy_interface_t phy_mode) +{ + u32 modes = felix->info->port_modes[port]; + + if (felix_phy_match_table[phy_mode] & modes) + return 0; + return -EOPNOTSUPP; +} + static int felix_parse_ports_node(struct felix *felix, struct device_node *ports_node, phy_interface_t *port_phy_modes) { - struct ocelot *ocelot = &felix->ocelot; struct device *dev = felix->ocelot.dev; struct device_node *child; @@ -937,7 +1275,7 @@ static int felix_parse_ports_node(struct felix *felix, return -ENODEV; } - err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode); + err = felix_validate_phy_mode(felix, port, phy_mode); if (err < 0) { dev_err(dev, "Unsupported PHY mode %s on port %d\n", phy_modes(phy_mode), port); @@ -974,11 +1312,55 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) return err; } +static struct regmap *felix_request_regmap_by_name(struct felix *felix, + const char *resource_name) +{ + struct ocelot *ocelot = &felix->ocelot; + struct resource res; + int i; + + for (i = 0; i < felix->info->num_resources; i++) { + if (strcmp(resource_name, felix->info->resources[i].name)) + continue; + + memcpy(&res, &felix->info->resources[i], sizeof(res)); + res.start += felix->switch_base; + res.end += felix->switch_base; + + return ocelot_regmap_init(ocelot, &res); + } + + return ERR_PTR(-ENOENT); +} + +static struct regmap *felix_request_regmap(struct felix *felix, + enum ocelot_target target) +{ + const char *resource_name = felix->info->resource_names[target]; + + /* If the driver didn't provide a resource name for the target, + * the resource is optional. 
+ */ + if (!resource_name) + return NULL; + + return felix_request_regmap_by_name(felix, resource_name); +} + +static struct regmap *felix_request_port_regmap(struct felix *felix, int port) +{ + char resource_name[32]; + + sprintf(resource_name, "port%d", port); + + return felix_request_regmap_by_name(felix, resource_name); +} + static int felix_init_structs(struct felix *felix, int num_phys_ports) { struct ocelot *ocelot = &felix->ocelot; phy_interface_t *port_phy_modes; - struct resource res; + struct regmap *target; int port, i, err; ocelot->num_phys_ports = num_phys_ports; @@ -989,9 +1371,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot->map = felix->info->map; ocelot->stats_layout = felix->info->stats_layout; - ocelot->num_stats = felix->info->num_stats; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; + ocelot->vcap_pol.base = felix->info->vcap_pol_base; + ocelot->vcap_pol.max = felix->info->vcap_pol_max; + ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2; + ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2; ocelot->ops = felix->info->ops; ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT; ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT; @@ -1009,20 +1394,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) } for (i = 0; i < TARGET_MAX; i++) { - struct regmap *target; - - if (!felix->info->target_io_res[i].name) - continue; - - memcpy(&res, &felix->info->target_io_res[i], sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start += felix->switch_base; - res.end += felix->switch_base; - - target = ocelot_regmap_init(ocelot, &res); + target = felix_request_regmap(felix, i); if (IS_ERR(target)) { dev_err(ocelot->dev, - "Failed to map device memory space\n"); + "Failed to map device memory space: %pe\n", + target); kfree(port_phy_modes); return PTR_ERR(target); } @@ -1039,7 +1415,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; - struct regmap *target; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -1051,16 +1426,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return -ENOMEM; } - memcpy(&res, &felix->info->port_io_res[port], sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start += felix->switch_base; - res.end += felix->switch_base; - - target = ocelot_regmap_init(ocelot, &res); + target = felix_request_port_regmap(felix, port); if (IS_ERR(target)) { dev_err(ocelot->dev, - "Failed to map memory space for port %d\n", - port); + "Failed to map memory space for port %d: %pe\n", + port, target); kfree(port_phy_modes); return PTR_ERR(target); } @@ -1068,6 +1438,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; ocelot_port->target = target; + ocelot_port->index = port; ocelot->ports[port] = ocelot_port; } @@ -1143,38 +1514,22 @@ static void felix_port_deferred_xmit(struct kthread_work *work) kfree(xmit_work); } -static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port) +static int felix_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) { - struct dsa_port *dp = dsa_to_port(ds, port); - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - struct felix_port *felix_port; + struct ocelot_8021q_tagger_data *tagger_data; - if (!dsa_port_is_user(dp)) + switch (proto) { + 
case DSA_TAG_PROTO_OCELOT_8021Q: + tagger_data = ocelot_8021q_tagger_data(ds); + tagger_data->xmit_work_fn = felix_port_deferred_xmit; return 0; - - felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL); - if (!felix_port) - return -ENOMEM; - - felix_port->xmit_worker = felix->xmit_worker; - felix_port->xmit_work_fn = felix_port_deferred_xmit; - - dp->priv = felix_port; - - return 0; -} - -static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port) -{ - struct dsa_port *dp = dsa_to_port(ds, port); - struct felix_port *felix_port = dp->priv; - - if (!felix_port) - return; - - dp->priv = NULL; - kfree(felix_port); + case DSA_TAG_PROTO_OCELOT: + case DSA_TAG_PROTO_SEVILLE: + return 0; + default: + return -EPROTONOSUPPORT; + } } /* Hardware initialization done here so that we can allocate structures with @@ -1186,7 +1541,8 @@ static int felix_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int port, err; + struct dsa_port *dp; + int err; err = felix_init_structs(felix, ds->num_ports); if (err) @@ -1205,64 +1561,35 @@ static int felix_setup(struct dsa_switch *ds) } } - felix->xmit_worker = kthread_create_worker(0, "felix_xmit"); - if (IS_ERR(felix->xmit_worker)) { - err = PTR_ERR(felix->xmit_worker); - goto out_deinit_timestamp; - } - - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - ocelot_init_port(ocelot, port); + dsa_switch_for_each_available_port(dp, ds) { + ocelot_init_port(ocelot, dp->index); /* Set the default QoS Classification based on PCP and DEI * bits of vlan tag. */ - felix_port_qos_map_init(ocelot, port); - - err = felix_port_setup_tagger_data(ds, port); - if (err) { - dev_err(ds->dev, - "port %d failed to set up tagger data: %pe\n", - port, ERR_PTR(err)); - goto out_deinit_ports; - } + felix_port_qos_map_init(ocelot, dp->index); } err = ocelot_devlink_sb_register(ocelot); if (err) goto out_deinit_ports; - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; - - /* The initial tag protocol is NPI which always returns 0, so - * there's no real point in checking for errors. - */ - felix_set_tag_protocol(ds, port, felix->tag_proto); - break; - } + /* The initial tag protocol is NPI which won't fail during initial + * setup, there's no real point in checking for errors. 
+ */ + felix_change_tag_protocol(ds, felix->tag_proto); ds->mtu_enforcement_ingress = true; ds->assisted_learning_on_cpu_port = true; + ds->fdb_isolation = true; + ds->max_num_bridges = ds->num_ports; return 0; out_deinit_ports: - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - felix_port_teardown_tagger_data(ds, port); - ocelot_deinit_port(ocelot, port); - } - - kthread_destroy_worker(felix->xmit_worker); + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); -out_deinit_timestamp: ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1277,25 +1604,13 @@ static void felix_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int port; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; - - felix_del_tag_protocol(ds, port, felix->tag_proto); - break; - } - - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; + struct dsa_port *dp; - felix_port_teardown_tagger_data(ds, port); - ocelot_deinit_port(ocelot, port); - } + if (felix->tag_proto_ops) + felix->tag_proto_ops->teardown(ds); - kthread_destroy_worker(felix->xmit_worker); + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); ocelot_devlink_sb_unregister(ocelot); ocelot_deinit_timestamp(ocelot); @@ -1317,14 +1632,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_hwstamp_set(ocelot, port, ifr); + if (err) + return err; + + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; - return ocelot_hwstamp_set(ocelot, port, ifr); + return felix_update_trapping_destinations(ds, using_tag_8021q); } -static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) +static bool felix_check_xtr_pkt(struct ocelot *ocelot) { struct felix *felix = ocelot_to_felix(ocelot); - int err, grp = 0; + int err = 0, grp = 0; if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q) return false; @@ -1332,9 +1656,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) if (!felix->info->quirk_no_xtr_irq) return false; - if (ptp_type == PTP_CLASS_NONE) - return false; - while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { struct sk_buff *skb; unsigned int type; @@ -1364,8 +1685,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) } out: - if (err < 0) + if (err < 0) { + dev_err_ratelimited(ocelot->dev, + "Error during packet extraction: %pe\n", + ERR_PTR(err)); ocelot_drain_cpu_queue(ocelot, 0); + } return true; } @@ -1385,7 +1710,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port, * MMIO in the CPU port module, and inject that into the stack from * ocelot_xtr_poll(). 
*/ - if (felix_check_xtr_pkt(ocelot, type)) { + if (felix_check_xtr_pkt(ocelot)) { kfree_skb(skb); return true; } @@ -1428,9 +1753,18 @@ static void felix_txtstamp(struct dsa_switch *ds, int port, static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct ocelot *ocelot = ds->priv; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct felix *felix = ocelot_to_felix(ocelot); ocelot_port_set_maxlen(ocelot, port, new_mtu); + mutex_lock(&ocelot->tas_lock); + + if (ocelot_port->taprio && felix->info->tas_guard_bands_update) + felix->info->tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->tas_lock); + return 0; } @@ -1445,8 +1779,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_cls_flower_replace(ocelot, port, cls, ingress); + if (err) + return err; + + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; - return ocelot_cls_flower_replace(ocelot, port, cls, ingress); + return felix_update_trapping_destinations(ds, using_tag_8021q); } static int felix_cls_flower_del(struct dsa_switch *ds, int port, @@ -1484,6 +1827,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port) ocelot_port_policer_del(ocelot, port); } +static int felix_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress, struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port, + ingress, extack); +} + +static void felix_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_mirror_del(ocelot, port, mirror->ingress); +} + static int felix_port_setup_tc(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data) @@ -1633,23 +1994,73 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port, return ocelot_mrp_del_ring_role(ocelot, port, mrp); } +static int felix_port_get_default_prio(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_default_prio(ocelot, port); +} + +static int felix_port_set_default_prio(struct dsa_switch *ds, int port, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_set_default_prio(ocelot, port, prio); +} + +static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_dscp_prio(ocelot, port, dscp); +} + +static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio); +} + +static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio); +} + const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .change_tag_protocol = felix_change_tag_protocol, + .connect_tag_protocol = felix_connect_tag_protocol, .setup = felix_setup, .teardown = felix_teardown, .set_ageing_time = felix_set_ageing_time, + .get_stats64 = felix_get_stats64, + .get_pause_stats = felix_get_pause_stats, + .get_rmon_stats = felix_get_rmon_stats, + .get_eth_ctrl_stats = felix_get_eth_ctrl_stats, + 
.get_eth_mac_stats = felix_get_eth_mac_stats, + .get_eth_phy_stats = felix_get_eth_phy_stats, .get_strings = felix_get_strings, .get_ethtool_stats = felix_get_ethtool_stats, .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, + .phylink_get_caps = felix_phylink_get_caps, .phylink_validate = felix_phylink_validate, - .phylink_mac_config = felix_phylink_mac_config, + .phylink_mac_select_pcs = felix_phylink_mac_select_pcs, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, + .port_enable = felix_port_enable, + .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, + .lag_fdb_add = felix_lag_fdb_add, + .lag_fdb_del = felix_lag_fdb_del, .port_mdb_add = felix_mdb_add, .port_mdb_del = felix_mdb_del, .port_pre_bridge_flags = felix_pre_bridge_flags, @@ -1671,6 +2082,8 @@ const struct dsa_switch_ops felix_switch_ops = { .port_max_mtu = felix_get_max_mtu, .port_policer_add = felix_port_policer_add, .port_policer_del = felix_port_policer_del, + .port_mirror_add = felix_port_mirror_add, + .port_mirror_del = felix_port_mirror_del, .cls_flower_add = felix_cls_flower_add, .cls_flower_del = felix_cls_flower_del, .cls_flower_stats = felix_cls_flower_stats, @@ -1691,6 +2104,13 @@ const struct dsa_switch_ops felix_switch_ops = { .port_mrp_del_ring_role = felix_mrp_del_ring_role, .tag_8021q_vlan_add = felix_tag_8021q_vlan_add, .tag_8021q_vlan_del = felix_tag_8021q_vlan_del, + .port_get_default_prio = felix_port_get_default_prio, + .port_set_default_prio = felix_port_set_default_prio, + .port_get_dscp_prio = felix_port_get_dscp_prio, + .port_add_dscp_prio = felix_port_add_dscp_prio, + .port_del_dscp_prio = felix_port_del_dscp_prio, + .port_set_host_flood = felix_port_set_host_flood, + .port_change_master = felix_port_change_master, }; struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index be3e42e135c0..c9c29999c336 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -7,22 +7,35 @@ #define ocelot_to_felix(o) container_of((o), struct felix, ocelot) #define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION +#define OCELOT_PORT_MODE_INTERNAL BIT(0) +#define OCELOT_PORT_MODE_SGMII BIT(1) +#define OCELOT_PORT_MODE_QSGMII BIT(2) +#define OCELOT_PORT_MODE_2500BASEX BIT(3) +#define OCELOT_PORT_MODE_USXGMII BIT(4) +#define OCELOT_PORT_MODE_1000BASEX BIT(5) + /* Platform-specific information */ struct felix_info { - const struct resource *target_io_res; - const struct resource *port_io_res; - const struct resource *imdio_res; + /* Hardcoded resources provided by the hardware instantiation. */ + const struct resource *resources; + size_t num_resources; + /* Names of the mandatory resources that will be requested during + * probe. Must have TARGET_MAX elements, since it is indexed by target. 
+ */ + const char *const *resource_names; const struct reg_field *regfields; const u32 *const *map; const struct ocelot_ops *ops; + const u32 *port_modes; int num_mact_rows; const struct ocelot_stat_layout *stats_layout; - unsigned int num_stats; int num_ports; int num_tx_queues; struct vcap_props *vcap; - int switch_pci_bar; - int imdio_pci_bar; + u16 vcap_pol_base; + u16 vcap_pol_max; + u16 vcap_pol_base2; + u16 vcap_pol_max2; const struct ptp_clock_info *ptp_caps; /* Some Ocelot switches are integrated into the SoC without the @@ -42,14 +55,29 @@ struct felix_info { void (*phylink_validate)(struct ocelot *ocelot, int port, unsigned long *supported, struct phylink_link_state *state); - int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port, - phy_interface_t phy_mode); int (*port_setup_tc)(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data); + void (*tas_guard_bands_update)(struct ocelot *ocelot, int port); void (*port_sched_speed_set)(struct ocelot *ocelot, int port, u32 speed); }; +/* Methods for initializing the hardware resources specific to a tagging + * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs, + * for "ocelot-8021q"). + * It is important that the resources configured here do not have side effects + * for the other tagging protocols. If that is the case, their configuration + * needs to go to felix_tag_proto_setup_shared(). + */ +struct felix_tag_proto_ops { + int (*setup)(struct dsa_switch *ds); + void (*teardown)(struct dsa_switch *ds); + unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds); + int (*change_master)(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack); +}; + extern const struct dsa_switch_ops felix_switch_ops; /* DSA glue / front-end for struct ocelot */ @@ -58,11 +86,13 @@ struct felix { const struct felix_info *info; struct ocelot ocelot; struct mii_bus *imdio; - struct lynx_pcs **pcs; + struct phylink_pcs **pcs; resource_size_t switch_base; - resource_size_t imdio_base; enum dsa_tag_protocol tag_proto; + const struct felix_tag_proto_ops *tag_proto_ops; struct kthread_worker *xmit_worker; + unsigned long host_flood_uc_mask; + unsigned long host_flood_mc_mask; }; struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port); diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 45c5ec7a83ea..26a35ae322d1 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -5,8 +5,10 @@ #include <linux/fsl/enetc_mdio.h> #include <soc/mscc/ocelot_qsys.h> #include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/ocelot_ana.h> #include <soc/mscc/ocelot_ptp.h> #include <soc/mscc/ocelot_sys.h> +#include <net/tc_act/tc_gate.h> #include <soc/mscc/ocelot.h> #include <linux/dsa/ocelot.h> #include <linux/pcs-lynx.h> @@ -14,9 +16,32 @@ #include <linux/iopoll.h> #include <linux/mdio.h> #include <linux/pci.h> +#include <linux/time.h> #include "felix.h" +#define VSC9959_NUM_PORTS 6 + #define VSC9959_TAS_GCL_ENTRY_MAX 63 +#define VSC9959_TAS_MIN_GATE_LEN_NS 33 +#define VSC9959_VCAP_POLICER_BASE 63 +#define VSC9959_VCAP_POLICER_MAX 383 +#define VSC9959_SWITCH_PCI_BAR 4 +#define VSC9959_IMDIO_PCI_BAR 0 + +#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \ + OCELOT_PORT_MODE_QSGMII | \ + OCELOT_PORT_MODE_1000BASEX | \ + OCELOT_PORT_MODE_2500BASEX | \ + OCELOT_PORT_MODE_USXGMII) + +static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = { + VSC9959_PORT_MODE_SERDES, + 
VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + OCELOT_PORT_MODE_INTERNAL, + OCELOT_PORT_MODE_INTERNAL, +}; static const u32 vsc9959_ana_regmap[] = { REG(ANA_ADVLEARN, 0x0089a0), @@ -250,27 +275,102 @@ static const u32 vsc9959_rew_regmap[] = { static const u32 vsc9959_sys_regmap[] = { REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), REG(SYS_COUNT_RX_SHORTS, 0x000010), REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), REG(SYS_COUNT_RX_64, 0x000024), REG(SYS_COUNT_RX_65_127, 0x000028), REG(SYS_COUNT_RX_128_255, 0x00002c), - REG(SYS_COUNT_RX_256_1023, 0x000030), - REG(SYS_COUNT_RX_1024_1526, 0x000034), - REG(SYS_COUNT_RX_1527_MAX, 0x000038), - REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_RX_256_511, 0x000030), + REG(SYS_COUNT_RX_512_1023, 0x000034), + REG(SYS_COUNT_RX_1024_1526, 0x000038), + REG(SYS_COUNT_RX_1527_MAX, 0x00003c), + REG(SYS_COUNT_RX_PAUSE, 0x000040), + REG(SYS_COUNT_RX_CONTROL, 0x000044), + REG(SYS_COUNT_RX_LONGS, 0x000048), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c), + REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050), + REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054), + REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058), + REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c), + REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060), + REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064), + REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068), + REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c), + REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070), + REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074), + REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078), + REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c), + REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080), + REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084), + REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088), + REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c), + REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090), + REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094), + REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098), + REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c), + REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0), + REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4), + REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8), + REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac), REG(SYS_COUNT_TX_OCTETS, 0x000200), + REG(SYS_COUNT_TX_UNICAST, 0x000204), + REG(SYS_COUNT_TX_MULTICAST, 0x000208), + REG(SYS_COUNT_TX_BROADCAST, 0x00020c), REG(SYS_COUNT_TX_COLLISION, 0x000210), REG(SYS_COUNT_TX_DROPS, 0x000214), + REG(SYS_COUNT_TX_PAUSE, 0x000218), REG(SYS_COUNT_TX_64, 0x00021c), REG(SYS_COUNT_TX_65_127, 0x000220), - REG(SYS_COUNT_TX_128_511, 0x000224), - REG(SYS_COUNT_TX_512_1023, 0x000228), - REG(SYS_COUNT_TX_1024_1526, 0x00022c), - REG(SYS_COUNT_TX_1527_MAX, 0x000230), - REG(SYS_COUNT_TX_AGING, 0x000278), + REG(SYS_COUNT_TX_128_255, 0x000224), + REG(SYS_COUNT_TX_256_511, 0x000228), + REG(SYS_COUNT_TX_512_1023, 0x00022c), + REG(SYS_COUNT_TX_1024_1526, 0x000230), + REG(SYS_COUNT_TX_1527_MAX, 0x000234), + REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238), + REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c), + REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240), + REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244), + REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248), + REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c), + REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250), + REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254), + REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258), + REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c), + REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260), + 
REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264), + REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268), + REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c), + REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270), + REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274), + REG(SYS_COUNT_TX_AGED, 0x000278), + REG(SYS_COUNT_DROP_LOCAL, 0x000400), + REG(SYS_COUNT_DROP_TAIL, 0x000404), + REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408), + REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410), + REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414), + REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418), + REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420), + REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424), + REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428), + REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c), + REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430), + REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434), + REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438), + REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c), + REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440), + REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444), + REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800), + REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804), + REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808), + REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c), REG(SYS_RESET_CFG, 0x000e00), REG(SYS_SR_ETYPE_CFG, 0x000e04), REG(SYS_VLAN_ETYPE_CFG, 0x000e08), @@ -292,7 +392,6 @@ static const u32 vsc9959_sys_regmap[] = { REG_RESERVED(SYS_MMGT_FAST), REG_RESERVED(SYS_EVENTS_DIF), REG_RESERVED(SYS_EVENTS_CORE), - REG_RESERVED(SYS_CNT), REG(SYS_PTP_STATUS, 0x000f14), REG(SYS_PTP_TXSTAMP, 0x000f18), REG(SYS_PTP_NXT, 0x000f1c), @@ -378,100 +477,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = { }; /* Addresses are relative to the PCI device's base address */ -static const struct resource vsc9959_target_io_res[TARGET_MAX] = { - [ANA] = { - .start = 0x0280000, - .end = 0x028ffff, - .name = "ana", - }, - [QS] = { - .start = 0x0080000, - .end = 0x00800ff, - .name = "qs", - }, - [QSYS] = { - .start = 0x0200000, - .end = 0x021ffff, - .name = "qsys", - }, - [REW] = { - .start = 0x0030000, - .end = 0x003ffff, - .name = "rew", - }, - [SYS] = { - .start = 0x0010000, - .end = 0x001ffff, - .name = "sys", - }, - [S0] = { - .start = 0x0040000, - .end = 0x00403ff, - .name = "s0", - }, - [S1] = { - .start = 0x0050000, - .end = 0x00503ff, - .name = "s1", - }, - [S2] = { - .start = 0x0060000, - .end = 0x00603ff, - .name = "s2", - }, - [PTP] = { - .start = 0x0090000, - .end = 0x00900cb, - .name = "ptp", - }, - [GCB] = { - .start = 0x0070000, - .end = 0x00701ff, - .name = "devcpu_gcb", - }, +static const struct resource vsc9959_resources[] = { + DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"), + DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"), + DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"), + DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"), + DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"), + DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"), + DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"), + DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"), + DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"), + DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"), + DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"), + DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"), + DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"), + DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"), + DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"), + DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"), }; -static const struct resource 
vsc9959_port_io_res[] = { - { - .start = 0x0100000, - .end = 0x010ffff, - .name = "port0", - }, - { - .start = 0x0110000, - .end = 0x011ffff, - .name = "port1", - }, - { - .start = 0x0120000, - .end = 0x012ffff, - .name = "port2", - }, - { - .start = 0x0130000, - .end = 0x013ffff, - .name = "port3", - }, - { - .start = 0x0140000, - .end = 0x014ffff, - .name = "port4", - }, - { - .start = 0x0150000, - .end = 0x015ffff, - .name = "port5", - }, +static const char * const vsc9959_resource_names[TARGET_MAX] = { + [SYS] = "sys", + [REW] = "rew", + [S0] = "s0", + [S1] = "s1", + [S2] = "s2", + [GCB] = "devcpu_gcb", + [QS] = "qs", + [PTP] = "ptp", + [QSYS] = "qsys", + [ANA] = "ana", }; /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an * SGMII/QSGMII MAC PCS can be found. */ -static const struct resource vsc9959_imdio_res = { - .start = 0x8030, - .end = 0x8040, - .name = "imdio", -}; +static const struct resource vsc9959_imdio_res = + DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio"); static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6), @@ -523,99 +565,8 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4), }; -static const struct ocelot_stat_layout vsc9959_stats_layout[] = { - { .offset = 0x00, .name = "rx_octets", }, - { .offset = 0x01, .name = "rx_unicast", }, - { .offset = 0x02, .name = "rx_multicast", }, - { .offset = 0x03, .name = "rx_broadcast", }, - { .offset = 0x04, .name = "rx_shorts", }, - { .offset = 0x05, .name = "rx_fragments", }, - { .offset = 0x06, .name = "rx_jabbers", }, - { .offset = 0x07, .name = "rx_crc_align_errs", }, - { .offset = 0x08, .name = "rx_sym_errs", }, - { .offset = 0x09, .name = "rx_frames_below_65_octets", }, - { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", }, - { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", }, - { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", }, - { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", }, - { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", }, - { .offset = 0x0F, .name = "rx_frames_over_1526_octets", }, - { .offset = 0x10, .name = "rx_pause", }, - { .offset = 0x11, .name = "rx_control", }, - { .offset = 0x12, .name = "rx_longs", }, - { .offset = 0x13, .name = "rx_classified_drops", }, - { .offset = 0x14, .name = "rx_red_prio_0", }, - { .offset = 0x15, .name = "rx_red_prio_1", }, - { .offset = 0x16, .name = "rx_red_prio_2", }, - { .offset = 0x17, .name = "rx_red_prio_3", }, - { .offset = 0x18, .name = "rx_red_prio_4", }, - { .offset = 0x19, .name = "rx_red_prio_5", }, - { .offset = 0x1A, .name = "rx_red_prio_6", }, - { .offset = 0x1B, .name = "rx_red_prio_7", }, - { .offset = 0x1C, .name = "rx_yellow_prio_0", }, - { .offset = 0x1D, .name = "rx_yellow_prio_1", }, - { .offset = 0x1E, .name = "rx_yellow_prio_2", }, - { .offset = 0x1F, .name = "rx_yellow_prio_3", }, - { .offset = 0x20, .name = "rx_yellow_prio_4", }, - { .offset = 0x21, .name = "rx_yellow_prio_5", }, - { .offset = 0x22, .name = "rx_yellow_prio_6", }, - { .offset = 0x23, .name = "rx_yellow_prio_7", }, - { .offset = 0x24, .name = "rx_green_prio_0", }, - { .offset = 0x25, .name = "rx_green_prio_1", }, - { .offset = 0x26, .name = "rx_green_prio_2", }, - { .offset = 0x27, .name = "rx_green_prio_3", }, - { .offset = 0x28, .name = "rx_green_prio_4", }, - { .offset = 0x29, .name = "rx_green_prio_5", }, - { .offset = 0x2A, .name = "rx_green_prio_6", }, - { .offset = 0x2B, 
.name = "rx_green_prio_7", }, - { .offset = 0x80, .name = "tx_octets", }, - { .offset = 0x81, .name = "tx_unicast", }, - { .offset = 0x82, .name = "tx_multicast", }, - { .offset = 0x83, .name = "tx_broadcast", }, - { .offset = 0x84, .name = "tx_collision", }, - { .offset = 0x85, .name = "tx_drops", }, - { .offset = 0x86, .name = "tx_pause", }, - { .offset = 0x87, .name = "tx_frames_below_65_octets", }, - { .offset = 0x88, .name = "tx_frames_65_to_127_octets", }, - { .offset = 0x89, .name = "tx_frames_128_255_octets", }, - { .offset = 0x8B, .name = "tx_frames_256_511_octets", }, - { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", }, - { .offset = 0x8D, .name = "tx_frames_over_1526_octets", }, - { .offset = 0x8E, .name = "tx_yellow_prio_0", }, - { .offset = 0x8F, .name = "tx_yellow_prio_1", }, - { .offset = 0x90, .name = "tx_yellow_prio_2", }, - { .offset = 0x91, .name = "tx_yellow_prio_3", }, - { .offset = 0x92, .name = "tx_yellow_prio_4", }, - { .offset = 0x93, .name = "tx_yellow_prio_5", }, - { .offset = 0x94, .name = "tx_yellow_prio_6", }, - { .offset = 0x95, .name = "tx_yellow_prio_7", }, - { .offset = 0x96, .name = "tx_green_prio_0", }, - { .offset = 0x97, .name = "tx_green_prio_1", }, - { .offset = 0x98, .name = "tx_green_prio_2", }, - { .offset = 0x99, .name = "tx_green_prio_3", }, - { .offset = 0x9A, .name = "tx_green_prio_4", }, - { .offset = 0x9B, .name = "tx_green_prio_5", }, - { .offset = 0x9C, .name = "tx_green_prio_6", }, - { .offset = 0x9D, .name = "tx_green_prio_7", }, - { .offset = 0x9E, .name = "tx_aged", }, - { .offset = 0x100, .name = "drop_local", }, - { .offset = 0x101, .name = "drop_tail", }, - { .offset = 0x102, .name = "drop_yellow_prio_0", }, - { .offset = 0x103, .name = "drop_yellow_prio_1", }, - { .offset = 0x104, .name = "drop_yellow_prio_2", }, - { .offset = 0x105, .name = "drop_yellow_prio_3", }, - { .offset = 0x106, .name = "drop_yellow_prio_4", }, - { .offset = 0x107, .name = "drop_yellow_prio_5", }, - { .offset = 0x108, .name = "drop_yellow_prio_6", }, - { .offset = 0x109, .name = "drop_yellow_prio_7", }, - { .offset = 0x10A, .name = "drop_green_prio_0", }, - { .offset = 0x10B, .name = "drop_green_prio_1", }, - { .offset = 0x10C, .name = "drop_green_prio_2", }, - { .offset = 0x10D, .name = "drop_green_prio_3", }, - { .offset = 0x10E, .name = "drop_green_prio_4", }, - { .offset = 0x10F, .name = "drop_green_prio_5", }, - { .offset = 0x110, .name = "drop_green_prio_6", }, - { .offset = 0x111, .name = "drop_green_prio_7", }, +static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = { + OCELOT_COMMON_STATS, }; static const struct vcap_field vsc9959_vcap_es0_keys[] = { @@ -938,15 +889,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, unsigned long *supported, struct phylink_link_state *state) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { - linkmode_zero(supported); - return; - } - phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Pause); @@ -957,6 +901,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, phylink_set(mask, 100baseT_Full); phylink_set(mask, 1000baseT_Half); phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); if (state->interface == PHY_INTERFACE_MODE_INTERNAL || state->interface == PHY_INTERFACE_MODE_2500BASEX || @@ -969,27 +914,6 @@ static void 
vsc9959_phylink_validate(struct ocelot *ocelot, int port, linkmode_and(state->advertising, state->advertising, mask); } -static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, - phy_interface_t phy_mode) -{ - switch (phy_mode) { - case PHY_INTERFACE_MODE_INTERNAL: - if (port != 4 && port != 5) - return -ENOTSUPP; - return 0; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_USXGMII: - case PHY_INTERFACE_MODE_2500BASEX: - /* Not supported on internal to-CPU ports */ - if (port == 4 || port == 5) - return -ENOTSUPP; - return 0; - default: - return -ENOTSUPP; - } -} - /* Watermark encode * Bit 8: Unit; 0:1, 1:16 * Bit 7-0: Value to be multiplied with unit @@ -1020,20 +944,13 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse) *maxuse = val & GENMASK(11, 0); } -static const struct ocelot_ops vsc9959_ops = { - .reset = vsc9959_reset, - .wm_enc = vsc9959_wm_enc, - .wm_dec = vsc9959_wm_dec, - .wm_stat = vsc9959_wm_stat, - .port_to_netdev = felix_port_to_netdev, - .netdev_to_port = felix_netdev_to_port, -}; - static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) { + struct pci_dev *pdev = to_pci_dev(ocelot->dev); struct felix *felix = ocelot_to_felix(ocelot); struct enetc_mdio_priv *mdio_priv; struct device *dev = ocelot->dev; + resource_size_t imdio_base; void __iomem *imdio_regs; struct resource res; struct enetc_hw *hw; @@ -1042,17 +959,18 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) int rc; felix->pcs = devm_kcalloc(dev, felix->info->num_ports, - sizeof(struct lynx_pcs *), + sizeof(struct phylink_pcs *), GFP_KERNEL); if (!felix->pcs) { dev_err(dev, "failed to allocate array for PCS PHYs\n"); return -ENOMEM; } - memcpy(&res, felix->info->imdio_res, sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start += felix->imdio_base; - res.end += felix->imdio_base; + imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR); + + memcpy(&res, &vsc9959_imdio_res, sizeof(res)); + res.start += imdio_base; + res.end += imdio_base; imdio_regs = devm_ioremap_resource(dev, &res); if (IS_ERR(imdio_regs)) @@ -1064,7 +982,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return PTR_ERR(hw); } - bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); if (!bus) return -ENOMEM; @@ -1084,6 +1002,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) rc = mdiobus_register(bus); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); + mdiobus_free(bus); return rc; } @@ -1091,8 +1010,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) for (port = 0; port < felix->info->num_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct mdio_device *pcs; - struct lynx_pcs *lynx; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; if (dsa_is_unused_port(felix->ds, port)) continue; @@ -1100,17 +1019,17 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) continue; - pcs = mdio_device_create(felix->imdio, port); - if (IS_ERR(pcs)) + mdio_device = mdio_device_create(felix->imdio, port); + if (IS_ERR(mdio_device)) continue; - lynx = lynx_pcs_create(pcs); - if (!lynx) { - mdio_device_free(pcs); + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); continue; } - felix->pcs[port] = lynx; + felix->pcs[port] = phylink_pcs; dev_info(dev, "Found PCS at internal MDIO address %d\n", port); } @@ -1124,20 +1043,296 @@ 
static void vsc9959_mdio_bus_free(struct ocelot *ocelot) int port; for (port = 0; port < ocelot->num_phys_ports; port++) { - struct lynx_pcs *pcs = felix->pcs[port]; + struct phylink_pcs *phylink_pcs = felix->pcs[port]; + struct mdio_device *mdio_device; - if (!pcs) + if (!phylink_pcs) continue; - mdio_device_free(pcs->mdio); - lynx_pcs_destroy(pcs); + mdio_device = lynx_get_mdio_device(phylink_pcs); + mdio_device_free(mdio_device); + lynx_pcs_destroy(phylink_pcs); } mdiobus_unregister(felix->imdio); + mdiobus_free(felix->imdio); +} + +/* The switch considers any frame (regardless of size) as eligible for + * transmission if the traffic class gate is open for at least 33 ns. + * Overruns are prevented by cropping an interval at the end of the gate time + * slot for which egress scheduling is blocked, but we need to still keep 33 ns + * available for one packet to be transmitted, otherwise the port tc will hang. + * This function returns the size of a gate interval that remains available for + * setting the guard band, after reserving the space for one egress frame. + */ +static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns) +{ + /* Gate always open */ + if (gate_len_ns == U64_MAX) + return U64_MAX; + + return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; +} + +/* Extract shortest continuous gate open intervals in ns for each traffic class + * of a cyclic tc-taprio schedule. If a gate is always open, the duration is + * considered U64_MAX. If the gate is always closed, it is considered 0. + */ +static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio, + u64 min_gate_len[OCELOT_NUM_TC]) +{ + struct tc_taprio_sched_entry *entry; + u64 gate_len[OCELOT_NUM_TC]; + u8 gates_ever_opened = 0; + int tc, i, n; + + /* Initialize arrays */ + for (tc = 0; tc < OCELOT_NUM_TC; tc++) { + min_gate_len[tc] = U64_MAX; + gate_len[tc] = 0; + } + + /* If we don't have taprio, consider all gates as permanently open */ + if (!taprio) + return; + + n = taprio->num_entries; + + /* Walk through the gate list twice to determine the length + * of consecutively open gates for a traffic class, including + * open gates that wrap around. We are just interested in the + * minimum window size, and this doesn't change what the + * minimum is (if the gate never closes, min_gate_len will + * remain U64_MAX). + */ + for (i = 0; i < 2 * n; i++) { + entry = &taprio->entries[i % n]; + + for (tc = 0; tc < OCELOT_NUM_TC; tc++) { + if (entry->gate_mask & BIT(tc)) { + gate_len[tc] += entry->interval; + gates_ever_opened |= BIT(tc); + } else { + /* Gate closes now, record a potential new + * minimum and reinitialize length + */ + if (min_gate_len[tc] > gate_len[tc] && + gate_len[tc]) + min_gate_len[tc] = gate_len[tc]; + gate_len[tc] = 0; + } + } + } + + /* min_gate_len[tc] actually tracks minimum *open* gate time, so for + * permanently closed gates, min_gate_len[tc] will still be U64_MAX. + * Therefore they are currently indistinguishable from permanently + * open gates. Overwrite the gate len with 0 when we know they're + * actually permanently closed, i.e. after the loop above. 
+ */ + for (tc = 0; tc < OCELOT_NUM_TC; tc++) + if (!(gates_ever_opened & BIT(tc))) + min_gate_len[tc] = 0; +} + +/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ, + * so we need to spell out the register access to each traffic class in helper + * functions, to simplify callers + */ +static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc, + u32 max_sdu) +{ + switch (tc) { + case 0: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0, + port); + break; + case 1: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1, + port); + break; + case 2: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2, + port); + break; + case 3: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3, + port); + break; + case 4: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4, + port); + break; + case 5: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5, + port); + break; + case 6: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6, + port); + break; + case 7: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7, + port); + break; + } +} + +static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc) +{ + switch (tc) { + case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port); + case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port); + case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port); + case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port); + case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port); + case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port); + case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port); + case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port); + default: + return 0; + } +} + +static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc) +{ + if (!taprio || !taprio->max_sdu[tc]) + return 0; + + return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; +} + +/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the + * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU + * values (the default value is 1518). Also, for traffic class windows smaller + * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame + * dropping, such that these won't hang the port, as they will never be sent. + */ +static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct tc_taprio_qopt_offload *taprio; + u64 min_gate_len[OCELOT_NUM_TC]; + int speed, picos_per_byte; + u64 needed_bit_time_ps; + u32 val, maxlen; + u8 tas_speed; + int tc; + + lockdep_assert_held(&ocelot->tas_lock); + + taprio = ocelot_port->taprio; + + val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port); + tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val); + + switch (tas_speed) { + case OCELOT_SPEED_10: + speed = SPEED_10; + break; + case OCELOT_SPEED_100: + speed = SPEED_100; + break; + case OCELOT_SPEED_1000: + speed = SPEED_1000; + break; + case OCELOT_SPEED_2500: + speed = SPEED_2500; + break; + default: + return; + } + + picos_per_byte = (USEC_PER_SEC * 8) / speed; + + val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG); + /* MAXLEN_CFG accounts automatically for VLAN. We need to include it + * manually in the bit time calculation, plus the preamble and SFD. + */ + maxlen = val + 2 * VLAN_HLEN; + /* Consider the standard Ethernet overhead of 8 octets preamble+SFD, + * 4 octets FCS, 12 octets IFG. 
+ */ + needed_bit_time_ps = (maxlen + 24) * picos_per_byte; + + dev_dbg(ocelot->dev, + "port %d: max frame size %d needs %llu ps at speed %d\n", + port, maxlen, needed_bit_time_ps, speed); + + vsc9959_tas_min_gate_lengths(taprio, min_gate_len); + + mutex_lock(&ocelot->fwd_domain_lock); + + for (tc = 0; tc < OCELOT_NUM_TC; tc++) { + u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc); + u64 remaining_gate_len_ps; + u32 max_sdu; + + remaining_gate_len_ps = + vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]); + + if (remaining_gate_len_ps > needed_bit_time_ps) { + /* Setting QMAXSDU_CFG to 0 disables oversized frame + * dropping. + */ + max_sdu = requested_max_sdu; + dev_dbg(ocelot->dev, + "port %d tc %d min gate len %llu" + ", sending all frames\n", + port, tc, min_gate_len[tc]); + } else { + /* If traffic class doesn't support a full MTU sized + * frame, make sure to enable oversize frame dropping + * for frames larger than the smallest that would fit. + * + * However, the exact same register, QSYS_QMAXSDU_CFG_*, + * controls not only oversized frame dropping, but also + * per-tc static guard band lengths, so it reduces the + * useful gate interval length. Therefore, be careful + * to calculate a guard band (and therefore max_sdu) + * that still leaves 33 ns available in the time slot. + */ + max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte); + /* A TC gate may be completely closed, which is a + * special case where all packets are oversized. + * Any limit smaller than 64 octets accomplishes this + */ + if (!max_sdu) + max_sdu = 1; + /* Take L1 overhead into account, but just don't allow + * max_sdu to go negative or to 0. Here we use 20 + * because QSYS_MAXSDU_CFG_* already counts the 4 FCS + * octets as part of packet size. + */ + if (max_sdu > 20) + max_sdu -= 20; + + if (requested_max_sdu && requested_max_sdu < max_sdu) + max_sdu = requested_max_sdu; + + dev_info(ocelot->dev, + "port %d tc %d min gate length %llu" + " ns not enough for max frame size %d at %d" + " Mbps, dropping frames over %d" + " octets including FCS\n", + port, tc, min_gate_len[tc], maxlen, speed, + max_sdu); + } + + vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu); + } + + ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port); + + ocelot->ops->cut_through_fwd(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); } static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, u32 speed) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; u8 tas_speed; switch (speed) { @@ -1158,10 +1353,17 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, break; } + mutex_lock(&ocelot->tas_lock); + ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_LINK_SPEED(tas_speed), QSYS_TAG_CONFIG_LINK_SPEED_M, QSYS_TAG_CONFIG, port); + + if (ocelot_port->taprio) + vsc9959_tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->tas_lock); } static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time, @@ -1204,26 +1406,36 @@ static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix, static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, struct tc_taprio_qopt_offload *taprio) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; struct timespec64 base_ts; int ret, i; u32 val; + mutex_lock(&ocelot->tas_lock); + if (!taprio->enable) { - ocelot_rmw_rix(ocelot, - QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF), - QSYS_TAG_CONFIG_ENABLE | - QSYS_TAG_CONFIG_INIT_GATE_STATE_M, + ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE, QSYS_TAG_CONFIG, port); + 
taprio_offload_free(ocelot_port->taprio); + ocelot_port->taprio = NULL; + + vsc9959_tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->tas_lock); return 0; } if (taprio->cycle_time > NSEC_PER_SEC || - taprio->cycle_time_extension >= NSEC_PER_SEC) - return -EINVAL; + taprio->cycle_time_extension >= NSEC_PER_SEC) { + ret = -EINVAL; + goto err; + } - if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) - return -ERANGE; + if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) { + ret = -ERANGE; + goto err; + } /* Enable guard band. The switch will schedule frames without taking * their length into account. Thus we'll always need to enable the @@ -1244,8 +1456,10 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, * config is pending, need reset the TAS module */ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8); - if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) - return -EBUSY; + if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) { + ret = -EBUSY; + goto err; + } ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE | @@ -1278,10 +1492,67 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val, !(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE), 10, 100000); + if (ret) + goto err; + + ocelot_port->taprio = taprio_offload_get(taprio); + vsc9959_tas_guard_bands_update(ocelot, port); + +err: + mutex_unlock(&ocelot->tas_lock); return ret; } +static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) +{ + struct tc_taprio_qopt_offload *taprio; + struct ocelot_port *ocelot_port; + struct timespec64 base_ts; + int port; + u32 val; + + mutex_lock(&ocelot->tas_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_port = ocelot->ports[port]; + taprio = ocelot_port->taprio; + if (!taprio) + continue; + + ocelot_rmw(ocelot, + QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port), + QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M, + QSYS_TAS_PARAM_CFG_CTRL); + + /* Disable time-aware shaper */ + ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG, port); + + vsc9959_new_base_time(ocelot, taprio->base_time, + taprio->cycle_time, &base_ts); + + ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1); + ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), + QSYS_PARAM_CFG_REG_2); + val = upper_32_bits(base_ts.tv_sec); + ocelot_rmw(ocelot, + QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val), + QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, + QSYS_PARAM_CFG_REG_3); + + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL); + + /* Re-enable time-aware shaper */ + ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG, port); + } + mutex_unlock(&ocelot->tas_lock); +} + static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port, struct tc_cbs_qopt_offload *cbs_qopt) { @@ -1328,6 +1599,21 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port, return 0; } +static int vsc9959_qos_query_caps(struct tc_query_caps_base *base) +{ + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->supports_queue_max_sdu = true; + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data) @@ -1335,6 +1621,8 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port, struct ocelot *ocelot = ds->priv; switch (type) { + case 
TC_QUERY_CAPS: + return vsc9959_qos_query_caps(type_data); case TC_SETUP_QDISC_TAPRIO: return vsc9959_qos_port_tas_set(ocelot, port, type_data); case TC_SETUP_QDISC_CBS: @@ -1344,29 +1632,967 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port, } } +#define VSC9959_PSFP_SFID_MAX 175 +#define VSC9959_PSFP_GATE_ID_MAX 183 +#define VSC9959_PSFP_POLICER_BASE 63 +#define VSC9959_PSFP_POLICER_MAX 383 +#define VSC9959_PSFP_GATE_LIST_NUM 4 +#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000 + +struct felix_stream { + struct list_head list; + unsigned long id; + bool dummy; + int ports; + int port; + u8 dmac[ETH_ALEN]; + u16 vid; + s8 prio; + u8 sfid_valid; + u8 ssid_valid; + u32 sfid; + u32 ssid; +}; + +struct felix_stream_filter_counters { + u64 match; + u64 not_pass_gate; + u64 not_pass_sdu; + u64 red; +}; + +struct felix_stream_filter { + struct felix_stream_filter_counters stats; + struct list_head list; + refcount_t refcount; + u32 index; + u8 enable; + int portmask; + u8 sg_valid; + u32 sgid; + u8 fm_valid; + u32 fmid; + u8 prio_valid; + u8 prio; + u32 maxsdu; +}; + +struct felix_stream_gate { + u32 index; + u8 enable; + u8 ipv_valid; + u8 init_ipv; + u64 basetime; + u64 cycletime; + u64 cycletime_ext; + u32 num_entries; + struct action_gate_entry entries[]; +}; + +struct felix_stream_gate_entry { + struct list_head list; + refcount_t refcount; + u32 index; +}; + +static int vsc9959_stream_identify(struct flow_cls_offload *f, + struct felix_stream *stream) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) + return -EOPNOTSUPP; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(stream->dmac, match.key->dst); + if (!is_zero_ether_addr(match.mask->src)) + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) + stream->prio = match.key->vlan_priority; + else + stream->prio = -1; + + if (!match.mask->vlan_id) + return -EOPNOTSUPP; + stream->vid = match.key->vlan_id; + } else { + return -EOPNOTSUPP; + } + + stream->id = f->cookie; + + return 0; +} + +static int vsc9959_mact_stream_set(struct ocelot *ocelot, + struct felix_stream *stream, + struct netlink_ext_ack *extack) +{ + enum macaccess_entry_type type; + int ret, sfid, ssid; + u32 vid, dst_idx; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, stream->dmac); + vid = stream->vid; + + /* Stream identification desn't support to add a stream with non + * existent MAC (The MAC entry has not been learned in MAC table). + */ + ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type); + if (ret) { + if (extack) + NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table"); + return -EOPNOTSUPP; + } + + if ((stream->sfid_valid || stream->ssid_valid) && + type == ENTRYTYPE_NORMAL) + type = ENTRYTYPE_LOCKED; + + sfid = stream->sfid_valid ? stream->sfid : -1; + ssid = stream->ssid_valid ? 
stream->ssid : -1; + + ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type, + sfid, ssid); + + return ret; +} + +static struct felix_stream * +vsc9959_stream_table_lookup(struct list_head *stream_list, + struct felix_stream *stream) +{ + struct felix_stream *tmp; + + list_for_each_entry(tmp, stream_list, list) + if (ether_addr_equal(tmp->dmac, stream->dmac) && + tmp->vid == stream->vid) + return tmp; + + return NULL; +} + +static int vsc9959_stream_table_add(struct ocelot *ocelot, + struct list_head *stream_list, + struct felix_stream *stream, + struct netlink_ext_ack *extack) +{ + struct felix_stream *stream_entry; + int ret; + + stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL); + if (!stream_entry) + return -ENOMEM; + + if (!stream->dummy) { + ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack); + if (ret) { + kfree(stream_entry); + return ret; + } + } + + list_add_tail(&stream_entry->list, stream_list); + + return 0; +} + +static struct felix_stream * +vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id) +{ + struct felix_stream *tmp; + + list_for_each_entry(tmp, stream_list, list) + if (tmp->id == id) + return tmp; + + return NULL; +} + +static void vsc9959_stream_table_del(struct ocelot *ocelot, + struct felix_stream *stream) +{ + if (!stream->dummy) + vsc9959_mact_stream_set(ocelot, stream, NULL); + + list_del(&stream->list); + kfree(stream); +} + +static u32 vsc9959_sfi_access_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS); +} + +static int vsc9959_psfp_sfi_set(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + u32 val; + + if (sfi->index > VSC9959_PSFP_SFID_MAX) + return -EINVAL; + + if (!sfi->enable) { + ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index), + ANA_TABLES_SFIDTIDX); + + val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE); + ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); + } + + if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX || + sfi->fmid > VSC9959_PSFP_POLICER_MAX) + return -EINVAL; + + ocelot_write(ocelot, + (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) | + ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) | + (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) | + ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) | + ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index), + ANA_TABLES_SFIDTIDX); + + ocelot_write(ocelot, + (sfi->prio_valid ? 
ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) | + ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) | + ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) | + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE), + ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); +} + +static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports) +{ + u32 val; + + ocelot_rmw(ocelot, + ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid), + ANA_TABLES_SFIDTIDX_SFID_INDEX_M, + ANA_TABLES_SFIDTIDX); + + ocelot_write(ocelot, + ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) | + ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA, + ANA_TABLES_SFID_MASK); + + ocelot_rmw(ocelot, + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE), + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M, + ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); +} + +static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot, + struct felix_stream_filter *sfi, + struct list_head *pos) +{ + struct felix_stream_filter *sfi_entry; + int ret; + + sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL); + if (!sfi_entry) + return -ENOMEM; + + refcount_set(&sfi_entry->refcount, 1); + + ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry); + if (ret) { + kfree(sfi_entry); + return ret; + } + + vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask); + + list_add(&sfi_entry->list, pos); + + return 0; +} + +static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + struct list_head *pos, *q, *last; + struct felix_stream_filter *tmp; + struct ocelot_psfp_list *psfp; + u32 insert = 0; + + psfp = &ocelot->psfp; + last = &psfp->sfi_list; + + list_for_each_safe(pos, q, &psfp->sfi_list) { + tmp = list_entry(pos, struct felix_stream_filter, list); + if (sfi->sg_valid == tmp->sg_valid && + sfi->fm_valid == tmp->fm_valid && + sfi->portmask == tmp->portmask && + tmp->sgid == sfi->sgid && + tmp->fmid == sfi->fmid) { + sfi->index = tmp->index; + refcount_inc(&tmp->refcount); + return 0; + } + /* Make sure that the index is increasing in order. */ + if (tmp->index == insert) { + last = pos; + insert++; + } + } + sfi->index = insert; + + return vsc9959_psfp_sfi_list_add(ocelot, sfi, last); +} + +static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot, + struct felix_stream_filter *sfi, + struct felix_stream_filter *sfi2) +{ + struct felix_stream_filter *tmp; + struct list_head *pos, *q, *last; + struct ocelot_psfp_list *psfp; + u32 insert = 0; + int ret; + + psfp = &ocelot->psfp; + last = &psfp->sfi_list; + + list_for_each_safe(pos, q, &psfp->sfi_list) { + tmp = list_entry(pos, struct felix_stream_filter, list); + /* Make sure that the index is increasing in order. 
*/ + if (tmp->index >= insert + 2) + break; + + insert = tmp->index + 1; + last = pos; + } + sfi->index = insert; + + ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last); + if (ret) + return ret; + + sfi2->index = insert + 1; + + return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next); +} + +static struct felix_stream_filter * +vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index) +{ + struct felix_stream_filter *tmp; + + list_for_each_entry(tmp, sfi_list, list) + if (tmp->index == index) + return tmp; + + return NULL; +} + +static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index) +{ + struct felix_stream_filter *tmp, *n; + struct ocelot_psfp_list *psfp; + u8 z; + + psfp = &ocelot->psfp; + + list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list) + if (tmp->index == index) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + tmp->enable = 0; + vsc9959_psfp_sfi_set(ocelot, tmp); + list_del(&tmp->list); + kfree(tmp); + } + break; + } +} + +static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry, + struct felix_stream_gate *sgi) +{ + sgi->index = entry->hw_index; + sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1; + sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0; + sgi->basetime = entry->gate.basetime; + sgi->cycletime = entry->gate.cycletime; + sgi->num_entries = entry->gate.num_entries; + sgi->enable = 1; + + memcpy(sgi->entries, entry->gate.entries, + entry->gate.num_entries * sizeof(struct action_gate_entry)); +} + +static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL); +} + +static int vsc9959_psfp_sgi_set(struct ocelot *ocelot, + struct felix_stream_gate *sgi) +{ + struct action_gate_entry *e; + struct timespec64 base_ts; + u32 interval_sum = 0; + u32 val; + int i; + + if (sgi->index > VSC9959_PSFP_GATE_ID_MAX) + return -EINVAL; + + ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index), + ANA_SG_ACCESS_CTRL); + + if (!sgi->enable) { + ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE, + ANA_SG_CONFIG_REG_3_INIT_GATE_STATE | + ANA_SG_CONFIG_REG_3_GATE_ENABLE, + ANA_SG_CONFIG_REG_3); + + return 0; + } + + if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN || + sgi->cycletime > NSEC_PER_SEC) + return -EINVAL; + + if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM) + return -EINVAL; + + vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts); + ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1); + val = lower_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2); + + val = upper_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, + (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) | + ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) | + ANA_SG_CONFIG_REG_3_GATE_ENABLE | + ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) | + ANA_SG_CONFIG_REG_3_INIT_GATE_STATE | + ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val), + ANA_SG_CONFIG_REG_3); + + ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4); + + e = sgi->entries; + for (i = 0; i < sgi->num_entries; i++) { + u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8); + + ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) | + (e[i].gate_state ? 
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0), + ANA_SG_GCL_GS_CONFIG, i); + + interval_sum += e[i].interval; + ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i); + } + + ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE, + ANA_SG_ACCESS_CTRL_CONFIG_CHANGE, + ANA_SG_ACCESS_CTRL); + + return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val, + (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)), + 10, 100000); +} + +static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot, + struct felix_stream_gate *sgi) +{ + struct felix_stream_gate_entry *tmp; + struct ocelot_psfp_list *psfp; + int ret; + + psfp = &ocelot->psfp; + + list_for_each_entry(tmp, &psfp->sgi_list, list) + if (tmp->index == sgi->index) { + refcount_inc(&tmp->refcount); + return 0; + } + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = vsc9959_psfp_sgi_set(ocelot, sgi); + if (ret) { + kfree(tmp); + return ret; + } + + tmp->index = sgi->index; + refcount_set(&tmp->refcount, 1); + list_add_tail(&tmp->list, &psfp->sgi_list); + + return 0; +} + +static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot, + u32 index) +{ + struct felix_stream_gate_entry *tmp, *n; + struct felix_stream_gate sgi = {0}; + struct ocelot_psfp_list *psfp; + u8 z; + + psfp = &ocelot->psfp; + + list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list) + if (tmp->index == index) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + sgi.index = index; + sgi.enable = 0; + vsc9959_psfp_sgi_set(ocelot, &sgi); + list_del(&tmp->list); + kfree(tmp); + } + break; + } +} + +static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port, + struct flow_cls_offload *f) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct felix_stream_filter old_sfi, *sfi_entry; + struct felix_stream_filter sfi = {0}; + const struct flow_action_entry *a; + struct felix_stream *stream_entry; + struct felix_stream stream = {0}; + struct felix_stream_gate *sgi; + struct ocelot_psfp_list *psfp; + struct ocelot_policer pol; + int ret, i, size; + u64 rate, burst; + u32 index; + + psfp = &ocelot->psfp; + + ret = vsc9959_stream_identify(f, &stream); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC"); + return ret; + } + + mutex_lock(&psfp->lock); + + flow_action_for_each(i, a, &f->rule->action) { + switch (a->id) { + case FLOW_ACTION_GATE: + size = struct_size(sgi, entries, a->gate.num_entries); + sgi = kzalloc(size, GFP_KERNEL); + if (!sgi) { + ret = -ENOMEM; + goto err; + } + vsc9959_psfp_parse_gate(a, sgi); + ret = vsc9959_psfp_sgi_table_add(ocelot, sgi); + if (ret) { + kfree(sgi); + goto err; + } + sfi.sg_valid = 1; + sfi.sgid = sgi->index; + kfree(sgi); + break; + case FLOW_ACTION_POLICE: + index = a->hw_index + VSC9959_PSFP_POLICER_BASE; + if (index > VSC9959_PSFP_POLICER_MAX) { + ret = -EINVAL; + goto err; + } + + rate = a->police.rate_bytes_ps; + burst = rate * PSCHED_NS2TICKS(a->police.burst); + pol = (struct ocelot_policer) { + .burst = div_u64(burst, PSCHED_TICKS_PER_SEC), + .rate = div_u64(rate, 1000) * 8, + }; + ret = ocelot_vcap_policer_add(ocelot, index, &pol); + if (ret) + goto err; + + sfi.fm_valid = 1; + sfi.fmid = index; + sfi.maxsdu = a->police.mtu; + break; + default: + mutex_unlock(&psfp->lock); + return -EOPNOTSUPP; + } + } + + stream.ports = BIT(port); + stream.port = port; + + sfi.portmask = stream.ports; + sfi.prio_valid = (stream.prio < 0 ? 0 : 1); + sfi.prio = (sfi.prio_valid ? stream.prio : 0); + sfi.enable = 1; + + /* Check if stream is set. 
*/ + stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream); + if (stream_entry) { + if (stream_entry->ports & BIT(port)) { + NL_SET_ERR_MSG_MOD(extack, + "The stream is added on this port"); + ret = -EEXIST; + goto err; + } + + if (stream_entry->ports != BIT(stream_entry->port)) { + NL_SET_ERR_MSG_MOD(extack, + "The stream is added on two ports"); + ret = -EEXIST; + goto err; + } + + stream_entry->ports |= BIT(port); + stream.ports = stream_entry->ports; + + sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, + stream_entry->sfid); + memcpy(&old_sfi, sfi_entry, sizeof(old_sfi)); + + vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid); + + old_sfi.portmask = stream_entry->ports; + sfi.portmask = stream.ports; + + if (stream_entry->port > port) { + ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi, + &old_sfi); + stream_entry->dummy = true; + } else { + ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi, + &sfi); + stream.dummy = true; + } + if (ret) + goto err; + + stream_entry->sfid = old_sfi.index; + } else { + ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi); + if (ret) + goto err; + } + + stream.sfid = sfi.index; + stream.sfid_valid = 1; + ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list, + &stream, extack); + if (ret) { + vsc9959_psfp_sfi_table_del(ocelot, stream.sfid); + goto err; + } + + mutex_unlock(&psfp->lock); + + return 0; + +err: + if (sfi.sg_valid) + vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid); + + if (sfi.fm_valid) + ocelot_vcap_policer_del(ocelot, sfi.fmid); + + mutex_unlock(&psfp->lock); + + return ret; +} + +static int vsc9959_psfp_filter_del(struct ocelot *ocelot, + struct flow_cls_offload *f) +{ + struct felix_stream *stream, tmp, *stream_entry; + struct ocelot_psfp_list *psfp = &ocelot->psfp; + static struct felix_stream_filter *sfi; + + mutex_lock(&psfp->lock); + + stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie); + if (!stream) { + mutex_unlock(&psfp->lock); + return -ENOMEM; + } + + sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid); + if (!sfi) { + mutex_unlock(&psfp->lock); + return -ENOMEM; + } + + if (sfi->sg_valid) + vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid); + + if (sfi->fm_valid) + ocelot_vcap_policer_del(ocelot, sfi->fmid); + + vsc9959_psfp_sfi_table_del(ocelot, stream->sfid); + + memcpy(&tmp, stream, sizeof(tmp)); + + stream->sfid_valid = 0; + vsc9959_stream_table_del(ocelot, stream); + + stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp); + if (stream_entry) { + stream_entry->ports = BIT(stream_entry->port); + if (stream_entry->dummy) { + stream_entry->dummy = false; + vsc9959_mact_stream_set(ocelot, stream_entry, NULL); + } + vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid, + stream_entry->ports); + } + + mutex_unlock(&psfp->lock); + + return 0; +} + +static void vsc9959_update_sfid_stats(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + struct felix_stream_filter_counters *s = &sfi->stats; + u32 match, not_pass_gate, not_pass_sdu, red; + u32 sfid = sfi->index; + + lockdep_assert_held(&ocelot->stat_view_lock); + + ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid), + SYS_STAT_CFG_STAT_VIEW_M, + SYS_STAT_CFG); + + match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES); + not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES); + not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU); + red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES); + + /* Clear the PSFP counter. 
*/ + ocelot_write(ocelot, + SYS_STAT_CFG_STAT_VIEW(sfid) | + SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10), + SYS_STAT_CFG); + + s->match += match; + s->not_pass_gate += not_pass_gate; + s->not_pass_sdu += not_pass_sdu; + s->red += red; +} + +/* Caller must hold &ocelot->stat_view_lock */ +static void vsc9959_update_stats(struct ocelot *ocelot) +{ + struct ocelot_psfp_list *psfp = &ocelot->psfp; + struct felix_stream_filter *sfi; + + mutex_lock(&psfp->lock); + + list_for_each_entry(sfi, &psfp->sfi_list, list) + vsc9959_update_sfid_stats(ocelot, sfi); + + mutex_unlock(&psfp->lock); +} + +static int vsc9959_psfp_stats_get(struct ocelot *ocelot, + struct flow_cls_offload *f, + struct flow_stats *stats) +{ + struct ocelot_psfp_list *psfp = &ocelot->psfp; + struct felix_stream_filter_counters *s; + static struct felix_stream_filter *sfi; + struct felix_stream *stream; + + stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie); + if (!stream) + return -ENOMEM; + + sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid); + if (!sfi) + return -EINVAL; + + mutex_lock(&ocelot->stat_view_lock); + + vsc9959_update_sfid_stats(ocelot, sfi); + + s = &sfi->stats; + stats->pkts = s->match; + stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red; + + memset(s, 0, sizeof(*s)); + + mutex_unlock(&ocelot->stat_view_lock); + + return 0; +} + +static void vsc9959_psfp_init(struct ocelot *ocelot) +{ + struct ocelot_psfp_list *psfp = &ocelot->psfp; + + INIT_LIST_HEAD(&psfp->stream_list); + INIT_LIST_HEAD(&psfp->sfi_list); + INIT_LIST_HEAD(&psfp->sgi_list); + mutex_init(&psfp->lock); +} + +/* When using cut-through forwarding and the egress port runs at a higher data + * rate than the ingress port, the packet currently under transmission would + * suffer an underrun since it would be transmitted faster than it is received. + * The Felix switch implementation of cut-through forwarding does not check in + * hardware whether this condition is satisfied or not, so we must restrict the + * list of ports that have cut-through forwarding enabled on egress to only be + * the ports operating at the lowest link speed within their respective + * forwarding domain. + */ +static void vsc9959_cut_through_fwd(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_switch *ds = felix->ds; + int tc, port, other_port; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int min_speed = ocelot_port->speed; + unsigned long mask = 0; + u32 tmp, val = 0; + + /* Disable cut-through on ports that are down */ + if (ocelot_port->speed <= 0) + goto set; + + if (dsa_is_cpu_port(ds, port)) { + /* Ocelot switches forward from the NPI port towards + * any port, regardless of it being in the NPI port's + * forwarding domain or not. + */ + mask = dsa_user_ports(ds); + } else { + mask = ocelot_get_bridge_fwd_mask(ocelot, port); + mask &= ~BIT(port); + if (ocelot->npi >= 0) + mask |= BIT(ocelot->npi); + else + mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); + } + + /* Calculate the minimum link speed, among the ports that are + * up, of this source port's forwarding domain. 
+ */ + for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) { + struct ocelot_port *other_ocelot_port; + + other_ocelot_port = ocelot->ports[other_port]; + if (other_ocelot_port->speed <= 0) + continue; + + if (min_speed > other_ocelot_port->speed) + min_speed = other_ocelot_port->speed; + } + + /* Enable cut-through forwarding for all traffic classes that + * don't have oversized dropping enabled, since this check is + * bypassed in cut-through mode. + */ + if (ocelot_port->speed == min_speed) { + val = GENMASK(7, 0); + + for (tc = 0; tc < OCELOT_NUM_TC; tc++) + if (vsc9959_port_qmaxsdu_get(ocelot, port, tc)) + val &= ~BIT(tc); + } + +set: + tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port); + if (tmp == val) + continue; + + dev_dbg(ocelot->dev, + "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n", + port, mask, ocelot_port->speed, min_speed, + val ? "enabling" : "disabling", val); + + ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port); + } +} + +static const struct ocelot_ops vsc9959_ops = { + .reset = vsc9959_reset, + .wm_enc = vsc9959_wm_enc, + .wm_dec = vsc9959_wm_dec, + .wm_stat = vsc9959_wm_stat, + .port_to_netdev = felix_port_to_netdev, + .netdev_to_port = felix_netdev_to_port, + .psfp_init = vsc9959_psfp_init, + .psfp_filter_add = vsc9959_psfp_filter_add, + .psfp_filter_del = vsc9959_psfp_filter_del, + .psfp_stats_get = vsc9959_psfp_stats_get, + .cut_through_fwd = vsc9959_cut_through_fwd, + .tas_clock_adjust = vsc9959_tas_clock_adjust, + .update_stats = vsc9959_update_stats, +}; + static const struct felix_info felix_info_vsc9959 = { - .target_io_res = vsc9959_target_io_res, - .port_io_res = vsc9959_port_io_res, - .imdio_res = &vsc9959_imdio_res, + .resources = vsc9959_resources, + .num_resources = ARRAY_SIZE(vsc9959_resources), + .resource_names = vsc9959_resource_names, .regfields = vsc9959_regfields, .map = vsc9959_regmap, .ops = &vsc9959_ops, .stats_layout = vsc9959_stats_layout, - .num_stats = ARRAY_SIZE(vsc9959_stats_layout), .vcap = vsc9959_vcap_props, + .vcap_pol_base = VSC9959_VCAP_POLICER_BASE, + .vcap_pol_max = VSC9959_VCAP_POLICER_MAX, + .vcap_pol_base2 = 0, + .vcap_pol_max2 = 0, .num_mact_rows = 2048, - .num_ports = 6, + .num_ports = VSC9959_NUM_PORTS, .num_tx_queues = OCELOT_NUM_TC, - .switch_pci_bar = 4, - .imdio_pci_bar = 0, .quirk_no_xtr_irq = true, .ptp_caps = &vsc9959_ptp_caps, .mdio_bus_alloc = vsc9959_mdio_bus_alloc, .mdio_bus_free = vsc9959_mdio_bus_free, .phylink_validate = vsc9959_phylink_validate, - .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, + .port_modes = vsc9959_port_modes, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, + .tas_guard_bands_update = vsc9959_tas_guard_bands_update, }; static irqreturn_t felix_irq_handler(int irq, void *data) @@ -1417,10 +2643,7 @@ static int felix_pci_probe(struct pci_dev *pdev, ocelot->dev = &pdev->dev; ocelot->num_flooding_pgids = OCELOT_NUM_TC; felix->info = &felix_info_vsc9959; - felix->switch_base = pci_resource_start(pdev, - felix->info->switch_pci_bar); - felix->imdio_base = pci_resource_start(pdev, - felix->info->imdio_pci_bar); + felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR); pci_set_master(pdev); @@ -1451,7 +2674,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err = dsa_register_switch(ds); if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); goto err_register_ds; } @@ -1481,8 +2704,6 
@@ static void felix_pci_remove(struct pci_dev *pdev) kfree(felix); pci_disable_device(pdev); - - pci_set_drvdata(pdev, NULL); } static void felix_pci_shutdown(struct pci_dev *pdev) diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 92eae63150ea..7af33b2c685d 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -6,18 +6,37 @@ #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_sys.h> #include <soc/mscc/ocelot.h> +#include <linux/mdio/mdio-mscc-miim.h> +#include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/pcs-lynx.h> #include <linux/dsa/ocelot.h> #include <linux/iopoll.h> #include "felix.h" -#define MSCC_MIIM_CMD_OPR_WRITE BIT(1) -#define MSCC_MIIM_CMD_OPR_READ BIT(2) -#define MSCC_MIIM_CMD_WRDATA_SHIFT 4 -#define MSCC_MIIM_CMD_REGAD_SHIFT 20 -#define MSCC_MIIM_CMD_PHYAD_SHIFT 25 -#define MSCC_MIIM_CMD_VLD BIT(31) +#define VSC9953_NUM_PORTS 10 + +#define VSC9953_VCAP_POLICER_BASE 11 +#define VSC9953_VCAP_POLICER_MAX 31 +#define VSC9953_VCAP_POLICER_BASE2 120 +#define VSC9953_VCAP_POLICER_MAX2 161 + +#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \ + OCELOT_PORT_MODE_SGMII | \ + OCELOT_PORT_MODE_QSGMII) + +static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = { + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + OCELOT_PORT_MODE_INTERNAL, + OCELOT_PORT_MODE_INTERNAL, +}; static const u32 vsc9953_ana_regmap[] = { REG(ANA_ADVLEARN, 0x00b500), @@ -251,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = { static const u32 vsc9953_sys_regmap[] = { REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), REG(SYS_COUNT_RX_SHORTS, 0x000010), REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), REG(SYS_COUNT_RX_64, 0x000024), REG(SYS_COUNT_RX_65_127, 0x000028), REG(SYS_COUNT_RX_128_255, 0x00002c), - REG(SYS_COUNT_RX_256_1023, 0x000030), - REG(SYS_COUNT_RX_1024_1526, 0x000034), - REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_256_511, 0x000030), + REG(SYS_COUNT_RX_512_1023, 0x000034), + REG(SYS_COUNT_RX_1024_1526, 0x000038), + REG(SYS_COUNT_RX_1527_MAX, 0x00003c), + REG(SYS_COUNT_RX_PAUSE, 0x000040), + REG(SYS_COUNT_RX_CONTROL, 0x000044), REG(SYS_COUNT_RX_LONGS, 0x000048), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c), + REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050), + REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054), + REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058), + REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c), + REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060), + REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064), + REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068), + REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c), + REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070), + REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074), + REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078), + REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c), + REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080), + REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084), + REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088), + REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c), + REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090), + REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094), + REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098), + REG(SYS_COUNT_RX_GREEN_PRIO_3, 
0x00009c), + REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0), + REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4), + REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8), + REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac), REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), REG(SYS_COUNT_TX_COLLISION, 0x000110), REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), REG(SYS_COUNT_TX_64, 0x00011c), REG(SYS_COUNT_TX_65_127, 0x000120), - REG(SYS_COUNT_TX_128_511, 0x000124), - REG(SYS_COUNT_TX_512_1023, 0x000128), - REG(SYS_COUNT_TX_1024_1526, 0x00012c), - REG(SYS_COUNT_TX_1527_MAX, 0x000130), - REG(SYS_COUNT_TX_AGING, 0x000178), + REG(SYS_COUNT_TX_128_255, 0x000124), + REG(SYS_COUNT_TX_256_511, 0x000128), + REG(SYS_COUNT_TX_512_1023, 0x00012c), + REG(SYS_COUNT_TX_1024_1526, 0x000130), + REG(SYS_COUNT_TX_1527_MAX, 0x000134), + REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138), + REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c), + REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140), + REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144), + REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148), + REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c), + REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150), + REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154), + REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158), + REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c), + REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160), + REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164), + REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168), + REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c), + REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170), + REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174), + REG(SYS_COUNT_TX_AGED, 0x000178), + REG(SYS_COUNT_DROP_LOCAL, 0x000200), + REG(SYS_COUNT_DROP_TAIL, 0x000204), + REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208), + REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210), + REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214), + REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218), + REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220), + REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224), + REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228), + REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c), + REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230), + REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234), + REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238), + REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c), + REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240), + REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244), REG(SYS_RESET_CFG, 0x000318), REG_RESERVED(SYS_SR_ETYPE_CFG), REG(SYS_VLAN_ETYPE_CFG, 0x000320), @@ -293,7 +383,6 @@ static const u32 vsc9953_sys_regmap[] = { REG_RESERVED(SYS_MMGT_FAST), REG_RESERVED(SYS_EVENTS_DIF), REG_RESERVED(SYS_EVENTS_CORE), - REG_RESERVED(SYS_CNT), REG_RESERVED(SYS_PTP_STATUS), REG_RESERVED(SYS_PTP_TXSTAMP), REG_RESERVED(SYS_PTP_NXT), @@ -369,110 +458,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = { }; /* Addresses are relative to the device's base address */ -static const struct resource vsc9953_target_io_res[TARGET_MAX] = { - [ANA] = { - .start = 0x0280000, - .end = 0x028ffff, - .name = "ana", - }, - [QS] = { - .start = 0x0080000, - .end = 0x00800ff, - .name = "qs", - }, - [QSYS] = { - .start = 0x0200000, - .end = 0x021ffff, - .name = "qsys", - }, - [REW] = { - .start = 0x0030000, - .end = 0x003ffff, - .name = "rew", - }, - [SYS] = { - .start = 0x0010000, - .end = 0x001ffff, - .name = "sys", - }, - [S0] = { - .start = 0x0040000, - .end = 0x00403ff, - .name = "s0", - }, - [S1] = { - .start = 0x0050000, - .end = 
0x00503ff, - .name = "s1", - }, - [S2] = { - .start = 0x0060000, - .end = 0x00603ff, - .name = "s2", - }, - [PTP] = { - .start = 0x0090000, - .end = 0x00900cb, - .name = "ptp", - }, - [GCB] = { - .start = 0x0070000, - .end = 0x00701ff, - .name = "devcpu_gcb", - }, +static const struct resource vsc9953_resources[] = { + DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"), + DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"), + DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"), + DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"), + DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"), + DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"), + DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"), + DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"), + DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"), + DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"), + DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"), + DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"), + DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"), + DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"), + DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"), + DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"), + DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"), + DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"), + DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"), + DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"), }; -static const struct resource vsc9953_port_io_res[] = { - { - .start = 0x0100000, - .end = 0x010ffff, - .name = "port0", - }, - { - .start = 0x0110000, - .end = 0x011ffff, - .name = "port1", - }, - { - .start = 0x0120000, - .end = 0x012ffff, - .name = "port2", - }, - { - .start = 0x0130000, - .end = 0x013ffff, - .name = "port3", - }, - { - .start = 0x0140000, - .end = 0x014ffff, - .name = "port4", - }, - { - .start = 0x0150000, - .end = 0x015ffff, - .name = "port5", - }, - { - .start = 0x0160000, - .end = 0x016ffff, - .name = "port6", - }, - { - .start = 0x0170000, - .end = 0x017ffff, - .name = "port7", - }, - { - .start = 0x0180000, - .end = 0x018ffff, - .name = "port8", - }, - { - .start = 0x0190000, - .end = 0x019ffff, - .name = "port9", - }, +static const char * const vsc9953_resource_names[TARGET_MAX] = { + [SYS] = "sys", + [REW] = "rew", + [S0] = "s0", + [S1] = "s1", + [S2] = "s2", + [GCB] = "devcpu_gcb", + [QS] = "qs", + [PTP] = "ptp", + [QSYS] = "qsys", + [ANA] = "ana", }; static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = { @@ -524,100 +543,8 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = { [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4), }; -static const struct ocelot_stat_layout vsc9953_stats_layout[] = { - { .offset = 0x00, .name = "rx_octets", }, - { .offset = 0x01, .name = "rx_unicast", }, - { .offset = 0x02, .name = "rx_multicast", }, - { .offset = 0x03, .name = "rx_broadcast", }, - { .offset = 0x04, .name = "rx_shorts", }, - { .offset = 0x05, .name = "rx_fragments", }, - { .offset = 0x06, .name = "rx_jabbers", }, - { .offset = 0x07, .name = "rx_crc_align_errs", }, - { .offset = 0x08, .name = "rx_sym_errs", }, - { .offset = 0x09, .name = "rx_frames_below_65_octets", }, - { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", }, - { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", }, - { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", }, - { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", }, - { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", }, - { .offset = 0x0F, .name = "rx_frames_over_1526_octets", }, - { 
.offset = 0x10, .name = "rx_pause", }, - { .offset = 0x11, .name = "rx_control", }, - { .offset = 0x12, .name = "rx_longs", }, - { .offset = 0x13, .name = "rx_classified_drops", }, - { .offset = 0x14, .name = "rx_red_prio_0", }, - { .offset = 0x15, .name = "rx_red_prio_1", }, - { .offset = 0x16, .name = "rx_red_prio_2", }, - { .offset = 0x17, .name = "rx_red_prio_3", }, - { .offset = 0x18, .name = "rx_red_prio_4", }, - { .offset = 0x19, .name = "rx_red_prio_5", }, - { .offset = 0x1A, .name = "rx_red_prio_6", }, - { .offset = 0x1B, .name = "rx_red_prio_7", }, - { .offset = 0x1C, .name = "rx_yellow_prio_0", }, - { .offset = 0x1D, .name = "rx_yellow_prio_1", }, - { .offset = 0x1E, .name = "rx_yellow_prio_2", }, - { .offset = 0x1F, .name = "rx_yellow_prio_3", }, - { .offset = 0x20, .name = "rx_yellow_prio_4", }, - { .offset = 0x21, .name = "rx_yellow_prio_5", }, - { .offset = 0x22, .name = "rx_yellow_prio_6", }, - { .offset = 0x23, .name = "rx_yellow_prio_7", }, - { .offset = 0x24, .name = "rx_green_prio_0", }, - { .offset = 0x25, .name = "rx_green_prio_1", }, - { .offset = 0x26, .name = "rx_green_prio_2", }, - { .offset = 0x27, .name = "rx_green_prio_3", }, - { .offset = 0x28, .name = "rx_green_prio_4", }, - { .offset = 0x29, .name = "rx_green_prio_5", }, - { .offset = 0x2A, .name = "rx_green_prio_6", }, - { .offset = 0x2B, .name = "rx_green_prio_7", }, - { .offset = 0x40, .name = "tx_octets", }, - { .offset = 0x41, .name = "tx_unicast", }, - { .offset = 0x42, .name = "tx_multicast", }, - { .offset = 0x43, .name = "tx_broadcast", }, - { .offset = 0x44, .name = "tx_collision", }, - { .offset = 0x45, .name = "tx_drops", }, - { .offset = 0x46, .name = "tx_pause", }, - { .offset = 0x47, .name = "tx_frames_below_65_octets", }, - { .offset = 0x48, .name = "tx_frames_65_to_127_octets", }, - { .offset = 0x49, .name = "tx_frames_128_255_octets", }, - { .offset = 0x4A, .name = "tx_frames_256_511_octets", }, - { .offset = 0x4B, .name = "tx_frames_512_1023_octets", }, - { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", }, - { .offset = 0x4D, .name = "tx_frames_over_1526_octets", }, - { .offset = 0x4E, .name = "tx_yellow_prio_0", }, - { .offset = 0x4F, .name = "tx_yellow_prio_1", }, - { .offset = 0x50, .name = "tx_yellow_prio_2", }, - { .offset = 0x51, .name = "tx_yellow_prio_3", }, - { .offset = 0x52, .name = "tx_yellow_prio_4", }, - { .offset = 0x53, .name = "tx_yellow_prio_5", }, - { .offset = 0x54, .name = "tx_yellow_prio_6", }, - { .offset = 0x55, .name = "tx_yellow_prio_7", }, - { .offset = 0x56, .name = "tx_green_prio_0", }, - { .offset = 0x57, .name = "tx_green_prio_1", }, - { .offset = 0x58, .name = "tx_green_prio_2", }, - { .offset = 0x59, .name = "tx_green_prio_3", }, - { .offset = 0x5A, .name = "tx_green_prio_4", }, - { .offset = 0x5B, .name = "tx_green_prio_5", }, - { .offset = 0x5C, .name = "tx_green_prio_6", }, - { .offset = 0x5D, .name = "tx_green_prio_7", }, - { .offset = 0x5E, .name = "tx_aged", }, - { .offset = 0x80, .name = "drop_local", }, - { .offset = 0x81, .name = "drop_tail", }, - { .offset = 0x82, .name = "drop_yellow_prio_0", }, - { .offset = 0x83, .name = "drop_yellow_prio_1", }, - { .offset = 0x84, .name = "drop_yellow_prio_2", }, - { .offset = 0x85, .name = "drop_yellow_prio_3", }, - { .offset = 0x86, .name = "drop_yellow_prio_4", }, - { .offset = 0x87, .name = "drop_yellow_prio_5", }, - { .offset = 0x88, .name = "drop_yellow_prio_6", }, - { .offset = 0x89, .name = "drop_yellow_prio_7", }, - { .offset = 0x8A, .name = "drop_green_prio_0", }, - { .offset = 0x8B, .name = 
"drop_green_prio_1", }, - { .offset = 0x8C, .name = "drop_green_prio_2", }, - { .offset = 0x8D, .name = "drop_green_prio_3", }, - { .offset = 0x8E, .name = "drop_green_prio_4", }, - { .offset = 0x8F, .name = "drop_green_prio_5", }, - { .offset = 0x90, .name = "drop_green_prio_6", }, - { .offset = 0x91, .name = "drop_green_prio_7", }, +static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = { + OCELOT_COMMON_STATS, }; static const struct vcap_field vsc9953_vcap_es0_keys[] = { @@ -857,7 +784,6 @@ static struct vcap_props vsc9953_vcap_props[] = { #define VSC9953_INIT_TIMEOUT 50000 #define VSC9953_GCB_RST_SLEEP 100 #define VSC9953_SYS_RAMINIT_SLEEP 80 -#define VCS9953_MII_TIMEOUT 10000 static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot) { @@ -877,82 +803,6 @@ static int vsc9953_sys_ram_init_status(struct ocelot *ocelot) return val; } -static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot) -{ - int val; - - ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val); - - return val; -} - -static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot) -{ - int val; - - ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val); - - return val; -} - -static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum, - u16 value) -{ - struct ocelot *ocelot = bus->priv; - int err, cmd, val; - - /* Wait while MIIM controller becomes idle */ - err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot, - val, !val, 10, VCS9953_MII_TIMEOUT); - if (err) { - dev_err(ocelot->dev, "MDIO write: pending timeout\n"); - goto out; - } - - cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | - (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | - (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | - MSCC_MIIM_CMD_OPR_WRITE; - - ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD); - -out: - return err; -} - -static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum) -{ - struct ocelot *ocelot = bus->priv; - int err, cmd, val; - - /* Wait until MIIM controller becomes idle */ - err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot, - val, !val, 10, VCS9953_MII_TIMEOUT); - if (err) { - dev_err(ocelot->dev, "MDIO read: pending timeout\n"); - goto out; - } - - /* Write the MIIM COMMAND register */ - cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | - (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ; - - ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD); - - /* Wait while read operation via the MIIM controller is in progress */ - err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot, - val, !val, 10, VCS9953_MII_TIMEOUT); - if (err) { - dev_err(ocelot->dev, "MDIO read: busy timeout\n"); - goto out; - } - - val = ocelot_read(ocelot, GCB_MIIM_MII_DATA); - - err = val & 0xFFFF; -out: - return err; -} /* CORE_ENA is in SYS:SYSTEM:RESET_CFG * MEM_INIT is in SYS:SYSTEM:RESET_CFG @@ -994,15 +844,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, unsigned long *supported, struct phylink_link_state *state) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { - linkmode_zero(supported); - return; - } - phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Pause); @@ -1012,6 +855,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, phylink_set(mask, 100baseT_Full); phylink_set(mask, 100baseT_Half); 
phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); if (state->interface == PHY_INTERFACE_MODE_INTERNAL) { phylink_set(mask, 2500baseT_Full); @@ -1022,25 +866,6 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, linkmode_and(state->advertising, state->advertising, mask); } -static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port, - phy_interface_t phy_mode) -{ - switch (phy_mode) { - case PHY_INTERFACE_MODE_INTERNAL: - if (port != 8 && port != 9) - return -ENOTSUPP; - return 0; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - /* Not supported on internal to-CPU ports */ - if (port == 8 || port == 9) - return -ENOTSUPP; - return 0; - default: - return -ENOTSUPP; - } -} - /* Watermark encode * Bit 9: Unit; 0:1, 1:16 * Bit 8-0: Value to be multiplied with unit @@ -1089,26 +914,24 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) int rc; felix->pcs = devm_kcalloc(dev, felix->info->num_ports, - sizeof(struct phy_device *), + sizeof(struct phylink_pcs *), GFP_KERNEL); if (!felix->pcs) { dev_err(dev, "failed to allocate array for PCS PHYs\n"); return -ENOMEM; } - bus = devm_mdiobus_alloc(dev); - if (!bus) - return -ENOMEM; + rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus", + ocelot->targets[GCB], + ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]); - bus->name = "VSC9953 internal MDIO bus"; - bus->read = vsc9953_mdio_read; - bus->write = vsc9953_mdio_write; - bus->parent = dev; - bus->priv = ocelot; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); + if (rc) { + dev_err(dev, "failed to setup MDIO bus\n"); + return rc; + } /* Needed in order to initialize the bus mutex lock */ - rc = mdiobus_register(bus); + rc = devm_of_mdiobus_register(dev, bus, NULL); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); return rc; @@ -1118,9 +941,9 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) for (port = 0; port < felix->info->num_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; int addr = port + 4; - struct mdio_device *pcs; - struct lynx_pcs *lynx; if (dsa_is_unused_port(felix->ds, port)) continue; @@ -1128,17 +951,17 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) continue; - pcs = mdio_device_create(felix->imdio, addr); - if (IS_ERR(pcs)) + mdio_device = mdio_device_create(felix->imdio, addr); + if (IS_ERR(mdio_device)) continue; - lynx = lynx_pcs_create(pcs); - if (!lynx) { - mdio_device_free(pcs); + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); continue; } - felix->pcs[port] = lynx; + felix->pcs[port] = phylink_pcs; dev_info(dev, "Found PCS at internal MDIO address %d\n", addr); } @@ -1152,33 +975,40 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot) int port; for (port = 0; port < ocelot->num_phys_ports; port++) { - struct lynx_pcs *pcs = felix->pcs[port]; + struct phylink_pcs *phylink_pcs = felix->pcs[port]; + struct mdio_device *mdio_device; - if (!pcs) + if (!phylink_pcs) continue; - mdio_device_free(pcs->mdio); - lynx_pcs_destroy(pcs); + mdio_device = lynx_get_mdio_device(phylink_pcs); + mdio_device_free(mdio_device); + lynx_pcs_destroy(phylink_pcs); } - mdiobus_unregister(felix->imdio); + + /* mdiobus_unregister and mdiobus_free handled by devres */ } static const struct felix_info seville_info_vsc9953 = { - .target_io_res = 
vsc9953_target_io_res, - .port_io_res = vsc9953_port_io_res, + .resources = vsc9953_resources, + .num_resources = ARRAY_SIZE(vsc9953_resources), + .resource_names = vsc9953_resource_names, .regfields = vsc9953_regfields, .map = vsc9953_regmap, .ops = &vsc9953_ops, .stats_layout = vsc9953_stats_layout, - .num_stats = ARRAY_SIZE(vsc9953_stats_layout), .vcap = vsc9953_vcap_props, + .vcap_pol_base = VSC9953_VCAP_POLICER_BASE, + .vcap_pol_max = VSC9953_VCAP_POLICER_MAX, + .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2, + .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2, .num_mact_rows = 2048, - .num_ports = 10, + .num_ports = VSC9953_NUM_PORTS, .num_tx_queues = OCELOT_NUM_TC, .mdio_bus_alloc = vsc9953_mdio_bus_alloc, .mdio_bus_free = vsc9953_mdio_bus_free, .phylink_validate = vsc9953_phylink_validate, - .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode, + .port_modes = vsc9953_port_modes, }; static int seville_probe(struct platform_device *pdev) @@ -1253,8 +1083,6 @@ static int seville_remove(struct platform_device *pdev) kfree(felix->ds); kfree(felix); - platform_set_drvdata(pdev, NULL); - return 0; } |