Diffstat (limited to 'drivers/net/ethernet/mscc/ocelot.c')
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c  1390
1 file changed, 721 insertions(+), 669 deletions(-)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index b1311b656e17..13b14110a060 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -6,13 +6,13 @@ */ #include <linux/dsa/ocelot.h> #include <linux/if_bridge.h> -#include <linux/ptp_classify.h> #include <soc/mscc/ocelot_vcap.h> #include "ocelot.h" #include "ocelot_vcap.h" #define TABLE_UPDATE_SLEEP_US 10 #define TABLE_UPDATE_TIMEOUT_US 100000 +#define OCELOT_RSV_VLAN_RANGE_START 4000 struct ocelot_mact_entry { u8 mac[ETH_ALEN]; @@ -221,6 +221,35 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port) REW_PORT_CFG, port); } +static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = NULL; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->bridge || + !br_vlan_enabled(ocelot_port->bridge)) + continue; + + if (!bridge) { + bridge = ocelot_port->bridge; + continue; + } + + if (bridge == ocelot_port->bridge) + continue; + + NL_SET_ERR_MSG_MOD(extack, + "Only one VLAN-aware bridge is supported"); + return -EBUSY; + } + + return 0; +} + static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot) { return ocelot_read(ocelot, ANA_TABLES_VLANACCESS); @@ -260,6 +289,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port) if (!(vlan->portmask & BIT(port))) continue; + /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(), + * because this is never active in hardware at the same time as + * the bridge VLANs, which only matter in VLAN-aware mode. + */ + if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START) + continue; + if (vlan->untagged & BIT(port)) num_untagged++; } @@ -347,12 +383,45 @@ static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port) } } +int ocelot_bridge_num_find(struct ocelot *ocelot, + const struct net_device *bridge) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (ocelot_port && ocelot_port->bridge == bridge) + return ocelot_port->bridge_num; + } + + return -1; +} +EXPORT_SYMBOL_GPL(ocelot_bridge_num_find); + +static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot, + const struct net_device *bridge) +{ + int bridge_num; + + /* Standalone ports use VID 0 */ + if (!bridge) + return 0; + + bridge_num = ocelot_bridge_num_find(ocelot, bridge); + if (WARN_ON(bridge_num < 0)) + return 0; + + /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */ + return VLAN_N_VID - bridge_num - 1; +} + /* Default vlan to clasify for untagged frames (may be zero) */ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, const struct ocelot_bridge_vlan *pvid_vlan) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - u16 pvid = OCELOT_VLAN_UNAWARE_PVID; + u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); u32 val = 0; ocelot_port->pvid_vlan = pvid_vlan; @@ -466,12 +535,29 @@ static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) return 0; } +static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port, + const struct net_device *bridge) +{ + u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_vlan_member_add(ocelot, port, vid, true); +} + +static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port, + const struct net_device *bridge) +{ + 
u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_vlan_member_del(ocelot, port, vid); +} + int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool vlan_aware, struct netlink_ext_ack *extack) { struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_vcap_filter *filter; + int err = 0; u32 val; list_for_each_entry(filter, &block->rules, list) { @@ -483,6 +569,19 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, } } + err = ocelot_single_vlan_aware_bridge(ocelot, extack); + if (err) + return err; + + if (vlan_aware) + err = ocelot_del_vlan_unaware_pvid(ocelot, port, + ocelot_port->bridge); + else if (ocelot_port->bridge) + err = ocelot_add_vlan_unaware_pvid(ocelot, port, + ocelot_port->bridge); + if (err) + return err; + ocelot_port->vlan_aware = vlan_aware; if (vlan_aware) @@ -521,6 +620,12 @@ int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, } } + if (vid > OCELOT_RSV_VLAN_RANGE_START) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN range 4000-4095 reserved for VLAN-unaware bridging"); + return -EBUSY; + } + return 0; } EXPORT_SYMBOL(ocelot_vlan_prepare); @@ -530,6 +635,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, { int err; + /* Ignore VID 0 added to our RX filter by the 8021q module, since + * that collides with OCELOT_STANDALONE_PVID and changes it from + * egress-untagged to egress-tagged. + */ + if (!vid) + return 0; + err = ocelot_vlan_member_add(ocelot, port, vid, untagged); if (err) return err; @@ -549,14 +661,21 @@ EXPORT_SYMBOL(ocelot_vlan_add); int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool del_pvid = false; int err; + if (!vid) + return 0; + + if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + del_pvid = true; + err = ocelot_vlan_member_del(ocelot, port, vid); if (err) return err; /* Ingress */ - if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + if (del_pvid) ocelot_port_set_pvid(ocelot, port, NULL); /* Egress */ @@ -580,11 +699,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot) for (vid = 1; vid < VLAN_N_VID; vid++) ocelot_vlant_set_mask(ocelot, vid, 0); - /* Because VLAN filtering is enabled, we need VID 0 to get untagged - * traffic. It is added automatically if 8021q module is loaded, but - * we can't rely on it since module may be not loaded. + /* We need VID 0 to get traffic on standalone ports. + * It is added automatically if the 8021q module is loaded, but we + * can't rely on that since it might not be. */ - ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports); + ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports); /* Set vlan ingress filter mask to all ports but the CPU port by * default. 
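[Editor's illustration, not part of the patch] The hunks above introduce ocelot_vlan_unaware_pvid(), which gives each VLAN-unaware bridge its own PVID carved out of the top of the 12-bit VLAN space (4095 downwards, fenced off by OCELOT_RSV_VLAN_RANGE_START), while standalone ports keep VID 0. A minimal user-space sketch of that mapping, assuming the same VLAN_N_VID of 4096 as the kernel; the helper name below is illustrative, not kernel API:

/*
 * Sketch: map a bridge number to its reserved VLAN-unaware PVID.
 * Standalone ports (no bridge upper) keep VID 0; VLAN-unaware bridge N
 * gets 4095 - N, i.e. a VID inside the 4000..4095 range that the patch
 * reserves away from the bridge VLAN database.
 */
#include <stdio.h>

#define VLAN_N_VID                   4096
#define OCELOT_STANDALONE_PVID       0
#define OCELOT_RSV_VLAN_RANGE_START  4000

/* bridge_num < 0 means the port is standalone */
static unsigned short vlan_unaware_pvid(int bridge_num)
{
	if (bridge_num < 0)
		return OCELOT_STANDALONE_PVID;

	return VLAN_N_VID - bridge_num - 1;
}

int main(void)
{
	for (int bridge_num = -1; bridge_num < 3; bridge_num++)
		printf("bridge_num %2d -> pvid %u\n",
		       bridge_num, vlan_unaware_pvid(bridge_num));
	return 0;
}

For bridge_num 0 this prints 4095, for bridge_num 1 it prints 4094, and so on, matching the "reserved VID going from 4095 downwards" comment in the patch.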
@@ -771,7 +890,10 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); + /* Don't attempt to send PAUSE frames on the NPI port, it's broken */ + if (port != ocelot->npi) + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, + tx_pause); /* Undo the effects of ocelot_phylink_mac_link_down: * enable MAC module @@ -794,211 +916,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, } EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); -static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, - struct sk_buff *clone) -{ - struct ocelot_port *ocelot_port = ocelot->ports[port]; - unsigned long flags; - - spin_lock_irqsave(&ocelot->ts_id_lock, flags); - - if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || - ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { - spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); - return -EBUSY; - } - - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ - OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; - - ocelot_port->ts_id++; - if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) - ocelot_port->ts_id = 0; - - ocelot_port->ptp_skbs_in_flight++; - ocelot->ptp_skbs_in_flight++; - - skb_queue_tail(&ocelot_port->tx_skbs, clone); - - spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); - - return 0; -} - -static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb, - unsigned int ptp_class) -{ - struct ptp_header *hdr; - u8 msgtype, twostep; - - hdr = ptp_parse_header(skb, ptp_class); - if (!hdr) - return false; - - msgtype = ptp_get_msgtype(hdr, ptp_class); - twostep = hdr->flag_field[0] & 0x2; - - if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) - return true; - - return false; -} - -int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, - struct sk_buff *skb, - struct sk_buff **clone) -{ - struct ocelot_port *ocelot_port = ocelot->ports[port]; - u8 ptp_cmd = ocelot_port->ptp_cmd; - unsigned int ptp_class; - int err; - - /* Don't do anything if PTP timestamping not enabled */ - if (!ptp_cmd) - return 0; - - ptp_class = ptp_classify_raw(skb); - if (ptp_class == PTP_CLASS_NONE) - return -EINVAL; - - /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ - if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { - if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) { - OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; - return 0; - } - - /* Fall back to two-step timestamping */ - ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; - } - - if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { - *clone = skb_clone_sk(skb); - if (!(*clone)) - return -ENOMEM; - - err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); - if (err) - return err; - - OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; - OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; - } - - return 0; -} -EXPORT_SYMBOL(ocelot_port_txtstamp_request); - -static void ocelot_get_hwtimestamp(struct ocelot *ocelot, - struct timespec64 *ts) -{ - unsigned long flags; - u32 val; - - spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); - - /* Read current PTP time to get seconds */ - val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); - - val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); - val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE); - ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); - ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); - - /* Read packet HW timestamp from FIFO */ - val = 
ocelot_read(ocelot, SYS_PTP_TXSTAMP); - ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val); - - /* Sec has incremented since the ts was registered */ - if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC)) - ts->tv_sec--; - - spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); -} - -static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) -{ - struct ptp_header *hdr; - - hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); - if (WARN_ON(!hdr)) - return false; - - return seqid == ntohs(hdr->sequence_id); -} - -void ocelot_get_txtstamp(struct ocelot *ocelot) -{ - int budget = OCELOT_PTP_QUEUE_SZ; - - while (budget--) { - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; - struct skb_shared_hwtstamps shhwtstamps; - u32 val, id, seqid, txport; - struct ocelot_port *port; - struct timespec64 ts; - unsigned long flags; - - val = ocelot_read(ocelot, SYS_PTP_STATUS); - - /* Check if a timestamp can be retrieved */ - if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) - break; - - WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); - - /* Retrieve the ts ID and Tx port */ - id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); - txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); - seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); - - port = ocelot->ports[txport]; - - spin_lock(&ocelot->ts_id_lock); - port->ptp_skbs_in_flight--; - ocelot->ptp_skbs_in_flight--; - spin_unlock(&ocelot->ts_id_lock); - - /* Retrieve its associated skb */ -try_again: - spin_lock_irqsave(&port->tx_skbs.lock, flags); - - skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { - if (OCELOT_SKB_CB(skb)->ts_id != id) - continue; - __skb_unlink(skb, &port->tx_skbs); - skb_match = skb; - break; - } - - spin_unlock_irqrestore(&port->tx_skbs.lock, flags); - - if (WARN_ON(!skb_match)) - continue; - - if (!ocelot_validate_ptp_skb(skb_match, seqid)) { - dev_err_ratelimited(ocelot->dev, - "port %d received stale TX timestamp for seqid %d, discarding\n", - txport, seqid); - dev_kfree_skb_any(skb); - goto try_again; - } - - /* Get the h/w timestamp */ - ocelot_get_hwtimestamp(ocelot, &ts); - - /* Set the timestamp into the skb */ - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); - skb_complete_tx_timestamp(skb_match, &shhwtstamps); - - /* Next ts */ - ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); - } -} -EXPORT_SYMBOL(ocelot_get_txtstamp); - static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, u32 *rval) { @@ -1230,69 +1147,26 @@ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) } EXPORT_SYMBOL(ocelot_drain_cpu_queue); -int ocelot_fdb_add(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid) +int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge) { - int pgid = port; - - if (port == ocelot->npi) - pgid = PGID_CPU; + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); - return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); + return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED); } EXPORT_SYMBOL(ocelot_fdb_add); -int ocelot_fdb_del(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid) +int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge) { + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + return ocelot_mact_forget(ocelot, addr, vid); } EXPORT_SYMBOL(ocelot_fdb_del); -int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, - 
bool is_static, void *data) -{ - struct ocelot_dump_ctx *dump = data; - u32 portid = NETLINK_CB(dump->cb->skb).portid; - u32 seq = dump->cb->nlh->nlmsg_seq; - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - if (dump->idx < dump->cb->args[2]) - goto skip; - - nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, - sizeof(*ndm), NLM_F_MULTI); - if (!nlh) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = NTF_SELF; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dump->dev->ifindex; - ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; - - if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) - goto nla_put_failure; - - if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) - goto nla_put_failure; - - nlmsg_end(dump->skb, nlh); - -skip: - dump->idx++; - return 0; - -nla_put_failure: - nlmsg_cancel(dump->skb, nlh); - return -EMSGSIZE; -} -EXPORT_SYMBOL(ocelot_port_fdb_do_dump); - /* Caller must hold &ocelot->mact_lock */ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, struct ocelot_mact_entry *entry) @@ -1406,6 +1280,12 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, is_static = (entry.type == ENTRYTYPE_LOCKED); + /* Hide the reserved VLANs used for + * VLAN-unaware bridging. + */ + if (entry.vid > OCELOT_RSV_VLAN_RANGE_START) + entry.vid = 0; + err = cb(entry.mac, entry.vid, is_static, data); if (err) break; @@ -1418,48 +1298,9 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_dump); -static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) -{ - trap->key_type = OCELOT_VCAP_KEY_ETYPE; - *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); - *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); -} - -static void -ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) -{ - trap->key_type = OCELOT_VCAP_KEY_IPV4; - trap->key.ipv4.dport.value = PTP_EV_PORT; - trap->key.ipv4.dport.mask = 0xffff; -} - -static void -ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) -{ - trap->key_type = OCELOT_VCAP_KEY_IPV6; - trap->key.ipv6.dport.value = PTP_EV_PORT; - trap->key.ipv6.dport.mask = 0xffff; -} - -static void -ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) -{ - trap->key_type = OCELOT_VCAP_KEY_IPV4; - trap->key.ipv4.dport.value = PTP_GEN_PORT; - trap->key.ipv4.dport.mask = 0xffff; -} - -static void -ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) -{ - trap->key_type = OCELOT_VCAP_KEY_IPV6; - trap->key.ipv6.dport.value = PTP_GEN_PORT; - trap->key.ipv6.dport.mask = 0xffff; -} - -static int ocelot_trap_add(struct ocelot *ocelot, int port, - unsigned long cookie, - void (*populate)(struct ocelot_vcap_filter *f)) +int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, bool take_ts, + void (*populate)(struct ocelot_vcap_filter *f)) { struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *trap; @@ -1485,6 +1326,8 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port, trap->action.cpu_copy_ena = true; trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; trap->action.port_mask = 0; + trap->take_ts = take_ts; + trap->is_trap = true; new = true; } @@ -1504,8 +1347,7 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port, return 0; } -static int ocelot_trap_del(struct ocelot *ocelot, int port, - unsigned long cookie) +int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie) { 
struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *trap; @@ -1524,307 +1366,52 @@ static int ocelot_trap_del(struct ocelot *ocelot, int port, return ocelot_vcap_filter_replace(ocelot, trap); } -static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) -{ - unsigned long l2_cookie = ocelot->num_phys_ports + 1; - - return ocelot_trap_add(ocelot, port, l2_cookie, - ocelot_populate_l2_ptp_trap_key); -} - -static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) -{ - unsigned long l2_cookie = ocelot->num_phys_ports + 1; - - return ocelot_trap_del(ocelot, port, l2_cookie); -} - -static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) -{ - unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; - unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; - int err; - - err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, - ocelot_populate_ipv4_ptp_event_trap_key); - if (err) - return err; - - err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, - ocelot_populate_ipv4_ptp_general_trap_key); - if (err) - ocelot_trap_del(ocelot, port, ipv4_ev_cookie); - - return err; -} - -static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) -{ - unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; - unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; - int err; - - err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); - err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); - return err; -} - -static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) -{ - unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; - unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; - int err; - - err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, - ocelot_populate_ipv6_ptp_event_trap_key); - if (err) - return err; - - err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, - ocelot_populate_ipv6_ptp_general_trap_key); - if (err) - ocelot_trap_del(ocelot, port, ipv6_ev_cookie); - - return err; -} - -static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) -{ - unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; - unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; - int err; - - err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); - err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); - return err; -} - -static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, - bool l2, bool l4) -{ - int err; - - if (l2) - err = ocelot_l2_ptp_trap_add(ocelot, port); - else - err = ocelot_l2_ptp_trap_del(ocelot, port); - if (err) - return err; - - if (l4) { - err = ocelot_ipv4_ptp_trap_add(ocelot, port); - if (err) - goto err_ipv4; - - err = ocelot_ipv6_ptp_trap_add(ocelot, port); - if (err) - goto err_ipv6; - } else { - err = ocelot_ipv4_ptp_trap_del(ocelot, port); - - err |= ocelot_ipv6_ptp_trap_del(ocelot, port); - } - if (err) - return err; - - return 0; - -err_ipv6: - ocelot_ipv4_ptp_trap_del(ocelot, port); -err_ipv4: - if (l2) - ocelot_l2_ptp_trap_del(ocelot, port); - return err; -} - -int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) -{ - return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, - sizeof(ocelot->hwtstamp_config)) ? 
-EFAULT : 0; -} -EXPORT_SYMBOL(ocelot_hwstamp_get); - -int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) -{ - struct ocelot_port *ocelot_port = ocelot->ports[port]; - bool l2 = false, l4 = false; - struct hwtstamp_config cfg; - int err; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - /* Tx type sanity check */ - switch (cfg.tx_type) { - case HWTSTAMP_TX_ON: - ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; - break; - case HWTSTAMP_TX_ONESTEP_SYNC: - /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we - * need to update the origin time. - */ - ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP; - break; - case HWTSTAMP_TX_OFF: - ocelot_port->ptp_cmd = 0; - break; - default: - return -ERANGE; - } - - mutex_lock(&ocelot->ptp_lock); - - switch (cfg.rx_filter) { - case HWTSTAMP_FILTER_NONE: - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - l2 = true; - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - l2 = true; - l4 = true; - break; - default: - mutex_unlock(&ocelot->ptp_lock); - return -ERANGE; - } - - err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); - if (err) { - mutex_unlock(&ocelot->ptp_lock); - return err; - } - - if (l2 && l4) - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - else if (l2) - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - else if (l4) - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - else - cfg.rx_filter = HWTSTAMP_FILTER_NONE; - - /* Commit back the result & save it */ - memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg)); - mutex_unlock(&ocelot->ptp_lock); - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; -} -EXPORT_SYMBOL(ocelot_hwstamp_set); - -void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) -{ - int i; - - if (sset != ETH_SS_STATS) - return; - - for (i = 0; i < ocelot->num_stats; i++) - memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name, - ETH_GSTRING_LEN); -} -EXPORT_SYMBOL(ocelot_get_strings); - -static void ocelot_update_stats(struct ocelot *ocelot) +static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) { - int i, j; - - mutex_lock(&ocelot->stats_lock); - - for (i = 0; i < ocelot->num_phys_ports; i++) { - /* Configure the port to read the stats from */ - ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); + u32 mask = 0; + int port; - for (j = 0; j < ocelot->num_stats; j++) { - u32 val; - unsigned int idx = i * ocelot->num_stats + j; + lockdep_assert_held(&ocelot->fwd_domain_lock); - val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS, - ocelot->stats_layout[j].offset); + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; - if (val < (ocelot->stats[idx] & U32_MAX)) - ocelot->stats[idx] += (u64)1 << 32; + if (!ocelot_port) + continue; - ocelot->stats[idx] = (ocelot->stats[idx] & - ~(u64)U32_MAX) + val; - } + if (ocelot_port->bond == bond) + mask |= BIT(port); } - mutex_unlock(&ocelot->stats_lock); -} - -static void ocelot_check_stats_work(struct work_struct *work) -{ - struct delayed_work *del_work = to_delayed_work(work); - struct ocelot *ocelot = container_of(del_work, struct ocelot, - stats_work); - - ocelot_update_stats(ocelot); - - queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, - OCELOT_STATS_CHECK_DELAY); -} - -void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) -{ - int i; - - /* check and update now */ - ocelot_update_stats(ocelot); - - /* Copy all counters */ - for (i = 0; i < ocelot->num_stats; i++) - *data++ = ocelot->stats[port * ocelot->num_stats + i]; + return mask; } -EXPORT_SYMBOL(ocelot_get_ethtool_stats); -int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) +/* The logical port number of a LAG is equal to the lowest numbered physical + * port ID present in that LAG. It may change if that port ever leaves the LAG. + */ +int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond) { - if (sset != ETH_SS_STATS) - return -EOPNOTSUPP; + int bond_mask = ocelot_get_bond_mask(ocelot, bond); - return ocelot->num_stats; -} -EXPORT_SYMBOL(ocelot_get_sset_count); - -int ocelot_get_ts_info(struct ocelot *ocelot, int port, - struct ethtool_ts_info *info) -{ - info->phc_index = ocelot->ptp_clock ? 
- ptp_clock_index(ocelot->ptp_clock) : -1; - if (info->phc_index == -1) { - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - return 0; - } - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | - BIT(HWTSTAMP_TX_ONESTEP_SYNC); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | - BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + if (!bond_mask) + return -ENOENT; - return 0; + return __ffs(bond_mask); } -EXPORT_SYMBOL(ocelot_get_ts_info); +EXPORT_SYMBOL_GPL(ocelot_bond_get_id); -static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) +/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port. + * Note that when CPU ports are in a LAG, the user ports are assigned to the + * 'primary' CPU port, the one whose physical port number gives the logical + * port number of the LAG. + * + * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG + * (to which no user port is assigned), but it appears that forwarding from + * this secondary CPU port looks at the PGID_SRC associated with the logical + * port ID that it's assigned to, which *is* configured properly. + */ +static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot, + struct ocelot_port *cpu) { u32 mask = 0; int port; @@ -1835,13 +1422,34 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) if (!ocelot_port) continue; - if (ocelot_port->bond == bond) + if (ocelot_port->dsa_8021q_cpu == cpu) mask |= BIT(port); } + if (cpu->bond) + mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond); + return mask; } +/* Returns the DSA tag_8021q CPU port that the given port is assigned to, + * or the bit mask of CPU ports if said CPU port is in a LAG. 
+ */ +u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu; + + if (!cpu_port) + return 0; + + if (cpu_port->bond) + return ocelot_get_bond_mask(ocelot, cpu_port->bond); + + return BIT(cpu_port->index); +} +EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask); + u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) { struct ocelot_port *ocelot_port = ocelot->ports[src_port]; @@ -1871,28 +1479,8 @@ u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) } EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask); -u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) +static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) { - u32 mask = 0; - int port; - - for (port = 0; port < ocelot->num_phys_ports; port++) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - - if (!ocelot_port) - continue; - - if (ocelot_port->is_dsa_8021q_cpu) - mask |= BIT(port); - } - - return mask; -} -EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask); - -void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) -{ - unsigned long cpu_fwd_mask; int port; lockdep_assert_held(&ocelot->fwd_domain_lock); @@ -1904,15 +1492,6 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) if (joining && ocelot->ops->cut_through_fwd) ocelot->ops->cut_through_fwd(ocelot); - /* If a DSA tag_8021q CPU exists, it needs to be included in the - * regular forwarding path of the front ports regardless of whether - * those are bridged or standalone. - * If DSA tag_8021q is not used, this returns 0, which is fine because - * the hardware-based CPU port module can be a destination for packets - * even if it isn't part of PGID_SRC. - */ - cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot); - /* Apply FWD mask. The loop is needed to add/remove the current port as * a source for the other ports. */ @@ -1925,17 +1504,19 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) mask = 0; } else if (ocelot_port->is_dsa_8021q_cpu) { /* The DSA tag_8021q CPU ports need to be able to - * forward packets to all other ports except for - * themselves + * forward packets to all ports assigned to them. */ - mask = GENMASK(ocelot->num_phys_ports - 1, 0); - mask &= ~cpu_fwd_mask; + mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot, + ocelot_port); } else if (ocelot_port->bridge) { struct net_device *bond = ocelot_port->bond; mask = ocelot_get_bridge_fwd_mask(ocelot, port); - mask |= cpu_fwd_mask; mask &= ~BIT(port); + + mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); + if (bond) mask &= ~ocelot_get_bond_mask(ocelot, bond); } else { @@ -1943,7 +1524,8 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) * ports (if those exist), or to the hardware CPU port * module otherwise. */ - mask = cpu_fwd_mask; + mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); } ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port); @@ -1959,7 +1541,94 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) if (!joining && ocelot->ops->cut_through_fwd) ocelot->ops->cut_through_fwd(ocelot); } -EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); + +/* Update PGID_CPU which is the destination port mask used for whitelisting + * unicast addresses filtered towards the host. 
In the normal and NPI modes, + * this points to the analyzer entry for the CPU port module, while in DSA + * tag_8021q mode, it is a bit mask of all active CPU ports. + * PGID_SRC will take care of forwarding a packet from one user port to + * no more than a single CPU port. + */ +static void ocelot_update_pgid_cpu(struct ocelot *ocelot) +{ + int pgid_cpu = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu) + continue; + + pgid_cpu |= BIT(port); + } + + if (!pgid_cpu) + pgid_cpu = BIT(ocelot->num_phys_ports); + + ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU); +} + +void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu) +{ + struct ocelot_port *cpu_port = ocelot->ports[cpu]; + u16 vid; + + mutex_lock(&ocelot->fwd_domain_lock); + + cpu_port->is_dsa_8021q_cpu = true; + + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_add(ocelot, cpu, vid, true); + + ocelot_update_pgid_cpu(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu); + +void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu) +{ + struct ocelot_port *cpu_port = ocelot->ports[cpu]; + u16 vid; + + mutex_lock(&ocelot->fwd_domain_lock); + + cpu_port->is_dsa_8021q_cpu = false; + + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_del(ocelot, cpu_port->index, vid); + + ocelot_update_pgid_cpu(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu); + +void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, + int cpu) +{ + struct ocelot_port *cpu_port = ocelot->ports[cpu]; + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot->ports[port]->dsa_8021q_cpu = cpu_port; + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu); + +void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port) +{ + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot->ports[port]->dsa_8021q_cpu = NULL; + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { @@ -2105,15 +1774,16 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr, } int ocelot_port_mdb_add(struct ocelot *ocelot, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge) { unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; struct ocelot_pgid *pgid; u16 vid = mdb->vid; - if (port == ocelot->npi) - port = ocelot->num_phys_ports; + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { @@ -2161,15 +1831,16 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port, EXPORT_SYMBOL(ocelot_port_mdb_add); int ocelot_port_mdb_del(struct ocelot *ocelot, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge) { unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; struct ocelot_pgid *pgid; u16 vid = mdb->vid; - if (port == ocelot->npi) - port = ocelot->num_phys_ports; + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); mc = 
ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) @@ -2204,18 +1875,30 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_port_mdb_del); -void ocelot_port_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge) +int ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge, int bridge_num, + struct netlink_ext_ack *extack) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + err = ocelot_single_vlan_aware_bridge(ocelot, extack); + if (err) + return err; mutex_lock(&ocelot->fwd_domain_lock); ocelot_port->bridge = bridge; + ocelot_port->bridge_num = bridge_num; ocelot_apply_bridge_fwd_mask(ocelot, true); mutex_unlock(&ocelot->fwd_domain_lock); + + if (br_vlan_enabled(bridge)) + return 0; + + return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge); } EXPORT_SYMBOL(ocelot_port_bridge_join); @@ -2226,7 +1909,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, mutex_lock(&ocelot->fwd_domain_lock); + if (!br_vlan_enabled(bridge)) + ocelot_del_vlan_unaware_pvid(ocelot, port, bridge); + ocelot_port->bridge = NULL; + ocelot_port->bridge_num = -1; ocelot_port_set_pvid(ocelot, port, NULL); ocelot_port_manage_port_tag(ocelot, port); @@ -2335,7 +2022,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) bond = ocelot_port->bond; if (bond) { - int lag = __ffs(ocelot_get_bond_mask(ocelot, bond)); + int lag = ocelot_bond_get_id(ocelot, bond); ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_PORTID_VAL(lag), @@ -2350,12 +2037,117 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) } } +static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc, + unsigned long from_mask, unsigned long to_mask) +{ + unsigned char addr[ETH_ALEN]; + struct ocelot_pgid *pgid; + u16 vid = mc->vid; + + dev_dbg(ocelot->dev, + "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n", + mc->addr, mc->vid, from_mask, to_mask); + + /* First clean up the current port mask from hardware, because + * we'll be modifying it. + */ + ocelot_pgid_free(ocelot, mc->pgid); + ocelot_encode_ports_to_mdb(addr, mc); + ocelot_mact_forget(ocelot, addr, vid); + + mc->ports &= ~from_mask; + mc->ports |= to_mask; + + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) { + dev_err(ocelot->dev, + "Cannot allocate PGID for mdb %pM vid %d\n", + mc->addr, mc->vid); + devm_kfree(ocelot->dev, mc); + return PTR_ERR(pgid); + } + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); +} + +int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask, + unsigned long to_mask) +{ + struct ocelot_multicast *mc; + int err; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (!(mc->ports & from_mask)) + continue; + + err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs); + +/* Documentation for PORTID_VAL says: + * Logical port number for front port. If port is not a member of a LLAG, + * then PORTID must be set to the physical port number. + * If port is a member of a LLAG, then PORTID must be set to the common + * PORTID_VAL used for all member ports of the LLAG. + * The value must not exceed the number of physical ports on the device. 
+ * + * This means we have little choice but to migrate FDB entries pointing towards + * a logical port when that changes. + */ +static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot, + struct net_device *bond, + int lag) +{ + struct ocelot_lag_fdb *fdb; + int err; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + list_for_each_entry(fdb, &ocelot->lag_fdbs, list) { + if (fdb->bond != bond) + continue; + + err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid); + if (err) { + dev_err(ocelot->dev, + "failed to delete LAG %s FDB %pM vid %d: %pe\n", + bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); + } + + err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid, + ENTRYTYPE_LOCKED); + if (err) { + dev_err(ocelot->dev, + "failed to migrate LAG %s FDB %pM vid %d: %pe\n", + bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); + } + } +} + int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond, - struct netdev_lag_upper_info *info) + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) { - if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + NL_SET_ERR_MSG_MOD(extack, + "Can only offload LAG using hash TX type"); return -EOPNOTSUPP; + } mutex_lock(&ocelot->fwd_domain_lock); @@ -2374,14 +2166,23 @@ EXPORT_SYMBOL(ocelot_port_lag_join); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond) { + int old_lag_id, new_lag_id; + mutex_lock(&ocelot->fwd_domain_lock); + old_lag_id = ocelot_bond_get_id(ocelot, bond); + ocelot->ports[port]->bond = NULL; ocelot_setup_logical_port_ids(ocelot); ocelot_apply_bridge_fwd_mask(ocelot, false); ocelot_set_aggr_pgids(ocelot); + new_lag_id = ocelot_bond_get_id(ocelot, bond); + + if (new_lag_id >= 0 && old_lag_id != new_lag_id) + ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id); + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_lag_leave); @@ -2390,13 +2191,83 @@ void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot_port->lag_tx_active = lag_tx_active; /* Rebalance the LAGs */ ocelot_set_aggr_pgids(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_lag_change); +int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge) +{ + struct ocelot_lag_fdb *fdb; + int lag, err; + + fdb = kzalloc(sizeof(*fdb), GFP_KERNEL); + if (!fdb) + return -ENOMEM; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + ether_addr_copy(fdb->addr, addr); + fdb->vid = vid; + fdb->bond = bond; + + lag = ocelot_bond_get_id(ocelot, bond); + + err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED); + if (err) { + mutex_unlock(&ocelot->fwd_domain_lock); + kfree(fdb); + return err; + } + + list_add_tail(&fdb->list, &ocelot->lag_fdbs); + mutex_unlock(&ocelot->fwd_domain_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add); + +int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge) +{ + struct ocelot_lag_fdb *fdb, *tmp; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) { + if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid || + 
fdb->bond != bond) + continue; + + ocelot_mact_forget(ocelot, addr, vid); + list_del(&fdb->list); + mutex_unlock(&ocelot->fwd_domain_lock); + kfree(fdb); + + return 0; + } + + mutex_unlock(&ocelot->fwd_domain_lock); + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del); + /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. * In the special case that it's the NPI port that we're configuring, the @@ -2490,6 +2361,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, val = BIT(port); ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6); } static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, @@ -2535,6 +2408,198 @@ void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_port_bridge_flags); +int ocelot_port_get_default_prio(struct ocelot *ocelot, int port) +{ + int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port); + + return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val); +} +EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio); + +int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio) +{ + if (prio >= OCELOT_NUM_TC) + return -ERANGE; + + ocelot_rmw_gix(ocelot, + ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio), + ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M, + ANA_PORT_QOS_CFG, + port); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio); + +int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp) +{ + int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port); + int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp); + + /* Return error if DSCP prioritization isn't enabled */ + if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA)) + return -EOPNOTSUPP; + + if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) { + dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg); + /* Re-read ANA_DSCP_CFG for the translated DSCP */ + dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp); + } + + /* If the DSCP value is not trusted, the QoS classification falls back + * to VLAN PCP or port-based default. + */ + if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)) + return -EOPNOTSUPP; + + return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg); +} +EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio); + +int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio) +{ + int mask, val; + + if (prio >= OCELOT_NUM_TC) + return -ERANGE; + + /* There is at least one app table priority (this one), so we need to + * make sure DSCP prioritization is enabled on the port. + * Also make sure DSCP translation is disabled + * (dcbnl doesn't support it). + */ + mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA | + ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA; + + ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask, + ANA_PORT_QOS_CFG, port); + + /* Trust this DSCP value and map it to the given QoS class */ + val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio); + + ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio); + +int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio) +{ + int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp); + int mask, i; + + /* During a "dcb app replace" command, the new app table entry will be + * added first, then the old one will be deleted. 
But the hardware only + * supports one QoS class per DSCP value (duh), so if we blindly delete + * the app table entry for this DSCP value, we end up deleting the + * entry with the new priority. Avoid that by checking whether user + * space wants to delete the priority which is currently configured, or + * something else which is no longer current. + */ + if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio) + return 0; + + /* Untrust this DSCP value */ + ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp); + + for (i = 0; i < 64; i++) { + int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i); + + /* There are still app table entries on the port, so we need to + * keep DSCP enabled, nothing to do. + */ + if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA) + return 0; + } + + /* Disable DSCP QoS classification if there isn't any trusted + * DSCP value left. + */ + mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA | + ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA; + + ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio); + +struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to, + struct netlink_ext_ack *extack) +{ + struct ocelot_mirror *m = ocelot->mirror; + + if (m) { + if (m->to != to) { + NL_SET_ERR_MSG_MOD(extack, + "Mirroring already configured towards different egress port"); + return ERR_PTR(-EBUSY); + } + + refcount_inc(&m->refcount); + return m; + } + + m = kzalloc(sizeof(*m), GFP_KERNEL); + if (!m) + return ERR_PTR(-ENOMEM); + + m->to = to; + refcount_set(&m->refcount, 1); + ocelot->mirror = m; + + /* Program the mirror port to hardware */ + ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS); + + return m; +} + +void ocelot_mirror_put(struct ocelot *ocelot) +{ + struct ocelot_mirror *m = ocelot->mirror; + + if (!refcount_dec_and_test(&m->refcount)) + return; + + ocelot_write(ocelot, 0, ANA_MIRRORPORTS); + ocelot->mirror = NULL; + kfree(m); +} + +int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to, + bool ingress, struct netlink_ext_ack *extack) +{ + struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack); + + if (IS_ERR(m)) + return PTR_ERR(m); + + if (ingress) { + ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA, + ANA_PORT_PORT_CFG_SRC_MIRROR_ENA, + ANA_PORT_PORT_CFG, from); + } else { + ocelot_rmw(ocelot, BIT(from), BIT(from), + ANA_EMIRRORPORTS); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_mirror_add); + +void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress) +{ + if (ingress) { + ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA, + ANA_PORT_PORT_CFG, from); + } else { + ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS); + } + + ocelot_mirror_put(ocelot); +} +EXPORT_SYMBOL_GPL(ocelot_port_mirror_del); + void ocelot_init_port(struct ocelot *ocelot, int port) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -2629,7 +2694,7 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot) /* Configure the CPU port to be VLAN aware */ ocelot_write_gix(ocelot, - ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) | + ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) | ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), ANA_PORT_VLAN_CFG, cpu); @@ -2652,7 +2717,6 @@ static void ocelot_detect_features(struct ocelot *ocelot) int ocelot_init(struct ocelot *ocelot) { - char queue_name[32]; int i, ret; u32 port; @@ -2664,33 +2728,27 @@ int ocelot_init(struct ocelot *ocelot) } } - ocelot->stats = devm_kcalloc(ocelot->dev, - ocelot->num_phys_ports * 
ocelot->num_stats, - sizeof(u64), GFP_KERNEL); - if (!ocelot->stats) - return -ENOMEM; - - mutex_init(&ocelot->stats_lock); mutex_init(&ocelot->ptp_lock); mutex_init(&ocelot->mact_lock); mutex_init(&ocelot->fwd_domain_lock); + mutex_init(&ocelot->tas_lock); spin_lock_init(&ocelot->ptp_clock_lock); spin_lock_init(&ocelot->ts_id_lock); - snprintf(queue_name, sizeof(queue_name), "%s-stats", - dev_name(ocelot->dev)); - ocelot->stats_queue = create_singlethread_workqueue(queue_name); - if (!ocelot->stats_queue) - return -ENOMEM; ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); - if (!ocelot->owq) { - destroy_workqueue(ocelot->stats_queue); + if (!ocelot->owq) return -ENOMEM; + + ret = ocelot_stats_init(ocelot); + if (ret) { + destroy_workqueue(ocelot->owq); + return ret; } INIT_LIST_HEAD(&ocelot->multicast); INIT_LIST_HEAD(&ocelot->pgids); INIT_LIST_HEAD(&ocelot->vlans); + INIT_LIST_HEAD(&ocelot->lag_fdbs); ocelot_detect_features(ocelot); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); @@ -2796,20 +2854,14 @@ int ocelot_init(struct ocelot *ocelot) ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), ANA_CPUQ_8021_CFG, i); - INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); - queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, - OCELOT_STATS_CHECK_DELAY); - return 0; } EXPORT_SYMBOL(ocelot_init); void ocelot_deinit(struct ocelot *ocelot) { - cancel_delayed_work(&ocelot->stats_work); - destroy_workqueue(ocelot->stats_queue); + ocelot_stats_deinit(ocelot); destroy_workqueue(ocelot->owq); - mutex_destroy(&ocelot->stats_lock); } EXPORT_SYMBOL(ocelot_deinit); |
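[Editor's illustration, not part of the patch] The patch also codifies the LAG logical port ID in ocelot_bond_get_id(): it is the lowest-numbered physical port present in the bond's port mask, which is why ocelot_port_lag_leave() now migrates LAG FDB entries when that lowest member leaves and the ID changes. A small hedged sketch of that selection, using __builtin_ctzl in place of the kernel's __ffs(); the function names are illustrative only:

/*
 * Sketch: the LAG logical port ID is the index of the lowest set bit in
 * the bond's port mask, so removing the lowest-numbered member changes
 * the ID and any FDB entries pointing at the old ID must be migrated.
 */
#include <stdio.h>

/* Lowest set bit index of a non-zero mask (kernel __ffs() equivalent) */
static int lowest_set_bit(unsigned long mask)
{
	return __builtin_ctzl(mask);
}

static int bond_get_id(unsigned long bond_mask)
{
	if (!bond_mask)
		return -1;	/* the patch returns -ENOENT here */

	return lowest_set_bit(bond_mask);
}

int main(void)
{
	unsigned long bond_mask = (1UL << 2) | (1UL << 3);	/* ports 2 and 3 */

	printf("LAG id with ports 2+3: %d\n", bond_get_id(bond_mask));

	bond_mask &= ~(1UL << 2);	/* port 2 leaves the LAG */
	printf("LAG id after port 2 leaves: %d\n", bond_get_id(bond_mask));

	return 0;
}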