Diffstat (limited to 'drivers/net/ethernet/broadcom/bcmsysport.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 512
1 file changed, 416 insertions(+), 96 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5776e503e4c5..d9b9170ed2fc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -81,14 +81,14 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,  {  #ifdef CONFIG_PHYS_ADDR_T_64BIT  	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, -			d + DESC_ADDR_HI_STATUS_LEN); +		     d + DESC_ADDR_HI_STATUS_LEN);  #endif  	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);  }  static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, -						struct dma_desc *desc, -						unsigned int port) +					     struct dma_desc *desc, +					     unsigned int port)  {  	/* Ports are latched, so write upper address first */  	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); @@ -108,7 +108,7 @@ static int bcm_sysport_set_settings(struct net_device *dev,  }  static int bcm_sysport_get_settings(struct net_device *dev, -					struct ethtool_cmd *cmd) +				    struct ethtool_cmd *cmd)  {  	struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -119,14 +119,14 @@ static int bcm_sysport_get_settings(struct net_device *dev,  }  static int bcm_sysport_set_rx_csum(struct net_device *dev, -					netdev_features_t wanted) +				   netdev_features_t wanted)  {  	struct bcm_sysport_priv *priv = netdev_priv(dev);  	u32 reg; -	priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM); +	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);  	reg = rxchk_readl(priv, RXCHK_CONTROL); -	if (priv->rx_csum_en) +	if (priv->rx_chk_en)  		reg |= RXCHK_EN;  	else  		reg &= ~RXCHK_EN; @@ -134,7 +134,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,  	/* If UniMAC forwards CRC, we need to skip over it to get  	 * a valid CHK bit to be set in the per-packet status word  	 */ -	if (priv->rx_csum_en && priv->crc_fwd) +	if (priv->rx_chk_en && priv->crc_fwd)  		reg |= RXCHK_SKIP_FCS;  	else  		reg &= ~RXCHK_SKIP_FCS; @@ -145,7 +145,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,  }  static int bcm_sysport_set_tx_csum(struct net_device *dev, -					netdev_features_t wanted) +				   netdev_features_t wanted)  {  	struct bcm_sysport_priv *priv = netdev_priv(dev);  	u32 reg; @@ -165,7 +165,7 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,  }  static int bcm_sysport_set_features(struct net_device *dev, -					netdev_features_t features) +				    netdev_features_t features)  {  	netdev_features_t changed = features ^ dev->features;  	netdev_features_t wanted = dev->wanted_features; @@ -261,7 +261,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {  	/* RXCHK misc statistics */  	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),  	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, -			RXCHK_OTHER_DISC_CNTR), +		   RXCHK_OTHER_DISC_CNTR),  	/* RBUF misc statistics */  	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),  	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), @@ -270,7 +270,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {  #define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)  static void bcm_sysport_get_drvinfo(struct net_device *dev, -					struct ethtool_drvinfo *info) +				    struct ethtool_drvinfo *info)  {  	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));  	strlcpy(info->version, "0.1", sizeof(info->version)); @@ -303,7 +303,7 @@ static int 
bcm_sysport_get_sset_count(struct net_device *dev, int string_set)  }  static void bcm_sysport_get_strings(struct net_device *dev, -					u32 stringset, u8 *data) +				    u32 stringset, u8 *data)  {  	int i; @@ -311,8 +311,8 @@ static void bcm_sysport_get_strings(struct net_device *dev,  	case ETH_SS_STATS:  		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {  			memcpy(data + i * ETH_GSTRING_LEN, -				bcm_sysport_gstrings_stats[i].stat_string, -				ETH_GSTRING_LEN); +			       bcm_sysport_gstrings_stats[i].stat_string, +			       ETH_GSTRING_LEN);  		}  		break;  	default: @@ -362,7 +362,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)  }  static void bcm_sysport_get_stats(struct net_device *dev, -					struct ethtool_stats *stats, u64 *data) +				  struct ethtool_stats *stats, u64 *data)  {  	struct bcm_sysport_priv *priv = netdev_priv(dev);  	int i; @@ -384,6 +384,64 @@ static void bcm_sysport_get_stats(struct net_device *dev,  	}  } +static void bcm_sysport_get_wol(struct net_device *dev, +				struct ethtool_wolinfo *wol) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	u32 reg; + +	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; +	wol->wolopts = priv->wolopts; + +	if (!(priv->wolopts & WAKE_MAGICSECURE)) +		return; + +	/* Return the programmed SecureOn password */ +	reg = umac_readl(priv, UMAC_PSW_MS); +	put_unaligned_be16(reg, &wol->sopass[0]); +	reg = umac_readl(priv, UMAC_PSW_LS); +	put_unaligned_be32(reg, &wol->sopass[2]); +} + +static int bcm_sysport_set_wol(struct net_device *dev, +			       struct ethtool_wolinfo *wol) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	struct device *kdev = &priv->pdev->dev; +	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE; + +	if (!device_can_wakeup(kdev)) +		return -ENOTSUPP; + +	if (wol->wolopts & ~supported) +		return -EINVAL; + +	/* Program the SecureOn password */ +	if (wol->wolopts & WAKE_MAGICSECURE) { +		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), +			    UMAC_PSW_MS); +		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), +			    UMAC_PSW_LS); +	} + +	/* Flag the device and relevant IRQ as wakeup capable */ +	if (wol->wolopts) { +		device_set_wakeup_enable(kdev, 1); +		enable_irq_wake(priv->wol_irq); +		priv->wol_irq_disabled = 0; +	} else { +		device_set_wakeup_enable(kdev, 0); +		/* Avoid unbalanced disable_irq_wake calls */ +		if (!priv->wol_irq_disabled) +			disable_irq_wake(priv->wol_irq); +		priv->wol_irq_disabled = 1; +	} + +	priv->wolopts = wol->wolopts; + +	return 0; +} +  static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)  {  	dev_kfree_skb_any(cb->skb); @@ -406,7 +464,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,  	}  	mapping = dma_map_single(kdev, cb->skb->data, -				RX_BUF_LENGTH, DMA_FROM_DEVICE); +				 RX_BUF_LENGTH, DMA_FROM_DEVICE);  	ret = dma_mapping_error(kdev, mapping);  	if (ret) {  		bcm_sysport_free_cb(cb); @@ -470,33 +528,24 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,  		to_process = p_index - priv->rx_c_index;  	netif_dbg(priv, rx_status, ndev, -			"p_index=%d rx_c_index=%d to_process=%d\n", -			p_index, priv->rx_c_index, to_process); - -	while ((processed < to_process) && -		(processed < budget)) { +		  "p_index=%d rx_c_index=%d to_process=%d\n", +		  p_index, priv->rx_c_index, to_process); +	while ((processed < to_process) && (processed < budget)) {  		cb = &priv->rx_cbs[priv->rx_read_ptr];  		skb = cb->skb; -		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), -				RX_BUF_LENGTH, 
DMA_FROM_DEVICE); - -		/* Extract the Receive Status Block prepended */ -		rsb = (struct bcm_rsb *)skb->data; -		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; -		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & -			DESC_STATUS_MASK;  		processed++;  		priv->rx_read_ptr++; +  		if (priv->rx_read_ptr == priv->num_rx_bds)  			priv->rx_read_ptr = 0; -		netif_dbg(priv, rx_status, ndev, -				"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", -				p_index, priv->rx_c_index, priv->rx_read_ptr, -				len, status); - +		/* We do not have a backing SKB, so we do not a corresponding +		 * DMA mapping for this incoming packet since +		 * bcm_sysport_rx_refill always either has both skb and mapping +		 * or none. +		 */  		if (unlikely(!skb)) {  			netif_err(priv, rx_err, ndev, "out of memory!\n");  			ndev->stats.rx_dropped++; @@ -504,6 +553,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,  			goto refill;  		} +		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), +				 RX_BUF_LENGTH, DMA_FROM_DEVICE); + +		/* Extract the Receive Status Block prepended */ +		rsb = (struct bcm_rsb *)skb->data; +		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; +		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & +			  DESC_STATUS_MASK; + +		netif_dbg(priv, rx_status, ndev, +			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", +			  p_index, priv->rx_c_index, priv->rx_read_ptr, +			  len, status); +  		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {  			netif_err(priv, rx_status, ndev, "fragmented packet!\n");  			ndev->stats.rx_dropped++; @@ -554,9 +617,9 @@ refill:  }  static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, -					struct bcm_sysport_cb *cb, -					unsigned int *bytes_compl, -					unsigned int *pkts_compl) +				       struct bcm_sysport_cb *cb, +				       unsigned int *bytes_compl, +				       unsigned int *pkts_compl)  {  	struct device *kdev = &priv->pdev->dev;  	struct net_device *ndev = priv->netdev; @@ -565,8 +628,8 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,  		ndev->stats.tx_bytes += cb->skb->len;  		*bytes_compl += cb->skb->len;  		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), -				dma_unmap_len(cb, dma_len), -				DMA_TO_DEVICE); +				 dma_unmap_len(cb, dma_len), +				 DMA_TO_DEVICE);  		ndev->stats.tx_packets++;  		(*pkts_compl)++;  		bcm_sysport_free_cb(cb); @@ -574,7 +637,7 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,  	} else if (dma_unmap_addr(cb, dma_addr)) {  		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);  		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), -				dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); +			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);  		dma_unmap_addr_set(cb, dma_addr, 0);  	}  } @@ -608,8 +671,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,  		last_tx_cn = num_tx_cbs - last_c_index + c_index;  	netif_dbg(priv, tx_done, ndev, -			"ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", -			ring->index, c_index, last_tx_cn, last_c_index); +		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", +		  ring->index, c_index, last_tx_cn, last_c_index);  	while (last_tx_cn-- > 0) {  		cb = ring->cbs + last_c_index; @@ -626,8 +689,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,  		netif_tx_wake_queue(txq);  	netif_dbg(priv, tx_done, ndev, -			"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", -			ring->index, ring->c_index, pkts_compl, 
bytes_compl); +		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", +		  ring->index, ring->c_index, pkts_compl, bytes_compl);  	return pkts_compl;  } @@ -692,6 +755,20 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)  	return work_done;  } +static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) +{ +	u32 reg; + +	/* Stop monitoring MPD interrupt */ +	intrl2_0_mask_set(priv, INTRL2_0_MPD); + +	/* Clear the MagicPacket detection logic */ +	reg = umac_readl(priv, UMAC_MPD_CTRL); +	reg &= ~MPD_EN; +	umac_writel(priv, reg, UMAC_MPD_CTRL); + +	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); +}  /* RX and misc interrupt routine */  static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) @@ -722,6 +799,11 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)  	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)  		bcm_sysport_tx_reclaim_all(priv); +	if (priv->irq0_stat & INTRL2_0_MPD) { +		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n"); +		bcm_sysport_resume_from_wol(priv); +	} +  	return IRQ_HANDLED;  } @@ -757,6 +839,15 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)  	return IRQ_HANDLED;  } +static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) +{ +	struct bcm_sysport_priv *priv = dev_id; + +	pm_wakeup_event(&priv->pdev->dev, 0); + +	return IRQ_HANDLED; +} +  static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)  {  	struct sk_buff *nskb; @@ -804,8 +895,9 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)  			csum_info |= L4_LENGTH_VALID;  			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)  				csum_info |= L4_UDP; -		} else +		} else {  			csum_info = 0; +		}  		tsb->l4_ptr_dest_map = csum_info;  	} @@ -869,7 +961,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,  	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);  	if (dma_mapping_error(kdev, mapping)) {  		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", -				skb->data, skb_len); +			  skb->data, skb_len);  		ret = NETDEV_TX_OK;  		goto out;  	} @@ -887,7 +979,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,  	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;  	len_status |= (skb_len << DESC_LEN_SHIFT);  	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << -			DESC_STATUS_SHIFT; +		       DESC_STATUS_SHIFT;  	if (skb->ip_summed == CHECKSUM_PARTIAL)  		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); @@ -912,7 +1004,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,  		netif_tx_stop_queue(txq);  	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", -			ring->index, ring->desc_count, ring->curr_desc); +		  ring->index, ring->desc_count, ring->curr_desc);  	ret = NETDEV_TX_OK;  out: @@ -1010,7 +1102,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,  		return -ENOMEM;  	} -	ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL); +	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);  	if (!ring->cbs) {  		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");  		return -ENOMEM; @@ -1050,14 +1142,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,  	napi_enable(&ring->napi);  	netif_dbg(priv, hw, priv->netdev, -			"TDMA cfg, size=%d, desc_cpu=%p\n", -			ring->size, ring->desc_cpu); +		  "TDMA cfg, size=%d, desc_cpu=%p\n", +		  ring->size, ring->desc_cpu);  	return 0;  }  static void bcm_sysport_fini_tx_ring(struct 
bcm_sysport_priv *priv, -					unsigned int index) +				     unsigned int index)  {  	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];  	struct device *kdev = &priv->pdev->dev; @@ -1088,7 +1180,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,  /* RDMA helper */  static inline int rdma_enable_set(struct bcm_sysport_priv *priv, -					unsigned int enable) +				  unsigned int enable)  {  	unsigned int timeout = 1000;  	u32 reg; @@ -1115,7 +1207,7 @@ static inline int rdma_enable_set(struct bcm_sysport_priv *priv,  /* TDMA helper */  static inline int tdma_enable_set(struct bcm_sysport_priv *priv, -					unsigned int enable) +				  unsigned int enable)  {  	unsigned int timeout = 1000;  	u32 reg; @@ -1153,8 +1245,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)  	priv->rx_bd_assign_index = 0;  	priv->rx_c_index = 0;  	priv->rx_read_ptr = 0; -	priv->rx_cbs = kzalloc(priv->num_rx_bds * -				sizeof(struct bcm_sysport_cb), GFP_KERNEL); +	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), +				GFP_KERNEL);  	if (!priv->rx_cbs) {  		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");  		return -ENOMEM; @@ -1186,8 +1278,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)  	rdma_writel(priv, 1, RDMA_MBDONE_INTR);  	netif_dbg(priv, hw, priv->netdev, -			"RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", -			priv->num_rx_bds, priv->rx_bds); +		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", +		  priv->num_rx_bds, priv->rx_bds);  	return 0;  } @@ -1207,8 +1299,8 @@ static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)  		cb = &priv->rx_cbs[i];  		if (dma_unmap_addr(cb, dma_addr))  			dma_unmap_single(&priv->pdev->dev, -					dma_unmap_addr(cb, dma_addr), -					RX_BUF_LENGTH, DMA_FROM_DEVICE); +					 dma_unmap_addr(cb, dma_addr), +					 RX_BUF_LENGTH, DMA_FROM_DEVICE);  		bcm_sysport_free_cb(cb);  	} @@ -1236,15 +1328,15 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)  }  static inline void umac_enable_set(struct bcm_sysport_priv *priv, -					unsigned int enable) +				   u32 mask, unsigned int enable)  {  	u32 reg;  	reg = umac_readl(priv, UMAC_CMD);  	if (enable) -		reg |= CMD_RX_EN | CMD_TX_EN; +		reg |= mask;  	else -		reg &= ~(CMD_RX_EN | CMD_TX_EN); +		reg &= ~mask;  	umac_writel(priv, reg, UMAC_CMD);  	/* UniMAC stops on a packet boundary, wait for a full-sized packet @@ -1268,7 +1360,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)  }  static void umac_set_hw_addr(struct bcm_sysport_priv *priv, -				unsigned char *addr) +			     unsigned char *addr)  {  	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |  			(addr[2] << 8) | addr[3], UMAC_MAC0); @@ -1284,11 +1376,35 @@ static void topctrl_flush(struct bcm_sysport_priv *priv)  	topctrl_writel(priv, 0, TX_FLUSH_CNTL);  } +static void bcm_sysport_netif_start(struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); + +	/* Enable NAPI */ +	napi_enable(&priv->napi); + +	phy_start(priv->phydev); + +	/* Enable TX interrupts for the 32 TXQs */ +	intrl2_1_mask_clear(priv, 0xffffffff); + +	/* Last call before we start the real business */ +	netif_tx_start_all_queues(dev); +} + +static void rbuf_init(struct bcm_sysport_priv *priv) +{ +	u32 reg; + +	reg = rbuf_readl(priv, RBUF_CONTROL); +	reg |= RBUF_4B_ALGN | RBUF_RSB_EN; +	rbuf_writel(priv, reg, RBUF_CONTROL); +} +  static int bcm_sysport_open(struct net_device *dev)  {  	struct bcm_sysport_priv *priv = netdev_priv(dev);  	unsigned int i; -	u32 reg;  	int 
ret;  	/* Reset UniMAC */ @@ -1298,12 +1414,10 @@ static int bcm_sysport_open(struct net_device *dev)  	topctrl_flush(priv);  	/* Disable the UniMAC RX/TX */ -	umac_enable_set(priv, 0); +	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);  	/* Enable RBUF 2bytes alignment and Receive Status Block */ -	reg = rbuf_readl(priv, RBUF_CONTROL); -	reg |= RBUF_4B_ALGN | RBUF_RSB_EN; -	rbuf_writel(priv, reg, RBUF_CONTROL); +	rbuf_init(priv);  	/* Set maximum frame length */  	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); @@ -1351,7 +1465,7 @@ static int bcm_sysport_open(struct net_device *dev)  		ret = bcm_sysport_init_tx_ring(priv, i);  		if (ret) {  			netdev_err(dev, "failed to initialize TX ring %d\n", -					i); +				   i);  			goto out_free_tx_ring;  		}  	} @@ -1379,19 +1493,10 @@ static int bcm_sysport_open(struct net_device *dev)  	if (ret)  		goto out_clear_rx_int; -	/* Enable NAPI */ -	napi_enable(&priv->napi); -  	/* Turn on UniMAC TX/RX */ -	umac_enable_set(priv, 1); +	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); -	phy_start(priv->phydev); - -	/* Enable TX interrupts for the 32 TXQs */ -	intrl2_1_mask_clear(priv, 0xffffffff); - -	/* Last call before we start the real business */ -	netif_tx_start_all_queues(dev); +	bcm_sysport_netif_start(dev);  	return 0; @@ -1410,12 +1515,9 @@ out_phy_disconnect:  	return ret;  } -static int bcm_sysport_stop(struct net_device *dev) +static void bcm_sysport_netif_stop(struct net_device *dev)  {  	struct bcm_sysport_priv *priv = netdev_priv(dev); -	unsigned int i; -	u32 reg; -	int ret;  	/* stop all software from updating hardware */  	netif_tx_stop_all_queues(dev); @@ -1427,11 +1529,18 @@ static int bcm_sysport_stop(struct net_device *dev)  	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);  	intrl2_1_mask_set(priv, 0xffffffff);  	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); +} + +static int bcm_sysport_stop(struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	unsigned int i; +	int ret; + +	bcm_sysport_netif_stop(dev);  	/* Disable UniMAC RX */ -	reg = umac_readl(priv, UMAC_CMD); -	reg &= ~CMD_RX_EN; -	umac_writel(priv, reg, UMAC_CMD); +	umac_enable_set(priv, CMD_RX_EN, 0);  	ret = tdma_enable_set(priv, 0);  	if (ret) { @@ -1449,9 +1558,7 @@ static int bcm_sysport_stop(struct net_device *dev)  	}  	/* Disable UniMAC TX */ -	reg = umac_readl(priv, UMAC_CMD); -	reg &= ~CMD_TX_EN; -	umac_writel(priv, reg, UMAC_CMD); +	umac_enable_set(priv, CMD_TX_EN, 0);  	/* Free RX/TX rings SW structures */  	for (i = 0; i < dev->num_tx_queues; i++) @@ -1477,6 +1584,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {  	.get_strings		= bcm_sysport_get_strings,  	.get_ethtool_stats	= bcm_sysport_get_stats,  	.get_sset_count		= bcm_sysport_get_sset_count, +	.get_wol		= bcm_sysport_get_wol, +	.set_wol		= bcm_sysport_set_wol,  };  static const struct net_device_ops bcm_sysport_netdev_ops = { @@ -1518,6 +1627,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)  	priv->irq0 = platform_get_irq(pdev, 0);  	priv->irq1 = platform_get_irq(pdev, 1); +	priv->wol_irq = platform_get_irq(pdev, 2);  	if (priv->irq0 <= 0 || priv->irq1 <= 0) {  		dev_err(&pdev->dev, "invalid interrupts\n");  		ret = -EINVAL; @@ -1570,6 +1680,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)  	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |  				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +	/* Request the WOL interrupt and advertise suspend if available */ +	priv->wol_irq_disabled = 1; +	ret = devm_request_irq(&pdev->dev, 
priv->wol_irq, +			       bcm_sysport_wol_isr, 0, dev->name, priv); +	if (!ret) +		device_set_wakeup_capable(&pdev->dev, 1); +  	/* Set the needed headroom once and for all */  	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);  	dev->needed_headroom += sizeof(struct bcm_tsb); @@ -1585,10 +1702,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)  	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;  	dev_info(&pdev->dev, -		"Broadcom SYSTEMPORT" REV_FMT -		" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", -		(priv->rev >> 8) & 0xff, priv->rev & 0xff, -		priv->base, priv->irq0, priv->irq1, txq, rxq); +		 "Broadcom SYSTEMPORT" REV_FMT +		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", +		 (priv->rev >> 8) & 0xff, priv->rev & 0xff, +		 priv->base, priv->irq0, priv->irq1, txq, rxq);  	return 0;  err: @@ -1610,6 +1727,208 @@ static int bcm_sysport_remove(struct platform_device *pdev)  	return 0;  } +#ifdef CONFIG_PM_SLEEP +static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) +{ +	struct net_device *ndev = priv->netdev; +	unsigned int timeout = 1000; +	u32 reg; + +	/* Password has already been programmed */ +	reg = umac_readl(priv, UMAC_MPD_CTRL); +	reg |= MPD_EN; +	reg &= ~PSW_EN; +	if (priv->wolopts & WAKE_MAGICSECURE) +		reg |= PSW_EN; +	umac_writel(priv, reg, UMAC_MPD_CTRL); + +	/* Make sure RBUF entered WoL mode as result */ +	do { +		reg = rbuf_readl(priv, RBUF_STATUS); +		if (reg & RBUF_WOL_MODE) +			break; + +		udelay(10); +	} while (timeout-- > 0); + +	/* Do not leave the UniMAC RBUF matching only MPD packets */ +	if (!timeout) { +		reg = umac_readl(priv, UMAC_MPD_CTRL); +		reg &= ~MPD_EN; +		umac_writel(priv, reg, UMAC_MPD_CTRL); +		netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); +		return -ETIMEDOUT; +	} + +	/* UniMAC receive needs to be turned on */ +	umac_enable_set(priv, CMD_RX_EN, 1); + +	/* Enable the interrupt wake-up source */ +	intrl2_0_mask_clear(priv, INTRL2_0_MPD); + +	netif_dbg(priv, wol, ndev, "entered WOL mode\n"); + +	return 0; +} + +static int bcm_sysport_suspend(struct device *d) +{ +	struct net_device *dev = dev_get_drvdata(d); +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	unsigned int i; +	int ret = 0; +	u32 reg; + +	if (!netif_running(dev)) +		return 0; + +	bcm_sysport_netif_stop(dev); + +	phy_suspend(priv->phydev); + +	netif_device_detach(dev); + +	/* Disable UniMAC RX */ +	umac_enable_set(priv, CMD_RX_EN, 0); + +	ret = rdma_enable_set(priv, 0); +	if (ret) { +		netdev_err(dev, "RDMA timeout!\n"); +		return ret; +	} + +	/* Disable RXCHK if enabled */ +	if (priv->rx_chk_en) { +		reg = rxchk_readl(priv, RXCHK_CONTROL); +		reg &= ~RXCHK_EN; +		rxchk_writel(priv, reg, RXCHK_CONTROL); +	} + +	/* Flush RX pipe */ +	if (!priv->wolopts) +		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); + +	ret = tdma_enable_set(priv, 0); +	if (ret) { +		netdev_err(dev, "TDMA timeout!\n"); +		return ret; +	} + +	/* Wait for a packet boundary */ +	usleep_range(2000, 3000); + +	umac_enable_set(priv, CMD_TX_EN, 0); + +	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); + +	/* Free RX/TX rings SW structures */ +	for (i = 0; i < dev->num_tx_queues; i++) +		bcm_sysport_fini_tx_ring(priv, i); +	bcm_sysport_fini_rx_ring(priv); + +	/* Get prepared for Wake-on-LAN */ +	if (device_may_wakeup(d) && priv->wolopts) +		ret = bcm_sysport_suspend_to_wol(priv); + +	return ret; +} + +static int bcm_sysport_resume(struct device *d) +{ +	struct net_device *dev = dev_get_drvdata(d); +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	unsigned int i; +	u32 reg; +	int ret; 
+ +	if (!netif_running(dev)) +		return 0; + +	/* We may have been suspended and never received a WOL event that +	 * would turn off MPD detection, take care of that now +	 */ +	bcm_sysport_resume_from_wol(priv); + +	/* Initialize both hardware and software ring */ +	for (i = 0; i < dev->num_tx_queues; i++) { +		ret = bcm_sysport_init_tx_ring(priv, i); +		if (ret) { +			netdev_err(dev, "failed to initialize TX ring %d\n", +				   i); +			goto out_free_tx_rings; +		} +	} + +	/* Initialize linked-list */ +	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); + +	/* Initialize RX ring */ +	ret = bcm_sysport_init_rx_ring(priv); +	if (ret) { +		netdev_err(dev, "failed to initialize RX ring\n"); +		goto out_free_rx_ring; +	} + +	netif_device_attach(dev); + +	/* Enable RX interrupt and TX ring full interrupt */ +	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); + +	/* RX pipe enable */ +	topctrl_writel(priv, 0, RX_FLUSH_CNTL); + +	ret = rdma_enable_set(priv, 1); +	if (ret) { +		netdev_err(dev, "failed to enable RDMA\n"); +		goto out_free_rx_ring; +	} + +	/* Enable rxhck */ +	if (priv->rx_chk_en) { +		reg = rxchk_readl(priv, RXCHK_CONTROL); +		reg |= RXCHK_EN; +		rxchk_writel(priv, reg, RXCHK_CONTROL); +	} + +	rbuf_init(priv); + +	/* Set maximum frame length */ +	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + +	/* Set MAC address */ +	umac_set_hw_addr(priv, dev->dev_addr); + +	umac_enable_set(priv, CMD_RX_EN, 1); + +	/* TX pipe enable */ +	topctrl_writel(priv, 0, TX_FLUSH_CNTL); + +	umac_enable_set(priv, CMD_TX_EN, 1); + +	ret = tdma_enable_set(priv, 1); +	if (ret) { +		netdev_err(dev, "TDMA timeout!\n"); +		goto out_free_rx_ring; +	} + +	phy_resume(priv->phydev); + +	bcm_sysport_netif_start(dev); + +	return 0; + +out_free_rx_ring: +	bcm_sysport_fini_rx_ring(priv); +out_free_tx_rings: +	for (i = 0; i < dev->num_tx_queues; i++) +		bcm_sysport_fini_tx_ring(priv, i); +	return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, +		bcm_sysport_suspend, bcm_sysport_resume); +  static const struct of_device_id bcm_sysport_of_match[] = {  	{ .compatible = "brcm,systemport-v1.00" },  	{ .compatible = "brcm,systemport" }, @@ -1623,6 +1942,7 @@ static struct platform_driver bcm_sysport_driver = {  		.name = "brcm-systemport",  		.owner = THIS_MODULE,  		.of_match_table = bcm_sysport_of_match, +		.pm = &bcm_sysport_pm_ops,  	},  };  module_platform_driver(bcm_sysport_driver);  | 
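A note on the new ethtool hooks: bcm_sysport_get_wol() and bcm_sysport_set_wol() advertise WAKE_MAGIC and WAKE_MAGICSECURE, keep the SecureOn password in the UMAC_PSW_MS/UMAC_PSW_LS registers, and arm or disarm the dedicated wake-up interrupt. They are reached through the standard SIOCETHTOOL ioctl; the stand-alone user-space sketch below is not part of the patch, and the interface name "eth0" is an assumption.

/* Stand-alone user-space example (not driver code): query and set Wake-on-LAN
 * through the SIOCETHTOOL ioctl, which invokes the driver's .get_wol/.set_wol
 * callbacks added above. The interface name "eth0" is an assumption.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;

	/* Read the current settings (bcm_sysport_get_wol) */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GWOL");
		return 1;
	}
	printf("supported=0x%x wolopts=0x%x\n", wol.supported, wol.wolopts);

	/* Request magic-packet wake-up (bcm_sysport_set_wol) */
	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SWOL");

	return 0;
}

The same request is what "ethtool -s eth0 wol g" issues under the hood.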
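The umac_enable_set() change is the piece that makes the new suspend ordering possible: the helper now takes an explicit UMAC_CMD bit mask instead of always toggling CMD_RX_EN and CMD_TX_EN together, so bcm_sysport_suspend() can stop the receiver, drain the DMA engines, and only then stop the transmitter. Below is a minimal stand-alone sketch of that read-modify-write pattern; register access is simulated with a plain variable and the bit positions are illustrative only.

/* Illustration only, not driver code: the mask-based enable/disable helper.
 * A plain variable stands in for the UMAC_CMD register and the bit values
 * are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define CMD_TX_EN	(1u << 0)	/* illustrative bit positions */
#define CMD_RX_EN	(1u << 1)

static uint32_t umac_cmd;		/* stands in for UMAC_CMD */

static void umac_enable_set(uint32_t mask, int enable)
{
	uint32_t reg = umac_cmd;	/* umac_readl(priv, UMAC_CMD) */

	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_cmd = reg;			/* umac_writel(priv, reg, UMAC_CMD) */
}

int main(void)
{
	umac_enable_set(CMD_RX_EN | CMD_TX_EN, 1);	/* open: both paths on */
	umac_enable_set(CMD_RX_EN, 0);			/* suspend: RX off first */
	printf("UMAC_CMD=0x%x\n", umac_cmd);		/* TX still enabled here */
	umac_enable_set(CMD_TX_EN, 0);			/* ...then TX off */
	return 0;
}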
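On the power-management side, probe now requests a third platform interrupt as the wake-up source and only marks the device wakeup-capable if that request succeeds, while SIMPLE_DEV_PM_OPS() wires bcm_sysport_suspend()/bcm_sysport_resume() into the driver's dev_pm_ops. As a rough sketch (the authoritative definition lives in <linux/pm.h>), the macro expands to approximately the following when CONFIG_PM_SLEEP is set.

/* Approximate expansion of
 *	static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
 *				 bcm_sysport_suspend, bcm_sysport_resume);
 * every system-sleep callback routes to the same suspend/resume pair, and
 * runtime-PM callbacks stay unset.
 */
static const struct dev_pm_ops bcm_sysport_pm_ops = {
	.suspend	= bcm_sysport_suspend,
	.resume		= bcm_sysport_resume,
	.freeze		= bcm_sysport_suspend,
	.thaw		= bcm_sysport_resume,
	.poweroff	= bcm_sysport_suspend,
	.restore	= bcm_sysport_resume,
};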
