Diffstat (limited to 'drivers/net/ethernet/renesas')
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig       |    6
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h        |  133
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c   | 1268
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c    |    8
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c      |  226
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h      |  116
6 files changed, 1270 insertions(+), 487 deletions(-)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index bb0ebdfd4459..8008b2f45934 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_RENESAS
bool "Renesas devices"
default y
- ---help---
+ help
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
@@ -23,7 +23,7 @@ config SH_ETH
select MII
select MDIO_BITBANG
select PHYLIB
- ---help---
+ help
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
@@ -32,11 +32,11 @@ config SH_ETH
config RAVB
tristate "Renesas Ethernet AVB support"
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select MII
select MDIO_BITBANG
select PHYLIB
- imply PTP_1588_CLOCK
help
Renesas Ethernet AVB device driver.
This driver supports the following SoCs:
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9f88b5db4f89..e0f8276cffed 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -81,6 +81,7 @@ enum ravb_reg {
RQC3 = 0x00A0,
RQC4 = 0x00A4,
RPC = 0x00B0,
+ RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */
UFCW = 0x00BC,
UFCS = 0x00C0,
UFCV0 = 0x00C4,
@@ -165,7 +166,7 @@ enum ravb_reg {
GTO2 = 0x03A8,
GIC = 0x03AC,
GIS = 0x03B0,
- GCPT = 0x03B4, /* Undocumented? */
+ GCPT = 0x03B4, /* Documented for R-Car Gen3 only */
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
@@ -187,19 +188,24 @@ enum ravb_reg {
PIR = 0x0520,
PSR = 0x0528,
PIPR = 0x052c,
+ CXR31 = 0x0530, /* RZ/G2L only */
+ CXR35 = 0x0540, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
GECMR = 0x05b0,
MAHR = 0x05c0,
MALR = 0x05c8,
- TROCR = 0x0700, /* R-Car Gen3 only */
+ TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */
+ CXR41 = 0x0708, /* RZ/G2L only */
+ CXR42 = 0x0710, /* RZ/G2L only */
CEFCR = 0x0740,
FRECR = 0x0748,
TSFRCR = 0x0750,
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
+ CSR0 = 0x0800, /* RZ/G2L only */
};
@@ -225,7 +231,7 @@ enum CSR_BIT {
CSR_OPS_RESET = 0x00000001,
CSR_OPS_CONFIG = 0x00000002,
CSR_OPS_OPERATION = 0x00000004,
- CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_OPS_STANDBY = 0x00000008, /* Documented for R-Car Gen3 only */
CSR_DTS = 0x00000100,
CSR_TPO0 = 0x00010000,
CSR_TPO1 = 0x00020000,
@@ -241,13 +247,12 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
-/* APSR */
+/* APSR (R-Car Gen3 only) */
enum APSR_BIT {
- APSR_MEMS = 0x00000002,
- APSR_CMSW = 0x00000010,
- APSR_DM = 0x00006000, /* Undocumented? */
- APSR_DM_RDM = 0x00002000,
- APSR_DM_TDM = 0x00004000,
+ APSR_MEMS = 0x00000002, /* Undocumented */
+ APSR_CMSW = 0x00000010,
+ APSR_RDM = 0x00002000,
+ APSR_TDM = 0x00004000,
};
/* RCR */
@@ -530,16 +535,16 @@ enum RIS2_BIT {
/* TIC */
enum TIC_BIT {
- TIC_FTE0 = 0x00000001, /* Undocumented? */
- TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_FTE0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIC_FTE1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIC_TFUE = 0x00000100,
TIC_TFWE = 0x00000200,
};
/* TIS */
enum TIS_BIT {
- TIS_FTF0 = 0x00000001, /* Undocumented? */
- TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_FTF0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIS_FTF1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
@@ -547,8 +552,8 @@ enum TIS_BIT {
/* ISS */
enum ISS_BIT {
- ISS_FRS = 0x00000001, /* Undocumented? */
- ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_FRS = 0x00000001, /* Documented for R-Car Gen3 only */
+ ISS_FTS = 0x00000004, /* Documented for R-Car Gen3 only */
ISS_ES = 0x00000040,
ISS_MS = 0x00000080,
ISS_TFUS = 0x00000100,
@@ -608,13 +613,13 @@ enum GTI_BIT {
/* GIC */
enum GIC_BIT {
- GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTCE = 0x00000001, /* Documented for R-Car Gen3 only */
GIC_PTME = 0x00000004,
};
/* GIS */
enum GIS_BIT {
- GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTCF = 0x00000001, /* Documented for R-Car Gen3 only */
GIS_PTMF = 0x00000004,
GIS_RESERVED = GENMASK(15, 10),
};
@@ -808,13 +813,14 @@ enum ECMR_BIT {
ECMR_TE = 0x00000020,
ECMR_RE = 0x00000040,
ECMR_MPDE = 0x00000200,
- ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
+ ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */
ECMR_TRCCM = 0x04000000,
};
@@ -824,13 +830,14 @@ enum ECSR_BIT {
ECSR_MPD = 0x00000002,
ECSR_LCHNG = 0x00000004,
ECSR_PHYI = 0x00000008,
+ ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */
};
/* ECSIPR */
enum ECSIPR_BIT {
ECSIPR_ICDIP = 0x00000001,
ECSIPR_MPDIP = 0x00000002,
- ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+ ECSIPR_LCHNGIP = 0x00000004,
};
/* PIR */
@@ -858,14 +865,18 @@ enum MPR_BIT {
/* GECMR */
enum GECMR_BIT {
- GECMR_SPEED = 0x00000001,
- GECMR_SPEED_100 = 0x00000000,
- GECMR_SPEED_1000 = 0x00000001,
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+ GBETH_GECMR_SPEED = 0x00000030,
+ GBETH_GECMR_SPEED_10 = 0x00000000,
+ GBETH_GECMR_SPEED_100 = 0x00000010,
+ GBETH_GECMR_SPEED_1000 = 0x00000020,
};
/* The Ethernet AVB descriptor definitions. */
struct ravb_desc {
- __le16 ds; /* Descriptor size */
+ __le16 ds; /* Descriptor size */
u8 cc; /* Content control MSBs (reserved) */
u8 die_dt; /* Descriptor interrupt enable and type */
__le32 dptr; /* Descriptor pointer */
@@ -950,6 +961,23 @@ enum RAVB_QUEUE {
RAVB_NC, /* Network Control Queue */
};
+enum CXR31_BIT {
+ CXR31_SEL_LINK0 = 0x00000001,
+ CXR31_SEL_LINK1 = 0x00000008,
+};
+
+enum CXR35_BIT {
+ CXR35_SEL_XMII = 0x00000003,
+ CXR35_SEL_XMII_RGMII = 0x00000000,
+ CXR35_SEL_XMII_MII = 0x00000002,
+ CXR35_HALFCYC_CLKSW = 0xffff0000,
+};
+
+enum CSR0_BIT {
+ CSR0_TPE = 0x00000010,
+ CSR0_RPE = 0x00000020,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
@@ -957,9 +985,8 @@ enum RAVB_QUEUE {
#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
-/* TX descriptors per packet */
-#define NUM_TX_DESC_GEN2 2
-#define NUM_TX_DESC_GEN3 1
+#define GBETH_RX_BUFF_MAX 8192
+#define GBETH_RX_DESC_DATA_SIZE 4080
struct ravb_tstamp_skb {
struct list_head list;
@@ -984,9 +1011,38 @@ struct ravb_ptp {
struct ravb_ptp_perout perout[N_PER_OUT];
};
-enum ravb_chip_id {
- RCAR_GEN2,
- RCAR_GEN3,
+struct ravb_hw_info {
+ void (*rx_ring_free)(struct net_device *ndev, int q);
+ void (*rx_ring_format)(struct net_device *ndev, int q);
+ void *(*alloc_rx_desc)(struct net_device *ndev, int q);
+ bool (*receive)(struct net_device *ndev, int *quota, int q);
+ void (*set_rate)(struct net_device *ndev);
+ int (*set_feature)(struct net_device *ndev, netdev_features_t features);
+ int (*dmac_init)(struct net_device *ndev);
+ void (*emac_init)(struct net_device *ndev);
+ const char (*gstrings_stats)[ETH_GSTRING_LEN];
+ size_t gstrings_size;
+ netdev_features_t net_hw_features;
+ netdev_features_t net_features;
+ int stats_len;
+ size_t max_rx_len;
+ u32 tccr_mask;
+ u32 rx_max_buf_size;
+ unsigned aligned_tx: 1;
+
+ /* hardware features */
+ unsigned internal_delay:1; /* AVB-DMAC has internal delays */
+ unsigned tx_counters:1; /* E-MAC has TX counters */
+ unsigned carrier_counters:1; /* E-MAC has carrier counters */
+ unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */
+ unsigned irq_en_dis:1; /* Has separate irq enable and disable regs */
+ unsigned err_mgmt_irqs:1; /* Line1 (Err) and Line2 (Mgmt) irqs are separate */
+ unsigned gptp:1; /* AVB-DMAC has gPTP support */
+ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned gptp_ref_clk:1; /* gPTP has separate reference clock */
+ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */
+ unsigned magic_pkt:1; /* E-MAC supports magic packet detection */
+ unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
struct ravb_private {
@@ -994,6 +1050,8 @@ struct ravb_private {
struct platform_device *pdev;
void __iomem *addr;
struct clk *clk;
+ struct clk *refclk;
+ struct clk *gptp_clk;
struct mdiobb_ctrl mdiobb;
u32 num_rx_ring[NUM_RX_QUEUE];
u32 num_tx_ring[NUM_TX_QUEUE];
@@ -1002,9 +1060,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_rx_desc *gbeth_rx_ring;
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
+ struct sk_buff *rx_1st_skb;
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
@@ -1029,14 +1089,23 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
- enum ravb_chip_id chip_id;
+ int erra_irq;
+ int mgmta_irq;
int rx_irqs[NUM_RX_QUEUE];
int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
unsigned wol_enabled:1;
- int num_tx_desc; /* TX descriptors per packet */
+ unsigned rxcidm:1; /* RX Clock Internal Delay Mode */
+ unsigned txcidm:1; /* TX Clock Internal Delay Mode */
+ unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
+ unsigned int num_tx_desc; /* TX descriptors per packet */
+
+ int duplex;
+
+ const struct ravb_hw_info *info;
+ struct reset_control *rstc;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 067ad25553b9..36324126db6d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -29,8 +29,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
-
-#include <asm/div64.h>
+#include <linux/reset.h>
+#include <linux/math64.h>
#include "ravb.h"
@@ -82,7 +82,24 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_rate(struct net_device *ndev)
+static void ravb_set_rate_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 10: /* 10BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
+ break;
+ case 100: /* 100BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
+ break;
+ }
+}
+
+static void ravb_set_rate_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -109,20 +126,24 @@ static void ravb_set_buffer_align(struct sk_buff *skb)
* Ethernet AVB device doesn't have ROM for MAC address.
* This function gets the MAC address that was used by a bootloader.
*/
-static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+static void ravb_read_mac_address(struct device_node *np,
+ struct net_device *ndev)
{
- if (!IS_ERR(mac)) {
- ether_addr_copy(ndev->dev_addr, mac);
- } else {
+ int ret;
+
+ ret = of_get_ethdev_address(np, ndev);
+ if (ret) {
u32 mahr = ravb_read(ndev, MAHR);
u32 malr = ravb_read(ndev, MALR);
-
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ u8 addr[ETH_ALEN];
+
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
@@ -162,7 +183,7 @@ static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
}
/* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = ravb_set_mdc,
.set_mdio_dir = ravb_set_mdio_dir,
@@ -175,10 +196,10 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
- int num_tx_desc = priv->num_tx_desc;
+ unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *desc;
+ unsigned int entry;
int free_num = 0;
- int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
@@ -214,31 +235,67 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-/* Free skb's and DMA buffers for Ethernet AVB */
-static void ravb_ring_free(struct net_device *ndev, int q)
+static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
- int ring_size;
- int i;
+ unsigned int ring_size;
+ unsigned int i;
- if (priv->rx_ring[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+ if (!priv->gbeth_rx_ring)
+ return;
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
- priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
}
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ priv->rx_desc_dma[q]);
+ priv->gbeth_rx_ring = NULL;
+}
+
+static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+ unsigned int i;
+
+ if (!priv->rx_ring[q])
+ return;
+
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ RX_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+}
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ unsigned int num_tx_desc = priv->num_tx_desc;
+ unsigned int ring_size;
+ unsigned int i;
+
+ info->rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
@@ -269,24 +326,44 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-/* Format skb and descriptor buffer for Ethernet AVB */
-static void ravb_ring_format(struct net_device *ndev, int q)
+static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
- struct ravb_ex_rx_desc *rx_desc;
- struct ravb_tx_desc *tx_desc;
- struct ravb_desc *desc;
- int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
- num_tx_desc;
+ struct ravb_rx_desc *rx_desc;
+ unsigned int rx_ring_size;
dma_addr_t dma_addr;
- int i;
+ unsigned int i;
- priv->cur_rx[q] = 0;
- priv->cur_tx[q] = 0;
- priv->dirty_rx[q] = 0;
- priv->dirty_tx[q] = 0;
+ rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ /* RX descriptor */
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+}
+
+static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_ex_rx_desc *rx_desc;
+ unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ dma_addr_t dma_addr;
+ unsigned int i;
memset(priv->rx_ring[q], 0, rx_ring_size);
/* Build RX ring buffer */
@@ -308,6 +385,26 @@ static void ravb_ring_format(struct net_device *ndev, int q)
rx_desc = &priv->rx_ring[q][i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
+}
+
+/* Format skb and descriptor buffer for Ethernet AVB */
+static void ravb_ring_format(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ unsigned int num_tx_desc = priv->num_tx_desc;
+ struct ravb_tx_desc *tx_desc;
+ struct ravb_desc *desc;
+ unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+ num_tx_desc;
+ unsigned int i;
+
+ priv->cur_rx[q] = 0;
+ priv->cur_tx[q] = 0;
+ priv->dirty_rx[q] = 0;
+ priv->dirty_tx[q] = 0;
+
+ info->rx_ring_format(ndev, q);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -333,14 +430,41 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
+static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+
+ priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->gbeth_rx_ring;
+}
+
+static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+
+ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+
+ priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->rx_ring[q];
+}
+
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
+ const struct ravb_hw_info *info = priv->info;
+ unsigned int num_tx_desc = priv->num_tx_desc;
+ unsigned int ring_size;
struct sk_buff *skb;
- int ring_size;
- int i;
+ unsigned int i;
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -351,7 +475,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
+ skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -367,11 +491,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate all RX descriptors. */
- ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- if (!priv->rx_ring[q])
+ if (!info->alloc_rx_desc(ndev, q))
goto error;
priv->dirty_rx[q] = 0;
@@ -393,8 +513,43 @@ error:
return -ENOMEM;
}
-/* E-MAC init function */
-static void ravb_emac_init(struct net_device *ndev)
+static void ravb_emac_init_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+ /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
+ ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
+ ECMR_TE | ECMR_RE | ECMR_RCPT |
+ ECMR_TXF | ECMR_RXF, ECMR);
+
+ ravb_set_rate_gbeth(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+ } else {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+ CXR31_SEL_LINK0);
+ }
+}
+
+static void ravb_emac_init_rcar(struct net_device *ndev)
{
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
@@ -404,7 +559,7 @@ static void ravb_emac_init(struct net_device *ndev)
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
- ravb_set_rate(ndev);
+ ravb_set_rate_rcar(ndev);
/* Set MAC address */
ravb_write(ndev,
@@ -420,17 +575,55 @@ static void ravb_emac_init(struct net_device *ndev)
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
-/* Device init function for Ethernet AVB */
-static int ravb_dmac_init(struct net_device *ndev)
+/* E-MAC init function */
+static void ravb_emac_init(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ info->emac_init(ndev);
+}
+
+static int ravb_dmac_init_gbeth(struct net_device *ndev)
+{
int error;
- /* Set CONFIG mode */
- error = ravb_config(ndev);
+ error = ravb_ring_init(ndev, RAVB_BE);
if (error)
return error;
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+
+ /* Set DMAC RX */
+ ravb_write(ndev, 0x60000000, RCR);
+
+ /* Set Max Frame Length (RTC) */
+ ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+
+ /* Set FIFO size */
+ ravb_write(ndev, 0x00222200, TGC);
+
+ ravb_write(ndev, 0, TCCR);
+
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0, RIC0);
+ /* Disable FIFO full warning */
+ ravb_write(ndev, 0x0, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
+
+ ravb_write(ndev, TIC_FTE0, TIC);
+
+ return 0;
+}
+
+static int ravb_dmac_init_rcar(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ int error;
+
error = ravb_ring_init(ndev, RAVB_BE);
if (error)
return error;
@@ -455,7 +648,7 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_write(ndev, TCCR_TFEN, TCCR);
/* Interrupt init: */
- if (priv->chip_id == RCAR_GEN3) {
+ if (info->multi_irqs) {
/* Clear DIL.DPLx */
ravb_write(ndev, 0, DIL);
/* Set queue specific interrupt */
@@ -470,6 +663,25 @@ static int ravb_dmac_init(struct net_device *ndev)
/* Frame transmitted, timestamp FIFO updated */
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+ return 0;
+}
+
+/* Device init function for Ethernet AVB */
+static int ravb_dmac_init(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ /* Set CONFIG mode */
+ error = ravb_config(ndev);
+ if (error)
+ return error;
+
+ error = info->dmac_init(ndev);
+ if (error)
+ return error;
+
/* Setting the control will start the AVB-DMAC process. */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
@@ -529,10 +741,154 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
+static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
+ struct ravb_rx_desc *desc)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+
+ skb = priv->rx_skb[RAVB_BE][entry];
+ priv->rx_skb[RAVB_BE][entry] = NULL;
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+
+ return skb;
+}
+
+/* Packet receive function for Gigabit Ethernet */
+static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct net_device_stats *stats;
+ struct ravb_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u8 desc_status;
+ int boguscnt;
+ u16 pkt_len;
+ u8 die_dt;
+ int entry;
+ int limit;
+
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ stats = &priv->stats[q];
+
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->gbeth_rx_ring[entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ die_dt = desc->die_dt & 0xF0;
+ switch (die_dt) {
+ case DT_FSINGLE:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ break;
+ case DT_FSTART:
+ priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ break;
+ case DT_FMID:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ break;
+ case DT_FEND:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ priv->rx_1st_skb->protocol =
+ eth_type_trans(priv->rx_1st_skb, ndev);
+ napi_gro_receive(&priv->napi[q],
+ priv->rx_1st_skb);
+ stats->rx_packets++;
+ stats->rx_bytes += priv->rx_1st_skb->len;
+ break;
+ }
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ if (!skb)
+ break;
+ ravb_set_buffer_align(skb);
+ dma_addr = dma_map_single(ndev->dev.parent,
+ skb->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
priv->cur_rx[q];
@@ -617,9 +973,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev,
- RX_BUF_SZ +
- RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
@@ -645,6 +999,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
return boguscnt <= 0;
}
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ return info->receive(ndev, quota, q);
+}
+
static void ravb_rcv_snd_disable(struct net_device *ndev)
{
/* Disable TX and RX */
@@ -660,11 +1023,13 @@ static void ravb_rcv_snd_enable(struct net_device *ndev)
/* function for waiting dma process finished */
static int ravb_stop_dma(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
/* Wait for stopping the hardware TX process */
- error = ravb_wait(ndev, TCCR,
- TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
+
if (error)
return error;
@@ -756,6 +1121,7 @@ static void ravb_error_interrupt(struct net_device *ndev)
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
u32 ris0 = ravb_read(ndev, RIS0);
u32 ric0 = ravb_read(ndev, RIC0);
u32 tis = ravb_read(ndev, TIS);
@@ -764,7 +1130,7 @@ static bool ravb_queue_interrupt(struct net_device *ndev, int q)
if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
if (napi_schedule_prep(&priv->napi[q])) {
/* Mask RX and TX interrupts */
- if (priv->chip_id == RCAR_GEN2) {
+ if (!info->irq_en_dis) {
ravb_write(ndev, ric0 & ~BIT(q), RIC0);
ravb_write(ndev, tic & ~BIT(q), TIC);
} else {
@@ -801,6 +1167,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
irqreturn_t result = IRQ_NONE;
u32 iss;
@@ -817,8 +1184,13 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
/* Network control and best effort queue RX/TX */
- for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (ravb_queue_interrupt(ndev, q))
+ if (info->nc_queues) {
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+ }
+ } else {
+ if (ravb_queue_interrupt(ndev, RAVB_BE))
result = IRQ_HANDLED;
}
}
@@ -907,41 +1279,40 @@ static int ravb_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ bool gptp = info->gptp || info->ccc_gac;
+ struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- u32 ris0, tis;
-
- for (;;) {
- tis = ravb_read(ndev, TIS);
- ris0 = ravb_read(ndev, RIS0);
- if (!((ris0 & mask) || (tis & mask)))
- break;
+ unsigned int entry;
- /* Processing RX Descriptor Ring */
- if (ris0 & mask) {
- /* Clear RX interrupt */
- ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
- /* Processing TX Descriptor Ring */
- if (tis & mask) {
- spin_lock_irqsave(&priv->lock, flags);
- /* Clear TX interrupt */
- ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
- ravb_tx_free(ndev, q, true);
- netif_wake_subqueue(ndev, q);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
+ if (!gptp) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
+ /* Processing RX Descriptor Ring */
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+ if (gptp || desc->die_dt != DT_FEMPTY) {
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
}
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+ ravb_tx_free(ndev, q, true);
+ netif_wake_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
napi_complete(napi);
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- if (priv->chip_id == RCAR_GEN2) {
+ if (!info->irq_en_dis) {
ravb_modify(ndev, RIC0, mask, mask);
ravb_modify(ndev, TIC, mask, mask);
} else {
@@ -952,7 +1323,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
@@ -961,10 +1333,18 @@ out:
return budget - quota;
}
+static void ravb_set_duplex_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
+}
+
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev = ndev->phydev;
bool new_state = false;
unsigned long flags;
@@ -976,10 +1356,16 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
+ if (info->half_duplex && phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex_gbeth(ndev);
+ }
+
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
- ravb_set_rate(ndev);
+ info->set_rate(ndev);
}
if (!priv->link) {
ravb_modify(ndev, ECMR, ECMR_TXF, 0);
@@ -990,6 +1376,8 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
+ if (info->half_duplex)
+ priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1012,12 +1400,15 @@ static int ravb_phy_init(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev;
struct device_node *pn;
+ phy_interface_t iface;
int err;
priv->link = 0;
priv->speed = 0;
+ priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1032,8 +1423,10 @@ static int ravb_phy_init(struct net_device *ndev)
}
pn = of_node_get(np);
}
- phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
- priv->phy_interface);
+
+ iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
+ : priv->phy_interface;
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
@@ -1045,31 +1438,29 @@ static int ravb_phy_init(struct net_device *ndev)
* at this time.
*/
if (soc_device_match(r8a7795es10)) {
- err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
- goto err_phy_disconnect;
- }
+ phy_set_max_speed(phydev, SPEED_100);
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
- /* 10BASE, Pause and Asym Pause is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ if (!info->half_duplex) {
+ /* 10BASE, Pause and Asym Pause is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
- /* Half Duplex is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
-err_phy_disconnect:
- phy_disconnect(phydev);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
@@ -1105,6 +1496,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_csum_offload_errors",
+ "rx_queue_0_over_errors",
+};
+
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_0_current",
"tx_queue_0_current",
@@ -1139,13 +1548,14 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_1_over_errors",
};
-#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
-
static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
+ struct ravb_private *priv = netdev_priv(netdev);
+ const struct ravb_hw_info *info = priv->info;
+
switch (sset) {
case ETH_SS_STATS:
- return RAVB_STATS_LEN;
+ return info->stats_len;
default:
return -EOPNOTSUPP;
}
@@ -1155,11 +1565,14 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ int num_rx_q;
int i = 0;
int q;
+ num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
/* Device-specific stats */
- for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ for (q = RAVB_BE; q < num_rx_q; q++) {
struct net_device_stats *stats = &priv->stats[q];
data[i++] = priv->cur_rx[q];
@@ -1182,15 +1595,20 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ memcpy(data, info->gstrings_stats, info->gstrings_size);
break;
}
}
static void ravb_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1201,9 +1619,12 @@ static void ravb_get_ringparam(struct net_device *ndev,
}
static int ravb_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
if (ring->tx_pending > BE_TX_RING_MAX ||
@@ -1217,7 +1638,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
netif_device_detach(ndev);
/* Stop PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
error = ravb_stop_dma(ndev);
@@ -1230,7 +1651,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
}
/* Set new parameters */
@@ -1249,7 +1671,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_device_attach(ndev);
@@ -1262,6 +1684,7 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *hw_info = priv->info;
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1275,7 +1698,8 @@ static int ravb_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = ptp_clock_index(priv->ptp.clock);
+ if (hw_info->gptp || hw_info->ccc_gac)
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
return 0;
}
@@ -1291,8 +1715,9 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
return -EOPNOTSUPP;
priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -1340,14 +1765,16 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
- if (priv->chip_id == RCAR_GEN2) {
+ if (!info->multi_irqs) {
error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (error) {
@@ -1379,16 +1806,27 @@ static int ravb_open(struct net_device *ndev)
ndev, dev, "ch19:tx_nc");
if (error)
goto out_free_irq_nc_rx;
+
+ if (info->err_mgmt_irqs) {
+ error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
+ ndev, dev, "err_a");
+ if (error)
+ goto out_free_irq_nc_tx;
+ error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
+ ndev, dev, "mgmt_a");
+ if (error)
+ goto out_free_irq_erra;
+ }
}
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_nc_tx;
+ goto out_free_irq_mgmta;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1402,11 +1840,17 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_stop(ndev);
-out_free_irq_nc_tx:
- if (priv->chip_id == RCAR_GEN2)
+out_free_irq_mgmta:
+ if (!info->multi_irqs)
goto out_free_irq;
+ if (info->err_mgmt_irqs)
+ free_irq(priv->mgmta_irq, ndev);
+out_free_irq_erra:
+ if (info->err_mgmt_irqs)
+ free_irq(priv->erra_irq, ndev);
+out_free_irq_nc_tx:
free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -1419,7 +1863,8 @@ out_free_irq_emac:
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
return error;
}
@@ -1443,26 +1888,50 @@ static void ravb_tx_timeout_work(struct work_struct *work)
{
struct ravb_private *priv = container_of(work, struct ravb_private,
work);
+ const struct ravb_hw_info *info = priv->info;
struct net_device *ndev = priv->ndev;
+ int error;
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
- ravb_stop_dma(ndev);
+ if (ravb_stop_dma(ndev)) {
+ /* If ravb_stop_dma() fails, the hardware is still operating
+ * for TX and/or RX. So, this should not call the following
+ * functions because ravb_dmac_init() is possible to fail too.
+ * Also, this should not retry ravb_stop_dma() again and again
+ * here because it's possible to wait forever. So, this just
+ * re-enables the TX and RX and skip the following
+ * re-initialization procedure.
+ */
+ ravb_rcv_snd_enable(ndev);
+ goto out;
+ }
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
/* Device init */
- ravb_dmac_init(ndev);
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ /* If ravb_dmac_init() fails, descriptors are freed. So, this
+ * should return here to avoid re-enabling the TX and RX in
+ * ravb_emac_init().
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return;
+ }
ravb_emac_init(ndev);
+out:
/* Initialise PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1472,7 +1941,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
+ const struct ravb_hw_info *info = priv->info;
+ unsigned int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
@@ -1548,28 +2018,30 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
- if (q == RAVB_NC) {
- ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
- if (!ts_skb) {
- if (num_tx_desc > 1) {
- desc--;
- dma_unmap_single(ndev->dev.parent, dma_addr,
- len, DMA_TO_DEVICE);
+ if (info->gptp || info->ccc_gac) {
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ if (num_tx_desc > 1) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ len, DMA_TO_DEVICE);
+ }
+ goto unmap;
}
- goto unmap;
+ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
- ts_skb->skb = skb_get(skb);
- ts_skb->tag = priv->ts_skb_tag++;
- priv->ts_skb_tag &= 0x3ff;
- list_add_tail(&ts_skb->list, &priv->ts_skb_list);
- /* TAG and timestamp required flag */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ skb_tx_timestamp(skb);
}
-
- skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
if (num_tx_desc > 1) {
@@ -1612,32 +2084,50 @@ static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
nstats = &ndev->stats;
stats0 = &priv->stats[RAVB_BE];
- stats1 = &priv->stats[RAVB_NC];
- if (priv->chip_id == RCAR_GEN3) {
+ if (info->tx_counters) {
nstats->tx_dropped += ravb_read(ndev, TROCR);
ravb_write(ndev, 0, TROCR); /* (write clear) */
}
- nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
- nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
- nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
- nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
- nstats->multicast = stats0->multicast + stats1->multicast;
- nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
- nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
- nstats->rx_frame_errors =
- stats0->rx_frame_errors + stats1->rx_frame_errors;
- nstats->rx_length_errors =
- stats0->rx_length_errors + stats1->rx_length_errors;
- nstats->rx_missed_errors =
- stats0->rx_missed_errors + stats1->rx_missed_errors;
- nstats->rx_over_errors =
- stats0->rx_over_errors + stats1->rx_over_errors;
+ if (info->carrier_counters) {
+ nstats->collisions += ravb_read(ndev, CXR41);
+ ravb_write(ndev, 0, CXR41); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
+ ravb_write(ndev, 0, CXR42); /* (write clear) */
+ }
+
+ nstats->rx_packets = stats0->rx_packets;
+ nstats->tx_packets = stats0->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes;
+ nstats->multicast = stats0->multicast;
+ nstats->rx_errors = stats0->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors;
+ nstats->rx_frame_errors = stats0->rx_frame_errors;
+ nstats->rx_length_errors = stats0->rx_length_errors;
+ nstats->rx_missed_errors = stats0->rx_missed_errors;
+ nstats->rx_over_errors = stats0->rx_over_errors;
+ if (info->nc_queues) {
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->rx_packets += stats1->rx_packets;
+ nstats->tx_packets += stats1->tx_packets;
+ nstats->rx_bytes += stats1->rx_bytes;
+ nstats->tx_bytes += stats1->tx_bytes;
+ nstats->multicast += stats1->multicast;
+ nstats->rx_errors += stats1->rx_errors;
+ nstats->rx_crc_errors += stats1->rx_crc_errors;
+ nstats->rx_frame_errors += stats1->rx_frame_errors;
+ nstats->rx_length_errors += stats1->rx_length_errors;
+ nstats->rx_missed_errors += stats1->rx_missed_errors;
+ nstats->rx_over_errors += stats1->rx_over_errors;
+ }
return nstats;
}
@@ -1659,6 +2149,7 @@ static int ravb_close(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
netif_tx_stop_all_queues(ndev);
@@ -1669,7 +2160,7 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, TIC);
/* Stop PTP Clock driver */
- if (priv->chip_id == RCAR_GEN2)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -1678,10 +2169,12 @@ static int ravb_close(struct net_device *ndev)
"device will be stopped after h/w processes are done.\n");
/* Clear the timestamp list */
- list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
- list_del(&ts_skb->list);
- kfree_skb(ts_skb->skb);
- kfree(ts_skb);
+ if (info->gptp || info->ccc_gac) {
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
}
/* PHY disconnect */
@@ -1692,21 +2185,27 @@ static int ravb_close(struct net_device *ndev)
of_phy_deregister_fixed_link(np);
}
- if (priv->chip_id != RCAR_GEN2) {
+ if (info->multi_irqs) {
free_irq(priv->tx_irqs[RAVB_NC], ndev);
free_irq(priv->rx_irqs[RAVB_NC], ndev);
free_irq(priv->tx_irqs[RAVB_BE], ndev);
free_irq(priv->rx_irqs[RAVB_BE], ndev);
free_irq(priv->emac_irq, ndev);
+ if (info->err_mgmt_irqs) {
+ free_irq(priv->erra_irq, ndev);
+ free_irq(priv->mgmta_irq, ndev);
+ }
}
free_irq(ndev->irq, ndev);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
return 0;
}
@@ -1719,12 +2218,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ break;
+ case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
- else
+ break;
+ default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
@@ -1741,10 +2244,6 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
if (copy_from_user(&config, req->ifr_data, sizeof(config)))
return -EFAULT;
- /* Reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_tx_ctrl = 0;
@@ -1831,8 +2330,15 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int ravb_set_features(struct net_device *ndev,
- netdev_features_t features)
+static int ravb_set_features_gbeth(struct net_device *ndev,
+ netdev_features_t features)
+{
+ /* Place holder */
+ return 0;
+}
+
+static int ravb_set_features_rcar(struct net_device *ndev,
+ netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
@@ -1844,6 +2350,15 @@ static int ravb_set_features(struct net_device *ndev,
return 0;
}
+static int ravb_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ return info->set_feature(ndev, features);
+}
+
static const struct net_device_ops ravb_netdev_ops = {
.ndo_open = ravb_open,
.ndo_stop = ravb_close,
@@ -1852,7 +2367,7 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_get_stats = ravb_get_stats,
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
- .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_eth_ioctl = ravb_do_ioctl,
.ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -1904,12 +2419,110 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen3_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+ .irq_en_dis = 1,
+ .ccc_gac = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
+ .aligned_tx = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
+static const struct ravb_hw_info ravb_rzv2m_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
+ .multi_irqs = 1,
+ .err_mgmt_irqs = 1,
+ .gptp = 1,
+ .gptp_ref_clk = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
+static const struct ravb_hw_info gbeth_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_gbeth,
+ .rx_ring_format = ravb_rx_ring_format_gbeth,
+ .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
+ .receive = ravb_rx_gbeth,
+ .set_rate = ravb_set_rate_gbeth,
+ .set_feature = ravb_set_features_gbeth,
+ .dmac_init = ravb_dmac_init_gbeth,
+ .emac_init = ravb_emac_init_gbeth,
+ .gstrings_stats = ravb_gstrings_stats_gbeth,
+ .gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
+ .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
+ .tccr_mask = TCCR_TSRQ0,
+ .rx_max_buf_size = SZ_8K,
+ .aligned_tx = 1,
+ .tx_counters = 1,
+ .carrier_counters = 1,
+ .half_duplex = 1,
+};
+
static const struct of_device_id ravb_match_table[] = {
- { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
- { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
+ { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
+ { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
+ { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
+ { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
@@ -1917,16 +2530,19 @@ MODULE_DEVICE_TABLE(of, ravb_match_table);
static int ravb_set_gti(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct device *dev = ndev->dev.parent;
unsigned long rate;
uint64_t inc;
- rate = clk_get_rate(priv->clk);
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
if (!rate)
return -EINVAL;
- inc = 1000000000ULL << 20;
- do_div(inc, rate);
+ inc = div64_ul(1000000000ULL << 20, rate);
if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
@@ -1942,50 +2558,73 @@ static int ravb_set_gti(struct net_device *ndev)
static void ravb_set_config_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
- if (priv->chip_id == RCAR_GEN2) {
+ if (info->gptp) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else {
+ } else if (info->ccc_gac) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
}
}
-static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
- { .soc_id = "r8a774c0" },
- { .soc_id = "r8a77990" },
- { .soc_id = "r8a77995" },
- { /* sentinel */ }
-};
-
/* Set tx and rx clock internal delay modes */
-static void ravb_set_delay_mode(struct net_device *ndev)
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- int set = 0;
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+ /* Fall back to legacy rgmii-*id behavior */
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
- set |= APSR_DM_RDM;
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
- "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
- phy_modes(priv->phy_interface)))
- set |= APSR_DM_TDM;
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
}
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
- ravb_modify(ndev, APSR, APSR_DM, set);
+ if (priv->rxcidm)
+ set |= APSR_RDM;
+ if (priv->txcidm)
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct ravb_hw_info *info;
+ struct reset_control *rstc;
struct ravb_private *priv;
- enum ravb_chip_id chip_id;
struct net_device *ndev;
int error, irq, q;
struct resource *res;
@@ -1997,33 +2636,33 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* Get base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
+ rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "failed to get cpg reset\n");
ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
NUM_TX_QUEUE, NUM_RX_QUEUE);
if (!ndev)
return -ENOMEM;
- ndev->features = NETIF_F_RXCSUM;
- ndev->hw_features = NETIF_F_RXCSUM;
+ info = of_device_get_match_data(&pdev->dev);
+ ndev->features = info->net_features;
+ ndev->hw_features = info->net_hw_features;
+
+ reset_control_deassert(rstc);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- /* The Ether-specific entries in the device structure. */
- ndev->base_addr = res->start;
-
- chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
-
- if (chip_id == RCAR_GEN3)
- irq = platform_get_irq_byname(pdev, "ch22");
- else
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+ irq = platform_get_irq_byname(pdev, "dia");
+ else
+ irq = platform_get_irq_byname(pdev, "ch22");
+ } else {
irq = platform_get_irq(pdev, 0);
+ }
if (irq < 0) {
error = irq;
goto out_release;
@@ -2033,18 +2672,26 @@ static int ravb_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
+ priv->info = info;
+ priv->rstc = rstc;
priv->ndev = ndev;
priv->pdev = pdev;
priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
- priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
- priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
- priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (info->nc_queues) {
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ }
+
+ priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
goto out_release;
}
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
@@ -2056,8 +2703,11 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (chip_id == RCAR_GEN3) {
- irq = platform_get_irq_byname(pdev, "ch24");
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+ irq = platform_get_irq_byname(pdev, "line3");
+ else
+ irq = platform_get_irq_byname(pdev, "ch24");
if (irq < 0) {
error = irq;
goto out_release;
@@ -2079,9 +2729,23 @@ static int ravb_probe(struct platform_device *pdev)
}
priv->tx_irqs[i] = irq;
}
- }
- priv->chip_id = chip_id;
+ if (info->err_mgmt_irqs) {
+ irq = platform_get_irq_byname(pdev, "err_a");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->erra_irq = irq;
+
+ irq = platform_get_irq_byname(pdev, "mgmt_a");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->mgmta_irq = irq;
+ }
+ }
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -2089,11 +2753,31 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
}
- ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_release;
+ }
+ clk_prepare_enable(priv->refclk);
+
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_disable_refclk;
+ }
+ clk_prepare_enable(priv->gptp_clk);
+ }
+
+ ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
- priv->num_tx_desc = chip_id == RCAR_GEN2 ?
- NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+ /* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX buffer.
+ * Use two descriptors to handle this situation: the first descriptor
+ * handles the aligned data buffer and the second handles the overflow
+ * data caused by the alignment.
+ */
+ priv->num_tx_desc = info->aligned_tx ? 2 : 1;
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
@@ -2102,16 +2786,20 @@ static int ravb_probe(struct platform_device *pdev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_release;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ error = ravb_set_gti(ndev);
+ if (error)
+ goto out_disable_gptp_clk;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
- if (priv->chip_id != RCAR_GEN2)
+ if (info->internal_delay) {
+ ravb_parse_delay_mode(np, ndev);
ravb_set_delay_mode(ndev);
+ }
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2122,7 +2810,7 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_release;
+ goto out_disable_gptp_clk;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
@@ -2132,14 +2820,14 @@ static int ravb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->ts_skb_list);
/* Initialise PTP Clock driver */
- if (chip_id != RCAR_GEN2)
+ if (info->ccc_gac)
ravb_ptp_init(ndev, pdev);
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
/* Read and set MAC address */
- ravb_read_mac_address(ndev, of_get_mac_address(np));
+ ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(&pdev->dev,
"no valid MAC address supplied, using a random one\n");
@@ -2153,8 +2841,9 @@ static int ravb_probe(struct platform_device *pdev)
goto out_dma_free;
}
- netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
+ if (info->nc_queues)
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
/* Network device register */
error = register_netdev(ndev);
@@ -2172,7 +2861,9 @@ static int ravb_probe(struct platform_device *pdev)
return 0;
out_napi_del:
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
out_dma_free:
@@ -2180,13 +2871,18 @@ out_dma_free:
priv->desc_bat_dma);
/* Stop PTP Clock driver */
- if (chip_id != RCAR_GEN2)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
+out_disable_gptp_clk:
+ clk_disable_unprepare(priv->gptp_clk);
+out_disable_refclk:
+ clk_disable_unprepare(priv->refclk);
out_release:
free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ reset_control_assert(rstc);
return error;
}
@@ -2194,21 +2890,27 @@ static int ravb_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
/* Stop PTP Clock driver */
- if (priv->chip_id != RCAR_GEN2)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
+ clk_disable_unprepare(priv->gptp_clk);
+ clk_disable_unprepare(priv->refclk);
+
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
pm_runtime_put_sync(&pdev->dev);
unregister_netdev(ndev);
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2218,6 +2920,7 @@ static int ravb_remove(struct platform_device *pdev)
static int ravb_wol_setup(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
/* Disable interrupts by clearing the interrupt masks. */
ravb_write(ndev, 0, RIC0);
@@ -2226,7 +2929,8 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Only allow ECI interrupts */
synchronize_irq(priv->emac_irq);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
@@ -2239,17 +2943,16 @@ static int ravb_wol_setup(struct net_device *ndev)
static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- int ret;
+ const struct ravb_hw_info *info = priv->info;
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
napi_enable(&priv->napi[RAVB_BE]);
/* Disable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
- ret = ravb_close(ndev);
- if (ret < 0)
- return ret;
+ ravb_close(ndev);
return disable_irq_wake(priv->emac_irq);
}
@@ -2277,6 +2980,7 @@ static int __maybe_unused ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int ret = 0;
/* If WoL is enabled set reset mode to rearm the WoL logic */
@@ -2291,15 +2995,17 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
- if (ret)
- return ret;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
- if (priv->chip_id != RCAR_GEN2)
+ if (info->internal_delay)
ravb_set_delay_mode(ndev);
/* Restore descriptor base address table */
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6984bd5b7da9..87c4306d66ec 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -179,6 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
{
struct ravb_private *priv = container_of(ptp, struct ravb_private,
ptp.info);
+ const struct ravb_hw_info *info = priv->info;
struct net_device *ndev = priv->ndev;
unsigned long flags;
@@ -197,7 +198,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- if (priv->chip_id == RCAR_GEN2)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
else if (on)
ravb_write(ndev, GIE_PTCS, GIE);
@@ -213,6 +214,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
{
struct ravb_private *priv = container_of(ptp, struct ravb_private,
ptp.info);
+ const struct ravb_hw_info *info = priv->info;
struct net_device *ndev = priv->ndev;
struct ravb_ptp_perout *perout;
unsigned long flags;
@@ -252,7 +254,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- if (priv->chip_id == RCAR_GEN2)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
else
ravb_write(ndev, GIE_PTMS0, GIE);
@@ -264,7 +266,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- if (priv->chip_id == RCAR_GEN2)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTME, 0);
else
ravb_write(ndev, GID_PTMD0, GID);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 58ca126518a2..71a499113308 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -45,6 +45,15 @@
#define SH_ETH_OFFSET_DEFAULTS \
[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+/* Use some intentionally tricky logic here to initialize the whole struct to
+ * 0xffff, but then override certain fields, requiring us to indicate that we
+ * "know" that there are overrides in this structure, and we'll need to disable
+ * that warning from W=1 builds. GCC has supported this option since 4.2.X,
+ * but the macros available to do this are only defined for GCC 8 and later.
+ */
+__diag_push();
+__diag_ignore(GCC, 8, "-Woverride-init",
+ "logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -142,69 +151,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
-static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
- SH_ETH_OFFSET_DEFAULTS,
-
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
- [CSMR] = 0x04E4,
-
- [ECMR] = 0x0500,
- [RFLR] = 0x0508,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [MAFCR] = 0x0778,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWSLC] = 0x0038,
- [TSU_VTAG0] = 0x0058,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008C,
-};
-
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -395,6 +341,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRH0] = 0x0100,
};
+__diag_pop();
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
@@ -569,6 +516,9 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, GECMR_10, GECMR);
@@ -590,7 +540,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
- .register_type = SH_ETH_REG_FAST_RZ,
+ .register_type = SH_ETH_REG_GIGABIT,
.edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD,
@@ -610,6 +560,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
EESR_TDE,
.fdr_value = 0x0000070f,
+ .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,
+
.no_psr = 1,
.apr = 1,
.mpr = 1,
@@ -663,6 +615,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -748,7 +701,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
- .trscer_err_mask = DESC_I_RINT8,
+ .trscer_err_mask = TRSCER_RMAFCE,
.apr = 1,
.mpr = 1,
@@ -788,6 +741,7 @@ static struct sh_eth_cpu_data r8a77980_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.nbst = 1,
@@ -828,6 +782,8 @@ static struct sh_eth_cpu_data r7s9210_data = {
.fdr_value = 0x0000070f,
+ .trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,
+
.apr = 1,
.mpr = 1,
.tpauser = 1,
@@ -957,6 +913,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, 0x00000000, GECMR);
@@ -1002,6 +961,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -1042,6 +1002,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -1083,6 +1044,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -1131,6 +1093,9 @@ static struct sh_eth_cpu_data sh771x_data = {
EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
+
+ .trscer_err_mask = TRSCER_RMAFCE,
+
.tsu = 1,
.dual_port = 1,
};
@@ -1188,17 +1153,19 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac);
} else {
u32 mahr = sh_eth_read(ndev, MAHR);
u32 malr = sh_eth_read(ndev, MALR);
+ u8 addr[ETH_ALEN];
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
@@ -1254,7 +1221,7 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
}
/* mdio bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = sh_mdc_ctrl,
.set_mdio_dir = sh_mmd_ctrl,
@@ -1784,7 +1751,7 @@ static void sh_eth_emac_interrupt(struct net_device *ndev)
link_stat = sh_eth_read(ndev, PSR);
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
- if (!(link_stat & PHY_ST_LINK)) {
+ if (!(link_stat & PSR_LMON)) {
sh_eth_rcv_snd_disable(ndev);
} else {
/* Link Up */
@@ -2059,15 +2026,11 @@ static int sh_eth_phy_init(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
- int err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
- phy_disconnect(phydev);
- return err;
- }
- }
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ phy_set_max_speed(phydev, SPEED_100);
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -2140,11 +2103,13 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(EESR);
add_reg(EESIPR);
add_reg(TDLAR);
- add_reg(TDFAR);
+ if (!cd->no_xdfar)
+ add_reg(TDFAR);
add_reg(TDFXR);
add_reg(TDFFR);
add_reg(RDLAR);
- add_reg(RDFAR);
+ if (!cd->no_xdfar)
+ add_reg(RDFAR);
add_reg(RDFXR);
add_reg(RDFFR);
add_reg(TRSCER);
@@ -2179,21 +2144,26 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tpauser)
add_reg(TPAUSER);
add_reg(TPAUSECR);
- add_reg(GECMR);
+ if (cd->gecmr)
+ add_reg(GECMR);
if (cd->bculr)
add_reg(BCULR);
add_reg(MAHR);
add_reg(MALR);
- add_reg(TROCR);
- add_reg(CDCR);
- add_reg(LCCR);
- add_reg(CNDCR);
+ if (!cd->no_tx_cntrs) {
+ add_reg(TROCR);
+ add_reg(CDCR);
+ add_reg(LCCR);
+ add_reg(CNDCR);
+ }
add_reg(CEFCR);
add_reg(FRECR);
add_reg(TSFRCR);
add_reg(TLFRCR);
- add_reg(CERCR);
- add_reg(CEECR);
+ if (cd->cexcr) {
+ add_reg(CERCR);
+ add_reg(CEECR);
+ }
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
@@ -2315,14 +2285,16 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *sh_eth_gstrings_stats,
+ memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2333,7 +2305,9 @@ static void sh_eth_get_ringparam(struct net_device *ndev,
}
static int sh_eth_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
@@ -2517,7 +2491,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
}
/* Packet transmit function */
-static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
@@ -2560,6 +2535,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
else
txdesc->status |= cpu_to_le32(TD_TACT);
+ wmb(); /* cur_tx must be incremented after TACT bit was set */
mdp->cur_tx++;
if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
@@ -2640,10 +2616,10 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
@@ -3068,6 +3044,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read(bus, phy, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
@@ -3092,6 +3090,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
if (!mdp->mii_bus)
return -ENOMEM;
+ /* Wrap accessors with Runtime PM-aware ops */
+ mdp->mii_bus->read = sh_mdiobb_read;
+ mdp->mii_bus->write = sh_mdiobb_write;
+
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = dev;
@@ -3121,9 +3123,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
- case SH_ETH_REG_FAST_RZ:
- reg_offset = sh_eth_offset_fast_rz;
- break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
@@ -3145,7 +3144,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3161,7 +3160,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3174,7 +3173,6 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
phy_interface_t interface;
- const char *mac_addr;
int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -3186,9 +3184,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
return NULL;
pdata->phy_interface = interface;
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(pdata->mac_addr, mac_addr);
+ of_get_mac_address(np, pdata->mac_addr);
pdata->no_ether_link =
of_property_read_bool(np, "renesas,no-ether-link");
@@ -3232,9 +3228,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
int ret;
- /* get base addr */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev)
return -ENOMEM;
@@ -3252,7 +3245,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
- mdp->addr = devm_ioremap_resource(&pdev->dev, res);
+ mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mdp->addr)) {
ret = PTR_ERR(mdp->addr);
goto out_release;
@@ -3371,12 +3364,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
goto out_release;
}
- netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
+ netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
/* network device register */
ret = register_netdev(ndev);
@@ -3454,9 +3446,7 @@ static int sh_eth_wol_restore(struct net_device *ndev)
* both be reset and all registers restored. This is what
* happens during suspend and resume without WoL enabled.
*/
- ret = sh_eth_close(ndev);
- if (ret < 0)
- return ret;
+ sh_eth_close(ndev);
ret = sh_eth_open(ndev);
if (ret < 0)
return ret;
@@ -3468,7 +3458,7 @@ static int sh_eth_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
@@ -3487,7 +3477,7 @@ static int sh_eth_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 850726301e1c..a5c07c6ff44a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -145,7 +145,6 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -172,7 +171,7 @@ enum GECMR_BIT {
};
/* EDMR */
-enum DMAC_M_BIT {
+enum EDMR_BIT {
EDMR_NBST = 0x80,
EDMR_EL = 0x40, /* Little endian */
EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
@@ -181,13 +180,13 @@ enum DMAC_M_BIT {
};
/* EDTRR */
-enum DMAC_T_BIT {
+enum EDTRR_BIT {
EDTRR_TRNS_GETHER = 0x03,
EDTRR_TRNS_ETHER = 0x01,
};
/* EDRRR */
-enum EDRRR_R_BIT {
+enum EDRRR_BIT {
EDRRR_R = 0x01,
};
@@ -209,7 +208,7 @@ enum PIR_BIT {
};
/* PSR */
-enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
+enum PSR_BIT { PSR_LMON = 0x01, };
/* EESR */
enum EESR_BIT {
@@ -289,27 +288,6 @@ enum EESIPR_BIT {
EESIPR_CERFIP = 0x00000001,
};
-/* Receive descriptor 0 bits */
-enum RD_STS_BIT {
- RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
- RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
- RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
- RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
- RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
- RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008,
- RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
- RD_RFS1 = 0x00000001,
-};
-#define RDF1ST RD_RFP1
-#define RDFEND RD_RFP0
-#define RD_RFP (RD_RFP1|RD_RFP0)
-
-/* Receive descriptor 1 bits */
-enum RD_LEN_BIT {
- RD_RFL = 0x0000ffff, /* receive frame length */
- RD_RBL = 0xffff0000, /* receive buffer length */
-};
-
/* FCFTR */
enum FCFTR_BIT {
FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
@@ -319,28 +297,13 @@ enum FCFTR_BIT {
#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
-/* Transmit descriptor 0 bits */
-enum TD_STS_BIT {
- TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
- TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
- TD_TFE = 0x08000000, TD_TWBI = 0x04000000,
-};
-#define TDF1ST TD_TFP1
-#define TDFEND TD_TFP0
-#define TD_TFP (TD_TFP1|TD_TFP0)
-
-/* Transmit descriptor 1 bits */
-enum TD_LEN_BIT {
- TD_TBL = 0xffff0000, /* transmit buffer length */
-};
-
/* RMCR */
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
};
/* ECMR */
-enum FELIC_MODE_BIT {
+enum ECMR_BIT {
ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
@@ -351,7 +314,7 @@ enum FELIC_MODE_BIT {
};
/* ECSR */
-enum ECSR_STATUS_BIT {
+enum ECSR_BIT {
ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
ECSR_LCHNG = 0x04,
ECSR_MPD = 0x02, ECSR_ICD = 0x01,
@@ -361,7 +324,7 @@ enum ECSR_STATUS_BIT {
ECSR_ICD | ECSIPR_MPDIP)
/* ECSIPR */
-enum ECSIPR_STATUS_MASK_BIT {
+enum ECSIPR_BIT {
ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
ECSIPR_LCHNGIP = 0x04,
ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
@@ -381,14 +344,20 @@ enum MPR_BIT {
};
/* TRSCER */
-enum DESC_I_BIT {
- DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
- DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
- DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
- DESC_I_RINT1 = 0x0001,
+enum TRSCER_BIT {
+ TRSCER_CNDCE = 0x00000800,
+ TRSCER_DLCCE = 0x00000400,
+ TRSCER_CDCE = 0x00000200,
+ TRSCER_TROCE = 0x00000100,
+ TRSCER_RMAFCE = 0x00000080,
+ TRSCER_RRFCE = 0x00000010,
+ TRSCER_RTLFCE = 0x00000008,
+ TRSCER_RTSFCE = 0x00000004,
+ TRSCER_PRECE = 0x00000002,
+ TRSCER_CERFCE = 0x00000001,
};
-#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+#define DEFAULT_TRSCER_ERR_MASK (TRSCER_RMAFCE | TRSCER_RRFCE | TRSCER_CDCE)
/* RPADIR */
enum RPADIR_BIT {
@@ -446,6 +415,24 @@ struct sh_eth_txdesc {
u32 pad0; /* padding data */
} __aligned(2) __packed;
+/* Transmit descriptor 0 bits */
+enum TD_STS_BIT {
+ TD_TACT = 0x80000000,
+ TD_TDLE = 0x40000000,
+ TD_TFP1 = 0x20000000,
+ TD_TFP0 = 0x10000000,
+ TD_TFE = 0x08000000,
+ TD_TWBI = 0x04000000,
+};
+#define TDF1ST TD_TFP1
+#define TDFEND TD_TFP0
+#define TD_TFP (TD_TFP1 | TD_TFP0)
+
+/* Transmit descriptor 1 bits */
+enum TD_LEN_BIT {
+ TD_TBL = 0xffff0000, /* transmit buffer length */
+};
+
/* The sh ether Rx buffer descriptors.
* This structure should be 20 bytes.
*/
@@ -456,6 +443,34 @@ struct sh_eth_rxdesc {
u32 pad0; /* padding data */
} __aligned(2) __packed;
+/* Receive descriptor 0 bits */
+enum RD_STS_BIT {
+ RD_RACT = 0x80000000,
+ RD_RDLE = 0x40000000,
+ RD_RFP1 = 0x20000000,
+ RD_RFP0 = 0x10000000,
+ RD_RFE = 0x08000000,
+ RD_RFS10 = 0x00000200,
+ RD_RFS9 = 0x00000100,
+ RD_RFS8 = 0x00000080,
+ RD_RFS7 = 0x00000040,
+ RD_RFS6 = 0x00000020,
+ RD_RFS5 = 0x00000010,
+ RD_RFS4 = 0x00000008,
+ RD_RFS3 = 0x00000004,
+ RD_RFS2 = 0x00000002,
+ RD_RFS1 = 0x00000001,
+};
+#define RDF1ST RD_RFP1
+#define RDFEND RD_RFP0
+#define RD_RFP (RD_RFP1 | RD_RFP0)
+
+/* Receive descriptor 1 bits */
+enum RD_LEN_BIT {
+ RD_RFL = 0x0000ffff, /* receive frame length */
+ RD_RBL = 0xffff0000, /* receive buffer length */
+};
+
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
/* mandatory functions */
@@ -490,6 +505,7 @@ struct sh_eth_cpu_data {
unsigned apr:1; /* EtherC has APR */
unsigned mpr:1; /* EtherC has MPR */
unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned gecmr:1; /* EtherC has GECMR */
unsigned bculr:1; /* EtherC has BCULR */
unsigned tsu:1; /* EtherC has TSU */
unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */