Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig              |   12
-rw-r--r--  drivers/edac/Makefile             |    1
-rw-r--r--  drivers/edac/amd64_edac.c         | 1466
-rw-r--r--  drivers/edac/amd64_edac.h         |  369
-rw-r--r--  drivers/edac/amd64_edac_inj.c     |    8
-rw-r--r--  drivers/edac/cpc925_edac.c        |    2
-rw-r--r--  drivers/edac/edac_core.h          |    8
-rw-r--r--  drivers/edac/edac_device.c        |    4
-rw-r--r--  drivers/edac/edac_device_sysfs.c  |    4
-rw-r--r--  drivers/edac/edac_mc.c            |    2
-rw-r--r--  drivers/edac/edac_mc_sysfs.c      |   28
-rw-r--r--  drivers/edac/edac_pci_sysfs.c     |    4
-rw-r--r--  drivers/edac/i5000_edac.c         |    2
-rw-r--r--  drivers/edac/i5100_edac.c         |    2
-rw-r--r--  drivers/edac/i5400_edac.c         |    4
-rw-r--r--  drivers/edac/i7300_edac.c         |    4
-rw-r--r--  drivers/edac/i7core_edac.c        |    2
-rw-r--r--  drivers/edac/i82443bxgx_edac.c    |    4
-rw-r--r--  drivers/edac/i82975x_edac.c       |   69
-rw-r--r--  drivers/edac/mce_amd.c            |    8
-rw-r--r--  drivers/edac/mce_amd.h            |   28
-rw-r--r--  drivers/edac/mce_amd_inj.c        |    2
-rw-r--r--  drivers/edac/mpc85xx_edac.c       |   54
-rw-r--r--  drivers/edac/ppc4xx_edac.c        |   23
-rw-r--r--  drivers/edac/r82600_edac.c        |    6
-rw-r--r--  drivers/edac/tile_edac.c          |  254
26 files changed, 1219 insertions, 1151 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fe70a341bd8b..af1a17d42bd7 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC
+ depends on X86 || PPC || TILE
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -45,7 +45,7 @@ config EDAC_DECODE_MCE
default y
---help---
Enable this option if you want to decode Machine Check Exceptions
- occuring on your machine in human-readable form.
+ occurring on your machine in human-readable form.
You should definitely say Y here in case you want to decode MCEs
which occur really early upon boot, before the module infrastructure
@@ -282,4 +282,12 @@ config EDAC_CPC925
a companion chip to the PowerPC 970 family of
processors.
+config EDAC_TILE
+ tristate "Tilera Memory Controller"
+ depends on EDAC_MM_EDAC && TILE
+ default y
+ help
+ Support for error detection and correction on the
+ Tilera memory controller.
+
endif # EDAC
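
The new driver selected by EDAC_TILE (drivers/edac/tile_edac.c, 254 lines in the diffstat above) is outside this diffstat-limited view. For orientation only, below is a minimal sketch of the registration pattern a polled EDAC memory-controller driver follows against the pre-3.5 core API in use here; the probe context, the names and the elided hardware checks are illustrative assumptions, not the actual Tilera driver.

/*
 * Illustrative sketch only -- not taken from tile_edac.c.
 * A single-csrow, single-channel, polled MC driver of this era.
 */
#include <linux/platform_device.h>
#include <linux/edac.h>
#include "edac_core.h"

static void example_edac_check(struct mem_ctl_info *mci)
{
	/* read the controller's CE counter here; report only if it moved */
	if (0 /* ce_count_incremented() -- hardware specific, elided */)
		edac_mc_handle_ce_no_info(mci, "example_edac");
}

static int example_edac_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;

	/* no private data, 1 csrow, 1 channel, instance 0 */
	mci = edac_mc_alloc(0, 1, 1, 0);
	if (!mci)
		return -ENOMEM;

	mci->dev	  = &pdev->dev;
	mci->mtype_cap	  = MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->mod_name	  = "example_edac";
	mci->ctl_name	  = "example_mc";
	mci->edac_check	  = example_edac_check;	/* called by the poll workqueue */

	/* csrow geometry (nr_pages, grain, mtype, ...) omitted for brevity */

	if (edac_mc_add_mc(mci)) {		/* register with the EDAC core */
		edac_mc_free(mci);
		return -ENXIO;
	}
	return 0;
}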
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index ba2898b3639b..3e239133e29e 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
+obj-$(CONFIG_EDAC_TILE) += tile_edac.o
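
Most of the churn in amd64_edac.c below comes from retiring the flat dcsb0/dcsb1, dcsm0/dcsm1 and dram_base/dram_limit arrays in struct amd64_pvt in favour of per-DCT chip-select and per-node DRAM-range structures. The amd64_edac.h hunk is not part of this view; the sketch below is inferred from how the code accesses pvt->csels[dct] and pvt->ranges[range], so treat the exact field widths and array sizes as assumptions.

/*
 * Inferred from usage in the .c hunks below -- see the amd64_edac.h hunk
 * (not shown in this diffstat-limited view) for the authoritative layout.
 */
#include <linux/types.h>

struct chip_select {
	u32 csbases[8];		/* F2x[1,0][5C:40]: DRAM CS bases */
	u8  b_cnt;		/* number of base registers implemented */

	u32 csmasks[8];		/* F2x[1,0][6C:60]: DRAM CS masks */
	u8  m_cnt;		/* 8 on K8 rev E and earlier, 4 afterwards */
};

struct reg_pair {
	u32 lo, hi;
};

/* one DRAM base/limit register pair (lo and hi parts) per address range */
struct dram_range {
	struct reg_pair base;
	struct reg_pair lim;
};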
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc58025d..31e71c4fc831 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;
/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
- [0] = 32,
- [1] = 64,
- [2] = 128,
- [3] = 256,
- [4] = 512,
- [5] = 1024,
- [6] = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
- [0] = 32,
- [1] = 64,
- [2 ... 3] = 128,
- [4] = 256,
- [5] = 512,
- [6] = 256,
- [7] = 512,
- [8 ... 9] = 1024,
- [10] = 2048,
-};
-
-static int ddr2_dbam[] = { [0] = 128,
- [1] = 256,
- [2 ... 4] = 512,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-static int ddr3_dbam[] = { [0] = -1,
- [1] = 256,
- [2] = 512,
- [3 ... 4] = -1,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or higher value'.
*
*FIXME: Produce a better mapping/linearisation.
*/
-
-
struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
{ 0x00, 0UL}, /* scrubbing off */
};
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error reading F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func)
+{
+ int err = 0;
+
+ err = pci_write_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error writing to F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ * DCT0 -> F2x040..
+ * DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ if (addr >= 0x100)
+ return -EINVAL;
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ u32 reg = 0;
+ u8 dct = 0;
+
+ if (addr >= 0x140 && addr <= 0x1a0) {
+ dct = 1;
+ addr -= 0x100;
+ }
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= 0xfffffffe;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
/*
* Memory scrubber control interface. For K8, memory scrubbing is handled by
* hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+ pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
if (scrubval)
return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x5;
- return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+ if (boot_cpu_data.x86 == 0xf)
+ min_scrubrate = 0x0;
+
+ return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
- amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
return retval;
}
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
- return csrow;
- else
- return csrow >> 1;
-}
-
-/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsb0[csrow];
- else
- return pvt->dcsb1[csrow];
-}
-
/*
- * Return the 'mask' address the i'th CS entry. This function is needed because
- * there number of DCSM registers on Rev E and prior vs Rev F and later is
- * different.
+ * returns true if the SysAddr given by sys_addr matches the
+ * DRAM base/limit associated with node_id
*/
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
+ unsigned nid)
{
- if (dct == 0)
- return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
- else
- return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
-
-/*
- * In *base and *limit, pass back the full 40-bit base and limit physical
- * addresses for the node given by node_id. This information is obtained from
- * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
- * base and limit addresses are of type SysAddr, as defined at the start of
- * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
- * in the address range they represent.
- */
-static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
- u64 *base, u64 *limit)
-{
- *base = pvt->dram_base[node_id];
- *limit = pvt->dram_limit[node_id];
-}
-
-/*
- * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
- * with node_id
- */
-static int amd64_base_limit_match(struct amd64_pvt *pvt,
- u64 sys_addr, int node_id)
-{
- u64 base, limit, addr;
-
- amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+ u64 addr;
/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
* all ones if the most significant implemented address bit is 1.
@@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
*/
addr = sys_addr & 0x000000ffffffffffull;
- return (addr >= base) && (addr <= limit);
+ return ((addr >= get_dram_base(pvt, nid)) &&
+ (addr <= get_dram_limit(pvt, nid)));
}
/*
@@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- int node_id;
+ unsigned node_id;
u32 intlv_en, bits;
/*
@@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
* registers. Therefore we arbitrarily choose to read it from the
* register for node 0.
*/
- intlv_en = pvt->dram_IntlvEn[0];
+ intlv_en = dram_intlv_en(pvt, 0);
if (intlv_en == 0) {
- for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+ for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
if (amd64_base_limit_match(pvt, sys_addr, node_id))
goto found;
}
@@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
- if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+ if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
break; /* intlv_sel field matches */
- if (++node_id >= DRAM_REG_COUNT)
+ if (++node_id >= DRAM_RANGES)
goto err_no_match;
}
@@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
found:
- return edac_mc_find(node_id);
+ return edac_mc_find((int)node_id);
err_no_match:
debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -331,37 +313,50 @@ err_no_match:
}
/*
- * Extract the DRAM CS base address from selected csrow register.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
*/
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+ u64 *base, u64 *mask)
{
- return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
- pvt->dcs_shift;
-}
+ u64 csbase, csmask, base_bits, mask_bits;
+ u8 addr_shift;
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
- */
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
-{
- u64 dcsm_bits, other_bits;
- u64 mask;
-
- /* Extract bits from DRAM CS Mask. */
- dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow];
+ base_bits = GENMASK(21, 31) | GENMASK(9, 15);
+ mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ addr_shift = 4;
+ } else {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow >> 1];
+ addr_shift = 8;
- other_bits = pvt->dcsm_mask;
- other_bits = ~(other_bits << pvt->dcs_shift);
+ if (boot_cpu_data.x86 == 0x15)
+ base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ else
+ base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ }
- /*
- * The extracted bits from DCSM belong in the spaces represented by
- * the cleared bits in other_bits.
- */
- mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+ *base = (csbase & base_bits) << addr_shift;
- return mask;
+ *mask = ~0ULL;
+ /* poke holes for the csmask */
+ *mask &= ~(mask_bits << addr_shift);
+ /* OR them in */
+ *mask |= (csmask & mask_bits) << addr_shift;
}
+#define for_each_chip_select(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define chip_select_base(i, dct, pvt) \
+ pvt->csels[dct].csbases[i]
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
pvt = mci->pvt_info;
- /*
- * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
- * base/mask register pair, test the condition shown near the start of
- * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
- */
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
- /* This DRAM chip select is disabled on this node */
- if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ for_each_chip_select(csrow, 0, pvt) {
+ if (!csrow_enabled(csrow, 0, pvt))
continue;
- base = base_from_dct_base(pvt, csrow);
- mask = ~mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+ mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
return csrow;
}
}
-
debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
(unsigned long)input_addr, pvt->mc_node_id);
@@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
}
/*
- * Return the base value defined by the DRAM Base register for the node
- * represented by mci. This function returns the full 40-bit value despite the
- * fact that the register only stores bits 39-24 of the value. See section
- * 3.4.4.1 (BKDG #26094, K8, revA-E)
- */
-static inline u64 get_dram_base(struct mem_ctl_info *mci)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- return pvt->dram_base[pvt->mc_node_id];
-}
-
-/*
* Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
* for the node represented by mci. Info is passed back in *hole_base,
* *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 1;
}
- /* only valid for Fam10h */
- if (boot_cpu_data.x86 == 0x10 &&
- (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ /* valid for Fam10h and above */
+ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
- if ((pvt->dhar & DHAR_VALID) == 0) {
+ if (!dhar_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
pvt->mc_node_id);
return 1;
@@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
* addresses in the hole so that they start at 0x100000000.
*/
- base = dhar_base(pvt->dhar);
+ base = dhar_base(pvt);
*hole_base = base;
*hole_size = (0x1ull << 32) - base;
if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt->dhar);
+ *hole_offset = f10_dhar_offset(pvt);
else
- *hole_offset = k8_dhar_offset(pvt->dhar);
+ *hole_offset = k8_dhar_offset(pvt);
debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
@@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
*/
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
int ret = 0;
- dram_base = get_dram_base(mci);
+ dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
@@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
"DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* concerning translating a DramAddr to an InputAddr.
*/
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
- input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
- (dram_addr & 0xfff);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ (dram_addr & 0xfff);
debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
intlv_shift, (unsigned long)dram_addr,
@@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
struct amd64_pvt *pvt;
- int node_id, intlv_shift;
+ unsigned node_id, intlv_shift;
u64 bits, dram_addr;
u32 intlv_sel;
@@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
*/
pvt = mci->pvt_info;
node_id = pvt->mc_node_id;
- BUG_ON((node_id < 0) || (node_id > 7));
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ BUG_ON(node_id > 7);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
debugf1(" InputAddr 0x%lx translates to DramAddr of "
"same value\n", (unsigned long)input_addr);
@@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
return input_addr;
}
- bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
- (input_addr & 0xfff);
+ bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
+ (input_addr & 0xfff);
- intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ u64 hole_base, hole_offset, hole_size, base, sys_addr;
int ret = 0;
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
}
}
- amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ base = get_dram_base(pvt, pvt->mc_node_id);
sys_addr = dram_addr + base;
/*
@@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
u64 base, mask;
pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+ BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
- base = base_from_dct_base(pvt, csrow);
- mask = mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
*input_addr_min = base & ~mask;
- *input_addr_max = base | mask | pvt->dcs_mask_notused;
+ *input_addr_max = base | mask;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
-static u16 extract_syndrome(struct err_regs *err)
-{
- return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
- int bit;
+ u8 bit;
enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
debugf1(" PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ if (boot_cpu_data.x86 == 0x10)
+ debugf1(" DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
(dclr & BIT(12)) ? "yes" : "no",
@@ -824,18 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
}
/* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
{
- int ganged;
-
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
@@ -843,139 +810,84 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
"offset: 0x%08x\n",
- pvt->dhar,
- dhar_base(pvt->dhar),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
- : f10_dhar_offset(pvt->dhar));
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
+
+ debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- debugf1(" DramHoleValid: %s\n",
- (pvt->dhar & DHAR_VALID) ? "yes" : "no");
+ amd64_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
- if (boot_cpu_data.x86 == 0xf) {
- amd64_debug_display_dimm_sizes(0, pvt);
+ if (boot_cpu_data.x86 == 0xf)
return;
- }
- amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+ amd64_debug_display_dimm_sizes(pvt, 1);
+
+ amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
-
- /*
- * Determine if ganged and then dump memory sizes for first controller,
- * and if NOT ganged dump info for 2nd controller.
- */
- ganged = dct_ganging_enabled(pvt);
-
- amd64_debug_display_dimm_sizes(0, pvt);
-
- if (!ganged)
- amd64_debug_display_dimm_sizes(1, pvt);
-}
-
-/* Read in both of DBAM registers */
-static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
-{
- amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
- if (boot_cpu_data.x86 >= 0x10)
- amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
}
/*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
{
-
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_E_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 8;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
} else {
- pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 4;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs, reg;
+ int cs;
+
+ prep_chip_selects(pvt);
- amd64_set_dct_base_and_mask(pvt);
+ for_each_chip_select(cs, 0, pvt) {
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
+ u32 *base0 = &pvt->csels[0].csbases[cs];
+ u32 *base1 = &pvt->csels[1].csbases[cs];
- for (cs = 0; cs < pvt->cs_count; cs++) {
- reg = K8_DCSB0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's base */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSB1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsb1[cs]))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb1[cs], reg);
- } else {
- pvt->dcsb1[cs] = 0;
- }
+ cs, *base0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
- for (cs = 0; cs < pvt->num_dcsm; cs++) {
- reg = K8_DCSM0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+ for_each_chip_select_mask(cs, 0, pvt) {
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
+ u32 *mask0 = &pvt->csels[0].csmasks[cs];
+ u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's mask */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSM1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsm1[cs]))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm1[cs], reg);
- } else {
- pvt->dcsm1[cs] = 0;
- }
+ cs, *mask0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
@@ -983,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
- if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+ /* F15h supports only DDR3 */
+ if (boot_cpu_data.x86 >= 0x15)
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+ else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -997,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
return type;
}
-/*
- * Read the DRAM Configuration Low register. It differs between CG, D & E revs
- * and the later RevF memory controllers (DDR vs DDR2)
- *
- * Return:
- * number of memory channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
+/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
- int flag, err = 0;
-
- err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- if (err)
- return err;
+ int flag;
if (pvt->ext_model >= K8_REV_F)
/* RevF (NPT) and later */
- flag = pvt->dclr0 & F10_WIDTH_128;
+ flag = pvt->dclr0 & WIDTH_128;
else
/* RevE and earlier */
flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1027,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
return (flag) ? 2 : 1;
}
-/* extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
{
- return (((u64) (info->nbeah & 0xff)) << 32) +
- (info->nbeal & ~0x03);
+ u8 start_bit = 1;
+ u8 end_bit = 47;
+
+ if (boot_cpu_data.x86 == 0xf) {
+ start_bit = 3;
+ end_bit = 39;
+ }
+
+ return m->addr & GENMASK(start_bit, end_bit);
}
-/*
- * Read the Base and Limit registers for K8 based Memory controllers; extract
- * fields from the 'raw' reg into separate data fields
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
- */
-static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
- u32 low;
- u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ int off = range << 3;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- /* Extract parts into separate data entries */
- pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
- pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
- pvt->dram_rw_en[dram] = (low & 0x3);
+ if (boot_cpu_data.x86 == 0xf)
+ return;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
+ if (!dram_rw(pvt, range))
+ return;
- /*
- * Extract parts into separate data entries. Limit is the HIGHEST memory
- * location of the region, so lower 24 bits need to be all ones
- */
- pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
- pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
- pvt->dram_DstNode[dram] = (low & 0x7);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
}
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct mem_ctl_info *src_mci;
+ struct amd64_pvt *pvt = mci->pvt_info;
int channel, csrow;
u32 page, offset;
- u16 syndrome;
-
- syndrome = extract_syndrome(err_info);
/* CHIPKILL enabled */
- if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+ if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
if (channel < 0) {
/*
@@ -1124,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
}
}
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
- if (pvt->ext_model >= K8_REV_F)
- dbam_map = ddr2_dbam;
- else if (pvt->ext_model >= K8_REV_D)
- dbam_map = ddr2_dbam_revD;
+ if (i <= 2)
+ shift = i;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam_revCG;
+ shift = (i + 1) >> 1;
- return dbam_map[cs_mode];
+ return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
+{
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ if (pvt->ext_model >= K8_REV_F) {
+ WARN_ON(cs_mode > 11);
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+ }
+ else if (pvt->ext_model >= K8_REV_D) {
+ WARN_ON(cs_mode > 10);
+
+ if (cs_mode == 3 || cs_mode == 8)
+ return 32 << (cs_mode - 1);
+ else
+ return 32 << cs_mode;
+ }
+ else {
+ WARN_ON(cs_mode > 6);
+ return 32 << cs_mode;
+ }
}
/*
@@ -1146,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
* Pass back:
* contents of the DCL0_LOW register
*/
-static int f10_early_channel_count(struct amd64_pvt *pvt)
+static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
- int dbams[] = { DBAM0, DBAM1 };
int i, j, channels = 0;
- u32 dbam;
- /* If we are in 128 bit mode, then we are using 2 channels */
- if (pvt->dclr0 & F10_WIDTH_128) {
- channels = 2;
- return channels;
- }
+ /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
+ if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ return 2;
/*
* Need to check if in unganged mode: In such, there are 2 channels,
@@ -1173,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
* is more than just one DIMM present in unganged mode. Need to check
* both controllers since DIMMs can be placed in either one.
*/
- for (i = 0; i < ARRAY_SIZE(dbams); i++) {
- if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
- goto err_reg;
+ for (i = 0; i < 2; i++) {
+ u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
for (j = 0; j < 4; j++) {
if (DBAM_DIMM(j, dbam) > 0) {
@@ -1191,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
amd64_info("MCT channel count: %d\n", channels);
return channels;
-
-err_reg:
- return -1;
-
}
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
+ int cs_size = 0;
- if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
- dbam_map = ddr3_dbam;
+ if (i == 0 || i == 3 || i == 4)
+ cs_size = -1;
+ else if (i <= 2)
+ shift = i;
+ else if (i == 12)
+ shift = 7;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam;
+ shift = (i + 1) >> 1;
- return dbam_map[cs_mode];
+ if (cs_size != -1)
+ cs_size = (128 * (1 << !!dct_width)) << shift;
+
+ return cs_size;
}
-static u64 f10_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- return (((u64) (info->nbeah & 0xffff)) << 32) +
- (info->nbeal & ~0x01);
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ WARN_ON(cs_mode > 11);
+
+ if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+ return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
+ else
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
- * Read the Base and Limit registers for F10 based Memory controllers. Extract
- * fields from the 'raw' reg into separate data fields.
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ * F15h supports only 64bit DCT interfaces
*/
-static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
-
- low_offset = K8_DRAM_BASE_LOW + (dram << 3);
- high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
-
- /* read the 'raw' DRAM BASE Address register */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
-
- /* Extract parts into separate data entries */
- pvt->dram_rw_en[dram] = (low_base & 0x3);
-
- if (pvt->dram_rw_en[dram] == 0)
- return;
-
- pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
-
- pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
- (((u64)low_base & 0xFFFF0000) << 8);
-
- low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
- high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
+ WARN_ON(cs_mode > 12);
- /* read the 'raw' LIMIT registers */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
-
- pvt->dram_DstNode[dram] = (low_limit & 0x7);
- pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
-
- /*
- * Extract address values and form a LIMIT address. Limit is the HIGHEST
- * memory location of the region, so low 24 bits need to be all ones.
- */
- pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
- (((u64) low_limit & 0xFFFF0000) << 8) |
- 0x00FFFFFF;
+ return ddr3_cs_size(cs_mode, false);
}
-static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
- &pvt->dram_ctl_select_low)) {
- debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
- "High range addresses at: 0x%x\n",
- pvt->dram_ctl_select_low,
- dct_sel_baseaddr(pvt));
+ if (boot_cpu_data.x86 == 0xf)
+ return;
+
+ if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+ debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCT mode: %s, All DCTs on: %s\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
- (dct_dram_enabled(pvt) ? "yes" : "no"));
+ debugf0(" DCTs operate in %s mode.\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
debugf0(" Address range split per DCT: %s\n",
(dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" DCT data interleave for ECC: %s, "
+ debugf0(" data interleave for ECC: %s, "
"DRAM cleared since last warm reset: %s\n",
(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
(dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" DCT channel interleave: %s, "
- "DCT interleave bits selector: 0x%x\n",
+ debugf0(" channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
- amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
- &pvt->dram_ctl_select_high);
+ amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
- * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
-static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
- int hi_range_sel, u32 intlv_en)
+static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ bool hi_range_sel, u8 intlv_en)
{
- u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+ u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
if (dct_ganging_enabled(pvt))
- cs = 0;
- else if (hi_range_sel)
- cs = dct_sel_high;
- else if (dct_interleave_enabled(pvt)) {
- /*
- * see F2x110[DctSelIntLvAddr] - channel interleave mode
- */
- if (dct_sel_interleave_addr(pvt) == 0)
- cs = sys_addr >> 6 & 1;
- else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
- temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+ return 0;
- if (dct_sel_interleave_addr(pvt) & 1)
- cs = (sys_addr >> 9 & 1) ^ temp;
- else
- cs = (sys_addr >> 6 & 1) ^ temp;
- } else if (intlv_en & 4)
- cs = sys_addr >> 15 & 1;
- else if (intlv_en & 2)
- cs = sys_addr >> 14 & 1;
- else if (intlv_en & 1)
- cs = sys_addr >> 13 & 1;
- else
- cs = sys_addr >> 12 & 1;
- } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
- cs = ~dct_sel_high & 1;
- else
- cs = 0;
+ if (hi_range_sel)
+ return dct_sel_high;
- return cs;
-}
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_interleave_enabled(pvt)) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
-static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
-{
- if (intlv_en == 1)
- return 1;
- else if (intlv_en == 3)
- return 2;
- else if (intlv_en == 7)
- return 3;
+ /* return DCT select function: 0=DCT0, 1=DCT1 */
+ if (!intlv_addr)
+ return sys_addr >> 6 & 1;
+
+ if (intlv_addr & 0x2) {
+ u8 shift = intlv_addr & 0x1 ? 9 : 6;
+ u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ return ((sys_addr >> shift) & 1) ^ temp;
+ }
+
+ return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
+ }
+
+ if (dct_high_range_enabled(pvt))
+ return ~dct_sel_high & 1;
return 0;
}
-/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
-static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
- u32 dct_sel_base_addr,
- u64 dct_sel_base_off,
- u32 hole_valid, u32 hole_off,
- u64 dram_base)
+/* Convert the sys_addr to the normalized DCT address */
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, bool hi_rng,
+ u32 dct_sel_base_addr)
{
u64 chan_off;
+ u64 dram_base = get_dram_base(pvt, range);
+ u64 hole_off = f10_dhar_offset(pvt);
+ u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
- if (hi_range_sel) {
- if (!(dct_sel_base_addr & 0xFFFF0000) &&
- hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ if (hi_rng) {
+ /*
+ * if
+ * base address of high range is below 4Gb
+ * (bits [47:27] at [31:11])
+ * DRAM address space on this DCT is hoisted above 4Gb &&
+ * sys_addr > 4Gb
+ *
+ * remove hole offset from sys_addr
+ * else
+ * remove high range offset from sys_addr
+ */
+ if ((!(dct_sel_base_addr >> 16) ||
+ dct_sel_base_addr < dhar_base(pvt)) &&
+ dhar_valid(pvt) &&
+ (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
chan_off = dct_sel_base_off;
} else {
- if (hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ /*
+ * if
+ * we have a valid hole &&
+ * sys_addr > 4Gb
+ *
+ * remove hole
+ * else
+ * remove dram base to normalize to DCT address
+ */
+ if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
- chan_off = dram_base & 0xFFFFF8000000ULL;
+ chan_off = dram_base;
}
- return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
- (chan_off & 0x0000FFFFFF800000ULL);
+ return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
}
-/* Hack for the time being - Can we get this from BIOS?? */
-#define CH0SPARE_RANK 0
-#define CH1SPARE_RANK 1
-
/*
* checks if the csrow passed in is marked as SPARED, if so returns the new
* spare row
*/
-static inline int f10_process_possible_spare(int csrow,
- u32 cs, struct amd64_pvt *pvt)
-{
- u32 swap_done;
- u32 bad_dram_cs;
-
- /* Depending on channel, isolate respective SPARING info */
- if (cs) {
- swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH1SPARE_RANK;
- } else {
- swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH0SPARE_RANK;
+static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
+{
+ int tmp_cs;
+
+ if (online_spare_swap_done(pvt, dct) &&
+ csrow == online_spare_bad_dramcs(pvt, dct)) {
+
+ for_each_chip_select(tmp_cs, dct, pvt) {
+ if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
+ csrow = tmp_cs;
+ break;
+ }
+ }
}
return csrow;
}
@@ -1413,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow,
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u32 cs_base, cs_mask;
+ u64 cs_base, cs_mask;
int cs_found = -EINVAL;
int csrow;
@@ -1427,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
pvt = mci->pvt_info;
- debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
-
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+ debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
- cs_base = amd64_get_dct_base(pvt, cs, csrow);
- if (!(cs_base & K8_DCSB_CS_ENABLE))
+ for_each_chip_select(csrow, dct, pvt) {
+ if (!csrow_enabled(csrow, dct, pvt))
continue;
- /*
- * We have an ENABLED CSROW, Isolate just the MASK bits of the
- * target: [28:19] and [13:5], which map to [36:27] and [21:13]
- * of the actual address.
- */
- cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
-
- /*
- * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
- * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
- */
- cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+ get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
- csrow, cs_base, cs_mask);
+ debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
- cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+ cs_mask = ~cs_mask;
- debugf1(" Final CSMask=0x%x\n", cs_mask);
- debugf1(" (InputAddr & ~CSMask)=0x%x "
- "(CSBase & ~CSMask)=0x%x\n",
- (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+ debugf1(" (InputAddr & ~CSMask)=0x%llx "
+ "(CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
- if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
- cs_found = f10_process_possible_spare(csrow, cs, pvt);
+ if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ cs_found = f10_process_possible_spare(pvt, dct, csrow);
debugf1(" MATCH csrow=%d\n", cs_found);
break;
@@ -1468,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
return cs_found;
}
-/* For a given @dram_range, check if @sys_addr falls within it. */
-static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
- u64 sys_addr, int *nid, int *chan_sel)
+/*
+ * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
+ * swapped with a region located at the bottom of memory so that the GPU can use
+ * the interleaved region and thus two channels.
+ */
+static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
- int node_id, cs_found = -EINVAL, high_range = 0;
- u32 intlv_en, intlv_sel, intlv_shift, hole_off;
- u32 hole_valid, tmp, dct_sel_base, channel;
- u64 dram_base, chan_addr, dct_sel_base_off;
+ u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- dram_base = pvt->dram_base[dram_range];
- intlv_en = pvt->dram_IntlvEn[dram_range];
+ if (boot_cpu_data.x86 == 0x10) {
+ /* only revC3 and revE have that feature */
+ if (boot_cpu_data.x86_model < 4 ||
+ (boot_cpu_data.x86_model < 0xa &&
+ boot_cpu_data.x86_mask < 3))
+ return sys_addr;
+ }
- node_id = pvt->dram_DstNode[dram_range];
- intlv_sel = pvt->dram_IntlvSel[dram_range];
+ amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
- debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
- dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+ if (!(swap_reg & 0x1))
+ return sys_addr;
- /*
- * This assumes that one node's DHAR is the same as all the other
- * nodes' DHAR.
- */
- hole_off = (pvt->dhar & 0x0000FF80);
- hole_valid = (pvt->dhar & 0x1);
- dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+ swap_base = (swap_reg >> 3) & 0x7f;
+ swap_limit = (swap_reg >> 11) & 0x7f;
+ rgn_size = (swap_reg >> 20) & 0x7f;
+ tmp_addr = sys_addr >> 27;
- debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
- hole_off, hole_valid, intlv_sel);
+ if (!(sys_addr >> 34) &&
+ (((tmp_addr >= swap_base) &&
+ (tmp_addr <= swap_limit)) ||
+ (tmp_addr < rgn_size)))
+ return sys_addr ^ (u64)swap_base << 27;
+
+ return sys_addr;
+}
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ u64 chan_addr;
+ u32 dct_sel_base;
+ u8 channel;
+ bool high_range = false;
+
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+ u32 intlv_sel = dram_intlv_sel(pvt, range);
+
+ debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
if (intlv_en &&
- (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
+ amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
+ intlv_en, intlv_sel);
return -EINVAL;
+ }
+
+ sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
dct_sel_base = dct_sel_baseaddr(pvt);
@@ -1510,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
if (dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt) &&
((sys_addr >> 27) >= (dct_sel_base >> 11)))
- high_range = 1;
-
- channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
-
- chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
- dct_sel_base_off, hole_valid,
- hole_off, dram_base);
+ high_range = true;
- intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+ channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
- /* remove Node ID (in case of memory interleaving) */
- tmp = chan_addr & 0xFC0;
+ chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
+ high_range, dct_sel_base);
- chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
+ /* Remove node interleaving, see F1x120 */
+ if (intlv_en)
+ chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
+ (chan_addr & 0xfff);
- /* remove channel interleave and hash */
+ /* remove channel interleave */
if (dct_interleave_enabled(pvt) &&
!dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt)) {
- if (dct_sel_interleave_addr(pvt) != 1)
- chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
- else {
- tmp = chan_addr & 0xFC0;
- chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
- | tmp;
- }
+
+ if (dct_sel_interleave_addr(pvt) != 1) {
+ if (dct_sel_interleave_addr(pvt) == 0x3)
+ /* hash 9 */
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ /* A[6] or hash 6 */
+ chan_addr = ((chan_addr >> 7) << 6) |
+ (chan_addr & 0x3f);
+ } else
+ /* A[12] */
+ chan_addr = ((chan_addr >> 13) << 12) |
+ (chan_addr & 0xfff);
}
- debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
- chan_addr, (u32)(chan_addr >> 8));
+ debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
- cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
if (cs_found >= 0) {
*nid = node_id;
@@ -1550,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
return cs_found;
}
-static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
int *node, int *chan_sel)
{
- int dram_range, cs_found = -EINVAL;
- u64 dram_base, dram_limit;
+ int cs_found = -EINVAL;
+ unsigned range;
- for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+ for (range = 0; range < DRAM_RANGES; range++) {
- if (!pvt->dram_rw_en[dram_range])
+ if (!dram_rw(pvt, range))
continue;
- dram_base = pvt->dram_base[dram_range];
- dram_limit = pvt->dram_limit[dram_range];
-
- if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
+ if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
- cs_found = f10_match_to_this_node(pvt, dram_range,
+ cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, node,
chan_sel);
if (cs_found >= 0)
@@ -1583,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
* The @sys_addr is usually an error address received from the hardware
* (MCX_ADDR).
*/
-static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info,
- u64 sys_addr)
+static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 page, offset;
int nid, csrow, chan = 0;
- u16 syndrome;
- csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+ csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1601,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
error_address_to_page_and_offset(sys_addr, &page, &offset);
- syndrome = extract_syndrome(err_info);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
- if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+ if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
if (chan >= 0)
@@ -1625,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
/*
* debug routine to display the memory sizes of all logical DIMMs and its
- * CSROWs as well
+ * CSROWs
*/
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, factor = 0;
- u32 dbam;
- u32 *dcsb;
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & F10_WIDTH_128)
+ if (pvt->dclr0 & WIDTH_128)
factor = 1;
/* K8 families < revF not supported yet */
@@ -1644,11 +1552,11 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
WARN_ON(ctrl != 0);
}
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
- ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
+ dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
+ : pvt->csels[0].csbases;
- dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
- dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+ debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
@@ -1656,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
- if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
- size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
size1 = 0;
- if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0 << factor,
@@ -1676,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = {
.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
.ops = {
.early_channel_count = k8_early_channel_count,
- .get_error_address = k8_get_error_address,
- .read_dram_base_limit = k8_read_dram_base_limit,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
+ .read_dct_pci_cfg = k8_read_dct_pci_cfg,
}
},
[F10_CPUS] = {
@@ -1687,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = {
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
.ops = {
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
+ [F15_CPUS] = {
+ .ctl_name = "F15h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f15_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
};
@@ -1782,15 +1700,15 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
- int v_dim)
+static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+ unsigned v_dim)
{
unsigned int i, err_sym;
for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
u16 s = syndrome;
- int v_idx = err_sym * v_dim;
- int v_end = (err_sym + 1) * v_dim;
+ unsigned v_idx = err_sym * v_dim;
+ unsigned v_end = (err_sym + 1) * v_dim;
/* walk over all 16 bits of the syndrome */
for (i = 1; i < (1U << 16); i <<= 1) {
@@ -1862,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
struct amd64_pvt *pvt = mci->pvt_info;
int err_sym = -1;
- if (pvt->syn_type == 8)
+ if (pvt->ecc_sym_sz == 8)
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors),
- pvt->syn_type);
- else if (pvt->syn_type == 4)
+ pvt->ecc_sym_sz);
+ else if (pvt->ecc_sym_sz == 4)
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors),
- pvt->syn_type);
+ pvt->ecc_sym_sz);
else {
- amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
+ amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
return err_sym;
}
- return map_err_sym_to_channel(err_sym, pvt->syn_type);
+ return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
/*
* Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
* ADDRESS and process.
*/
-static void amd64_handle_ce(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 sys_addr;
+ u16 syndrome;
/* Ensure that the Error Address is VALID */
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
+ syndrome = extract_syndrome(m->status);
amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
- pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
+ pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}
/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
- struct amd64_pvt *pvt = mci->pvt_info;
struct mem_ctl_info *log_mci, *src_mci = NULL;
int csrow;
u64 sys_addr;
@@ -1914,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
log_mci = mci;
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
/*
* Find out which node the error address belongs to. This may be
@@ -1948,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct err_regs *info)
+ struct mce *m)
{
- u16 ec = EC(info->nbsl);
- u8 xec = XEC(info->nbsl, 0x1f);
- int ecc_type = (info->nbsh >> 13) & 0x3;
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, 0x1f);
+ u8 ecc_type = (m->status >> 45) & 0x3;
/* Bail early out if this was an 'observed' error */
- if (PP(ec) == K8_NBSL_PP_OBS)
+ if (PP(ec) == NBSL_PP_OBS)
return;
/* Do only ECC errors */
@@ -1963,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
return;
if (ecc_type == 2)
- amd64_handle_ce(mci, info);
+ amd64_handle_ce(mci, m);
else if (ecc_type == 1)
- amd64_handle_ue(mci, info);
+ amd64_handle_ue(mci, m);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
struct mem_ctl_info *mci = mcis[node_id];
- struct err_regs regs;
-
- regs.nbsl = (u32) m->status;
- regs.nbsh = (u32)(m->status >> 32);
- regs.nbeal = (u32) m->addr;
- regs.nbeah = (u32)(m->addr >> 32);
- regs.nbcfg = nbcfg;
-
- __amd64_decode_bus_error(mci, &regs);
-
- /*
- * Check the UE bit of the NB status high register, if set generate some
- * logs. If NOT a GART error, then process the event as a NO-INFO event.
- * If it was a GART error, skip that process.
- *
- * FIXME: this should go somewhere else, if at all.
- */
- if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
- edac_mc_handle_ue_no_info(mci, "UE bit is set");
+ __amd64_decode_bus_error(mci, m);
}
/*
@@ -2039,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u64 msr_val;
u32 tmp;
- int dram;
+ unsigned range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2058,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt)
} else
debugf0(" TOP_MEM2 disabled.\n");
- amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
+ amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
- if (pvt->ops->read_dram_ctl_register)
- pvt->ops->read_dram_ctl_register(pvt);
+ read_dram_ctl_register(pvt);
- for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
- /*
- * Call CPU specific READ function to get the DRAM Base and
- * Limit values from the DCT.
- */
- pvt->ops->read_dram_base_limit(pvt, dram);
+ for (range = 0; range < DRAM_RANGES; range++) {
+ u8 rw;
- /*
- * Only print out debug info on rows with both R and W Enabled.
- * Normal processing, compiler should optimize this whole 'if'
- * debug output block away.
- */
- if (pvt->dram_rw_en[dram] != 0) {
- debugf1(" DRAM-BASE[%d]: 0x%016llx "
- "DRAM-LIMIT: 0x%016llx\n",
- dram,
- pvt->dram_base[dram],
- pvt->dram_limit[dram]);
-
- debugf1(" IntlvEn=%s %s %s "
- "IntlvSel=%d DstNode=%d\n",
- pvt->dram_IntlvEn[dram] ?
- "Enabled" : "Disabled",
- (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
- (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
- pvt->dram_IntlvSel[dram],
- pvt->dram_DstNode[dram]);
- }
+ /* read settings for this DRAM range */
+ read_dram_base_limit_regs(pvt, range);
+
+ rw = dram_rw(pvt, range);
+ if (!rw)
+ continue;
+
+ debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
+
+ debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
- amd64_read_dct_base_mask(pvt);
+ read_dct_base_mask(pvt);
- amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
- amd64_read_dbam_reg(pvt);
+ amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
+ amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+ amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
+ amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
- if (boot_cpu_data.x86 >= 0x10) {
- if (!dct_ganging_enabled(pvt)) {
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
- }
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ if (!dct_ganging_enabled(pvt)) {
+ amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
+ amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
}
- if (boot_cpu_data.x86 == 0x10 &&
- boot_cpu_data.x86_model > 7 &&
- /* F3x180[EccSymbolSize]=1 => x8 symbols */
- tmp & BIT(25))
- pvt->syn_type = 8;
- else
- pvt->syn_type = 4;
+ pvt->ecc_sym_sz = 4;
- amd64_dump_misc_regs(pvt);
+ if (c->x86 >= 0x10) {
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too */
+ if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+ dump_misc_regs(pvt);
}
/*
* NOTE: CPU Revision Dependent code
*
* Input:
- * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
@@ -2156,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
@@ -2169,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
*/
cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
- nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+ nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
/*
* If dual channel then double the memory size of single channel.
@@ -2192,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr;
+ u64 input_addr_min, input_addr_max, sys_addr, base, mask;
u32 val;
int i, empty = 1;
- amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
- pvt->ctl_error_info.nbcfg = val;
debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
pvt->mc_node_id, val,
- !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- for (i = 0; i < pvt->cs_count; i++) {
+ for_each_chip_select(i, 0, pvt) {
csrow = &mci->csrows[i];
- if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ if (!csrow_enabled(i, 0, pvt)) {
debugf1("----CSROW %d EMPTY for node %d\n", i,
pvt->mc_node_id);
continue;
@@ -2218,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci)
i, pvt->mc_node_id);
empty = 0;
- csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
- csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+
+ get_cs_base_and_mask(pvt, i, 0, &base, &mask);
+ csrow->page_mask = ~mask;
/* 8 bytes of resolution */
csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2243,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
- if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE)
csrow->edac_mode =
- (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ (pvt->nbcfg & NBCFG_CHIPKILL) ?
EDAC_S4ECD4ED : EDAC_SECDED;
else
csrow->edac_mode = EDAC_NONE;
@@ -2255,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
{
int cpu;
@@ -2265,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2282,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
for_each_cpu(cpu, mask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & K8_MSR_MCGCTL_NBE;
+ nbe = reg->l & MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, reg->q,
@@ -2317,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
- if (reg->l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & MSR_MCGCTL_NBE)
s->flags.nb_mce_enable = 1;
- reg->l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= MSR_MCGCTL_NBE;
} else {
/*
* Turn off NB MCE reporting only when it was off before
*/
if (!s->flags.nb_mce_enable)
- reg->l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~MSR_MCGCTL_NBE;
}
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2340,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
bool ret = true;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
if (toggle_ecc_err_reporting(s, nid, ON)) {
amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
return false;
}
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
- /* turn on UECCEn and CECCEn bits */
s->old_nbctl = value & mask;
s->nbctl_valid = true;
value |= mask;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 0;
/* Attempt to turn on DRAM ECC Enable */
- value |= K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ value |= NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("Hardware rejected DRAM ECC enable,"
"check memory DIMM configuration.\n");
ret = false;
@@ -2384,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
@@ -2394,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
+
if (!s->nbctl_valid)
return;
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
value &= ~mask;
value |= s->old_nbctl;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if (!s->flags.nb_ecc_prev) {
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
- value &= ~K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+ value &= ~NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
}
/* restore the NB Enable MCGCTL bit */
@@ -2435,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
u8 ecc_en = 0;
bool nb_mce_en = false;
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2475,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
+ struct amd64_family_type *fam)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & K8_NBCAP_SECDED)
+ if (pvt->nbcap & NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
mci->edac_cap = amd64_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = pvt->ctl_name;
+ mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F2);
mci->ctl_page_to_phys = NULL;
@@ -2512,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
case 0xf:
fam_type = &amd64_family_types[K8_CPUS];
pvt->ops = &amd64_family_types[K8_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
break;
+
case 0x10:
fam_type = &amd64_family_types[F10_CPUS];
pvt->ops = &amd64_family_types[F10_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+
+ case 0x15:
+ fam_type = &amd64_family_types[F15_CPUS];
+ pvt->ops = &amd64_family_types[F15_CPUS].ops;
break;
default:
@@ -2529,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
(fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
@@ -2576,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
+ mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
if (!mci)
goto err_siblings;
mci->pvt_info = pvt;
mci->dev = &pvt->F2->dev;
- setup_mci_misc_attrs(mci);
+ setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -2726,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+
{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2766,7 +2668,7 @@ static int __init amd64_edac_init(void)
{
int err = -ENODEV;
- edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
opstate_init();
@@ -2777,7 +2679,7 @@ static int __init amd64_edac_init(void)
mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
if (!(mcis && ecc_stngs))
- goto err_ret;
+ goto err_free;
msrs = msrs_alloc();
if (!msrs)
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..11be36a311eb 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -144,7 +144,7 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "v3.3.0"
+#define EDAC_AMD64_VERSION "3.4.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,85 +153,64 @@
#define K8_REV_F 4
/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT 8
-#define DRAM_REG_COUNT 8
+#define NUM_CHIPSELECTS 8
+#define DRAM_RANGES 8
#define ON true
#define OFF false
/*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example
+ *
+ * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
+
+/*
* PCI-defined configuration space registers
*/
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
/*
* Function 1 - Address Map
*/
-#define K8_DRAM_BASE_LOW 0x40
-#define K8_DRAM_LIMIT_LOW 0x44
-#define K8_DHAR 0xf0
-
-#define DHAR_VALID BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+#define DRAM_BASE_LO 0x40
+#define DRAM_LIMIT_LO 0x44
-#define DHAR_BASE_MASK 0xff000000
-#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
+#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
+#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
+#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
-#define K8_DHAR_OFFSET_MASK 0x0000ff00
-#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define DHAR 0xf0
+#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
+#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
+#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
-#define F10_DHAR_OFFSET_MASK 0x0000ff80
/* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
+#define DCT_CFG_SEL 0x10C
-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH 0x140
-#define F10_DRAM_LIMIT_HIGH 0x144
+#define DRAM_BASE_HI 0x140
+#define DRAM_LIMIT_HI 0x144
/*
* Function 2 - DRAM controller
*/
-#define K8_DCSB0 0x40
-#define F10_DCSB1 0x140
+#define DCSB0 0x40
+#define DCSB1 0x140
+#define DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_NPT_SPARE BIT(1)
-#define K8_DCSB_NPT_TESTFAIL BIT(2)
+#define DCSM0 0x60
+#define DCSM1 0x160
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT 4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT 8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT 8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0 0x60
-#define F10_DCSM1 0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
-
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
-
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
#define DBAM0 0x80
#define DBAM1 0x180
@@ -241,148 +220,84 @@
#define DBAM_MAX_VALUE 11
-
-#define F10_DCLR_0 0x90
-#define F10_DCLR_1 0x190
+#define DCLR0 0x90
+#define DCLR1 0x190
#define REVE_WIDTH_128 BIT(16)
-#define F10_WIDTH_128 BIT(11)
+#define WIDTH_128 BIT(11)
+#define DCHR0 0x94
+#define DCHR1 0x194
+#define DDR3_MODE BIT(8)
-#define F10_DCHR_0 0x94
-#define F10_DCHR_1 0x194
+#define DCT_SEL_LO 0x110
+#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
+#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
-#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define DDR3_MODE BIT(8)
-#define F10_DCHR_MblMode BIT(6)
+#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
+#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
-#define F10_DCTL_SEL_LOW 0x110
-#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG 0x10c
-#define F10_DCTL_SEL_HIGH 0x114
+#define DCT_SEL_HI 0x114
/*
* Function 3 - Misc Control
*/
-#define K8_NBCTL 0x40
-
-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn BIT(0)
-
-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn BIT(1)
+#define NBCTL 0x40
-#define K8_NBCFG 0x44
-#define K8_NBCFG_CHIPKILL BIT(23)
-#define K8_NBCFG_ECC_ENABLE BIT(22)
+#define NBCFG 0x44
+#define NBCFG_CHIPKILL BIT(23)
+#define NBCFG_ECC_ENABLE BIT(22)
-#define K8_NBSL 0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES 0x0
+/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC 0x8
+#define NBSL_PP_OBS 0x2
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK 0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC 0x0
-#define K8_NBSL_EXT_ERR_CRC 0x1
-#define K8_NBSL_EXT_ERR_SYNC 0x2
-#define K8_NBSL_EXT_ERR_MST 0x3
-#define K8_NBSL_EXT_ERR_TGT 0x4
-#define K8_NBSL_EXT_ERR_GART 0x5
-#define K8_NBSL_EXT_ERR_RMW 0x6
-#define K8_NBSL_EXT_ERR_WDT 0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC 0x0
-#define K8_NBSL_PP_RES 0x1
-#define K8_NBSL_PP_OBS 0x2
-#define K8_NBSL_PP_GENERIC 0x3
-
-#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
-
-#define K8_NBEAL 0x50
-#define K8_NBEAH 0x54
-#define K8_SCRCTRL 0x58
-
-#define F10_NB_CFG_LOW 0x88
+#define SCRCTRL 0x58
#define F10_ONLINE_SPARE 0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
#define F10_NB_ARRAY_ADDR 0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+#define F10_NB_ARRAY_DRAM_ECC BIT(31)
/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
#define F10_NB_ARRAY_DATA 0xBC
-
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(17) | bits)
-
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(16) | bits)
-#define K8_NBCAP 0xE8
-#define K8_NBCAP_CORES (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL BIT(4)
-#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_DCT_DUAL BIT(0)
+#define NBCAP 0xE8
+#define NBCAP_CHIPKILL BIT(4)
+#define NBCAP_SECDED BIT(3)
+#define NBCAP_DCT_DUAL BIT(0)
#define EXT_NB_MCA_CFG 0x180
/* MSRs */
-#define K8_MSR_MCGCTL_NBE BIT(4)
-
-#define K8_MSR_MC4CTL 0x0410
-#define K8_MSR_MC4STAT 0x0411
-#define K8_MSR_MC4ADDR 0x0412
+#define MSR_MCGCTL_NBE BIT(4)
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_node_id(struct pci_dev *pdev)
+static inline u8 get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
-enum amd64_chipset_families {
+enum amd_families {
K8_CPUS = 0,
F10_CPUS,
+ F15_CPUS,
+ NUM_FAMILIES,
};
/* Error injection control structure */
@@ -392,13 +307,35 @@ struct error_injection {
u32 bit_map;
};
+/* low and high part of PCI config space regs */
+struct reg_pair {
+ u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+ struct reg_pair base;
+ struct reg_pair lim;
+};
+
+/* A DCT chip selects collection */
+struct chip_select {
+ u32 csbases[NUM_CHIPSELECTS];
+ u8 b_cnt;
+
+ u32 csmasks[NUM_CHIPSELECTS];
+ u8 m_cnt;
+};
+
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- int mc_node_id; /* MC index of this MC node */
+ unsigned mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -414,60 +351,50 @@ struct amd64_pvt {
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
- u32 dcsb0[MAX_CS_COUNT];
- u32 dcsb1[MAX_CS_COUNT];
-
- /* DRAM CS Mask Registers F2x[1,0][6C:60] */
- u32 dcsm0[MAX_CS_COUNT];
- u32 dcsm1[MAX_CS_COUNT];
-
- /*
- * Decoded parts of DRAM BASE and LIMIT Registers
- * F1x[78,70,68,60,58,50,48,40]
- */
- u64 dram_base[DRAM_REG_COUNT];
- u64 dram_limit[DRAM_REG_COUNT];
- u8 dram_IntlvSel[DRAM_REG_COUNT];
- u8 dram_IntlvEn[DRAM_REG_COUNT];
- u8 dram_DstNode[DRAM_REG_COUNT];
- u8 dram_rw_en[DRAM_REG_COUNT];
-
- /*
- * The following fields are set at (load) run time, after CPU revision
- * has been determined, since the dct_base and dct_mask registers vary
- * based on revision
- */
- u32 dcsb_base; /* DCSB base bits */
- u32 dcsm_mask; /* DCSM mask bits */
- u32 cs_count; /* num chip selects (== num DCSB registers) */
- u32 num_dcsm; /* Number of DCSM registers */
- u32 dcs_mask_notused; /* DCSM notused mask bits */
- u32 dcs_shift; /* DCSB and DCSM shift value */
+ /* one for each DCT */
+ struct chip_select csels[2];
+
+ /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+ struct dram_range ranges[DRAM_RANGES];
u64 top_mem; /* top of memory below 4GB */
u64 top_mem2; /* top of memory above 4GB */
- u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
- u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
- u32 online_spare; /* On-Line spare Reg */
+ u32 dct_sel_lo; /* DRAM Controller Select Low */
+ u32 dct_sel_hi; /* DRAM Controller Select High */
+ u32 online_spare; /* On-Line spare Reg */
/* x4 or x8 syndromes in use */
- u8 syn_type;
-
- /* temp storage for when input is received from sysfs */
- struct err_regs ctl_error_info;
+ u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
+};
- /* DCT per-family scrubrate setting */
- u32 min_scrubrate;
+static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
- /* family name this instance is running on */
- const char *ctl_name;
+ if (boot_cpu_data.x86 == 0xf)
+ return addr;
-};
+ return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+ if (boot_cpu_data.x86 == 0xf)
+ return lim;
+
+ return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
+
+static inline u16 extract_syndrome(u64 status)
+{
+ return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
/*
* per-node ECC settings descriptor
@@ -482,14 +409,6 @@ struct ecc_settings {
} flags;
};
-extern const char *tt_msgs[4];
-extern const char *ll_msgs[4];
-extern const char *rrrr_msgs[16];
-extern const char *to_msgs[2];
-extern const char *pp_msgs[4];
-extern const char *ii_msgs[4];
-extern const char *htlink_msgs[8];
-
#ifdef CONFIG_EDAC_DEBUG
#define NUM_DBG_ATTRS 5
#else
@@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
*/
struct low_ops {
int (*early_channel_count) (struct amd64_pvt *pvt);
-
- u64 (*get_error_address) (struct mem_ctl_info *mci,
- struct err_regs *info);
- void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
- void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
- struct err_regs *info, u64 SystemAddr);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
+ void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome);
+ int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+ int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
+ u32 *val, const char *func);
};
struct amd64_family_type {
@@ -527,28 +443,17 @@ struct amd64_family_type {
struct low_ops ops;
};
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
-{
- int err = 0;
-
- err = pci_read_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error reading F%dx%x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func);
#define amd64_read_pci_cfg(pdev, offset, val) \
- amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+ __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define amd64_write_pci_cfg(pdev, offset, val) \
+ __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+ pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 688478de1cbd..303f10e03dda 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the WRITE request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index b9a781c47e3c..837ad8f85b48 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -817,7 +817,7 @@ static void cpc925_del_edac_devices(void)
}
}
-/* Convert current back-ground scrub rate into byte/sec bandwith */
+/* Convert current background scrub rate into byte/sec bandwidth */
static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3d965347a673..eefa3501916b 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -164,7 +164,7 @@ enum mem_type {
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
- EDAC_NONE, /* Doesnt support ECC */
+ EDAC_NONE, /* Doesn't support ECC */
EDAC_RESERVED, /* Reserved ECC type */
EDAC_PARITY, /* Detects parity errors */
EDAC_EC, /* Error Checking - no correction */
@@ -233,7 +233,7 @@ enum scrub_type {
* of these in parallel provides 64 bits which is common
* for a memory stick.
*
- * Memory Stick: A printed circuit board that agregates multiple
+ * Memory Stick: A printed circuit board that aggregates multiple
* memory devices in parallel. This is the atomic
* memory component that is purchasable by Joe consumer
* and loaded into a memory socket.
@@ -385,7 +385,7 @@ struct mem_ctl_info {
/* Get the current sdram memory scrub rate from the internal
representation and converts it to the closest matching
- bandwith in bytes/sec.
+ bandwidth in bytes/sec.
*/
int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
@@ -823,7 +823,7 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
* There are a limited number of error logging registers that can
* be exhausted. When all registers are exhausted and an additional
* error occurs then an error overflow register records that an
- * error occured and the type of error, but doesn't have any
+ * error occurred and the type of error, but doesn't have any
* further information. The ce/ue versions make for cleaner
* reporting logic and function interface - reduces conditional
* statement clutter and extra function arguments.
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index d5e13c94714f..a7408cf86f37 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -672,7 +672,7 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
block->counters.ce_count++;
}
- /* Propogate the count up the 'totals' tree */
+ /* Propagate the count up the 'totals' tree */
instance->counters.ce_count++;
edac_dev->counters.ce_count++;
@@ -718,7 +718,7 @@ void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
block->counters.ue_count++;
}
- /* Propogate the count up the 'totals' tree */
+ /* Propagate the count up the 'totals' tree */
instance->counters.ue_count++;
edac_dev->counters.ue_count++;
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 400de071cabc..86649df00285 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -533,7 +533,7 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
memset(&block->kobj, 0, sizeof(struct kobject));
/* bump the main kobject's reference count for this controller
- * and this instance is dependant on the main
+ * and this instance is dependent on the main
*/
main_kobj = kobject_get(&edac_dev->kobj);
if (!main_kobj) {
@@ -635,7 +635,7 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
instance->ctl = edac_dev;
/* bump the main kobject's reference count for this controller
- * and this instance is dependant on the main
+ * and this instance is dependent on the main
*/
main_kobj = kobject_get(&edac_dev->kobj);
if (!main_kobj) {
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index a4e9db2d6524..1d8056049072 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -724,7 +724,7 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci,
* Some MC's can remap memory so that it is still available
* at a different address when PCI devices map into memory.
* MC's that can't do this lose the memory where PCI devices
- * are mapped. This mapping is MC dependant and so we call
+ * are mapped. This mapping is MC dependent and so we call
* back into the MC driver for it to map the MC page to
* a physical (CPU) page which can then be mapped to a virtual
* page - which can then be scrubbed.
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 39d97cfdf58c..26343fd46596 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
{
int err;
- debugf1("%s()\n", __func__);
+ debugf4("%s()\n", __func__);
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
@@ -850,29 +850,29 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
/*
* loop if there are attributes and until we hit a NULL entry
- * Remove first all the atributes
+ * First remove all the attributes
*/
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- debugf1("%s() seeking for group %s\n",
+ debugf4("%s() seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
list_for_each_entry(grp_kobj,
&mci->grp_kobj_list, list) {
- debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
- debugf0("%s() group %s\n", __func__,
+ debugf4("%s() group %s\n", __func__,
sysfs_attrib->grp->name);
kobject_put(&grp_kobj->kobj);
}
}
- debugf1("%s() end of seeking for group %s\n",
+ debugf4("%s() end of seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
sysfs_remove_file(kobj, &sysfs_attrib->attr);
} else
@@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
- debugf0("%s() unregister this mci kobj\n", __func__);
+ debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
/* remove this mci instance's attributes */
if (mci->mc_driver_sysfs_attributes) {
- debugf0("%s() unregister mci private attributes\n", __func__);
+ debugf4("%s() unregister mci private attributes\n", __func__);
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj, 0);
}
/* remove the symlink */
- debugf0("%s() remove_link\n", __func__);
+ debugf4("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
/* unregister this instance's kobject */
- debugf0("%s() remove_mci_instance\n", __func__);
+ debugf4("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 023b01cb5175..495198ad059c 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -352,7 +352,7 @@ static int edac_pci_main_kobj_setup(void)
return 0;
/* First time, so create the main kobject and its
- * controls and atributes
+ * controls and attributes
*/
edac_class = edac_get_sysfs_class();
if (edac_class == NULL) {
@@ -551,7 +551,7 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev)
/*
* PCI Parity polling
*
- * Fucntion to retrieve the current parity status
+ * Function to retrieve the current parity status
* and decode it
*
*/
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a5cefab8d65d..87f427c2ce5c 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1372,7 +1372,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
* actual number of slots/dimms per channel, we thus utilize the
* resource as specified by the chipset. Thus, we might have
* more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support upto the chipset max, without
+ * allows the driver to support up to the chipset max, without
* some fancy mobo determination.
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 0448da0af75d..bcbdeeca48b8 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -11,7 +11,7 @@
*
* The intel 5100 has two independent channels. EDAC core currently
* can not reflect this configuration so instead the chip-select
- * rows for each respective channel are layed out one after another,
+ * rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
*/
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 38a9be9e1c7c..80a465efbae8 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -648,7 +648,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
return;
}
- /* Miscelaneous errors */
+ /* Miscellaneous errors */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
@@ -1240,7 +1240,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
* actual number of slots/dimms per channel, we thus utilize the
* resource as specified by the chipset. Thus, we might have
* more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support upto the chipset max, without
+ * allows the driver to support up to the chipset max, without
* some fancy mobo determination.
*/
num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 05523b504271..363cc1602944 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci;
#define AMBPRESENT_0 0x64
#define AMBPRESENT_1 0x66
-const static u16 mtr_regs[MAX_SLOTS] = {
+static const u16 mtr_regs[MAX_SLOTS] = {
0x80, 0x84, 0x88, 0x8c,
0x82, 0x86, 0x8a, 0x8e
};
@@ -1065,7 +1065,7 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
* actual number of slots/dimms per channel, we thus utilize the
* resource as specified by the chipset. Thus, we might have
* more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support upto the chipset max, without
+ * allows the driver to support up to the chipset max, without
* some fancy mobo determination.
*/
num_dimms_per_channel = MAX_SLOTS;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 81154ab296b6..465cbc25149f 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1772,7 +1772,7 @@ static void i7core_check_error(struct mem_ctl_info *mci)
/*
* MCE first step: Copy all mce errors into a temporary buffer
* We use double buffering here to reduce the risk of
- * loosing an error.
+ * losing an error.
*/
smp_rmb();
count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 678405ab04e4..4329d39f902c 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -203,7 +203,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
debugf1("MC%d: %s: %s() Row=%d, "
- "Boundry Address=%#0x, Last = %#0x\n",
+ "Boundary Address=%#0x, Last = %#0x\n",
mci->mc_idx, __FILE__, __func__, index, row_high_limit,
row_high_limit_last);
@@ -305,7 +305,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
/* Many BIOSes don't clear error flags on boot, so do this
- * here, or we get "phantom" errors occuring at module-load
+ * here, or we get "phantom" errors occurring at module-load
* time. */
pci_write_bits32(pdev, I82443BXGX_EAP,
(I82443BXGX_EAP_OFFSET_SBE |
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 3218819b7286..92e65e7038e9 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled
* 3:2 Rank 1 architecture
* 1:0 Rank 0 architecture
*
- * 00 => x16 devices; i.e 4 banks
- * 01 => x8 devices; i.e 8 banks
+ * 00 => 4 banks
+ * 01 => 8 banks
*/
#define I82975X_C0BNKARC 0x10e
#define I82975X_C1BNKARC 0x18e
@@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
int row, multi_chan, chan;
+ unsigned long offst, page;
multi_chan = mci->csrows[0].nr_channels - 1;
@@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
info->errsts = info->errsts2;
}
- chan = info->eap & 1;
- info->eap >>= 1;
- if (info->xeap )
- info->eap |= 0x80000000;
- info->eap >>= PAGE_SHIFT;
- row = edac_mc_find_csrow_by_page(mci, info->eap);
+ page = (unsigned long) info->eap;
+ if (info->xeap & 1)
+ page |= 0x100000000ul;
+ chan = page & 1;
+ page >>= 1;
+ offst = page & ((1 << PAGE_SHIFT) - 1);
+ page >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, page);
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+ edac_mc_handle_ue(mci, page, offst, row, "i82975x UE");
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
multi_chan ? chan : 0,
"i82975x CE");
@@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window)
static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
{
/*
- * ASUS P5W DH either does not program this register or programs
- * it wrong!
- * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val'
- * for each rank should be 01b - the LSB of the word should be 0x55;
- * but it reads 0!
+ * ECC is possible on i82975x ONLY with DEV_X8
*/
return DEV_X8;
}
@@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *mch_window)
{
+ static const char *labels[4] = {
+ "DIMM A1", "DIMM A2",
+ "DIMM B1", "DIMM B2"
+ };
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
u32 cumul_size;
- int index;
+ int index, chan;
last_cumul_size = 0;
@@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
- * contain the total memory contained in all eight rows.
- *
- * FIXME:
- * EDAC currently works for Dual-channel Interleaved configuration.
- * Other configurations, which the chip supports, need fixing/testing.
+ * contain the total memory contained in all rows.
*
*/
@@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
((index >= 4) ? 0x80 : 0));
cumul_size = value;
cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+ /*
+ * Adjust cumul_size w.r.t. the number of channels
+ */
+ if (csrow->nr_channels > 1)
+ cumul_size <<= 1;
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+
+ /*
+ * Initialise dram labels
+ * index values:
+ * [0-7] for single-channel; i.e. csrow->nr_channels = 1
+ * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++)
+ strncpy(csrow->channels[chan].label,
+ labels[(index >> 1) + (chan * 2)],
+ EDAC_MC_LABEL_LEN);
+
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */
- csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+ csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */
+ csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
csrow->dtype = i82975x_dram_type(mch_window, index);
csrow->edac_mode = EDAC_SECDED; /* only supported */
}
@@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR;
+ mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82975X_REVISION;
mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
mci->ctl_page_to_phys = NULL;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct i82975x_pvt *) mci->pvt_info;
pvt->mch_window = mch_window;
i82975x_init_csrows(mci, pdev, mch_window);
+ mci->scrub_mode = SCRUB_HW_SRC;
i82975x_get_error_info(mci, &discard); /* clear counters */
/* finalize this instance of memory controller with edac core */
@@ -664,7 +683,7 @@ module_init(i82975x_init);
module_exit(i82975x_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
module_param(edac_op_state, int, 0444);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index f6cf73d93359..795cfbc0bf50 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec)
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u32 nbsh = (u32)(m->status >> 32);
@@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
- if ((boot_cpu_data.x86 == 0x10) &&
- (boot_cpu_data.x86_model > 7)) {
- if (nbsh & K8_NBSH_ERR_CPU_VAL)
+ if (c->x86 == 0x10 && c->x86_model > 7) {
+ if (nbsh & NBSH_ERR_CPU_VAL)
core = nbsh & nb_err_cpumask;
} else {
u8 assoc_cpus = nbsh & nb_err_cpumask;
@@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
if (!fam_ops->nb_mce(ec, xec))
goto wrong_nb_mce;
- if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
+ if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
nb_bus_decoder(node_id, m, nbcfg);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 45dda47173f2..795a3206acf5 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -31,19 +31,10 @@
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
-#define K8_NBSH 0x4C
-
-#define K8_NBSH_VALID_BIT BIT(31)
-#define K8_NBSH_OVERFLOW BIT(30)
-#define K8_NBSH_UC_ERR BIT(29)
-#define K8_NBSH_ERR_EN BIT(28)
-#define K8_NBSH_MISCV BIT(27)
-#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
-#define K8_NBSH_PCC BIT(25)
-#define K8_NBSH_ERR_CPU_VAL BIT(24)
-#define K8_NBSH_CECC BIT(14)
-#define K8_NBSH_UECC BIT(13)
-#define K8_NBSH_ERR_SCRUBER BIT(8)
+/*
+ * F3x4C bits (MCi_STATUS' high half)
+ */
+#define NBSH_ERR_CPU_VAL BIT(24)
enum tt_ids {
TT_INSTR = 0,
@@ -86,17 +77,6 @@ extern const char *to_msgs[];
extern const char *ii_msgs[];
/*
- * relevant NB regs
- */
-struct err_regs {
- u32 nbcfg;
- u32 nbsh;
- u32 nbsl;
- u32 nbeah;
- u32 nbeal;
-};
-
-/*
* per-family decoder ops
*/
struct amd_decoder_ops {
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 733a7e7a8d6f..a4987e03f59e 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -90,7 +90,7 @@ static ssize_t edac_inject_bank_store(struct kobject *kobj,
if (value > 5)
if (boot_cpu_data.x86 != 0x15 || value > 6) {
- printk(KERN_ERR "Non-existant MCE bank: %lu\n", value);
+ printk(KERN_ERR "Non-existent MCE bank: %lu\n", value);
return -EINVAL;
}
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index b123bb308a4a..38ab8e2cd7f4 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -200,8 +200,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_pci_err_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
{
struct edac_pci_ctl_info *pci;
struct mpc85xx_pci_pdata *pdata;
@@ -338,7 +337,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
-static struct of_platform_driver mpc85xx_pci_err_driver = {
+static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
.remove = __devexit_p(mpc85xx_pci_err_remove),
.driver = {
@@ -503,8 +502,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_l2_err_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev;
struct mpc85xx_l2_pdata *pdata;
@@ -656,7 +654,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
-static struct of_platform_driver mpc85xx_l2_err_driver = {
+static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
@@ -956,8 +954,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit mpc85xx_mc_err_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
struct mpc85xx_mc_pdata *pdata;
@@ -1136,7 +1133,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
-static struct of_platform_driver mpc85xx_mc_err_driver = {
+static struct platform_driver mpc85xx_mc_err_driver = {
.probe = mpc85xx_mc_err_probe,
.remove = mpc85xx_mc_err_remove,
.driver = {
@@ -1150,13 +1147,14 @@ static struct of_platform_driver mpc85xx_mc_err_driver = {
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
- mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000));
+ mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
}
#endif
static int __init mpc85xx_mc_init(void)
{
int res = 0;
+ u32 pvr = 0;
printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
"(C) 2006 Montavista Software\n");
@@ -1171,27 +1169,32 @@ static int __init mpc85xx_mc_init(void)
break;
}
- res = of_register_platform_driver(&mpc85xx_mc_err_driver);
+ res = platform_driver_register(&mpc85xx_mc_err_driver);
if (res)
printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
- res = of_register_platform_driver(&mpc85xx_l2_err_driver);
+ res = platform_driver_register(&mpc85xx_l2_err_driver);
if (res)
printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
#ifdef CONFIG_PCI
- res = of_register_platform_driver(&mpc85xx_pci_err_driver);
+ res = platform_driver_register(&mpc85xx_pci_err_driver);
if (res)
printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif
#ifdef CONFIG_FSL_SOC_BOOKE
- /*
- * need to clear HID1[RFXE] to disable machine check int
- * so we can catch it
- */
- if (edac_op_state == EDAC_OPSTATE_INT)
- on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
+ pvr = mfspr(SPRN_PVR);
+
+ if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
+ (PVR_VER(pvr) == PVR_VER_E500V2)) {
+ /*
+ * need to clear HID1[RFXE] to disable machine check int
+ * so we can catch it
+ */
+ if (edac_op_state == EDAC_OPSTATE_INT)
+ on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
+ }
#endif
return 0;
@@ -1209,13 +1212,18 @@ static void __exit mpc85xx_mc_restore_hid1(void *data)
static void __exit mpc85xx_mc_exit(void)
{
#ifdef CONFIG_FSL_SOC_BOOKE
- on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
+ u32 pvr = mfspr(SPRN_PVR);
+
+ if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
+ (PVR_VER(pvr) == PVR_VER_E500V2)) {
+ on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
+ }
#endif
#ifdef CONFIG_PCI
- of_unregister_platform_driver(&mpc85xx_pci_err_driver);
+ platform_driver_unregister(&mpc85xx_pci_err_driver);
#endif
- of_unregister_platform_driver(&mpc85xx_l2_err_driver);
- of_unregister_platform_driver(&mpc85xx_mc_err_driver);
+ platform_driver_unregister(&mpc85xx_l2_err_driver);
+ platform_driver_unregister(&mpc85xx_mc_err_driver);
}
module_exit(mpc85xx_mc_exit);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index b9f0c20df1aa..c1f0045ceb8e 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -184,8 +184,7 @@ struct ppc4xx_ecc_status {
/* Function Prototypes */
-static int ppc4xx_edac_probe(struct platform_device *device,
- const struct of_device_id *device_id);
+static int ppc4xx_edac_probe(struct platform_device *device);
static int ppc4xx_edac_remove(struct platform_device *device);
/* Global Variables */
@@ -201,7 +200,7 @@ static struct of_device_id ppc4xx_edac_match[] = {
{ }
};
-static struct of_platform_driver ppc4xx_edac_driver = {
+static struct platform_driver ppc4xx_edac_driver = {
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
.driver = {
@@ -997,9 +996,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* initialized.
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller this EDAC instance is bound to.
- * @match: A pointer to the OpenFirmware device tree match
- * information associated with the controller this EDAC instance
- * is bound to.
* @dcr_host: A pointer to the DCR data containing the DCR mapping
* for this controller instance.
* @mcopt1: The 32-bit Memory Controller Option 1 register value
@@ -1015,7 +1011,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
static int __devinit
ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
struct platform_device *op,
- const struct of_device_id *match,
const dcr_host_t *dcr_host,
u32 mcopt1)
{
@@ -1024,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
struct ppc4xx_edac_pdata *pdata = NULL;
const struct device_node *np = op->dev.of_node;
- if (match == NULL)
+ if (op->dev.of_match == NULL)
return -EINVAL;
/* Initial driver pointers and private data */
@@ -1227,9 +1222,6 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* ppc4xx_edac_probe - check controller and bind driver
* @op: A pointer to the OpenFirmware device tree node associated
* with the controller being probed for driver binding.
- * @match: A pointer to the OpenFirmware device tree match
- * information associated with the controller being probed
- * for driver binding.
*
* This routine probes a specific ibm,sdram-4xx-ddr2 controller
* instance for binding with the driver.
@@ -1237,8 +1229,7 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* Returns 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
-static int __devinit
-ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit ppc4xx_edac_probe(struct platform_device *op)
{
int status = 0;
u32 mcopt1, memcheck;
@@ -1304,7 +1295,7 @@ ppc4xx_edac_probe(struct platform_device *op, const struct of_device_id *match)
goto done;
}
- status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1);
+ status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1);
if (status) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -1421,7 +1412,7 @@ ppc4xx_edac_init(void)
ppc4xx_edac_opstate_init();
- return of_register_platform_driver(&ppc4xx_edac_driver);
+ return platform_driver_register(&ppc4xx_edac_driver);
}
/**
@@ -1434,7 +1425,7 @@ ppc4xx_edac_init(void)
static void __exit
ppc4xx_edac_exit(void)
{
- of_unregister_platform_driver(&ppc4xx_edac_driver);
+ platform_driver_unregister(&ppc4xx_edac_driver);
}
module_init(ppc4xx_edac_init);
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6a822c631ef5..678513738c33 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -120,7 +120,7 @@
* write 0=NOP
*/
-#define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundry Address
+#define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address
* Registers
*
* 7:0 Address lines 30:24 - upper limit of
@@ -217,7 +217,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
{
struct csrow_info *csrow;
int index;
- u8 drbar; /* SDRAM Row Boundry Address Register */
+ u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
u32 reg_sdram, ecc_on, row_base;
@@ -236,7 +236,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
- debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n",
+ debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n",
__func__, index, row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
new file mode 100644
index 000000000000..1d5cf06f6c6b
--- /dev/null
+++ b/drivers/edac/tile_edac.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Tilera-specific EDAC driver.
+ *
+ * This source code is derived from the following driver:
+ *
+ * Cell MIC driver for ECC counting
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/edac.h>
+#include <hv/hypervisor.h>
+#include <hv/drv_mshim_intf.h>
+
+#include "edac_core.h"
+
+#define DRV_NAME "tile-edac"
+
+/* Number of cs_rows needed per memory controller on TILEPro. */
+#define TILE_EDAC_NR_CSROWS 1
+
+/* Number of channels per memory controller on TILEPro. */
+#define TILE_EDAC_NR_CHANS 1
+
+/* Granularity of reported error in bytes on TILEPro. */
+#define TILE_EDAC_ERROR_GRAIN 8
+
+/* TILE processor has multiple independent memory controllers. */
+struct platform_device *mshim_pdev[TILE_MAX_MSHIMS];
+
+struct tile_edac_priv {
+ int hv_devhdl; /* Hypervisor device handle. */
+ int node; /* Memory controller instance #. */
+ unsigned int ce_count; /*
+ * Correctable-error counter
+ * kept by the driver.
+ */
+};
+
+static void tile_edac_check(struct mem_ctl_info *mci)
+{
+ struct tile_edac_priv *priv = mci->pvt_info;
+ struct mshim_mem_error mem_error;
+
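+	/* Read the current memory-error state for this controller from the hypervisor. */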
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error,
+ sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) !=
+ sizeof(struct mshim_mem_error)) {
+ pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n");
+ return;
+ }
+
+ /* Check if the current error count is different from the saved one. */
+ if (mem_error.sbe_count != priv->ce_count) {
+ dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
+ priv->ce_count = mem_error.sbe_count;
+ edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+ }
+}
+
+/*
+ * Initialize the 'csrows' table within the mci control structure with the
+ * addressing of memory.
+ */
+static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow = &mci->csrows[0];
+ struct tile_edac_priv *priv = mci->pvt_info;
+ struct mshim_mem_info mem_info;
+
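+	/* Query memory size, type and ECC capability from the hypervisor. */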
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
+ sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
+ sizeof(struct mshim_mem_info)) {
+ pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n");
+ return -1;
+ }
+
+ if (mem_info.mem_ecc)
+ csrow->edac_mode = EDAC_SECDED;
+ else
+ csrow->edac_mode = EDAC_NONE;
+ switch (mem_info.mem_type) {
+ case DDR2:
+ csrow->mtype = MEM_DDR2;
+ break;
+
+ case DDR3:
+ csrow->mtype = MEM_DDR3;
+ break;
+
+ default:
+ return -1;
+ }
+
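+	/* Describe all of this controller's memory as one chip-select row. */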
+ csrow->first_page = 0;
+ csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->grain = TILE_EDAC_ERROR_GRAIN;
+ csrow->dtype = DEV_UNKNOWN;
+
+ return 0;
+}
+
+static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
+{
+ char hv_file[32];
+ int hv_devhdl;
+ struct mem_ctl_info *mci;
+ struct tile_edac_priv *priv;
+ int rc;
+
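+	/* Open the hypervisor device that exposes this memory controller. */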
+ sprintf(hv_file, "mshim/%d", pdev->id);
+ hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0);
+ if (hv_devhdl < 0)
+ return -EINVAL;
+
+ /* A TILE MC has a single channel and one chip-select row. */
+ mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
+ TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+ if (mci == NULL)
+ return -ENOMEM;
+ priv = mci->pvt_info;
+ priv->node = pdev->id;
+ priv->hv_devhdl = hv_devhdl;
+
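+	/* Describe the controller and its capabilities to the EDAC core. */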
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = DRV_NAME;
+ mci->ctl_name = "TILEPro_Memory_Controller";
+ mci->dev_name = dev_name(&pdev->dev);
+ mci->edac_check = tile_edac_check;
+
+ /*
+ * Initialize the MC control structure 'csrows' table
+ * with the mapping and control information.
+ */
+ if (tile_edac_init_csrows(mci)) {
+ /* No csrows found. */
+ mci->edac_cap = EDAC_FLAG_NONE;
+ } else {
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ }
+
+ platform_set_drvdata(pdev, mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ edac_mc_free(mci);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ if (mci)
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver tile_edac_mc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tile_edac_mc_probe,
+ .remove = __devexit_p(tile_edac_mc_remove),
+};
+
+/*
+ * Driver init routine.
+ */
+static int __init tile_edac_init(void)
+{
+ char hv_file[32];
+ struct platform_device *pdev;
+ int i, err, num = 0;
+
+ /* Only support POLL mode. */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ err = platform_driver_register(&tile_edac_mc_driver);
+ if (err)
+ return err;
+
+ for (i = 0; i < TILE_MAX_MSHIMS; i++) {
+ /*
+		 * Not all memory controllers are configured, such as in
+		 * the case of a simulator. So we register only those
+		 * mshims that are configured by the hypervisor.
+ */
+ sprintf(hv_file, "mshim/%d", i);
+ if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0)
+ continue;
+
+ pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0);
+ if (IS_ERR(pdev))
+ continue;
+ mshim_pdev[i] = pdev;
+ num++;
+ }
+
+ if (num == 0) {
+ platform_driver_unregister(&tile_edac_mc_driver);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/*
+ * Driver cleanup routine.
+ */
+static void __exit tile_edac_exit(void)
+{
+ int i;
+
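+	/* Unregister every mshim platform device created at init time. */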
+ for (i = 0; i < TILE_MAX_MSHIMS; i++) {
+ struct platform_device *pdev = mshim_pdev[i];
+ if (!pdev)
+ continue;
+
+ platform_set_drvdata(pdev, NULL);
+ platform_device_unregister(pdev);
+ }
+ platform_driver_unregister(&tile_edac_mc_driver);
+}
+
+module_init(tile_edac_init);
+module_exit(tile_edac_exit);