Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile | 13
-rw-r--r--  arch/powerpc/kernel/btext.c | 2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_44x.S | 15
-rw-r--r--  arch/powerpc/kernel/cputable.c | 149
-rw-r--r--  arch/powerpc/kernel/crash.c | 101
-rw-r--r--  arch/powerpc/kernel/dma_64.c | 19
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 14
-rw-r--r--  arch/powerpc/kernel/head_64.S | 1
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 2
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 20
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 12
-rw-r--r--  arch/powerpc/kernel/iommu.c | 8
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c | 6
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 51
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 12
-rw-r--r--  arch/powerpc/kernel/misc.S | 73
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 56
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 7
-rw-r--r--  arch/powerpc/kernel/module_32.c | 77
-rw-r--r--  arch/powerpc/kernel/module_64.c | 81
-rw-r--r--  arch/powerpc/kernel/of_device.c | 2
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 33
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 774
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 969
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 444
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 7
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 1
-rw-r--r--  arch/powerpc/kernel/prom.c | 52
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 149
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 22
-rw-r--r--  arch/powerpc/kernel/rio.c | 52
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 13
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 78
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 3
-rw-r--r--  arch/powerpc/kernel/smp.c | 49
-rw-r--r--  arch/powerpc/kernel/systbl_chk.c | 58
-rw-r--r--  arch/powerpc/kernel/systbl_chk.sh | 33
-rw-r--r--  arch/powerpc/kernel/time.c | 91
-rw-r--r--  arch/powerpc/kernel/traps.c | 87
-rw-r--r--  arch/powerpc/kernel/udbg.c | 7
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c | 43
41 files changed, 1998 insertions(+), 1688 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ca51f0cf27ab..58dbfeff9b4d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -3,7 +3,7 @@
#
ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
+CFLAGS_prom_init.o += -mno-minimal-toc
endif
ifeq ($(CONFIG_PPC32),y)
CFLAGS_prom_init.o += -fPIC
@@ -70,6 +70,7 @@ pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o
obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
pci-common.o
obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_RAPIDIO) += rio.o
obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
machine_kexec_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_AUDIT) += audit.o
@@ -91,3 +92,13 @@ obj-$(CONFIG_PPC64) += $(obj64-y)
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_PPC64) += entry_64.o
+
+extra-y += systbl_chk.i
+$(obj)/systbl.o: systbl_chk
+
+quiet_cmd_systbl_chk = CALL $<
+ cmd_systbl_chk = $(CONFIG_SHELL) $< $(obj)/systbl_chk.i
+
+PHONY += systbl_chk
+systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
+ $(call cmd,systbl_chk)
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 9c74fdf29eec..80e2eef05b2e 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -236,7 +236,7 @@ int __init btext_find_display(int allow_nonstdout)
if (rc == 0 || !allow_nonstdout)
return rc;
- for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
+ for_each_node_by_type(np, "display") {
if (of_get_property(np, "linux,opened", NULL)) {
printk("trying %s ...\n", np->full_name);
rc = btext_initialize(np);
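The hunk above replaces an open-coded of_find_node_by_type() loop with the for_each_node_by_type() iterator, which handles the node reference counting the same way. A minimal sketch of the pattern, reusing the same "display" / "linux,opened" strings as the hunk; the helper name is hypothetical:

    #include <linux/of.h>	/* or <asm/prom.h> on kernels of this vintage */

    /* return the first "display" node that firmware left opened */
    static struct device_node *find_opened_display(void)
    {
    	struct device_node *np;

    	for_each_node_by_type(np, "display") {
    		if (of_get_property(np, "linux,opened", NULL))
    			return np;	/* reference held; caller must of_node_put() */
    	}
    	return NULL;
    }
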
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 8e1812e2f3ee..6250443ab9c9 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -23,11 +23,24 @@ _GLOBAL(__setup_cpu_440epx)
mflr r4
bl __init_fpu_44x
bl __plb_disable_wrp
+ bl __fixup_440A_mcheck
mtlr r4
blr
_GLOBAL(__setup_cpu_440grx)
- b __plb_disable_wrp
+ mflr r4
+ bl __plb_disable_wrp
+ bl __fixup_440A_mcheck
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_440gx)
+_GLOBAL(__setup_cpu_440spe)
+ b __fixup_440A_mcheck
+ /* Temporary fixup for arch/ppc until we kill the whole thing */
+#ifndef CONFIG_PPC_MERGE
+_GLOBAL(__fixup_440A_mcheck)
+ blr
+#endif
/* enable APU between CPU and FPU */
_GLOBAL(__init_fpu_44x)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9ed351f3c966..a4c2771b5e62 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -33,7 +33,9 @@ EXPORT_SYMBOL(cur_cpu_spec);
#ifdef CONFIG_PPC32
extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -85,6 +87,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
+ .machine_check = machine_check_generic,
.platform = "power3",
},
{ /* Power3+ */
@@ -99,6 +102,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
+ .machine_check = machine_check_generic,
.platform = "power3",
},
{ /* Northstar */
@@ -113,6 +117,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
+ .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* Pulsar */
@@ -127,6 +132,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
+ .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* I-star */
@@ -141,6 +147,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
+ .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* S-star */
@@ -155,6 +162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
+ .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* Power4 */
@@ -169,6 +177,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "power4",
},
{ /* Power4+ */
@@ -183,6 +192,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "power4",
},
{ /* PPC970 */
@@ -200,6 +210,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970FX */
@@ -217,6 +228,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -234,6 +246,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970MP */
@@ -251,6 +264,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970GX */
@@ -267,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* Power5 GR */
@@ -286,6 +301,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
*/
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
+ .machine_check = machine_check_generic,
.platform = "power5",
},
{ /* Power5++ */
@@ -301,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
+ .machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* Power5 GS */
@@ -317,6 +334,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
+ .machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -327,6 +345,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_POWER5_PLUS,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* Power6 */
@@ -346,6 +365,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
.oprofile_mmcra_clear = POWER6_MMCRA_THRM |
POWER6_MMCRA_OTHER,
+ .machine_check = machine_check_generic,
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -356,6 +376,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .machine_check = machine_check_generic,
.platform = "power6",
},
{ /* Cell Broadband Engine */
@@ -372,6 +393,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/cell-be",
.oprofile_type = PPC_OPROFILE_CELL,
+ .machine_check = machine_check_generic,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -388,6 +410,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_pa6t,
.oprofile_cpu_type = "ppc64/pa6t",
.oprofile_type = PPC_OPROFILE_PA6T,
+ .machine_check = machine_check_generic,
.platform = "pa6t",
},
{ /* default match */
@@ -400,6 +423,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
+ .machine_check = machine_check_generic,
.platform = "power4",
}
#endif /* CONFIG_PPC64 */
@@ -414,6 +438,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_generic,
.platform = "ppc601",
},
{ /* 603 */
@@ -425,6 +450,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
{ /* 603e */
@@ -436,6 +462,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
{ /* 603ev */
@@ -447,6 +474,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
{ /* 604 */
@@ -459,6 +487,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 2,
.cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
.platform = "ppc604",
},
{ /* 604e */
@@ -471,6 +500,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
.platform = "ppc604",
},
{ /* 604r */
@@ -483,6 +513,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
.platform = "ppc604",
},
{ /* 604ev */
@@ -495,6 +526,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
.platform = "ppc604",
},
{ /* 740/750 (0x4202, don't support TAU ?) */
@@ -507,6 +539,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750CX (80100 and 8010x?) */
@@ -519,6 +552,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750CX (82201 and 82202) */
@@ -531,6 +565,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750CXe (82214) */
@@ -543,6 +578,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750CXe "Gekko" (83214) */
@@ -555,6 +591,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750CL */
@@ -567,6 +604,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 745/755 */
@@ -579,6 +617,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750FX rev 1.x */
@@ -591,6 +630,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
@@ -603,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750FX (All revs except 2.0) */
@@ -615,6 +656,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750fx,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 750GX */
@@ -627,6 +669,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750fx,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 740/750 (L2CR bit need fixup for 740) */
@@ -639,6 +682,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
.platform = "ppc750",
},
{ /* 7400 rev 1.1 ? (no TAU) */
@@ -652,6 +696,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_7400,
+ .machine_check = machine_check_generic,
.platform = "ppc7400",
},
{ /* 7400 */
@@ -665,6 +710,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_7400,
+ .machine_check = machine_check_generic,
.platform = "ppc7400",
},
{ /* 7410 */
@@ -678,6 +724,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.cpu_setup = __setup_cpu_7410,
+ .machine_check = machine_check_generic,
.platform = "ppc7400",
},
{ /* 7450 2.0 - no doze/nap */
@@ -693,6 +740,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7450 2.1 */
@@ -708,6 +756,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7450 2.3 and newer */
@@ -723,6 +772,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7455 rev 1.x */
@@ -738,6 +788,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7455 rev 2.0 */
@@ -753,6 +804,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7455 others */
@@ -768,6 +820,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7447/7457 Rev 1.0 */
@@ -783,6 +836,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7447/7457 Rev 1.1 */
@@ -798,6 +852,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7447/7457 Rev 1.2 and later */
@@ -812,6 +867,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7447A */
@@ -827,6 +883,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 7448 */
@@ -842,6 +899,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
.oprofile_type = PPC_OPROFILE_G4,
+ .machine_check = machine_check_generic,
.platform = "ppc7450",
},
{ /* 82xx (8240, 8245, 8260 are all 603e cores) */
@@ -853,6 +911,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
{ /* All G2_LE (603e core, plus some) have the same pvr */
@@ -864,6 +923,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
{ /* e300c1 (a 603e core, plus some) on 83xx */
@@ -875,6 +935,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
{ /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
@@ -886,9 +947,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
- { /* e300c3 on 83xx */
+ { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00850000,
.cpu_name = "e300c3",
@@ -899,6 +961,18 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_603,
.platform = "ppc603",
},
+ { /* e300c4 (e300c1, plus one IU) */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00860000,
+ .cpu_name = "e300c4",
+ .cpu_features = CPU_FTRS_E300,
+ .cpu_user_features = COMMON_USER,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
@@ -907,6 +981,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_generic,
.platform = "ppc603",
},
#endif /* CLASSIC_PPC */
@@ -933,6 +1008,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .machine_check = machine_check_4xx,
.platform = "ppc403",
},
{ /* 403GCX */
@@ -944,6 +1020,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .machine_check = machine_check_4xx,
.platform = "ppc403",
},
{ /* 403G ?? */
@@ -954,6 +1031,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .machine_check = machine_check_4xx,
.platform = "ppc403",
},
{ /* 405GP */
@@ -965,6 +1043,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* STB 03xxx */
@@ -976,6 +1055,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* STB 04xxx */
@@ -987,6 +1067,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* NP405L */
@@ -998,6 +1079,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* NP4GS3 */
@@ -1009,6 +1091,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* NP405H */
@@ -1020,6 +1103,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* 405GPr */
@@ -1031,6 +1115,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* STBx25xx */
@@ -1042,6 +1127,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* 405LP */
@@ -1052,6 +1138,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* Xilinx Virtex-II Pro */
@@ -1063,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* Xilinx Virtex-4 FX */
@@ -1074,6 +1162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* 405EP */
@@ -1085,17 +1174,31 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* 405EX */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x12910000,
+ .pvr_mask = 0xffff0004,
+ .pvr_value = 0x12910004,
.cpu_name = "405EX",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr */
+ .pvr_mask = 0xffff0004,
+ .pvr_value = 0x12910000,
+ .cpu_name = "405EXr",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc405",
},
@@ -1109,6 +1212,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc440",
},
{ /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
@@ -1120,6 +1224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
+ .machine_check = machine_check_4xx,
.platform = "ppc440",
},
{
@@ -1130,6 +1235,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */
+ .pvr_mask = 0xf0000ff7,
+ .pvr_value = 0x400008d4,
+ .cpu_name = "440EP Rev. C",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440ep,
+ .machine_check = machine_check_4xx,
.platform = "ppc440",
},
{ /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
@@ -1141,6 +1259,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
+ .machine_check = machine_check_4xx,
.platform = "ppc440",
},
{ /* 440GRX */
@@ -1152,6 +1271,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440grx,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */
@@ -1163,6 +1283,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440epx,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 440GP Rev. B */
@@ -1173,6 +1294,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc440gp",
},
{ /* 440GP Rev. C */
@@ -1183,6 +1305,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc440gp",
},
{ /* 440GX Rev. A */
@@ -1193,6 +1316,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 440GX Rev. B */
@@ -1203,6 +1328,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 440GX Rev. C */
@@ -1213,6 +1340,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 440GX Rev. F */
@@ -1223,6 +1352,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 440SP Rev. A */
@@ -1233,6 +1364,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
.platform = "ppc440",
},
{ /* 440SPe Rev. A */
@@ -1243,6 +1375,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440spe,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 440SPe Rev. B */
@@ -1253,10 +1387,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440spe,
+ .machine_check = machine_check_440A,
.platform = "ppc440",
},
#endif /* CONFIG_44x */
#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_E200
{ /* e200z5 */
.pvr_mask = 0xfff00000,
.pvr_value = 0x81000000,
@@ -1267,6 +1404,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
.dcache_bsize = 32,
+ .machine_check = machine_check_e200,
.platform = "ppc5554",
},
{ /* e200z6 */
@@ -1280,8 +1418,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_UNIFIED_CACHE,
.dcache_bsize = 32,
+ .machine_check = machine_check_e200,
.platform = "ppc5554",
},
+#elif defined(CONFIG_E500)
{ /* e500 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x80200000,
@@ -1296,6 +1436,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
.oprofile_type = PPC_OPROFILE_BOOKE,
+ .machine_check = machine_check_e500,
.platform = "ppc8540",
},
{ /* e500v2 */
@@ -1313,9 +1454,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
.oprofile_type = PPC_OPROFILE_BOOKE,
+ .machine_check = machine_check_e500,
.platform = "ppc8548",
},
#endif
+#endif
#if !CLASSIC_PPC
{ /* default match */
.pvr_mask = 0x00000000,
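Most of the cputable.c churn is the new .machine_check member in each cpu_spec: machine_check_generic for the classic 32-bit and 64-bit parts, and machine_check_4xx / machine_check_440A / machine_check_e200 / machine_check_e500 for the embedded families, so machine-check handling becomes a per-CPU function pointer instead of a pile of compile-time #ifdefs. A rough sketch of the dispatch, assuming the hook returns non-zero once it has recovered the error; the function name and fallback policy below are illustrative, not taken from this commit:

    /* hypothetical dispatcher; the real consumer of the hook lives in traps.c */
    void example_machine_check_exception(struct pt_regs *regs)
    {
    	int recovered = 0;

    	if (cur_cpu_spec && cur_cpu_spec->machine_check)
    		recovered = cur_cpu_spec->machine_check(regs);

    	if (!recovered)
    		panic("Unrecoverable machine check");	/* placeholder policy */
    }
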
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 77c749a13378..571132ed12c1 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -32,6 +32,8 @@
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/setjmp.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -45,6 +47,11 @@ int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t cpus_in_sr = CPU_MASK_NONE;
+#define CRASH_HANDLER_MAX 1
+/* NULL terminated list of shutdown handles */
+static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
+static DEFINE_SPINLOCK(crash_handlers_lock);
+
#ifdef CONFIG_SMP
static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
@@ -285,9 +292,72 @@ static inline void crash_kexec_stop_spus(void)
}
#endif /* CONFIG_SPU_BASE */
+/*
+ * Register a function to be called on shutdown. Only use this if you
+ * can't reset your device in the second kernel.
+ */
+int crash_shutdown_register(crash_shutdown_t handler)
+{
+ unsigned int i, rc;
+
+ spin_lock(&crash_handlers_lock);
+ for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
+ if (!crash_shutdown_handles[i]) {
+ /* Insert handle at first empty entry */
+ crash_shutdown_handles[i] = handler;
+ rc = 0;
+ break;
+ }
+
+ if (i == CRASH_HANDLER_MAX) {
+ printk(KERN_ERR "Crash shutdown handles full, "
+ "not registered.\n");
+ rc = 1;
+ }
+
+ spin_unlock(&crash_handlers_lock);
+ return rc;
+}
+EXPORT_SYMBOL(crash_shutdown_register);
+
+int crash_shutdown_unregister(crash_shutdown_t handler)
+{
+ unsigned int i, rc;
+
+ spin_lock(&crash_handlers_lock);
+ for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
+ if (crash_shutdown_handles[i] == handler)
+ break;
+
+ if (i == CRASH_HANDLER_MAX) {
+ printk(KERN_ERR "Crash shutdown handle not found\n");
+ rc = 1;
+ } else {
+ /* Shift handles down */
+ for (; crash_shutdown_handles[i]; i++)
+ crash_shutdown_handles[i] =
+ crash_shutdown_handles[i+1];
+ rc = 0;
+ }
+
+ spin_unlock(&crash_handlers_lock);
+ return rc;
+}
+EXPORT_SYMBOL(crash_shutdown_unregister);
+
+static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+
+static int handle_fault(struct pt_regs *regs)
+{
+ longjmp(crash_shutdown_buf, 1);
+ return 0;
+}
+
void default_machine_crash_shutdown(struct pt_regs *regs)
{
- unsigned int irq;
+ unsigned int i;
+ int (*old_handler)(struct pt_regs *regs);
+
/*
* This function is only called after the system
@@ -301,15 +371,36 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
hard_irq_disable();
- for_each_irq(irq) {
- struct irq_desc *desc = irq_desc + irq;
+ for_each_irq(i) {
+ struct irq_desc *desc = irq_desc + i;
if (desc->status & IRQ_INPROGRESS)
- desc->chip->eoi(irq);
+ desc->chip->eoi(i);
if (!(desc->status & IRQ_DISABLED))
- desc->chip->disable(irq);
+ desc->chip->disable(i);
+ }
+
+ /*
+ * Call registered shutdown routines safely. Swap out
+ * __debugger_fault_handler, and replace on exit.
+ */
+ old_handler = __debugger_fault_handler;
+ __debugger_fault_handler = handle_fault;
+ for (i = 0; crash_shutdown_handles[i]; i++) {
+ if (setjmp(crash_shutdown_buf) == 0) {
+ /*
+ * Insert syncs and delay to ensure
+ * instructions in the dangerous region don't
+ * leak away from this protected region.
+ */
+ asm volatile("sync; isync");
+ /* dangerous region */
+ crash_shutdown_handles[i]();
+ asm volatile("sync; isync");
+ }
}
+ __debugger_fault_handler = old_handler;
/*
* Make a note of crashing cpu. Will be used in machine_kexec
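crash_shutdown_register()/crash_shutdown_unregister() give a driver that cannot be reset from the kdump kernel a hook on the crash path, and default_machine_crash_shutdown() runs each registered handler inside the setjmp()/longjmp() guard above so one faulting handler cannot abort the whole shutdown. A hedged usage sketch; the driver function is made up, and the handler type is assumed to be void (*)(void) as the call sites above imply:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/kexec.h>	/* assumed home of crash_shutdown_register() */

    static void example_nic_crash_shutdown(void)
    {
    	/* quiesce DMA and mask the device's interrupts for the kdump kernel */
    }

    static int __init example_nic_init(void)
    {
    	/* register returns 0 on success, 1 once all CRASH_HANDLER_MAX slots are taken */
    	if (crash_shutdown_register(example_nic_crash_shutdown))
    		return -EBUSY;
    	return 0;
    }
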
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 14206e3f0819..84239076a5b8 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -112,10 +112,16 @@ EXPORT_SYMBOL(dma_iommu_ops);
/*
* Generic direct DMA implementation
*
- * This implementation supports a global offset that can be applied if
- * the address at which memory is visible to devices is not 0.
+ * This implementation supports a per-device offset that can be applied if
+ * the address at which memory is visible to devices is not 0. Platform code
+ * can set archdata.dma_data to an unsigned long holding the offset. By
+ * default the offset is zero.
*/
-unsigned long dma_direct_offset;
+
+static unsigned long get_dma_direct_offset(struct device *dev)
+{
+ return (unsigned long)dev->archdata.dma_data;
+}
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
@@ -124,13 +130,12 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
void *ret;
int node = dev->archdata.numa_node;
- /* TODO: Maybe use the numa node here too ? */
page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret) | dma_direct_offset;
+ *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
return ret;
}
@@ -145,7 +150,7 @@ static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction direction)
{
- return virt_to_abs(ptr) | dma_direct_offset;
+ return virt_to_abs(ptr) + get_dma_direct_offset(dev);
}
static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
@@ -161,7 +166,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = sg_phys(sg) | dma_direct_offset;
+ sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
sg->dma_length = sg->length;
}
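With this change the direct-DMA path takes its offset from dev->archdata.dma_data instead of the old global dma_direct_offset, so each bus can present system memory to its devices at a different address. Platform code would prime the field while fixing up devices; a sketch under that assumption, where the function name and the 0x80000000 offset are invented:

    /* hypothetical per-device fixup run during bus scan */
    static void example_set_dma_offset(struct device *dev)
    {
    	/* dma_direct_{alloc_coherent,map_single,map_sg} add this value */
    	dev->archdata.dma_data = (void *)0x80000000ul;
    	dev->archdata.dma_ops = &dma_direct_ops;
    }
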
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 56aba84c1f6e..ad071a146a8d 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -289,11 +289,8 @@ interrupt_base:
CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
-#ifdef CONFIG_440A
- MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
-#else
CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
-#endif
+ MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
@@ -674,6 +671,15 @@ finish_tlb_load:
*/
/*
+ * Adjust the machine check IVOR on 440A cores
+ */
+_GLOBAL(__fixup_440A_mcheck)
+ li r3,MachineCheckA@l
+ mtspr SPRN_IVOR1,r3
+ sync
+ blr
+
+/*
* extern void giveup_altivec(struct task_struct *prev)
*
* The 44x core does not have an AltiVec unit.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c34986835a4e..11b4f6d9ffce 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -903,6 +903,7 @@ handle_page_fault:
* the PTE insertion
*/
12: bl .save_nvgprs
+ mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
ld r4,_DAR(r1)
bl .low_hash_fault
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 8536e7676160..ba9393f8e77a 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -166,7 +166,7 @@ label:
mfspr r5,SPRN_ESR; \
stw r5,_ESR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+ EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, mcheck_transfer_to_handler, \
ret_from_mcheck_exc)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 7aecb39a5a45..d9cc2c288d9e 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -73,8 +73,8 @@ _ENTRY(_start);
/* We try to not make any assumptions about how the boot loader
* setup or used the TLBs. We invalidate all mappings from the
* boot loader and load a single entry in TLB1[0] to map the
- * first 16M of kernel memory. Any boot info passed from the
- * bootloader needs to live in this first 16M.
+ * first 64M of kernel memory. Any boot info passed from the
+ * bootloader needs to live in this first 64M.
*
* Requirement on bootloader:
* - The page we're executing in needs to reside in TLB1 and
@@ -167,7 +167,7 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_MAS0,r7
tlbre
- /* Just modify the entry ID and EPN for the temp mapping */
+ /* Just modify the entry ID, EPN and RPN for the temp mapping */
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
@@ -177,9 +177,12 @@ skpinv: addi r6,r6,1 /* Increment */
ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
mtspr SPRN_MAS1,r6
mfspr r6,SPRN_MAS2
- li r7,0 /* temp EPN = 0 */
+ lis r7,PHYSICAL_START@h
rlwimi r7,r6,0,20,31
mtspr SPRN_MAS2,r7
+ mfspr r6,SPRN_MAS3
+ rlwimi r7,r6,0,20,31
+ mtspr SPRN_MAS3,r7
tlbwe
xori r6,r4,1
@@ -222,11 +225,11 @@ skpinv: addi r6,r6,1 /* Increment */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
- ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
+ ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
li r7,0
- lis r6,KERNELBASE@h
- ori r6,r6,KERNELBASE@l
+ lis r6,PAGE_OFFSET@h
+ ori r6,r6,PAGE_OFFSET@l
rlwimi r6,r7,0,20,31
mtspr SPRN_MAS2,r6
li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
@@ -234,6 +237,9 @@ skpinv: addi r6,r6,1 /* Increment */
tlbwe
/* 7. Jump to KERNELBASE mapping */
+ lis r6,KERNELBASE@h
+ ori r6,r6,KERNELBASE@l
+ rlwimi r6,r7,0,20,31
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
bl 1f /* Find our address */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 72fd87156b24..2f50bb5d00f9 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -41,6 +41,7 @@
#include <linux/kobject.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
@@ -52,7 +53,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
struct bus_type ibmebus_bus_type;
/* These devices will automatically be added to the bus during init */
-static struct of_device_id builtin_matches[] = {
+static struct of_device_id __initdata builtin_matches[] = {
{ .compatible = "IBM,lhca" },
{ .compatible = "IBM,lhea" },
{},
@@ -171,7 +172,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
root = of_find_node_by_path("/");
- for (child = NULL; (child = of_get_next_child(root, child)); ) {
+ for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
@@ -197,16 +198,13 @@ int ibmebus_register_driver(struct of_platform_driver *drv)
/* If the driver uses devices that ibmebus doesn't know, add them */
ibmebus_create_devices(drv->match_table);
- drv->driver.name = drv->name;
- drv->driver.bus = &ibmebus_bus_type;
-
- return driver_register(&drv->driver);
+ return of_register_driver(drv, &ibmebus_bus_type);
}
EXPORT_SYMBOL(ibmebus_register_driver);
void ibmebus_unregister_driver(struct of_platform_driver *drv)
{
- driver_unregister(&drv->driver);
+ of_unregister_driver(drv);
}
EXPORT_SYMBOL(ibmebus_unregister_driver);
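ibmebus_register_driver() now hands the driver to of_register_driver() with the ibmebus bus type instead of filling in driver.name and driver.bus itself, matching the generic OF platform-bus code. A driver-side sketch; the driver name, match entry and callbacks are placeholders:

    static int example_ebus_probe(struct of_device *dev,
    			      const struct of_device_id *match)
    {
    	return 0;	/* claim the device; real setup goes here */
    }

    static int example_ebus_remove(struct of_device *dev)
    {
    	return 0;
    }

    static struct of_device_id example_ebus_ids[] = {
    	{ .compatible = "IBM,lhea" },	/* placeholder match */
    	{},
    };

    static struct of_platform_driver example_ebus_driver = {
    	.name		= "example-ebus",
    	.match_table	= example_ebus_ids,
    	.probe		= example_ebus_probe,
    	.remove		= example_ebus_remove,
    };

    static int __init example_ebus_init(void)
    {
    	return ibmebus_register_driver(&example_ebus_driver);
    }
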
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 79a85d656871..a3c406aca664 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -532,16 +532,14 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
return tbl;
}
-void iommu_free_table(struct device_node *dn)
+void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
- struct pci_dn *pdn = dn->data;
- struct iommu_table *tbl = pdn->iommu_table;
unsigned long bitmap_sz, i;
unsigned int order;
if (!tbl || !tbl->it_map) {
printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
- dn->full_name);
+ node_name);
return;
}
@@ -550,7 +548,7 @@ void iommu_free_table(struct device_node *dn)
for (i = 0; i < (tbl->it_size/64); i++) {
if (tbl->it_map[i] != 0) {
printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
- __FUNCTION__, dn->full_name);
+ __FUNCTION__, node_name);
break;
}
}
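iommu_free_table() no longer takes a device_node and digs the table out of its pci_dn; callers now pass the iommu_table itself plus a name used only for diagnostics, so tables not tied to a PCI node can be freed too. A caller-side sketch of the converted pattern, grounded in the old code removed above; the helper is hypothetical:

    /* sketch: release the DMA window attached to a PCI device node */
    static void example_release_iommu_table(struct device_node *dn)
    {
    	struct pci_dn *pdn = dn->data;

    	if (pdn && pdn->iommu_table) {
    		/* second argument only feeds the printk diagnostics */
    		iommu_free_table(pdn->iommu_table, dn->full_name);
    		pdn->iommu_table = NULL;
    	}
    }
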
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index f0f49d1be3d5..ee172aa42aa7 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -108,7 +108,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
if (size > 0x10000)
size = 0x10000;
- printk(KERN_ERR "no ISA IO ranges or unexpected isa range,"
+ printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
"mapping 64k\n");
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
@@ -116,7 +116,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
return;
inval_range:
- printk(KERN_ERR "no ISA IO ranges or unexpected isa range,"
+ printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
"mapping 64k\n");
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED);
@@ -145,7 +145,7 @@ void __init isa_bridge_find_early(struct pci_controller *hose)
for_each_node_by_type(np, "isa") {
/* Look for our hose being a parent */
for (parent = of_get_parent(np); parent;) {
- if (parent == hose->arch_data) {
+ if (parent == hose->dn) {
of_node_put(parent);
break;
}
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4ed58875ee17..76b862bd1fe9 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -4,6 +4,7 @@
#include <linux/serial_core.h>
#include <linux/console.h>
#include <linux/pci.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/prom.h>
@@ -31,6 +32,15 @@ static struct legacy_serial_info {
int irq_check_parent;
phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
+
+static struct __initdata of_device_id parents[] = {
+ {.type = "soc",},
+ {.type = "tsi-bridge",},
+ {.type = "opb", .compatible = "ibm,opb",},
+ {.compatible = "simple-bus",},
+ {.compatible = "wrs,epld-localbus",},
+};
+
static unsigned int legacy_serial_count;
static int legacy_serial_console = -1;
@@ -306,19 +316,21 @@ void __init find_legacy_serial_ports(void)
DBG(" no linux,stdout-path !\n");
}
- /* First fill our array with SOC ports */
- for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
- struct device_node *soc = of_get_parent(np);
- if (soc && !strcmp(soc->type, "soc")) {
+ /* Iterate over all the 16550 ports, looking for known parents */
+ for_each_compatible_node(np, "serial", "ns16550") {
+ struct device_node *parent = of_get_parent(np);
+ if (!parent)
+ continue;
+ if (of_match_node(parents, parent) != NULL) {
index = add_legacy_soc_port(np, np);
if (index >= 0 && np == stdout)
legacy_serial_console = index;
}
- of_node_put(soc);
+ of_node_put(parent);
}
- /* First fill our array with ISA ports */
- for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
+ /* Next, fill our array with ISA ports */
+ for_each_node_by_type(np, "serial") {
struct device_node *isa = of_get_parent(np);
if (isa && !strcmp(isa->name, "isa")) {
index = add_legacy_isa_port(np, isa);
@@ -328,29 +340,6 @@ void __init find_legacy_serial_ports(void)
of_node_put(isa);
}
- /* First fill our array with tsi-bridge ports */
- for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
- struct device_node *tsi = of_get_parent(np);
- if (tsi && !strcmp(tsi->type, "tsi-bridge")) {
- index = add_legacy_soc_port(np, np);
- if (index >= 0 && np == stdout)
- legacy_serial_console = index;
- }
- of_node_put(tsi);
- }
-
- /* First fill our array with opb bus ports */
- for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
- struct device_node *opb = of_get_parent(np);
- if (opb && (!strcmp(opb->type, "opb") ||
- of_device_is_compatible(opb, "ibm,opb"))) {
- index = add_legacy_soc_port(np, np);
- if (index >= 0 && np == stdout)
- legacy_serial_console = index;
- }
- of_node_put(opb);
- }
-
#ifdef CONFIG_PCI
/* Next, try to locate PCI ports */
for (np = NULL; (np = of_find_all_nodes(np));) {
@@ -474,7 +463,7 @@ static int __init serial_dev_init(void)
/*
* Before we register the platfrom serial devices, we need
- * to fixup their interrutps and their IO ports.
+ * to fixup their interrupts and their IO ports.
*/
DBG("Fixing serial ports interrupts and IO ports ...\n");
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index ff781b2eddec..dcb89a88df46 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -41,7 +41,6 @@
/* #define LPARCFG_DEBUG */
static struct proc_dir_entry *proc_ppc64_lparcfg;
-#define LPARCFG_BUFF_SIZE 4096
/*
* Track sum of all purrs across all processors. This is used to further
@@ -595,13 +594,6 @@ int __init lparcfg_init(void)
ent = create_proc_entry("ppc64/lparcfg", mode, NULL);
if (ent) {
ent->proc_fops = &lparcfg_fops;
- ent->data = kmalloc(LPARCFG_BUFF_SIZE, GFP_KERNEL);
- if (!ent->data) {
- printk(KERN_ERR
- "Failed to allocate buffer for lparcfg\n");
- remove_proc_entry("lparcfg", ent->parent);
- return -ENOMEM;
- }
} else {
printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
return -EIO;
@@ -613,10 +605,8 @@ int __init lparcfg_init(void)
void __exit lparcfg_cleanup(void)
{
- if (proc_ppc64_lparcfg) {
- kfree(proc_ppc64_lparcfg->data);
+ if (proc_ppc64_lparcfg)
remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
- }
}
module_init(lparcfg_init);
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 330c9dc7db86..7b9160220698 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -8,12 +8,17 @@
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
+ * setjmp/longjmp code by Paul Mackerras.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
+#include <asm/unistd.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
.text
@@ -43,3 +48,71 @@ _GLOBAL(add_reloc_offset)
add r3,r3,r5
mtlr r0
blr
+
+_GLOBAL(kernel_execve)
+ li r0,__NR_execve
+ sc
+ bnslr
+ neg r3,r3
+ blr
+
+_GLOBAL(setjmp)
+ mflr r0
+ PPC_STL r0,0(r3)
+ PPC_STL r1,SZL(r3)
+ PPC_STL r2,2*SZL(r3)
+ mfcr r0
+ PPC_STL r0,3*SZL(r3)
+ PPC_STL r13,4*SZL(r3)
+ PPC_STL r14,5*SZL(r3)
+ PPC_STL r15,6*SZL(r3)
+ PPC_STL r16,7*SZL(r3)
+ PPC_STL r17,8*SZL(r3)
+ PPC_STL r18,9*SZL(r3)
+ PPC_STL r19,10*SZL(r3)
+ PPC_STL r20,11*SZL(r3)
+ PPC_STL r21,12*SZL(r3)
+ PPC_STL r22,13*SZL(r3)
+ PPC_STL r23,14*SZL(r3)
+ PPC_STL r24,15*SZL(r3)
+ PPC_STL r25,16*SZL(r3)
+ PPC_STL r26,17*SZL(r3)
+ PPC_STL r27,18*SZL(r3)
+ PPC_STL r28,19*SZL(r3)
+ PPC_STL r29,20*SZL(r3)
+ PPC_STL r30,21*SZL(r3)
+ PPC_STL r31,22*SZL(r3)
+ li r3,0
+ blr
+
+_GLOBAL(longjmp)
+ PPC_LCMPI r4,0
+ bne 1f
+ li r4,1
+1: PPC_LL r13,4*SZL(r3)
+ PPC_LL r14,5*SZL(r3)
+ PPC_LL r15,6*SZL(r3)
+ PPC_LL r16,7*SZL(r3)
+ PPC_LL r17,8*SZL(r3)
+ PPC_LL r18,9*SZL(r3)
+ PPC_LL r19,10*SZL(r3)
+ PPC_LL r20,11*SZL(r3)
+ PPC_LL r21,12*SZL(r3)
+ PPC_LL r22,13*SZL(r3)
+ PPC_LL r23,14*SZL(r3)
+ PPC_LL r24,15*SZL(r3)
+ PPC_LL r25,16*SZL(r3)
+ PPC_LL r26,17*SZL(r3)
+ PPC_LL r27,18*SZL(r3)
+ PPC_LL r28,19*SZL(r3)
+ PPC_LL r29,20*SZL(r3)
+ PPC_LL r30,21*SZL(r3)
+ PPC_LL r31,22*SZL(r3)
+ PPC_LL r0,3*SZL(r3)
+ mtcrf 0x38,r0
+ PPC_LL r0,0(r3)
+ PPC_LL r1,SZL(r3)
+ PPC_LL r2,2*SZL(r3)
+ mtlr r0
+ mr r3,r4
+ blr
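The new setjmp/longjmp save and restore LR, SP (r1), r2, CR and the non-volatile registers r13-r31 through the size-neutral PPC_STL/PPC_LL macros, so the same source serves 32-bit and 64-bit kernels; crash.c above uses them to longjmp() out of a faulting crash-shutdown handler. The buffer layout (23 register-sized slots) suggests a declaration along these lines, though the exact header contents are an assumption, not part of this hunk:

    /* sketch of the expected <asm/setjmp.h> interface */
    #define JMP_BUF_LEN	23	/* LR, SP, r2, CR and r13-r31: 23 slots */

    extern long setjmp(unsigned long *jmp_buf);
    extern void longjmp(unsigned long *jmp_buf, long value);
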
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8b642ab26d37..5c2e253ddfb1 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -206,6 +206,45 @@ _GLOBAL(_nmask_and_or_msr)
isync
blr /* Done */
+#ifdef CONFIG_40x
+
+/*
+ * Do an IO access in real mode
+ */
+_GLOBAL(real_readb)
+ mfmsr r7
+ ori r0,r7,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsr r0
+ sync
+ isync
+ lbz r3,0(r3)
+ sync
+ mtmsr r7
+ sync
+ isync
+ blr
+
+ /*
+ * Do an IO access in real mode
+ */
+_GLOBAL(real_writeb)
+ mfmsr r7
+ ori r0,r7,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsr r0
+ sync
+ isync
+ stb r3,0(r4)
+ sync
+ mtmsr r7
+ sync
+ isync
+ blr
+
+#endif /* CONFIG_40x */
/*
* Flush MMU TLB
@@ -236,12 +275,6 @@ _GLOBAL(_tlbia)
/* Invalidate all entries in TLB1 */
li r3, 0x0c
tlbivax 0,3
- /* Invalidate all entries in TLB2 */
- li r3, 0x14
- tlbivax 0,3
- /* Invalidate all entries in TLB3 */
- li r3, 0x1c
- tlbivax 0,3
msync
#ifdef CONFIG_SMP
tlbsync
@@ -336,12 +369,8 @@ _GLOBAL(_tlbie)
#elif defined(CONFIG_FSL_BOOKE)
rlwinm r4, r3, 0, 0, 19
ori r5, r4, 0x08 /* TLBSEL = 1 */
- ori r6, r4, 0x10 /* TLBSEL = 2 */
- ori r7, r4, 0x18 /* TLBSEL = 3 */
tlbivax 0, r4
tlbivax 0, r5
- tlbivax 0, r6
- tlbivax 0, r7
msync
#if defined(CONFIG_SMP)
tlbsync
@@ -793,13 +822,6 @@ _GLOBAL(kernel_thread)
addi r1,r1,16
blr
-_GLOBAL(kernel_execve)
- li r0,__NR_execve
- sc
- bnslr
- neg r3,r3
- blr
-
/*
* This routine is just here to keep GCC happy - sigh...
*/
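real_readb()/real_writeb() perform a single byte access with data relocation (MSR_DR) temporarily cleared, which some 40x boards need for devices that are only reachable in real mode. From the register usage (address in r3 for the read; value in r3 and address in r4 for the write), the C prototypes would look like the sketch below; the exact header declarations are an assumption:

    /* inferred prototypes: r3 carries the first argument, r4 the second */
    extern u8 real_readb(volatile u8 __iomem *addr);
    extern void real_writeb(u8 data, volatile u8 __iomem *addr);
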
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index bbb3ba54c51c..a3c491e88a72 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -518,13 +518,6 @@ _GLOBAL(giveup_altivec)
#endif /* CONFIG_ALTIVEC */
-_GLOBAL(kernel_execve)
- li r0,__NR_execve
- sc
- bnslr
- neg r3,r3
- blr
-
/* kexec_wait(phys_cpu)
*
* wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 07a89a398639..eab313858315 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/bug.h>
+#include <linux/sort.h>
#include "setup.h"
@@ -54,22 +55,60 @@ void module_free(struct module *mod, void *module_region)
addend) */
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
{
- unsigned int i, j, ret = 0;
-
- /* Sure, this is order(n^2), but it's usually short, and not
- time critical */
- for (i = 0; i < num; i++) {
- for (j = 0; j < i; j++) {
- /* If this addend appeared before, it's
- already been counted */
- if (ELF32_R_SYM(rela[i].r_info)
- == ELF32_R_SYM(rela[j].r_info)
- && rela[i].r_addend == rela[j].r_addend)
- break;
+ unsigned int i, r_info, r_addend, _count_relocs;
+
+ _count_relocs = 0;
+ r_info = 0;
+ r_addend = 0;
+ for (i = 0; i < num; i++)
+ /* Only count 24-bit relocs, others don't need stubs */
+ if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
+ (r_info != ELF32_R_SYM(rela[i].r_info) ||
+ r_addend != rela[i].r_addend)) {
+ _count_relocs++;
+ r_info = ELF32_R_SYM(rela[i].r_info);
+ r_addend = rela[i].r_addend;
}
- if (j == i) ret++;
+
+ return _count_relocs;
+}
+
+static int relacmp(const void *_x, const void *_y)
+{
+ const Elf32_Rela *x, *y;
+
+ y = (Elf32_Rela *)_x;
+ x = (Elf32_Rela *)_y;
+
+ /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
+ * make the comparison cheaper/faster. It won't affect the sorting or
+ * the counting algorithms' performance
+ */
+ if (x->r_info < y->r_info)
+ return -1;
+ else if (x->r_info > y->r_info)
+ return 1;
+ else if (x->r_addend < y->r_addend)
+ return -1;
+ else if (x->r_addend > y->r_addend)
+ return 1;
+ else
+ return 0;
+}
+
+static void relaswap(void *_x, void *_y, int size)
+{
+ uint32_t *x, *y, tmp;
+ int i;
+
+ y = (uint32_t *)_x;
+ x = (uint32_t *)_y;
+
+ for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
+ tmp = x[i];
+ x[i] = y[i];
+ y[i] = tmp;
}
- return ret;
}
/* Get the potential trampolines size required of the init and
@@ -100,6 +139,16 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
DEBUGP("Ptr: %p. Number: %u\n",
(void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela));
+
+ /* Sort the relocation information based on a symbol and
+ * addend key. This is a stable O(n*log n) complexity
+ * algorithm but it will reduce the complexity of
+ * count_relocs() to linear complexity O(n)
+ */
+ sort((void *)hdr + sechdrs[i].sh_offset,
+ sechdrs[i].sh_size / sizeof(Elf32_Rela),
+ sizeof(Elf32_Rela), relacmp, relaswap);
+
ret += count_relocs((void *)hdr
+ sechdrs[i].sh_offset,
sechdrs[i].sh_size
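Both module loaders (this file and module_64.c below) drop the O(n^2) duplicate scan: the relocation array is first sorted by (symbol, addend) via relacmp/relaswap, after which count_relocs() only has to compare each entry with the previous one, making the count linear. A self-contained userspace illustration of the same sort-then-scan idea on plain integer keys (not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_long(const void *a, const void *b)
    {
    	long x = *(const long *)a, y = *(const long *)b;
    	return (x > y) - (x < y);
    }

    int main(void)
    {
    	/* stand-ins for (symbol, addend) relocation keys */
    	long keys[] = { 7, 3, 7, 1, 3, 3, 9 };
    	size_t n = sizeof(keys) / sizeof(keys[0]), i, uniq = 0;

    	qsort(keys, n, sizeof(keys[0]), cmp_long);	/* O(n log n) */
    	for (i = 0; i < n; i++)				/* O(n) distinct count */
    		if (i == 0 || keys[i] != keys[i - 1])
    			uniq++;

    	printf("%zu distinct keys -> that many trampoline stubs\n", uniq);
    	return 0;
    }
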
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 75c7c4f19280..3a82b02b784b 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -24,6 +24,7 @@
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
+#include <linux/sort.h>
#include "setup.h"
@@ -81,25 +82,23 @@ static struct ppc64_stub_entry ppc64_stub =
different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
- unsigned int i, j, ret = 0;
+ unsigned int i, r_info, r_addend, _count_relocs;
/* FIXME: Only count external ones --RR */
- /* Sure, this is order(n^2), but it's usually short, and not
- time critical */
- for (i = 0; i < num; i++) {
+ _count_relocs = 0;
+ r_info = 0;
+ r_addend = 0;
+ for (i = 0; i < num; i++)
/* Only count 24-bit relocs, others don't need stubs */
- if (ELF64_R_TYPE(rela[i].r_info) != R_PPC_REL24)
- continue;
- for (j = 0; j < i; j++) {
- /* If this addend appeared before, it's
- already been counted */
- if (rela[i].r_info == rela[j].r_info
- && rela[i].r_addend == rela[j].r_addend)
- break;
+ if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
+ (r_info != ELF64_R_SYM(rela[i].r_info) ||
+ r_addend != rela[i].r_addend)) {
+ _count_relocs++;
+ r_info = ELF64_R_SYM(rela[i].r_info);
+ r_addend = rela[i].r_addend;
}
- if (j == i) ret++;
- }
- return ret;
+
+ return _count_relocs;
}
void *module_alloc(unsigned long size)
@@ -118,6 +117,44 @@ void module_free(struct module *mod, void *module_region)
table entries. */
}
+static int relacmp(const void *_x, const void *_y)
+{
+ const Elf64_Rela *x, *y;
+
+ y = (Elf64_Rela *)_x;
+ x = (Elf64_Rela *)_y;
+
+ /* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
+ * make the comparison cheaper/faster. It won't affect the sorting or
+ * the counting algorithms' performance
+ */
+ if (x->r_info < y->r_info)
+ return -1;
+ else if (x->r_info > y->r_info)
+ return 1;
+ else if (x->r_addend < y->r_addend)
+ return -1;
+ else if (x->r_addend > y->r_addend)
+ return 1;
+ else
+ return 0;
+}
+
+static void relaswap(void *_x, void *_y, int size)
+{
+ uint64_t *x, *y, tmp;
+ int i;
+
+ y = (uint64_t *)_x;
+ x = (uint64_t *)_y;
+
+ for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
+ tmp = x[i];
+ x[i] = y[i];
+ y[i] = tmp;
+ }
+}
+
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
const Elf64_Shdr *sechdrs)
@@ -133,6 +170,16 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
DEBUGP("Ptr: %p. Number: %lu\n",
(void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela));
+
+ /* Sort the relocation information based on a symbol and
+			 * addend key. This is a stable O(n*log n) algorithm,
+			 * but it reduces the complexity of count_relocs()
+			 * to linear, O(n)
+ */
+ sort((void *)sechdrs[i].sh_addr,
+ sechdrs[i].sh_size / sizeof(Elf64_Rela),
+ sizeof(Elf64_Rela), relacmp, relaswap);
+
relocs += count_relocs((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
/ sizeof(Elf64_Rela));
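
For reference, the sort() used above is the kernel's lib/sort() helper, called with the array base, the element count, the element size, the relacmp() comparator and the relaswap() swap callback. The comparator deliberately orders by the whole r_info word rather than only ELF64_R_SYM(r_info): any total order that keeps equal (r_info, r_addend) pairs adjacent is good enough for the linear count. A hedged userspace analogue of that ordering, with a stand-in struct and qsort() instead of the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* stand-in for Elf64_Rela, only the two fields the comparator looks at */
struct rela { uint64_t r_info; int64_t r_addend; };

/* order by the full r_info word first, then by addend, as relacmp() does */
static int relacmp_demo(const void *_x, const void *_y)
{
	const struct rela *x = _x, *y = _y;

	if (x->r_info != y->r_info)
		return x->r_info < y->r_info ? -1 : 1;
	if (x->r_addend != y->r_addend)
		return x->r_addend < y->r_addend ? -1 : 1;
	return 0;
}

int main(void)
{
	struct rela r[] = { { 3, 0 }, { 1, 16 }, { 1, 0 }, { 3, 0 } };
	int i;

	qsort(r, 4, sizeof(r[0]), relacmp_demo);
	for (i = 0; i < 4; i++)	/* equal keys are now adjacent */
		printf("(%llu, %lld)\n",
		       (unsigned long long)r[i].r_info,
		       (long long)r[i].r_addend);
	return 0;
}

Note that the in-kernel relacmp() assigns x and y from the opposite arguments, so the order it produces is reversed relative to this demo; that is harmless, since only the grouping of equal keys matters to count_relocs().
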
@@ -343,7 +390,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* Simply set it */
*(u32 *)location = value;
break;
-
+
case R_PPC64_ADDR64:
/* Simply set it */
*(unsigned long *)location = value;
@@ -399,7 +446,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
- *(uint32_t *)location
+ *(uint32_t *)location
= (*(uint32_t *)location & ~0x03fffffc)
| (value & 0x03fffffc);
break;
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 3388ad619996..5748ddb47d9f 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -5,10 +5,10 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include <asm/errno.h>
#include <asm/dcr.h>
-#include <asm/of_device.h>
static void of_device_make_bus_id(struct of_device *dev)
{
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index aeaa20268ce2..fb698d47082d 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -19,6 +19,7 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
@@ -40,7 +41,7 @@
* a bus type in the list
*/
-static struct of_device_id of_default_bus_ids[] = {
+static const struct of_device_id of_default_bus_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .type = "spider", },
@@ -64,26 +65,6 @@ static int __init of_bus_driver_init(void)
postcore_initcall(of_bus_driver_init);
-int of_register_platform_driver(struct of_platform_driver *drv)
-{
- /* initialize common driver fields */
- if (!drv->driver.name)
- drv->driver.name = drv->name;
- if (!drv->driver.owner)
- drv->driver.owner = drv->owner;
- drv->driver.bus = &of_platform_bus_type;
-
- /* register with core */
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL(of_register_platform_driver);
-
-void of_unregister_platform_driver(struct of_platform_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(of_unregister_platform_driver);
-
struct of_device* of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent)
@@ -120,15 +101,15 @@ EXPORT_SYMBOL(of_platform_device_create);
* @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
* disallow recursive creation of child busses
*/
-static int of_platform_bus_create(struct device_node *bus,
- struct of_device_id *matches,
+static int of_platform_bus_create(const struct device_node *bus,
+ const struct of_device_id *matches,
struct device *parent)
{
struct device_node *child;
struct of_device *dev;
int rc = 0;
- for (child = NULL; (child = of_get_next_child(bus, child)); ) {
+ for_each_child_of_node(bus, child) {
pr_debug(" create child: %s\n", child->full_name);
dev = of_platform_device_create(child, NULL, parent);
if (dev == NULL)
@@ -157,7 +138,7 @@ static int of_platform_bus_create(struct device_node *bus,
*/
int of_platform_bus_probe(struct device_node *root,
- struct of_device_id *matches,
+ const struct of_device_id *matches,
struct device *parent)
{
struct device_node *child;
@@ -190,7 +171,7 @@ int of_platform_bus_probe(struct device_node *root,
rc = of_platform_bus_create(root, matches, &dev->dev);
goto bail;
}
- for (child = NULL; (child = of_get_next_child(root, child)); ) {
+ for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2ae3b6f778a3..980fe32895c0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -48,32 +48,26 @@
static DEFINE_SPINLOCK(hose_spinlock);
/* XXX kill that some day ... */
-int global_phb_number; /* Global phb counter */
+static int global_phb_number; /* Global phb counter */
-extern struct list_head hose_list;
+/* ISA Memory physical address */
+resource_size_t isa_mem_base;
-/*
- * pci_controller(phb) initialized common variables.
- */
-static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
-{
- memset(hose, 0, sizeof(struct pci_controller));
-
- spin_lock(&hose_spinlock);
- hose->global_number = global_phb_number++;
- list_add_tail(&hose->list_node, &hose_list);
- spin_unlock(&hose_spinlock);
-}
+/* Default PCI flags is 0 */
+unsigned int ppc_pci_flags;
-struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
+struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
- phb = alloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
+ phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
if (phb == NULL)
return NULL;
- pci_setup_pci_controller(phb);
- phb->arch_data = dev;
+ spin_lock(&hose_spinlock);
+ phb->global_number = global_phb_number++;
+ list_add_tail(&phb->list_node, &hose_list);
+ spin_unlock(&hose_spinlock);
+ phb->dn = dev;
phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
if (dev) {
@@ -126,15 +120,10 @@ int pcibios_vaddr_is_ioport(void __iomem *address)
*/
int pci_domain_nr(struct pci_bus *bus)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
- else {
- struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pci_controller *hose = pci_bus_to_host(bus);
- return hose->global_number;
- }
+ return hose->global_number;
}
-
EXPORT_SYMBOL(pci_domain_nr);
#ifdef CONFIG_PPC_OF
@@ -153,7 +142,7 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
while(node) {
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
- if (hose->arch_data == node)
+ if (hose->dn == node)
return hose;
node = node->parent;
}
@@ -201,6 +190,20 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
struct of_irq oirq;
unsigned int virq;
+ /* The current device-tree that iSeries generates from the HV
+	 * PCI information doesn't contain proper interrupt routing,
+	 * and all the fallback would do is print out crap, so we
+	 * don't attempt to resolve the interrupts here at all; an
+	 * iSeries specific fixup does it.
+ *
+ * In the long run, we will hopefully fix the generated device-tree
+ * instead.
+ */
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return -1;
+#endif
+
DBG("Try to map irq for %s...\n", pci_name(pci_dev));
#ifdef DEBUG
@@ -222,10 +225,11 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
if (pin == 0)
return -1;
if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
- line == 0xff) {
+ line == 0xff || line == 0) {
return -1;
}
- DBG(" -> no map ! Using irq line %d from PCI config\n", line);
+ DBG(" -> no map ! Using line %d (pin %d) from PCI config\n",
+ line, pin);
virq = irq_create_mapping(NULL, line);
if (virq != NO_IRQ)
@@ -475,3 +479,717 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
*start = rsrc->start - offset;
*end = rsrc->end - offset;
}
+
+/**
+ * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
+ * @hose: newly allocated pci_controller to be setup
+ * @dev: device node of the host bridge
+ * @primary: set if primary bus (32 bits only, soon to be deprecated)
+ *
+ * This function will parse the "ranges" property of a PCI host bridge device
+ * node and setup the resource mapping of a pci controller based on its
+ * content.
+ *
+ * Life would be boring if it wasn't for a few issues that we have to deal
+ * with here:
+ *
+ * - We can only cope with one IO space range and up to 3 Memory space
+ * ranges. However, some machines (thanks Apple !) tend to split their
+ * space into lots of small contiguous ranges. So we have to coalesce.
+ *
+ * - We can only cope with all memory ranges having the same offset
+ * between CPU addresses and PCI addresses. Unfortunately, some bridges
+ * are setup for a large 1:1 mapping along with a small "window" which
+ * maps PCI address 0 to some arbitrary high address of the CPU space in
+ * order to give access to the ISA memory hole.
+ * The way out of here that I've chosen for now is to always set the
+ * offset based on the first resource found, then override it if we
+ * have a different offset and the previous was set by an ISA hole.
+ *
+ * - Some busses have IO space not starting at 0, which causes trouble with
+ * the way we do our IO resource renumbering. The code somewhat deals with
+ * it for 64 bits but I would expect problems on 32 bits.
+ *
+ * - Some 32-bit platforms such as 4xx can have physical space larger than
+ *   32 bits, so we need to use 64-bit values for the parsing
+ */
+void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
+ struct device_node *dev,
+ int primary)
+{
+ const u32 *ranges;
+ int rlen;
+ int pna = of_n_addr_cells(dev);
+ int np = pna + 5;
+ int memno = 0, isa_hole = -1;
+ u32 pci_space;
+ unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
+ unsigned long long isa_mb = 0;
+ struct resource *res;
+
+ printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
+ dev->full_name, primary ? "(primary)" : "");
+
+ /* Get ranges property */
+ ranges = of_get_property(dev, "ranges", &rlen);
+ if (ranges == NULL)
+ return;
+
+ /* Parse it */
+ while ((rlen -= np * 4) >= 0) {
+ /* Read next ranges element */
+ pci_space = ranges[0];
+ pci_addr = of_read_number(ranges + 1, 2);
+ cpu_addr = of_translate_address(dev, ranges + 3);
+ size = of_read_number(ranges + pna + 3, 2);
+ ranges += np;
+ if (cpu_addr == OF_BAD_ADDR || size == 0)
+ continue;
+
+ /* Now consume following elements while they are contiguous */
+ for (; rlen >= np * sizeof(u32);
+ ranges += np, rlen -= np * 4) {
+ if (ranges[0] != pci_space)
+ break;
+ pci_next = of_read_number(ranges + 1, 2);
+ cpu_next = of_translate_address(dev, ranges + 3);
+ if (pci_next != pci_addr + size ||
+ cpu_next != cpu_addr + size)
+ break;
+ size += of_read_number(ranges + pna + 3, 2);
+ }
+
+ /* Act based on address space type */
+ res = NULL;
+ switch ((pci_space >> 24) & 0x3) {
+ case 1: /* PCI IO space */
+ printk(KERN_INFO
+ " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
+ cpu_addr, cpu_addr + size - 1, pci_addr);
+
+ /* We support only one IO range */
+ if (hose->pci_io_size) {
+ printk(KERN_INFO
+ " \\--> Skipped (too many) !\n");
+ continue;
+ }
+#ifdef CONFIG_PPC32
+ /* On 32 bits, limit I/O space to 16MB */
+ if (size > 0x01000000)
+ size = 0x01000000;
+
+ /* 32 bits needs to map IOs here */
+ hose->io_base_virt = ioremap(cpu_addr, size);
+
+ /* Expect trouble if pci_addr is not 0 */
+ if (primary)
+ isa_io_base =
+ (unsigned long)hose->io_base_virt;
+#endif /* CONFIG_PPC32 */
+ /* pci_io_size and io_base_phys always represent IO
+ * space starting at 0 so we factor in pci_addr
+ */
+ hose->pci_io_size = pci_addr + size;
+ hose->io_base_phys = cpu_addr - pci_addr;
+
+ /* Build resource */
+ res = &hose->io_resource;
+ res->flags = IORESOURCE_IO;
+ res->start = pci_addr;
+ break;
+ case 2: /* PCI Memory space */
+ printk(KERN_INFO
+ " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
+ cpu_addr, cpu_addr + size - 1, pci_addr,
+ (pci_space & 0x40000000) ? "Prefetch" : "");
+
+ /* We support only 3 memory ranges */
+ if (memno >= 3) {
+ printk(KERN_INFO
+ " \\--> Skipped (too many) !\n");
+ continue;
+ }
+ /* Handles ISA memory hole space here */
+ if (pci_addr == 0) {
+ isa_mb = cpu_addr;
+ isa_hole = memno;
+ if (primary || isa_mem_base == 0)
+ isa_mem_base = cpu_addr;
+ }
+
+ /* We get the PCI/Mem offset from the first range or
+		 * the current one if the offset came from an ISA hole.
+ * hole. If they don't match, bugger.
+ */
+ if (memno == 0 ||
+ (isa_hole >= 0 && pci_addr != 0 &&
+ hose->pci_mem_offset == isa_mb))
+ hose->pci_mem_offset = cpu_addr - pci_addr;
+ else if (pci_addr != 0 &&
+ hose->pci_mem_offset != cpu_addr - pci_addr) {
+ printk(KERN_INFO
+ " \\--> Skipped (offset mismatch) !\n");
+ continue;
+ }
+
+ /* Build resource */
+ res = &hose->mem_resources[memno++];
+ res->flags = IORESOURCE_MEM;
+ if (pci_space & 0x40000000)
+ res->flags |= IORESOURCE_PREFETCH;
+ res->start = cpu_addr;
+ break;
+ }
+ if (res != NULL) {
+ res->name = dev->full_name;
+ res->end = res->start + size - 1;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+ }
+ }
+
+ /* Out of paranoia, let's put the ISA hole last if any */
+ if (isa_hole >= 0 && memno > 0 && isa_hole != (memno-1)) {
+ struct resource tmp = hose->mem_resources[isa_hole];
+ hose->mem_resources[isa_hole] = hose->mem_resources[memno-1];
+ hose->mem_resources[memno-1] = tmp;
+ }
+}
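
To make the cell layout handled above concrete: with a parent #address-cells of 1, each ranges entry is np = 1 + 5 = 6 cells, i.e. a one-cell PCI space word, a two-cell PCI address, a one-cell CPU address and a two-cell size. The decode below is purely illustrative (hypothetical values, not kernel code); the low two bits of the top byte of the first cell select the space (1 = I/O, 2 = 32-bit memory) and bit 30 marks a prefetchable range:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical "ranges" entry, parent #address-cells = 1:
	 * <phys.hi phys.mid phys.lo> <cpu_addr> <size.hi size.lo> */
	uint32_t ranges[6] = {
		0x02000000, 0x00000000, 0x80000000,	/* 32-bit MEM at PCI 0x80000000 */
		0x80000000,				/* CPU 0x80000000 */
		0x00000000, 0x10000000,			/* size 256MB */
	};

	unsigned int space = (ranges[0] >> 24) & 0x3;	/* 1 = IO, 2 = MEM */
	uint64_t pci_addr = ((uint64_t)ranges[1] << 32) | ranges[2];
	uint64_t cpu_addr = ranges[3];			/* one address cell */
	uint64_t size = ((uint64_t)ranges[4] << 32) | ranges[5];

	printf("space=%u pci=%#llx cpu=%#llx size=%#llx prefetch=%d\n",
	       space, (unsigned long long)pci_addr,
	       (unsigned long long)cpu_addr, (unsigned long long)size,
	       (ranges[0] & 0x40000000) ? 1 : 0);
	return 0;
}

In the function above, of_translate_address() does the CPU-side translation through any intermediate bridges instead of the naive single-cell read used here.
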
+
+/* Decide whether to display the domain number in /proc */
+int pci_proc_domain(struct pci_bus *bus)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+#ifdef CONFIG_PPC64
+ return hose->buid != 0;
+#else
+ if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
+ return 0;
+ if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
+ return hose->global_number != 0;
+ return 1;
+#endif
+}
+
+void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ resource_size_t offset = 0, mask = (resource_size_t)-1;
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+ if (!hose)
+ return;
+ if (res->flags & IORESOURCE_IO) {
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ mask = 0xffffffffu;
+ } else if (res->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+
+ region->start = (res->start - offset) & mask;
+ region->end = (res->end - offset) & mask;
+}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region)
+{
+ resource_size_t offset = 0, mask = (resource_size_t)-1;
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+ if (!hose)
+ return;
+ if (res->flags & IORESOURCE_IO) {
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ mask = 0xffffffffu;
+ } else if (res->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+ res->start = (region->start + offset) & mask;
+ res->end = (region->end + offset) & mask;
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
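
These two helpers are inverses of each other: for memory resources the conversion is a single subtraction or addition of hose->pci_mem_offset (the CPU-to-PCI offset established by pci_process_bridge_OF_ranges() above), with an extra 32-bit mask applied to I/O. A small arithmetic sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical bridge: CPU 0xc0000000 maps to PCI 0x80000000 */
	uint64_t pci_mem_offset = 0xc0000000ull - 0x80000000ull;
	uint64_t res_start = 0xc1000000ull;	/* CPU-side resource address */

	uint64_t bus_start = res_start - pci_mem_offset; /* resource_to_bus */
	uint64_t back      = bus_start + pci_mem_offset; /* bus_to_resource */

	printf("cpu %#llx -> bus %#llx -> cpu %#llx\n",
	       (unsigned long long)res_start,
	       (unsigned long long)bus_start,
	       (unsigned long long)back);
	return 0;
}
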
+
+/* Fixup a bus resource into a linux resource */
+static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ resource_size_t offset = 0, mask = (resource_size_t)-1;
+
+ if (res->flags & IORESOURCE_IO) {
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ mask = 0xffffffffu;
+ } else if (res->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+
+ res->start = (res->start + offset) & mask;
+ res->end = (res->end + offset) & mask;
+
+ pr_debug("PCI:%s %016llx-%016llx\n",
+ pci_name(dev),
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+}
+
+
+/* This header fixup will do the resource fixup for all devices as they are
+ * probed, but not for bridge ranges
+ */
+static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ int i;
+
+ if (!hose) {
+ printk(KERN_ERR "No host bridge for PCI dev %s !\n",
+ pci_name(dev));
+ return;
+ }
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ struct resource *res = dev->resource + i;
+ if (!res->flags)
+ continue;
+ if (res->end == 0xffffffff) {
+ pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n",
+ pci_name(dev), i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags);
+ res->end -= res->start;
+ res->start = 0;
+ res->flags |= IORESOURCE_UNSET;
+ continue;
+ }
+
+ pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
+ pci_name(dev), i,
+			 (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags);
+
+ fixup_resource(res, dev);
+ }
+
+ /* Call machine specific resource fixup */
+ if (ppc_md.pcibios_fixup_resources)
+ ppc_md.pcibios_fixup_resources(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
+
+static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pci_dev *dev = bus->self;
+
+ pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
+
+ /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
+ * now differently between 32 and 64 bits.
+ */
+ if (dev != NULL) {
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
+ if ((res = bus->resource[i]) == NULL)
+ continue;
+ if (!res->flags)
+ continue;
+ if (i >= 3 && bus->self->transparent)
+ continue;
+ /* On PowerMac, Apple leaves bridge windows open over
+ * an inaccessible region of memory space (0...fffff)
+			 * which is somewhat bogus, but that's apparently how
+			 * they mark the window as disabled...
+ *
+ * We clear those to force them to be reallocated later
+ *
+ * We detect such regions by the fact that the base is
+ * equal to the pci_mem_offset of the host bridge and
+ * their size is smaller than 1M.
+ */
+ if (res->flags & IORESOURCE_MEM &&
+ res->start == hose->pci_mem_offset &&
+ res->end < 0x100000) {
+ printk(KERN_INFO
+ "PCI: Closing bogus Apple Firmware"
+ " region %d on bus 0x%02x\n",
+ i, bus->number);
+ res->flags = 0;
+ continue;
+ }
+
+ pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
+ pci_name(dev), i,
+				 (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags);
+
+ fixup_resource(res, dev);
+ }
+ }
+
+ /* Additional setup that is different between 32 and 64 bits for now */
+ pcibios_do_bus_setup(bus);
+
+ /* Platform specific bus fixups */
+ if (ppc_md.pcibios_fixup_bus)
+ ppc_md.pcibios_fixup_bus(bus);
+
+ /* Read default IRQs and fixup if necessary */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
+ }
+}
+
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* When called from the generic PCI probe, read PCI<->PCI bridge
+ * bases before proceeding
+ */
+ if (bus->self != NULL)
+ pci_read_bridge_bases(bus);
+ __pcibios_fixup_bus(bus);
+}
+EXPORT_SYMBOL(pcibios_fixup_bus);
+
+/* When building a bus from the OF tree rather than probing, we need a
+ * slightly different version of the fixup which doesn't read the
+ * bridge bases using config space accesses
+ */
+void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus)
+{
+ __pcibios_fixup_bus(bus);
+}
+
+static int skip_isa_ioresource_align(struct pci_dev *dev)
+{
+ if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
+ !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
+ return 1;
+ return 0;
+}
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might have been mirrored at 0x0100-0x03ff..
+ */
+void pcibios_align_resource(void *data, struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+
+ if (res->flags & IORESOURCE_IO) {
+ resource_size_t start = res->start;
+
+ if (skip_isa_ioresource_align(dev))
+ return;
+ if (start & 0x300) {
+ start = (start + 0x3ff) & ~0x3ff;
+ res->start = start;
+ }
+ }
+}
+EXPORT_SYMBOL(pcibios_align_resource);
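
Concretely, the alignment rule above only kicks in when the low ten bits of the start fall outside 0x000-0x0ff: 0x2800 is left alone, while 0x2900 is bumped up to the next 0x400 boundary, 0x2c00. A tiny standalone check of that arithmetic:

#include <stdio.h>

/* same rounding as pcibios_align_resource(): keep the low 10 bits in 0x000-0x0ff */
static unsigned long align_io(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x2800UL, align_io(0x2800UL));	/* unchanged */
	printf("%#lx -> %#lx\n", 0x2900UL, align_io(0x2900UL));	/* 0x2c00 */
	return 0;
}
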
+
+/*
+ * Reparent resource children of pr that conflict with res
+ * under res, and make res replace those children.
+ */
+static int __init reparent_resources(struct resource *parent,
+ struct resource *res)
+{
+ struct resource *p, **pp;
+ struct resource **firstpp = NULL;
+
+ for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
+ if (p->end < res->start)
+ continue;
+ if (res->end < p->start)
+ break;
+ if (p->start < res->start || p->end > res->end)
+ return -1; /* not completely contained */
+ if (firstpp == NULL)
+ firstpp = pp;
+ }
+ if (firstpp == NULL)
+ return -1; /* didn't find any conflicting entries? */
+ res->parent = parent;
+ res->child = *firstpp;
+ res->sibling = *pp;
+ *firstpp = res;
+ *pp = NULL;
+ for (p = res->child; p != NULL; p = p->sibling) {
+ p->parent = res;
+ DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
+ p->name,
+ (unsigned long long)p->start,
+ (unsigned long long)p->end, res->name);
+ }
+ return 0;
+}
+
+/*
+ * Handle resources of PCI devices. If the world were perfect, we could
+ * just allocate all the resource regions and do nothing more. It isn't.
+ * On the other hand, we cannot just re-allocate all devices, as it would
+ * require us to know lots of host bridge internals. So we attempt to
+ * keep as much of the original configuration as possible, but tweak it
+ * when it's found to be wrong.
+ *
+ * Known BIOS problems we have to work around:
+ * - I/O or memory regions not configured
+ * - regions configured, but not enabled in the command register
+ * - bogus I/O addresses above 64K used
+ * - expansion ROMs left enabled (this may sound harmless, but given
+ * the fact the PCI specs explicitly allow address decoders to be
+ * shared between expansion ROMs and other resource regions, it's
+ * at least dangerous)
+ *
+ * Our solution:
+ * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
+ * This gives us fixed barriers on where we can allocate.
+ * (2) Allocate resources for all enabled devices. If there is
+ * a collision, just mark the resource as unallocated. Also
+ * disable expansion ROMs during this step.
+ * (3) Try to allocate resources for disabled devices. If the
+ * resources were assigned correctly, everything goes well,
+ * if they weren't, they won't disturb allocation of other
+ * resources.
+ * (4) Assign new addresses to resources which were either
+ * not configured at all or misconfigured. If explicitly
+ * requested by the user, configure expansion ROM address
+ * as well.
+ */
+
+static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
+{
+ struct pci_bus *bus;
+ int i;
+ struct resource *res, *pr;
+
+ /* Depth-First Search on bus tree */
+ list_for_each_entry(bus, bus_list, node) {
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
+ if ((res = bus->resource[i]) == NULL || !res->flags
+ || res->start > res->end)
+ continue;
+ if (bus->parent == NULL)
+ pr = (res->flags & IORESOURCE_IO) ?
+ &ioport_resource : &iomem_resource;
+ else {
+ /* Don't bother with non-root busses when
+ * re-assigning all resources. We clear the
+ * resource flags as if they were colliding
+ * and as such ensure proper re-allocation
+ * later.
+ */
+ if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
+ goto clear_resource;
+ pr = pci_find_parent_resource(bus->self, res);
+ if (pr == res) {
+ /* this happens when the generic PCI
+ * code (wrongly) decides that this
+ * bridge is transparent -- paulus
+ */
+ continue;
+ }
+ }
+
+ DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
+ "[0x%x], parent %p (%s)\n",
+ bus->self ? pci_name(bus->self) : "PHB",
+ bus->number, i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags,
+ pr, (pr && pr->name) ? pr->name : "nil");
+
+ if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ if (request_resource(pr, res) == 0)
+ continue;
+ /*
+ * Must be a conflict with an existing entry.
+ * Move that entry (or entries) under the
+ * bridge resource and try again.
+ */
+ if (reparent_resources(pr, res) == 0)
+ continue;
+ }
+ printk(KERN_WARNING
+ "PCI: Cannot allocate resource region "
+ "%d of PCI bridge %d, will remap\n",
+ i, bus->number);
+clear_resource:
+ res->flags = 0;
+ }
+ pcibios_allocate_bus_resources(&bus->children);
+ }
+}
+
+static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
+{
+ struct resource *pr, *r = &dev->resource[idx];
+
+ DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
+ pci_name(dev), idx,
+ (unsigned long long)r->start,
+ (unsigned long long)r->end,
+ (unsigned int)r->flags);
+
+ pr = pci_find_parent_resource(dev, r);
+ if (!pr || (pr->flags & IORESOURCE_UNSET) ||
+ request_resource(pr, r) < 0) {
+ printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
+ " of device %s, will remap\n", idx, pci_name(dev));
+ if (pr)
+ DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr,
+ (unsigned long long)pr->start,
+ (unsigned long long)pr->end,
+ (unsigned int)pr->flags);
+ /* We'll assign a new address later */
+ r->flags |= IORESOURCE_UNSET;
+ r->end -= r->start;
+ r->start = 0;
+ }
+}
+
+static void __init pcibios_allocate_resources(int pass)
+{
+ struct pci_dev *dev = NULL;
+ int idx, disabled;
+ u16 command;
+ struct resource *r;
+
+ for_each_pci_dev(dev) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ for (idx = 0; idx < 6; idx++) {
+ r = &dev->resource[idx];
+ if (r->parent) /* Already allocated */
+ continue;
+ if (!r->flags || (r->flags & IORESOURCE_UNSET))
+ continue; /* Not assigned at all */
+ if (r->flags & IORESOURCE_IO)
+ disabled = !(command & PCI_COMMAND_IO);
+ else
+ disabled = !(command & PCI_COMMAND_MEMORY);
+ if (pass == disabled)
+ alloc_resource(dev, idx);
+ }
+ if (pass)
+ continue;
+ r = &dev->resource[PCI_ROM_RESOURCE];
+ if (r->flags & IORESOURCE_ROM_ENABLE) {
+ /* Turn the ROM off, leave the resource region,
+ * but keep it unregistered.
+ */
+ u32 reg;
+ DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+ r->flags &= ~IORESOURCE_ROM_ENABLE;
+ pci_read_config_dword(dev, dev->rom_base_reg, &reg);
+ pci_write_config_dword(dev, dev->rom_base_reg,
+ reg & ~PCI_ROM_ADDRESS_ENABLE);
+ }
+ }
+}
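
The pass == disabled test in pcibios_allocate_resources() is a compact encoding of steps (2) and (3) from the strategy comment above: pass 0 claims resources of devices whose decode is already enabled in the command register, pass 1 then tries the disabled ones. A throwaway truth-table sketch of that test:

#include <stdio.h>

int main(void)
{
	int pass, disabled;

	for (pass = 0; pass < 2; pass++)
		for (disabled = 0; disabled < 2; disabled++)
			printf("pass %d, disabled %d -> %s\n", pass, disabled,
			       pass == disabled ? "allocate now" : "skip");
	return 0;
}
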
+
+void __init pcibios_resource_survey(void)
+{
+ /* Allocate and assign resources. If we re-assign everything, then
+ * we skip the allocate phase
+ */
+ pcibios_allocate_bus_resources(&pci_root_buses);
+
+ if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) {
+ pcibios_allocate_resources(0);
+ pcibios_allocate_resources(1);
+ }
+
+ if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+		DBG("PCI: Assigning unassigned resources...\n");
+ pci_assign_unassigned_resources();
+ }
+
+ /* Call machine dependent fixup */
+ if (ppc_md.pcibios_fixup)
+ ppc_md.pcibios_fixup();
+}
+
+#ifdef CONFIG_HOTPLUG
+/* This is used by the pSeries hotplug driver to allocate resources
+ * of newly plugged busses. We can try to consolidate with the
+ * rest of the code later; for now, keep it as-is
+ */
+void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !r->start || !r->flags)
+ continue;
+ pci_claim_resource(dev, i);
+ }
+ }
+
+ list_for_each_entry(child_bus, &bus->children, node)
+ pcibios_claim_one_bus(child_bus);
+}
+EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
+#endif /* CONFIG_HOTPLUG */
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ if (ppc_md.pcibios_enable_device_hook)
+ if (ppc_md.pcibios_enable_device_hook(dev))
+ return -EINVAL;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1 << idx)))
+ continue;
+ r = &dev->resource[idx];
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((idx == PCI_ROM_RESOURCE) &&
+ (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ continue;
+ if (r->parent == NULL) {
+ printk(KERN_ERR "PCI: Device %s not available because"
+ " of resource collisions\n", pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (cmd != old_cmd) {
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 0e2bee46304c..88db4ffaf11c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -13,6 +13,7 @@
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/list.h>
+#include <linux/of.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -32,19 +33,12 @@
#endif
unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
void pcibios_make_OF_bus_map(void);
-static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
-static int probe_resource(struct pci_bus *parent, struct resource *pr,
- struct resource *res, struct resource **conflict);
-static void update_bridge_base(struct pci_bus *bus, int i);
-static void pcibios_fixup_resources(struct pci_dev* dev);
static void fixup_broken_pcnet32(struct pci_dev* dev);
-static int reparent_resources(struct resource *parent, struct resource *res);
static void fixup_cpc710_pci64(struct pci_dev* dev);
#ifdef CONFIG_PPC_OF
static u8* pci_to_OF_bus_map;
@@ -53,7 +47,7 @@ static u8* pci_to_OF_bus_map;
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
-int pci_assign_all_buses;
+static int pci_assign_all_buses;
LIST_HEAD(hose_list);
@@ -100,505 +94,6 @@ fixup_cpc710_pci64(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
-static void
-pcibios_fixup_resources(struct pci_dev *dev)
-{
- struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
- int i;
- unsigned long offset;
-
- if (!hose) {
- printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
- return;
- }
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- struct resource *res = dev->resource + i;
- if (!res->flags)
- continue;
- if (res->end == 0xffffffff) {
- DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
- pci_name(dev), i, (u64)res->start, (u64)res->end);
- res->end -= res->start;
- res->start = 0;
- res->flags |= IORESOURCE_UNSET;
- continue;
- }
- offset = 0;
- if (res->flags & IORESOURCE_MEM) {
- offset = hose->pci_mem_offset;
- } else if (res->flags & IORESOURCE_IO) {
- offset = (unsigned long) hose->io_base_virt
- - isa_io_base;
- }
- if (offset != 0) {
- res->start += offset;
- res->end += offset;
- DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
- i, res->flags, pci_name(dev),
- (u64)res->start - offset, (u64)res->start);
- }
- }
-
- /* Call machine specific resource fixup */
- if (ppc_md.pcibios_fixup_resources)
- ppc_md.pcibios_fixup_resources(dev);
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
-
-void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res)
-{
- unsigned long offset = 0;
- struct pci_controller *hose = dev->sysdata;
-
- if (hose && res->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - isa_io_base;
- else if (hose && res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
- region->start = res->start - offset;
- region->end = res->end - offset;
-}
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
-{
- unsigned long offset = 0;
- struct pci_controller *hose = dev->sysdata;
-
- if (hose && res->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - isa_io_base;
- else if (hose && res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
- res->start = region->start + offset;
- res->end = region->end + offset;
-}
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-
-/*
- * We need to avoid collisions with `mirrored' VGA ports
- * and other strange ISA hardware, so we always want the
- * addresses to be allocated in the 0x000-0x0ff region
- * modulo 0x400.
- *
- * Why? Because some silly external IO cards only decode
- * the low 10 bits of the IO address. The 0x00-0xff region
- * is reserved for motherboard devices that decode all 16
- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
- * but we want to try to avoid allocating at 0x2900-0x2bff
- * which might have be mirrored at 0x0100-0x03ff..
- */
-void pcibios_align_resource(void *data, struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- struct pci_dev *dev = data;
-
- if (res->flags & IORESOURCE_IO) {
- resource_size_t start = res->start;
-
- if (size > 0x100) {
- printk(KERN_ERR "PCI: I/O Region %s/%d too large"
- " (%lld bytes)\n", pci_name(dev),
- dev->resource - res, (unsigned long long)size);
- }
-
- if (start & 0x300) {
- start = (start + 0x3ff) & ~0x3ff;
- res->start = start;
- }
- }
-}
-EXPORT_SYMBOL(pcibios_align_resource);
-
-/*
- * Handle resources of PCI devices. If the world were perfect, we could
- * just allocate all the resource regions and do nothing more. It isn't.
- * On the other hand, we cannot just re-allocate all devices, as it would
- * require us to know lots of host bridge internals. So we attempt to
- * keep as much of the original configuration as possible, but tweak it
- * when it's found to be wrong.
- *
- * Known BIOS problems we have to work around:
- * - I/O or memory regions not configured
- * - regions configured, but not enabled in the command register
- * - bogus I/O addresses above 64K used
- * - expansion ROMs left enabled (this may sound harmless, but given
- * the fact the PCI specs explicitly allow address decoders to be
- * shared between expansion ROMs and other resource regions, it's
- * at least dangerous)
- *
- * Our solution:
- * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
- * This gives us fixed barriers on where we can allocate.
- * (2) Allocate resources for all enabled devices. If there is
- * a collision, just mark the resource as unallocated. Also
- * disable expansion ROMs during this step.
- * (3) Try to allocate resources for disabled devices. If the
- * resources were assigned correctly, everything goes well,
- * if they weren't, they won't disturb allocation of other
- * resources.
- * (4) Assign new addresses to resources which were either
- * not configured at all or misconfigured. If explicitly
- * requested by the user, configure expansion ROM address
- * as well.
- */
-
-static void __init
-pcibios_allocate_bus_resources(struct list_head *bus_list)
-{
- struct pci_bus *bus;
- int i;
- struct resource *res, *pr;
-
- /* Depth-First Search on bus tree */
- list_for_each_entry(bus, bus_list, node) {
- for (i = 0; i < 4; ++i) {
- if ((res = bus->resource[i]) == NULL || !res->flags
- || res->start > res->end)
- continue;
- if (bus->parent == NULL)
- pr = (res->flags & IORESOURCE_IO)?
- &ioport_resource: &iomem_resource;
- else {
- pr = pci_find_parent_resource(bus->self, res);
- if (pr == res) {
- /* this happens when the generic PCI
- * code (wrongly) decides that this
- * bridge is transparent -- paulus
- */
- continue;
- }
- }
-
- DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
- (u64)res->start, (u64)res->end, res->flags, pr);
- if (pr) {
- if (request_resource(pr, res) == 0)
- continue;
- /*
- * Must be a conflict with an existing entry.
- * Move that entry (or entries) under the
- * bridge resource and try again.
- */
- if (reparent_resources(pr, res) == 0)
- continue;
- }
- printk(KERN_ERR "PCI: Cannot allocate resource region "
- "%d of PCI bridge %d\n", i, bus->number);
- if (pci_relocate_bridge_resource(bus, i))
- bus->resource[i] = NULL;
- }
- pcibios_allocate_bus_resources(&bus->children);
- }
-}
-
-/*
- * Reparent resource children of pr that conflict with res
- * under res, and make res replace those children.
- */
-static int __init
-reparent_resources(struct resource *parent, struct resource *res)
-{
- struct resource *p, **pp;
- struct resource **firstpp = NULL;
-
- for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
- if (p->end < res->start)
- continue;
- if (res->end < p->start)
- break;
- if (p->start < res->start || p->end > res->end)
- return -1; /* not completely contained */
- if (firstpp == NULL)
- firstpp = pp;
- }
- if (firstpp == NULL)
- return -1; /* didn't find any conflicting entries? */
- res->parent = parent;
- res->child = *firstpp;
- res->sibling = *pp;
- *firstpp = res;
- *pp = NULL;
- for (p = res->child; p != NULL; p = p->sibling) {
- p->parent = res;
- DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
- p->name, (u64)p->start, (u64)p->end, res->name);
- }
- return 0;
-}
-
-/*
- * A bridge has been allocated a range which is outside the range
- * of its parent bridge, so it needs to be moved.
- */
-static int __init
-pci_relocate_bridge_resource(struct pci_bus *bus, int i)
-{
- struct resource *res, *pr, *conflict;
- unsigned long try, size;
- int j;
- struct pci_bus *parent = bus->parent;
-
- if (parent == NULL) {
- /* shouldn't ever happen */
- printk(KERN_ERR "PCI: can't move host bridge resource\n");
- return -1;
- }
- res = bus->resource[i];
- if (res == NULL)
- return -1;
- pr = NULL;
- for (j = 0; j < 4; j++) {
- struct resource *r = parent->resource[j];
- if (!r)
- continue;
- if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
- continue;
- if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
- pr = r;
- break;
- }
- if (res->flags & IORESOURCE_PREFETCH)
- pr = r;
- }
- if (pr == NULL)
- return -1;
- size = res->end - res->start;
- if (pr->start > pr->end || size > pr->end - pr->start)
- return -1;
- try = pr->end;
- for (;;) {
- res->start = try - size;
- res->end = try;
- if (probe_resource(bus->parent, pr, res, &conflict) == 0)
- break;
- if (conflict->start <= pr->start + size)
- return -1;
- try = conflict->start - 1;
- }
- if (request_resource(pr, res)) {
- DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
- (u64)res->start, (u64)res->end);
- return -1; /* "can't happen" */
- }
- update_bridge_base(bus, i);
- printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
- bus->number, i, (unsigned long long)res->start,
- (unsigned long long)res->end);
- return 0;
-}
-
-static int __init
-probe_resource(struct pci_bus *parent, struct resource *pr,
- struct resource *res, struct resource **conflict)
-{
- struct pci_bus *bus;
- struct pci_dev *dev;
- struct resource *r;
- int i;
-
- for (r = pr->child; r != NULL; r = r->sibling) {
- if (r->end >= res->start && res->end >= r->start) {
- *conflict = r;
- return 1;
- }
- }
- list_for_each_entry(bus, &parent->children, node) {
- for (i = 0; i < 4; ++i) {
- if ((r = bus->resource[i]) == NULL)
- continue;
- if (!r->flags || r->start > r->end || r == res)
- continue;
- if (pci_find_parent_resource(bus->self, r) != pr)
- continue;
- if (r->end >= res->start && res->end >= r->start) {
- *conflict = r;
- return 1;
- }
- }
- }
- list_for_each_entry(dev, &parent->devices, bus_list) {
- for (i = 0; i < 6; ++i) {
- r = &dev->resource[i];
- if (!r->flags || (r->flags & IORESOURCE_UNSET))
- continue;
- if (pci_find_parent_resource(dev, r) != pr)
- continue;
- if (r->end >= res->start && res->end >= r->start) {
- *conflict = r;
- return 1;
- }
- }
- }
- return 0;
-}
-
-void __init
-update_bridge_resource(struct pci_dev *dev, struct resource *res)
-{
- u8 io_base_lo, io_limit_lo;
- u16 mem_base, mem_limit;
- u16 cmd;
- unsigned long start, end, off;
- struct pci_controller *hose = dev->sysdata;
-
- if (!hose) {
- printk("update_bridge_base: no hose?\n");
- return;
- }
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- pci_write_config_word(dev, PCI_COMMAND,
- cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
- if (res->flags & IORESOURCE_IO) {
- off = (unsigned long) hose->io_base_virt - isa_io_base;
- start = res->start - off;
- end = res->end - off;
- io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
- io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
- if (end > 0xffff)
- io_base_lo |= PCI_IO_RANGE_TYPE_32;
- else
- io_base_lo |= PCI_IO_RANGE_TYPE_16;
- pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
- start >> 16);
- pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
- end >> 16);
- pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
- pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
-
- } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
- == IORESOURCE_MEM) {
- off = hose->pci_mem_offset;
- mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
- mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
- pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
- pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
-
- } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
- == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
- off = hose->pci_mem_offset;
- mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
- mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
- pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
- pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
-
- } else {
- DBG(KERN_ERR "PCI: ugh, bridge %s res has flags=%lx\n",
- pci_name(dev), res->flags);
- }
- pci_write_config_word(dev, PCI_COMMAND, cmd);
-}
-
-static void __init
-update_bridge_base(struct pci_bus *bus, int i)
-{
- struct resource *res = bus->resource[i];
- struct pci_dev *dev = bus->self;
- update_bridge_resource(dev, res);
-}
-
-static inline void alloc_resource(struct pci_dev *dev, int idx)
-{
- struct resource *pr, *r = &dev->resource[idx];
-
- DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
- pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
- printk(KERN_ERR "PCI: Cannot allocate resource region %d"
- " of device %s\n", idx, pci_name(dev));
- if (pr)
- DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
- pr, (u64)pr->start, (u64)pr->end, pr->flags);
- /* We'll assign a new address later */
- r->flags |= IORESOURCE_UNSET;
- r->end -= r->start;
- r->start = 0;
- }
-}
-
-static void __init
-pcibios_allocate_resources(int pass)
-{
- struct pci_dev *dev = NULL;
- int idx, disabled;
- u16 command;
- struct resource *r;
-
- for_each_pci_dev(dev) {
- pci_read_config_word(dev, PCI_COMMAND, &command);
- for (idx = 0; idx < 6; idx++) {
- r = &dev->resource[idx];
- if (r->parent) /* Already allocated */
- continue;
- if (!r->flags || (r->flags & IORESOURCE_UNSET))
- continue; /* Not assigned at all */
- if (r->flags & IORESOURCE_IO)
- disabled = !(command & PCI_COMMAND_IO);
- else
- disabled = !(command & PCI_COMMAND_MEMORY);
- if (pass == disabled)
- alloc_resource(dev, idx);
- }
- if (pass)
- continue;
- r = &dev->resource[PCI_ROM_RESOURCE];
- if (r->flags & IORESOURCE_ROM_ENABLE) {
- /* Turn the ROM off, leave the resource region, but keep it unregistered. */
- u32 reg;
- DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
- r->flags &= ~IORESOURCE_ROM_ENABLE;
- pci_read_config_dword(dev, dev->rom_base_reg, &reg);
- pci_write_config_dword(dev, dev->rom_base_reg,
- reg & ~PCI_ROM_ADDRESS_ENABLE);
- }
- }
-}
-
-static void __init
-pcibios_assign_resources(void)
-{
- struct pci_dev *dev = NULL;
- int idx;
- struct resource *r;
-
- for_each_pci_dev(dev) {
- int class = dev->class >> 8;
-
- /* Don't touch classless devices and host bridges */
- if (!class || class == PCI_CLASS_BRIDGE_HOST)
- continue;
-
- for (idx = 0; idx < 6; idx++) {
- r = &dev->resource[idx];
-
- /*
- * We shall assign a new address to this resource,
- * either because the BIOS (sic) forgot to do so
- * or because we have decided the old address was
- * unusable for some reason.
- */
- if ((r->flags & IORESOURCE_UNSET) && r->end &&
- (!ppc_md.pcibios_enable_device_hook ||
- !ppc_md.pcibios_enable_device_hook(dev, 1))) {
- int rc;
-
- r->flags &= ~IORESOURCE_UNSET;
- rc = pci_assign_resource(dev, idx);
- BUG_ON(rc);
- }
- }
-
-#if 0 /* don't assign ROMs */
- r = &dev->resource[PCI_ROM_RESOURCE];
- r->end -= r->start;
- r->start = 0;
- if (r->end)
- pci_assign_resource(dev, PCI_ROM_RESOURCE);
-#endif
- }
-}
-
#ifdef CONFIG_PPC_OF
/*
* Functions below are used on OpenFirmware machines.
@@ -619,7 +114,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
} else
pci_to_OF_bus_map[pci_bus] = bus_range[0];
- for (node=node->child; node != 0;node = node->sibling) {
+ for_each_child_of_node(node, node) {
struct pci_dev* dev;
const unsigned int *class_code, *reg;
@@ -662,8 +157,8 @@ pcibios_make_OF_bus_map(void)
/* For each hose, we begin searching bridges */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- struct device_node* node;
- node = (struct device_node *)hose->arch_data;
+ struct device_node* node = hose->dn;
+
if (!node)
continue;
make_one_node_map(node, hose->first_busno);
@@ -688,15 +183,18 @@ pcibios_make_OF_bus_map(void)
typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
static struct device_node*
-scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
+scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
{
+ struct device_node *node;
struct device_node* sub_node;
- for (; node != 0;node = node->sibling) {
+ for_each_child_of_node(parent, node) {
const unsigned int *class_code;
- if (filter(node, data))
+ if (filter(node, data)) {
+ of_node_put(node);
return node;
+ }
/* For PCI<->PCI bridges or CardBus bridges, we go down
* Note: some OFs create a parent node "multifunc-device" as
@@ -708,9 +206,11 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
strcmp(node->name, "multifunc-device"))
continue;
- sub_node = scan_OF_pci_childs(node->child, filter, data);
- if (sub_node)
+ sub_node = scan_OF_pci_childs(node, filter, data);
+ if (sub_node) {
+ of_node_put(node);
return sub_node;
+ }
}
return NULL;
}
@@ -718,11 +218,11 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
unsigned int devfn)
{
- struct device_node *np = NULL;
+ struct device_node *np;
const u32 *reg;
unsigned int psize;
- while ((np = of_get_next_child(parent, np)) != NULL) {
+ for_each_child_of_node(parent, np) {
reg = of_get_property(np, "reg", &psize);
if (reg == NULL || psize < 4)
continue;
@@ -742,7 +242,7 @@ static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
struct pci_controller *hose = pci_bus_to_host(bus);
if (hose == NULL)
return NULL;
- return of_node_get(hose->arch_data);
+ return of_node_get(hose->dn);
}
/* not a root bus, we need to get our parent */
@@ -812,9 +312,9 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
return -ENODEV;
/* Make sure it's really a PCI device */
hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->arch_data)
+ if (!hose || !hose->dn)
return -ENODEV;
- if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
+ if (!scan_OF_pci_childs(hose->dn,
find_OF_pci_device_filter, (void *)node))
return -ENODEV;
reg = of_get_property(node, "reg", NULL);
@@ -843,120 +343,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
}
EXPORT_SYMBOL(pci_device_from_OF_node);
-void __init
-pci_process_bridge_OF_ranges(struct pci_controller *hose,
- struct device_node *dev, int primary)
-{
- static unsigned int static_lc_ranges[256] __initdata;
- const unsigned int *dt_ranges;
- unsigned int *lc_ranges, *ranges, *prev, size;
- int rlen = 0, orig_rlen;
- int memno = 0;
- struct resource *res;
- int np, na = of_n_addr_cells(dev);
- np = na + 5;
-
- /* First we try to merge ranges to fix a problem with some pmacs
- * that can have more than 3 ranges, fortunately using contiguous
- * addresses -- BenH
- */
- dt_ranges = of_get_property(dev, "ranges", &rlen);
- if (!dt_ranges)
- return;
- /* Sanity check, though hopefully that never happens */
- if (rlen > sizeof(static_lc_ranges)) {
- printk(KERN_WARNING "OF ranges property too large !\n");
- rlen = sizeof(static_lc_ranges);
- }
- lc_ranges = static_lc_ranges;
- memcpy(lc_ranges, dt_ranges, rlen);
- orig_rlen = rlen;
-
- /* Let's work on a copy of the "ranges" property instead of damaging
- * the device-tree image in memory
- */
- ranges = lc_ranges;
- prev = NULL;
- while ((rlen -= np * sizeof(unsigned int)) >= 0) {
- if (prev) {
- if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
- (prev[2] + prev[na+4]) == ranges[2] &&
- (prev[na+2] + prev[na+4]) == ranges[na+2]) {
- prev[na+4] += ranges[na+4];
- ranges[0] = 0;
- ranges += np;
- continue;
- }
- }
- prev = ranges;
- ranges += np;
- }
-
- /*
- * The ranges property is laid out as an array of elements,
- * each of which comprises:
- * cells 0 - 2: a PCI address
- * cells 3 or 3+4: a CPU physical address
- * (size depending on dev->n_addr_cells)
- * cells 4+5 or 5+6: the size of the range
- */
- ranges = lc_ranges;
- rlen = orig_rlen;
- while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
- res = NULL;
- size = ranges[na+4];
- switch ((ranges[0] >> 24) & 0x3) {
- case 1: /* I/O space */
- if (ranges[2] != 0)
- break;
- hose->io_base_phys = ranges[na+2];
- /* limit I/O space to 16MB */
- if (size > 0x01000000)
- size = 0x01000000;
- hose->io_base_virt = ioremap(ranges[na+2], size);
- if (primary)
- isa_io_base = (unsigned long) hose->io_base_virt;
- res = &hose->io_resource;
- res->flags = IORESOURCE_IO;
- res->start = ranges[2];
- DBG("PCI: IO 0x%llx -> 0x%llx\n",
- (u64)res->start, (u64)res->start + size - 1);
- break;
- case 2: /* memory space */
- memno = 0;
- if (ranges[1] == 0 && ranges[2] == 0
- && ranges[na+4] <= (16 << 20)) {
- /* 1st 16MB, i.e. ISA memory area */
- if (primary)
- isa_mem_base = ranges[na+2];
- memno = 1;
- }
- while (memno < 3 && hose->mem_resources[memno].flags)
- ++memno;
- if (memno == 0)
- hose->pci_mem_offset = ranges[na+2] - ranges[2];
- if (memno < 3) {
- res = &hose->mem_resources[memno];
- res->flags = IORESOURCE_MEM;
- if(ranges[0] & 0x40000000)
- res->flags |= IORESOURCE_PREFETCH;
- res->start = ranges[na+2];
- DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
- (u64)res->start, (u64)res->start + size - 1);
- }
- break;
- }
- if (res != NULL) {
- res->name = dev->full_name;
- res->end = res->start + size - 1;
- res->parent = NULL;
- res->sibling = NULL;
- res->child = NULL;
- }
- ranges += np;
- }
-}
-
/* We create the "pci-OF-bus-map" property now so it appears in the
* /proc device tree
*/
@@ -986,219 +372,7 @@ void pcibios_make_OF_bus_map(void)
}
#endif /* CONFIG_PPC_OF */
-#ifdef CONFIG_PPC_PMAC
-/*
- * This set of routines checks for PCI<->PCI bridges that have closed
- * IO resources and have child devices. It tries to re-open an IO
- * window on them.
- *
- * This is a _temporary_ fix to workaround a problem with Apple's OF
- * closing IO windows on P2P bridges when the OF drivers of cards
- * below this bridge don't claim any IO range (typically ATI or
- * Adaptec).
- *
- * A more complete fix would be to use drivers/pci/setup-bus.c, which
- * involves a working pcibios_fixup_pbus_ranges(), some more care about
- * ordering when creating the host bus resources, and maybe a few more
- * minor tweaks
- */
-
-/* Initialize bridges with base/limit values we have collected */
-static void __init
-do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
-{
- struct pci_dev *bridge = bus->self;
- struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
- u32 l;
- u16 w;
- struct resource res;
-
- if (bus->resource[0] == NULL)
- return;
- res = *(bus->resource[0]);
-
- DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
- res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
- res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
- DBG(" IO window: %016llx-%016llx\n", res.start, res.end);
-
- /* Set up the top and bottom of the PCI I/O segment for this bus. */
- pci_read_config_dword(bridge, PCI_IO_BASE, &l);
- l &= 0xffff000f;
- l |= (res.start >> 8) & 0x00f0;
- l |= res.end & 0xf000;
- pci_write_config_dword(bridge, PCI_IO_BASE, l);
-
- if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
- l = (res.start >> 16) | (res.end & 0xffff0000);
- pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
- }
-
- pci_read_config_word(bridge, PCI_COMMAND, &w);
- w |= PCI_COMMAND_IO;
- pci_write_config_word(bridge, PCI_COMMAND, w);
-
-#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
- if (enable_vga) {
- pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
- w |= PCI_BRIDGE_CTL_VGA;
- pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
- }
-#endif
-}
-
-/* This function is pretty basic and actually quite broken for the
- * general case, it's enough for us right now though. It's supposed
- * to tell us if we need to open an IO range at all or not and what
- * size.
- */
-static int __init
-check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
-{
- struct pci_dev *dev;
- int i;
- int rc = 0;
-
-#define push_end(res, mask) do { \
- BUG_ON((mask+1) & mask); \
- res->end = (res->end + mask) | mask; \
-} while (0)
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- u16 class = dev->class >> 8;
-
- if (class == PCI_CLASS_DISPLAY_VGA ||
- class == PCI_CLASS_NOT_DEFINED_VGA)
- *found_vga = 1;
- if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
- rc |= check_for_io_childs(dev->subordinate, res, found_vga);
- if (class == PCI_CLASS_BRIDGE_CARDBUS)
- push_end(res, 0xfff);
-
- for (i=0; i<PCI_NUM_RESOURCES; i++) {
- struct resource *r;
- unsigned long r_size;
-
- if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
- && i >= PCI_BRIDGE_RESOURCES)
- continue;
- r = &dev->resource[i];
- r_size = r->end - r->start;
- if (r_size < 0xfff)
- r_size = 0xfff;
- if (r->flags & IORESOURCE_IO && (r_size) != 0) {
- rc = 1;
- push_end(res, r_size);
- }
- }
- }
-
- return rc;
-}
-
-/* Here we scan all P2P bridges of a given level that have a closed
- * IO window. Note that the test for the presence of a VGA card should
- * be improved to take into account already configured P2P bridges,
- * currently, we don't see them and might end up configuring 2 bridges
- * with VGA pass through enabled
- */
-static void __init
-do_fixup_p2p_level(struct pci_bus *bus)
-{
- struct pci_bus *b;
- int i, parent_io;
- int has_vga = 0;
-
- for (parent_io=0; parent_io<4; parent_io++)
- if (bus->resource[parent_io]
- && bus->resource[parent_io]->flags & IORESOURCE_IO)
- break;
- if (parent_io >= 4)
- return;
-
- list_for_each_entry(b, &bus->children, node) {
- struct pci_dev *d = b->self;
- struct pci_controller* hose = (struct pci_controller *)d->sysdata;
- struct resource *res = b->resource[0];
- struct resource tmp_res;
- unsigned long max;
- int found_vga = 0;
-
- memset(&tmp_res, 0, sizeof(tmp_res));
- tmp_res.start = bus->resource[parent_io]->start;
-
- /* We don't let low addresses go through that closed P2P bridge, well,
- * that may not be necessary but I feel safer that way
- */
- if (tmp_res.start == 0)
- tmp_res.start = 0x1000;
-
- if (!list_empty(&b->devices) && res && res->flags == 0 &&
- res != bus->resource[parent_io] &&
- (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
- check_for_io_childs(b, &tmp_res, &found_vga)) {
- u8 io_base_lo;
-
- printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
-
- if (found_vga) {
- if (has_vga) {
- printk(KERN_WARNING "Skipping VGA, already active"
- " on bus segment\n");
- found_vga = 0;
- } else
- has_vga = 1;
- }
- pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
-
- if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
- max = ((unsigned long) hose->io_base_virt
- - isa_io_base) + 0xffffffff;
- else
- max = ((unsigned long) hose->io_base_virt
- - isa_io_base) + 0xffff;
-
- *res = tmp_res;
- res->flags = IORESOURCE_IO;
- res->name = b->name;
-
- /* Find a resource in the parent where we can allocate */
- for (i = 0 ; i < 4; i++) {
- struct resource *r = bus->resource[i];
- if (!r)
- continue;
- if ((r->flags & IORESOURCE_IO) == 0)
- continue;
- DBG("Trying to allocate from %016llx, size %016llx from parent"
- " res %d: %016llx -> %016llx\n",
- res->start, res->end, i, r->start, r->end);
-
- if (allocate_resource(r, res, res->end + 1, res->start, max,
- res->end + 1, NULL, NULL) < 0) {
- DBG("Failed !\n");
- continue;
- }
- do_update_p2p_io_resource(b, found_vga);
- break;
- }
- }
- do_fixup_p2p_level(b);
- }
-}
-
-static void
-pcibios_fixup_p2p_bridges(void)
-{
- struct pci_bus *b;
-
- list_for_each_entry(b, &pci_root_buses, node)
- do_fixup_p2p_level(b);
-}
-
-#endif /* CONFIG_PPC_PMAC */
-
-static int __init
-pcibios_init(void)
+static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
struct pci_bus *bus;
@@ -1206,6 +380,9 @@ pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
+ if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
+ pci_assign_all_buses = 1;
+
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
if (pci_assign_all_buses)
@@ -1213,9 +390,10 @@ pcibios_init(void)
hose->last_busno = 0xff;
bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
hose->ops, hose);
- if (bus)
+ if (bus) {
pci_bus_add_devices(bus);
- hose->last_busno = bus->subordinate;
+ hose->last_busno = bus->subordinate;
+ }
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
}
@@ -1228,18 +406,8 @@ pcibios_init(void)
if (pci_assign_all_buses && have_of)
pcibios_make_OF_bus_map();
- /* Call machine dependent fixup */
- if (ppc_md.pcibios_fixup)
- ppc_md.pcibios_fixup();
-
- /* Allocate and assign resources */
- pcibios_allocate_bus_resources(&pci_root_buses);
- pcibios_allocate_resources(0);
- pcibios_allocate_resources(1);
-#ifdef CONFIG_PPC_PMAC
- pcibios_fixup_p2p_bridges();
-#endif /* CONFIG_PPC_PMAC */
- pcibios_assign_resources();
+ /* Call common code to handle resource allocation */
+ pcibios_resource_survey();
/* Call machine dependent post-init code */
if (ppc_md.pcibios_after_init)
@@ -1250,14 +418,14 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
-void pcibios_fixup_bus(struct pci_bus *bus)
+void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
{
struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
unsigned long io_offset;
struct resource *res;
- struct pci_dev *dev;
int i;
+ /* Hookup PHB resources */
io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
if (bus->parent == NULL) {
/* This is a host bridge - fill in its resources */
@@ -1272,8 +440,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
res->end = IO_SPACE_LIMIT;
res->flags = IORESOURCE_IO;
}
- res->start += io_offset;
- res->end += io_offset;
+ res->start = (res->start + io_offset) & 0xffffffffu;
+ res->end = (res->end + io_offset) & 0xffffffffu;
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
@@ -1288,35 +456,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
bus->resource[i+1] = res;
}
- } else {
- /* This is a subordinate bridge */
- pci_read_bridge_bases(bus);
-
- for (i = 0; i < 4; ++i) {
- if ((res = bus->resource[i]) == NULL)
- continue;
- if (!res->flags || bus->self->transparent)
- continue;
- if (io_offset && (res->flags & IORESOURCE_IO)) {
- res->start += io_offset;
- res->end += io_offset;
- } else if (hose->pci_mem_offset
- && (res->flags & IORESOURCE_MEM)) {
- res->start += hose->pci_mem_offset;
- res->end += hose->pci_mem_offset;
- }
- }
- }
-
- /* Platform specific bus fixups */
- if (ppc_md.pcibios_fixup_bus)
- ppc_md.pcibios_fixup_bus(bus);
-
- /* Read default IRQs and fixup if necessary */
- list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_read_irq_line(dev);
- if (ppc_md.pci_irq_fixup)
- ppc_md.pci_irq_fixup(dev);
}
}
@@ -1328,37 +467,6 @@ pcibios_update_irq(struct pci_dev *dev, int irq)
/* XXX FIXME - update OF device tree node interrupt property */
}
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- if (ppc_md.pcibios_enable_device_hook)
- if (ppc_md.pcibios_enable_device_hook(dev, 0))
- return -EINVAL;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- for (idx=0; idx<6; idx++) {
- r = &dev->resource[idx];
- if (r->flags & IORESOURCE_UNSET) {
- printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
static struct pci_controller*
pci_bus_to_hose(int bus)
{
@@ -1381,17 +489,6 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
struct pci_controller* hose;
long result = -EOPNOTSUPP;
- /* Argh ! Please forgive me for that hack, but that's the
- * simplest way to get existing XFree to not lockup on some
- * G5 machines... So when something asks for bus 0 io base
- * (bus 0 is HT root), we return the AGP one instead.
- */
-#ifdef CONFIG_PPC_PMAC
- if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
- if (bus == 0)
- bus = 0xf0;
-#endif /* CONFIG_PPC_PMAC */
-
hose = pci_bus_to_hose(bus);
if (!hose)
return -ENODEV;
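
A minimal sketch of how a platform would now request full bus renumbering through the flag tested in the pcibios_init() hunk above, instead of writing pci_assign_all_buses directly. The board hook name is hypothetical; in practice this would typically be done from the board's ppc_md.setup_arch path before pcibios_init() runs.

/* Sketch only: ask the common PCI code to renumber all buses. */
static void __init myboard_setup_arch(void)	/* hypothetical board hook */
{
	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
}
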
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9f63bdcb0bdf..52750745edfd 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -31,7 +31,6 @@
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
-#include <asm/firmware.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -41,10 +40,6 @@
#endif
unsigned long pci_probe_only = 1;
-int pci_assign_all_buses = 0;
-
-static void fixup_resource(struct resource *res, struct pci_dev *dev);
-static void do_bus_setup(struct pci_bus *bus);
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
@@ -70,139 +65,31 @@ struct dma_mapping_ops *get_pci_dma_ops(void)
}
EXPORT_SYMBOL(get_pci_dma_ops);
-static void fixup_broken_pcnet32(struct pci_dev* dev)
-{
- if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
- dev->vendor = PCI_VENDOR_ID_AMD;
- pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
-void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res)
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
- unsigned long offset = 0;
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- if (!hose)
- return;
-
- if (res->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
- if (res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-
- region->start = res->start - offset;
- region->end = res->end - offset;
+ return dma_set_mask(&dev->dev, mask);
}
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
- unsigned long offset = 0;
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- if (!hose)
- return;
+ int rc;
- if (res->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ rc = dma_set_mask(&dev->dev, mask);
+ dev->dev.coherent_dma_mask = dev->dma_mask;
- if (res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-
- res->start = region->start + offset;
- res->end = region->end + offset;
+ return rc;
}
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-#endif
-
-/*
- * We need to avoid collisions with `mirrored' VGA ports
- * and other strange ISA hardware, so we always want the
- * addresses to be allocated in the 0x000-0x0ff region
- * modulo 0x400.
- *
- * Why? Because some silly external IO cards only decode
- * the low 10 bits of the IO address. The 0x00-0xff region
- * is reserved for motherboard devices that decode all 16
- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
- * but we want to try to avoid allocating at 0x2900-0x2bff
- * which might have be mirrored at 0x0100-0x03ff..
- */
-void pcibios_align_resource(void *data, struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- struct pci_dev *dev = data;
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- resource_size_t start = res->start;
- unsigned long alignto;
-
- if (res->flags & IORESOURCE_IO) {
- unsigned long offset = (unsigned long)hose->io_base_virt -
- _IO_BASE;
- /* Make sure we start at our min on all hoses */
- if (start - offset < PCIBIOS_MIN_IO)
- start = PCIBIOS_MIN_IO + offset;
-
- /*
- * Put everything into 0x00-0xff region modulo 0x400
- */
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
-
- } else if (res->flags & IORESOURCE_MEM) {
- /* Make sure we start at our min on all hoses */
- if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
- start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
-
- /* Align to multiple of size of minimum base. */
- alignto = max(0x1000UL, align);
- start = ALIGN(start, alignto);
- }
-
- res->start = start;
-}
-
-void __devinit pcibios_claim_one_bus(struct pci_bus *b)
+static void fixup_broken_pcnet32(struct pci_dev* dev)
{
- struct pci_dev *dev;
- struct pci_bus *child_bus;
-
- list_for_each_entry(dev, &b->devices, bus_list) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r = &dev->resource[i];
-
- if (r->parent || !r->start || !r->flags)
- continue;
- pci_claim_resource(dev, i);
- }
+ if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
+ dev->vendor = PCI_VENDOR_ID_AMD;
+ pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
}
-
- list_for_each_entry(child_bus, &b->children, node)
- pcibios_claim_one_bus(child_bus);
}
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
-#endif
-
-static void __init pcibios_claim_of_setup(void)
-{
- struct pci_bus *b;
-
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return;
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
- list_for_each_entry(b, &pci_root_buses, node)
- pcibios_claim_one_bus(b);
-}
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
@@ -270,7 +157,6 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
res->end = base + size - 1;
res->flags = flags;
res->name = pci_name(dev);
- fixup_resource(res, dev);
}
}
@@ -339,16 +225,17 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
EXPORT_SYMBOL(of_create_pci_dev);
void __devinit of_scan_bus(struct device_node *node,
- struct pci_bus *bus)
+ struct pci_bus *bus)
{
- struct device_node *child = NULL;
+ struct device_node *child;
const u32 *reg;
int reglen, devfn;
struct pci_dev *dev;
DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
- while ((child = of_get_next_child(node, child)) != NULL) {
+ /* Scan direct children */
+ for_each_child_of_node(node, child) {
DBG(" * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
@@ -359,19 +246,26 @@ void __devinit of_scan_bus(struct device_node *node,
dev = of_create_pci_dev(child, bus, devfn);
if (!dev)
continue;
- DBG("dev header type: %x\n", dev->hdr_type);
+ DBG(" dev header type: %x\n", dev->hdr_type);
+ }
+	/* Apply all fixups */
+ pcibios_fixup_of_probed_bus(bus);
+
+ /* Now scan child busses */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- of_scan_pci_bridge(child, dev);
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+ struct device_node *child = pci_device_to_OF_node(dev);
+			if (child)
+ of_scan_pci_bridge(child, dev);
+ }
}
-
- do_bus_setup(bus);
}
EXPORT_SYMBOL(of_scan_bus);
void __devinit of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev)
+ struct pci_dev *dev)
{
struct pci_bus *bus;
const u32 *busrange, *ranges;
@@ -441,7 +335,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
res->start = of_read_number(&ranges[1], 2);
res->end = res->start + size - 1;
res->flags = flags;
- fixup_resource(res, dev);
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
@@ -462,12 +355,12 @@ EXPORT_SYMBOL(of_scan_pci_bridge);
void __devinit scan_phb(struct pci_controller *hose)
{
struct pci_bus *bus;
- struct device_node *node = hose->arch_data;
+ struct device_node *node = hose->dn;
int i, mode;
- struct resource *res;
- DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+ DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+ /* Create an empty bus for the toplevel */
bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
if (bus == NULL) {
printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
@@ -477,27 +370,27 @@ void __devinit scan_phb(struct pci_controller *hose)
bus->secondary = hose->first_busno;
hose->bus = bus;
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- pcibios_map_io_space(bus);
-
- bus->resource[0] = res = &hose->io_resource;
- if (res->flags && request_resource(&ioport_resource, res)) {
- printk(KERN_ERR "Failed to request PCI IO region "
- "on PCI domain %04x\n", hose->global_number);
- DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
- res->start, res->end);
- }
+ /* Get some IO space for the new PHB */
+ pcibios_map_io_space(bus);
+ /* Wire up PHB bus resources */
+ DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n",
+ hose->io_resource.start, hose->io_resource.end,
+ hose->io_resource.flags);
+ bus->resource[0] = &hose->io_resource;
for (i = 0; i < 3; ++i) {
- res = &hose->mem_resources[i];
- bus->resource[i+1] = res;
- if (res->flags && request_resource(&iomem_resource, res))
- printk(KERN_ERR "Failed to request PCI memory region "
- "on PCI domain %04x\n", hose->global_number);
+ DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
+ hose->mem_resources[i].start,
+ hose->mem_resources[i].end,
+ hose->mem_resources[i].flags);
+ bus->resource[i+1] = &hose->mem_resources[i];
}
+ DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
+ DBG("PCI: PHB IO offset = %08lx\n",
+ (unsigned long)hose->io_base_virt - _IO_BASE);
+ /* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
-
if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
DBG(" probe mode: %d\n", mode);
@@ -514,15 +407,15 @@ static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
+ printk(KERN_INFO "PCI: Probing PCI hardware\n");
+
/* For now, override phys_mem_access_prot. If we need it,
* later, we may move that initialization to each ppc_md
*/
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- iSeries_pcibios_init();
-
- printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
+ if (pci_probe_only)
+ ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -530,19 +423,8 @@ static int __init pcibios_init(void)
pci_bus_add_devices(hose->bus);
}
- if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
- if (pci_probe_only)
- pcibios_claim_of_setup();
- else
- /* FIXME: `else' will be removed when
- pci_assign_unassigned_resources() is able to work
- correctly with [partially] allocated PCI tree. */
- pci_assign_unassigned_resources();
- }
-
- /* Call machine dependent final fixup */
- if (ppc_md.pcibios_fixup)
- ppc_md.pcibios_fixup();
+ /* Call common code to handle resource allocation */
+ pcibios_resource_survey();
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
@@ -551,141 +433,6 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- u16 cmd, oldcmd;
- int i;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- oldcmd = cmd;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *res = &dev->resource[i];
-
- /* Only set up the requested stuff */
- if (!(mask & (1<<i)))
- continue;
-
- if (res->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (res->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-
- if (cmd != oldcmd) {
- printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
- pci_name(dev), cmd);
- /* Enable the appropriate bits in the PCI command register. */
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
-/* Decide whether to display the domain number in /proc */
-int pci_proc_domain(struct pci_bus *bus)
-{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
- else {
- struct pci_controller *hose = pci_bus_to_host(bus);
- return hose->buid != 0;
- }
-}
-
-void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
- struct device_node *dev, int prim)
-{
- const unsigned int *ranges;
- unsigned int pci_space;
- unsigned long size;
- int rlen = 0;
- int memno = 0;
- struct resource *res;
- int np, na = of_n_addr_cells(dev);
- unsigned long pci_addr, cpu_phys_addr;
-
- np = na + 5;
-
- /* From "PCI Binding to 1275"
- * The ranges property is laid out as an array of elements,
- * each of which comprises:
- * cells 0 - 2: a PCI address
- * cells 3 or 3+4: a CPU physical address
- * (size depending on dev->n_addr_cells)
- * cells 4+5 or 5+6: the size of the range
- */
- ranges = of_get_property(dev, "ranges", &rlen);
- if (ranges == NULL)
- return;
- hose->io_base_phys = 0;
- while ((rlen -= np * sizeof(unsigned int)) >= 0) {
- res = NULL;
- pci_space = ranges[0];
- pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
- cpu_phys_addr = of_translate_address(dev, &ranges[3]);
- size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
- ranges += np;
- if (size == 0)
- continue;
-
- /* Now consume following elements while they are contiguous */
- while (rlen >= np * sizeof(unsigned int)) {
- unsigned long addr, phys;
-
- if (ranges[0] != pci_space)
- break;
- addr = ((unsigned long)ranges[1] << 32) | ranges[2];
- phys = ranges[3];
- if (na >= 2)
- phys = (phys << 32) | ranges[4];
- if (addr != pci_addr + size ||
- phys != cpu_phys_addr + size)
- break;
-
- size += ((unsigned long)ranges[na+3] << 32)
- | ranges[na+4];
- ranges += np;
- rlen -= np * sizeof(unsigned int);
- }
-
- switch ((pci_space >> 24) & 0x3) {
- case 1: /* I/O space */
- hose->io_base_phys = cpu_phys_addr - pci_addr;
- /* handle from 0 to top of I/O window */
- hose->pci_io_size = pci_addr + size;
-
- res = &hose->io_resource;
- res->flags = IORESOURCE_IO;
- res->start = pci_addr;
- DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
- res->start, res->start + size - 1);
- break;
- case 2: /* memory space */
- memno = 0;
- while (memno < 3 && hose->mem_resources[memno].flags)
- ++memno;
-
- if (memno == 0)
- hose->pci_mem_offset = cpu_phys_addr - pci_addr;
- if (memno < 3) {
- res = &hose->mem_resources[memno];
- res->flags = IORESOURCE_MEM;
- res->start = cpu_phys_addr;
- DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
- res->start, res->start + size - 1);
- }
- break;
- }
- if (res != NULL) {
- res->name = dev->full_name;
- res->end = res->start + size - 1;
- res->parent = NULL;
- res->sibling = NULL;
- res->child = NULL;
- }
- }
-}
-
#ifdef CONFIG_HOTPLUG
int pcibios_unmap_io_space(struct pci_bus *bus)
@@ -719,8 +466,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
if (hose->io_base_alloc == 0)
return 0;
- DBG("IO unmapping for PHB %s\n",
- ((struct device_node *)hose->arch_data)->full_name);
+ DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
DBG(" alloc=0x%p\n", hose->io_base_alloc);
/* This is a PHB, we fully unmap the IO area */
@@ -779,8 +525,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_base_virt = (void __iomem *)(area->addr +
hose->io_base_phys - phys_page);
- DBG("IO mapping for PHB %s\n",
- ((struct device_node *)hose->arch_data)->full_name);
+ DBG("IO mapping for PHB %s\n", hose->dn->full_name);
DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
DBG(" size=0x%016lx (alloc=0x%016lx)\n",
@@ -803,51 +548,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);
-static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long offset;
-
- if (res->flags & IORESOURCE_IO) {
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
- res->start += offset;
- res->end += offset;
- } else if (res->flags & IORESOURCE_MEM) {
- res->start += hose->pci_mem_offset;
- res->end += hose->pci_mem_offset;
- }
-}
-
-void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
- struct pci_bus *bus)
-{
- /* Update device resources. */
- int i;
-
- DBG("%s: Fixup resources:\n", pci_name(dev));
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *res = &dev->resource[i];
- if (!res->flags)
- continue;
-
- DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
- i, res->flags, res->start, res->end);
-
- fixup_resource(res, dev);
-
- DBG(" > %08lx:0x%016lx...0x%016lx\n",
- res->flags, res->start, res->end);
- }
-}
-EXPORT_SYMBOL(pcibios_fixup_device_resources);
-
void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
struct dev_archdata *sd = &dev->dev.archdata;
sd->of_node = pci_device_to_OF_node(dev);
- DBG("PCI device %s OF node: %s\n", pci_name(dev),
+ DBG("PCI: device %s OF node: %s\n", pci_name(dev),
sd->of_node ? sd->of_node->full_name : "<none>");
sd->dma_ops = pci_dma_ops;
@@ -861,7 +568,7 @@ void __devinit pcibios_setup_new_device(struct pci_dev *dev)
}
EXPORT_SYMBOL(pcibios_setup_new_device);
-static void __devinit do_bus_setup(struct pci_bus *bus)
+void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -870,42 +577,7 @@ static void __devinit do_bus_setup(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list)
pcibios_setup_new_device(dev);
-
- /* Read default IRQs and fixup if necessary */
- list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_read_irq_line(dev);
- if (ppc_md.pci_irq_fixup)
- ppc_md.pci_irq_fixup(dev);
- }
-}
-
-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
-{
- struct pci_dev *dev = bus->self;
- struct device_node *np;
-
- np = pci_bus_to_OF_node(bus);
-
- DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
-
- if (dev && pci_probe_only &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- /* This is a subordinate bridge */
-
- pci_read_bridge_bases(bus);
- pcibios_fixup_device_resources(dev, bus);
- }
-
- do_bus_setup(bus);
-
- if (!pci_probe_only)
- return;
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
- pcibios_fixup_device_resources(dev, bus);
}
-EXPORT_SYMBOL(pcibios_fixup_bus);
unsigned long pci_address_to_pio(phys_addr_t address)
{
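
A hedged usage sketch for the pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers added above, which now route through the generic dma_set_mask(). The driver name and the 64-bit-then-32-bit fallback are illustrative, not part of this patch.

static int mydrv_probe(struct pci_dev *pdev)	/* hypothetical driver probe */
{
	/* Prefer 64-bit DMA, fall back to 32-bit, give up otherwise. */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;
	return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
}
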
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index b4839038613d..1c67de52e3ce 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -56,11 +56,6 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
pdn->busno = (regs[0] >> 16) & 0xff;
pdn->devfn = (regs[0] >> 8) & 0xff;
}
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- const u32 *busp = of_get_property(dn, "linux,subbus", NULL);
- if (busp)
- pdn->bussubno = *busp;
- }
pdn->pci_ext_config_space = (type && *type == 1);
return NULL;
@@ -133,7 +128,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
*/
void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
{
- struct device_node * dn = (struct device_node *) phb->arch_data;
+ struct device_node *dn = phb->dn;
struct pci_dn *pdn;
/* PHB nodes themselves must not match */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 13ebeb2d71e6..aa9ff35b0e63 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -59,6 +59,7 @@ extern void single_step_exception(struct pt_regs *regs);
extern int sys_sigreturn(struct pt_regs *regs);
EXPORT_SYMBOL(clear_pages);
+EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index acc0d247d3c3..8b5efbce8d90 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
+#ifdef CONFIG_PPC64
+static void __init check_cpu_slb_size(unsigned long node)
+{
+ u32 *slb_size_ptr;
+
+ slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+ if (slb_size_ptr != NULL) {
+ mmu_slb_size = *slb_size_ptr;
+ }
+}
+#else
+#define check_cpu_slb_size(node) do { } while(0)
+#endif
+
static struct feature_property {
const char *name;
u32 min_value;
@@ -600,6 +614,29 @@ static struct feature_property {
#endif /* CONFIG_PPC64 */
};
+#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
+static inline void identical_pvr_fixup(unsigned long node)
+{
+ unsigned int pvr;
+ char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+ /*
+ * Since 440GR(x)/440EP(x) processors have the same pvr,
+ * we check the node path and set bit 28 in the cur_cpu_spec
+ * pvr for EP(x) processor version. This bit is always 0 in
+ * the "real" pvr. Then we call identify_cpu again with
+ * the new logical pvr to enable FPU support.
+ */
+ if (model && strstr(model, "440EP")) {
+ pvr = cur_cpu_spec->pvr_value | 0x8;
+ identify_cpu(0, pvr);
+ DBG("Using logical pvr %x for %s\n", pvr, model);
+ }
+}
+#else
+#define identical_pvr_fixup(node) do { } while(0)
+#endif
+
static void __init check_cpu_feature_properties(unsigned long node)
{
unsigned long i;
@@ -697,22 +734,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
if (prop && (*prop & 0xff000000) == 0x0f000000)
identify_cpu(0, *prop);
-#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
- /*
- * Since 440GR(x)/440EP(x) processors have the same pvr,
- * we check the node path and set bit 28 in the cur_cpu_spec
- * pvr for EP(x) processor version. This bit is always 0 in
- * the "real" pvr. Then we call identify_cpu again with
- * the new logical pvr to enable FPU support.
- */
- if (strstr(uname, "440EP")) {
- identify_cpu(0, cur_cpu_spec->pvr_value | 0x8);
- }
-#endif
+
+ identical_pvr_fixup(node);
}
check_cpu_feature_properties(node);
check_cpu_pa_features(node);
+ check_cpu_slb_size(node);
#ifdef CONFIG_PPC_PSERIES
if (nthreads > 1)
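
A worked illustration of the logical-PVR trick described in the identical_pvr_fixup() comment above: IBM bit 28 of the 32-bit PVR corresponds to mask 0x8, so ORing it in turns a 440GR(x) PVR into the logical 440EP(x) one. The sample value is made up; the real PVR comes from cur_cpu_spec->pvr_value.

unsigned int gr_pvr = 0x40000850;	/* hypothetical 440GR-style PVR */
unsigned int ep_pvr = gr_pvr | 0x8;	/* logical 440EP PVR: 0x40000858 */
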
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5d89a21dd0d6..5ab4c8466cc9 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2142,82 +2142,34 @@ static void __init fixup_device_tree_pmac(void)
#endif
#ifdef CONFIG_PPC_EFIKA
-/* The current fw of the Efika has a device tree needs quite a few
- * fixups to be compliant with the mpc52xx bindings. It's currently
- * unknown if it will ever be compliant (come on bPlan ...) so we do fixups.
- * NOTE that we (barely) tolerate it because the EFIKA was out before
- * the bindings were finished, for any new boards -> RTFM ! */
-
-struct subst_entry {
- char *path;
- char *property;
- void *value;
- int value_len;
-};
-
-static void __init fixup_device_tree_efika(void)
+/*
+ * The MPC5200 FEC driver requires a phy-handle property to tell it how
+ * to talk to the phy. If the phy-handle property is missing, then this
+ * function is called to add the appropriate nodes and link it to the
+ * ethernet node.
+ */
+static void __init fixup_device_tree_efika_add_phy(void)
{
- /* Substitution table */
- #define prop_cstr(x) x, sizeof(x)
- int prop_sound_irq[3] = { 2, 2, 0 };
- int prop_bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
- 3,4,0, 3,5,0, 3,6,0, 3,7,0,
- 3,8,0, 3,9,0, 3,10,0, 3,11,0,
- 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
- struct subst_entry efika_subst_table[] = {
- { "/", "device_type", prop_cstr("efika") },
- { "/builtin", "device_type", prop_cstr("soc") },
- { "/builtin/ata", "compatible", prop_cstr("mpc5200b-ata\0mpc5200-ata"), },
- { "/builtin/bestcomm", "compatible", prop_cstr("mpc5200b-bestcomm\0mpc5200-bestcomm") },
- { "/builtin/bestcomm", "interrupts", prop_bcomm_irq, sizeof(prop_bcomm_irq) },
- { "/builtin/ethernet", "compatible", prop_cstr("mpc5200b-fec\0mpc5200-fec") },
- { "/builtin/pic", "compatible", prop_cstr("mpc5200b-pic\0mpc5200-pic") },
- { "/builtin/serial", "compatible", prop_cstr("mpc5200b-psc-uart\0mpc5200-psc-uart") },
- { "/builtin/sound", "compatible", prop_cstr("mpc5200b-psc-ac97\0mpc5200-psc-ac97") },
- { "/builtin/sound", "interrupts", prop_sound_irq, sizeof(prop_sound_irq) },
- { "/builtin/sram", "compatible", prop_cstr("mpc5200b-sram\0mpc5200-sram") },
- { "/builtin/sram", "device_type", prop_cstr("sram") },
- {}
- };
- #undef prop_cstr
-
- /* Vars */
u32 node;
char prop[64];
- int rv, i;
+ int rv;
- /* Check if we're really running on a EFIKA */
- node = call_prom("finddevice", 1, 1, ADDR("/"));
+ /* Check if /builtin/ethernet exists - bail if it doesn't */
+ node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
if (!PHANDLE_VALID(node))
return;
- rv = prom_getprop(node, "model", prop, sizeof(prop));
- if (rv == PROM_ERROR)
- return;
- if (strcmp(prop, "EFIKA5K2"))
+ /* Check if the phy-handle property exists - bail if it does */
+ rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
+ if (!rv)
return;
- prom_printf("Applying EFIKA device tree fixups\n");
-
- /* Process substitution table */
- for (i=0; efika_subst_table[i].path; i++) {
- struct subst_entry *se = &efika_subst_table[i];
-
- node = call_prom("finddevice", 1, 1, ADDR(se->path));
- if (!PHANDLE_VALID(node)) {
- prom_printf("fixup_device_tree_efika: ",
- "skipped entry %x - not found\n", i);
- continue;
- }
-
- rv = prom_setprop(node, se->path, se->property,
- se->value, se->value_len );
- if (rv == PROM_ERROR)
- prom_printf("fixup_device_tree_efika: ",
- "skipped entry %x - setprop error\n", i);
- }
+ /*
+ * At this point the ethernet device doesn't have a phy described.
+ * Now we need to add the missing phy node and linkage
+ */
- /* Make sure ethernet mdio bus node exists */
+ /* Check for an MDIO bus node - if missing then create one */
node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
if (!PHANDLE_VALID(node)) {
prom_printf("Adding Ethernet MDIO node\n");
@@ -2226,8 +2178,8 @@ static void __init fixup_device_tree_efika(void)
" new-device"
" 1 encode-int s\" #address-cells\" property"
" 0 encode-int s\" #size-cells\" property"
- " s\" mdio\" 2dup device-name device-type"
- " s\" mpc5200b-fec-phy\" encode-string"
+ " s\" mdio\" device-name"
+ " s\" fsl,mpc5200b-mdio\" encode-string"
" s\" compatible\" property"
" 0xf0003000 0x400 reg"
" 0x2 encode-int"
@@ -2237,8 +2189,10 @@ static void __init fixup_device_tree_efika(void)
" finish-device");
};
- /* Make sure ethernet phy device node exist */
- node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio/ethernet-phy"));
+ /* Check for a PHY device node - if missing then create one and
+ * give its phandle to the ethernet node */
+ node = call_prom("finddevice", 1, 1,
+ ADDR("/builtin/mdio/ethernet-phy"));
if (!PHANDLE_VALID(node)) {
prom_printf("Adding Ethernet PHY node\n");
call_prom("interpret", 1, 1,
@@ -2254,7 +2208,62 @@ static void __init fixup_device_tree_efika(void)
" s\" phy-handle\" property"
" device-end");
}
+}
+
+static void __init fixup_device_tree_efika(void)
+{
+ int sound_irq[3] = { 2, 2, 0 };
+ int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
+ 3,4,0, 3,5,0, 3,6,0, 3,7,0,
+ 3,8,0, 3,9,0, 3,10,0, 3,11,0,
+ 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
+ u32 node;
+ char prop[64];
+ int rv, len;
+
+	/* Check if we're really running on an EFIKA */
+ node = call_prom("finddevice", 1, 1, ADDR("/"));
+ if (!PHANDLE_VALID(node))
+ return;
+
+ rv = prom_getprop(node, "model", prop, sizeof(prop));
+ if (rv == PROM_ERROR)
+ return;
+ if (strcmp(prop, "EFIKA5K2"))
+ return;
+
+ prom_printf("Applying EFIKA device tree fixups\n");
+
+ /* Claiming to be 'chrp' is death */
+ node = call_prom("finddevice", 1, 1, ADDR("/"));
+ rv = prom_getprop(node, "device_type", prop, sizeof(prop));
+ if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
+ prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
+
+ /* Fixup bestcomm interrupts property */
+ node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
+ if (PHANDLE_VALID(node)) {
+ len = prom_getproplen(node, "interrupts");
+ if (len == 12) {
+ prom_printf("Fixing bestcomm interrupts property\n");
+			prom_setprop(node, "/builtin/bestcomm", "interrupts",
+ bcomm_irq, sizeof(bcomm_irq));
+ }
+ }
+
+ /* Fixup sound interrupts property */
+ node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
+ if (PHANDLE_VALID(node)) {
+ rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
+ if (rv == PROM_ERROR) {
+ prom_printf("Adding sound interrupts property\n");
+ prom_setprop(node, "/builtin/sound", "interrupts",
+ sound_irq, sizeof(sound_irq));
+ }
+ }
+ /* Make sure ethernet phy-handle property exists */
+ fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
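
For readability, a rough view of the device-tree fragment the Forth "interpret" strings above create when the MDIO node is missing. Only the properties visible in this hunk are shown, the layout is an assumption, and it is given as a C comment purely for illustration.

/*
 * Roughly equivalent fragment under /builtin (illustration only):
 *
 *	mdio {
 *		compatible = "fsl,mpc5200b-mdio";
 *		reg = <0xf0003000 0x400>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */
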
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index b5c96af955c6..90eb3a3e383e 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -273,7 +273,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
#else
struct pci_controller *host;
host = pci_bus_to_host(pdev->bus);
- ppnode = host ? host->arch_data : NULL;
+ ppnode = host ? host->dn : NULL;
#endif
/* No node for host bridge ? give up */
if (ppnode == NULL)
@@ -419,7 +419,7 @@ static struct of_bus *of_match_bus(struct device_node *np)
static int of_translate_one(struct device_node *parent, struct of_bus *bus,
struct of_bus *pbus, u32 *addr,
- int na, int ns, int pna)
+ int na, int ns, int pna, const char *rprop)
{
const u32 *ranges;
unsigned int rlen;
@@ -438,7 +438,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* to translate addresses that aren't supposed to be translated in
* the first place. --BenH.
*/
- ranges = of_get_property(parent, "ranges", &rlen);
+ ranges = of_get_property(parent, rprop, &rlen);
if (ranges == NULL || rlen == 0) {
offset = of_read_number(addr, na);
memset(addr, 0, pna * 4);
@@ -481,7 +481,8 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* that can be mapped to a cpu physical address). This is not really specified
* that way, but this is traditionally the way IBM at least do things
*/
-u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
+ const char *rprop)
{
struct device_node *parent = NULL;
struct of_bus *bus, *pbus;
@@ -540,7 +541,7 @@ u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
pbus->name, pna, pns, parent->full_name);
/* Apply bus translation */
- if (of_translate_one(dev, bus, pbus, addr, na, ns, pna))
+ if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
break;
/* Complete the move up one level */
@@ -556,8 +557,19 @@ u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
return result;
}
+
+u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+{
+ return __of_translate_address(dev, in_addr, "ranges");
+}
EXPORT_SYMBOL(of_translate_address);
+u64 of_translate_dma_address(struct device_node *dev, const u32 *in_addr)
+{
+ return __of_translate_address(dev, in_addr, "dma-ranges");
+}
+EXPORT_SYMBOL(of_translate_dma_address);
+
const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
unsigned int *flags)
{
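
A hedged usage sketch for the new of_translate_dma_address() helper above, which walks the parent's "dma-ranges" instead of "ranges". The node pointer np and the bare "reg" lookup are illustrative only.

const u32 *reg = of_get_property(np, "reg", NULL);	/* np: hypothetical device node */
u64 dma_base = reg ? of_translate_dma_address(np, reg) : OF_BAD_ADDR;
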
diff --git a/arch/powerpc/kernel/rio.c b/arch/powerpc/kernel/rio.c
new file mode 100644
index 000000000000..29487fedfc76
--- /dev/null
+++ b/arch/powerpc/kernel/rio.c
@@ -0,0 +1,52 @@
+/*
+ * RapidIO PPC32 support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/rio.h>
+
+#include <asm/rio.h>
+
+/**
+ * platform_rio_init - Do platform specific RIO init
+ *
+ * Any platform specific initialization of RapidIO
+ * hardware is done here as well as registration
+ * of any active master ports in the system.
+ */
+void __attribute__ ((weak))
+ platform_rio_init(void)
+{
+ printk(KERN_WARNING "RIO: No platform_rio_init() present\n");
+}
+
+/**
+ * ppc_rio_init - Do PPC32 RIO init
+ *
+ * Calls platform-specific RIO init code and then calls
+ * rio_init_mports() to initialize any master ports that
+ * have been registered with the RIO subsystem.
+ */
+static int __init ppc_rio_init(void)
+{
+ printk(KERN_INFO "RIO: RapidIO init\n");
+
+ /* Platform specific initialization */
+ platform_rio_init();
+
+ /* Enumerate all registered ports */
+ rio_init_mports();
+
+ return 0;
+}
+
+subsys_initcall(ppc_rio_init);
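
Because platform_rio_init() above is declared weak, a platform overrides it simply by providing a strong definition in its own file; a minimal sketch, with the body left as a placeholder.

/* Board/platform file, not part of this patch: a strong definition
 * takes precedence over the weak stub above.
 */
void platform_rio_init(void)
{
	/* hypothetical: program the host's RapidIO master port(s) here */
}
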
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 21f14e57d1f3..433a0a0949fb 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -260,7 +260,7 @@ static int phb_set_bus_ranges(struct device_node *dev,
int __devinit rtas_setup_phb(struct pci_controller *phb)
{
- struct device_node *dev = phb->arch_data;
+ struct device_node *dev = phb->dn;
if (is_python(dev))
python_countermeasures(dev);
@@ -280,10 +280,7 @@ void __init find_and_init_phbs(void)
struct pci_controller *phb;
struct device_node *root = of_find_node_by_path("/");
- for (node = of_get_next_child(root, NULL);
- node != NULL;
- node = of_get_next_child(root, node)) {
-
+ for_each_child_of_node(root, node) {
if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
strcmp(node->type, "pciex") != 0))
continue;
@@ -311,10 +308,12 @@ void __init find_and_init_phbs(void)
if (prop)
pci_probe_only = *prop;
+#ifdef CONFIG_PPC32 /* Will be made generic soon */
prop = of_get_property(of_chosen,
"linux,pci-assign-all-buses", NULL);
- if (prop)
- pci_assign_all_buses = *prop;
+ if (prop && *prop)
+ ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+#endif /* CONFIG_PPC32 */
}
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2de00f870edc..6adb5a1e98bb 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/debugfs.h>
+#include <linux/percpu.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
@@ -57,6 +58,7 @@
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/xmon.h>
+#include <asm/cputhreads.h>
#include "setup.h"
@@ -327,6 +329,31 @@ void __init check_for_initrd(void)
#ifdef CONFIG_SMP
+int threads_per_core, threads_shift;
+cpumask_t threads_core_mask;
+
+static void __init cpu_init_thread_core_maps(int tpc)
+{
+ int i;
+
+ threads_per_core = tpc;
+ threads_core_mask = CPU_MASK_NONE;
+
+ /* This implementation only supports power of 2 number of threads
+ * for simplicity and performance
+ */
+ threads_shift = ilog2(tpc);
+ BUG_ON(tpc != (1 << threads_shift));
+
+ for (i = 0; i < tpc; i++)
+ cpu_set(i, threads_core_mask);
+
+ printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
+ tpc, tpc > 1 ? "s" : "");
+ printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
+}
+
+
/**
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map
@@ -350,22 +377,32 @@ void __init smp_setup_cpu_maps(void)
{
struct device_node *dn = NULL;
int cpu = 0;
+ int nthreads = 1;
+
+ DBG("smp_setup_cpu_maps()\n");
while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
const int *intserv;
- int j, len = sizeof(u32), nthreads = 1;
+ int j, len;
+
+ DBG(" * %s...\n", dn->full_name);
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
- if (intserv)
+ if (intserv) {
nthreads = len / sizeof(int);
- else {
+ DBG(" ibm,ppc-interrupt-server#s -> %d threads\n",
+ nthreads);
+ } else {
+ DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
intserv = of_get_property(dn, "reg", NULL);
if (!intserv)
intserv = &cpu; /* assume logical == phys */
}
for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+ DBG(" thread %d -> cpu %d (hard id %d)\n",
+ j, cpu, intserv[j]);
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, intserv[j]);
cpu_set(cpu, cpu_possible_map);
@@ -373,6 +410,12 @@ void __init smp_setup_cpu_maps(void)
}
}
+ /* If no SMT supported, nthreads is forced to 1 */
+ if (!cpu_has_feature(CPU_FTR_SMT)) {
+ DBG(" SMT disabled ! nthreads forced to 1\n");
+ nthreads = 1;
+ }
+
#ifdef CONFIG_PPC64
/*
* On pSeries LPAR, we need to know how many cpus
@@ -395,7 +438,7 @@ void __init smp_setup_cpu_maps(void)
/* Double maxcpus for processors which have SMT capability */
if (cpu_has_feature(CPU_FTR_SMT))
- maxcpus *= 2;
+ maxcpus *= nthreads;
if (maxcpus > NR_CPUS) {
printk(KERN_WARNING
@@ -412,9 +455,16 @@ void __init smp_setup_cpu_maps(void)
out:
of_node_put(dn);
}
-
vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
+
+	/* Initialize CPU <=> thread mapping
+ *
+ * WARNING: We assume that the number of threads is the same for
+ * every CPU in the system. If that is not the case, then some code
+ * here will have to be reworked
+ */
+ cpu_init_thread_core_maps(nthreads);
}
/*
@@ -424,17 +474,19 @@ void __init smp_setup_cpu_maps(void)
*/
void __init smp_setup_cpu_sibling_map(void)
{
-#if defined(CONFIG_PPC64)
- int cpu;
+#ifdef CONFIG_PPC64
+ int i, cpu, base;
- /*
- * Do the sibling map; assume only two threads per processor.
- */
for_each_possible_cpu(cpu) {
- cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
- if (cpu_has_feature(CPU_FTR_SMT))
- cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
+ DBG("Sibling map for CPU %d:", cpu);
+ base = cpu_first_thread_in_core(cpu);
+ for (i = 0; i < threads_per_core; i++) {
+ cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+ DBG(" %d", base + i);
+ }
+ DBG("\n");
}
+
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
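
A sketch of the arithmetic the new sibling-map loop relies on: with threads_shift = ilog2(threads_per_core), the first thread in a core is the cpu number with its low threads_shift bits cleared. The helper name below is illustrative; the real one lives in asm/cputhreads.h.

static inline int example_first_thread_in_core(int cpu)
{
	return cpu & ~((1 << threads_shift) - 1);
}
/* e.g. threads_per_core == 2 (threads_shift == 1): cpus 6 and 7 both yield 6 */
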
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 6126bca8b70a..d840bc772fd3 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -24,13 +24,12 @@
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
+#include <linux/ptrace.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
-#include <linux/ptrace.h>
#else
#include <linux/wait.h>
-#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 338950aeb6f6..be35ffae10f0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -76,6 +76,8 @@ void smp_call_function_interrupt(void);
int smt_enabled_at_boot = 1;
+static int ipi_fail_ok;
+
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
@@ -181,12 +183,13 @@ static struct call_data_struct {
* <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ * <map> is a cpu map of the cpus to send IPI to.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
- int wait, cpumask_t map)
+static int __smp_call_function_map(void (*func) (void *info), void *info,
+ int nonatomic, int wait, cpumask_t map)
{
struct call_data_struct data;
int ret = -1, num_cpus;
@@ -203,8 +206,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
if (wait)
atomic_set(&data.finished, 0);
- spin_lock(&call_lock);
-
/* remove 'self' from the map */
if (cpu_isset(smp_processor_id(), map))
cpu_clear(smp_processor_id(), map);
@@ -231,7 +232,8 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
printk("smp_call_function on cpu %d: other cpus not "
"responding (%d)\n", smp_processor_id(),
atomic_read(&data.started));
- debugger(NULL);
+ if (!ipi_fail_ok)
+ debugger(NULL);
goto out;
}
}
@@ -258,14 +260,18 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
out:
call_data = NULL;
HMT_medium();
- spin_unlock(&call_lock);
return ret;
}
static int __smp_call_function(void (*func)(void *info), void *info,
int nonatomic, int wait)
{
- return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
+ int ret;
+ spin_lock(&call_lock);
+	ret = __smp_call_function_map(func, info, nonatomic, wait,
+ cpu_online_map);
+ spin_unlock(&call_lock);
+ return ret;
}
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
@@ -278,8 +284,8 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
}
EXPORT_SYMBOL(smp_call_function);
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
- int wait)
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
{
cpumask_t map = CPU_MASK_NONE;
int ret = 0;
@@ -291,9 +297,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
return -EINVAL;
cpu_set(cpu, map);
- if (cpu != get_cpu())
- ret = smp_call_function_map(func,info,nonatomic,wait,map);
- else {
+ if (cpu != get_cpu()) {
+ spin_lock(&call_lock);
+ ret = __smp_call_function_map(func, info, nonatomic, wait, map);
+ spin_unlock(&call_lock);
+ } else {
local_irq_disable();
func(info);
local_irq_enable();
@@ -305,7 +313,22 @@ EXPORT_SYMBOL(smp_call_function_single);
void smp_send_stop(void)
{
- __smp_call_function(stop_this_cpu, NULL, 1, 0);
+ int nolock;
+
+ /* It's OK to fail sending the IPI, since the alternative is to
+ * be stuck forever waiting on the other CPU to take the interrupt.
+ *
+ * It's better to at least continue and go through reboot, since this
+ * function is usually called at panic or reboot time in the first
+ * place.
+ */
+ ipi_fail_ok = 1;
+
+ /* Don't deadlock in case we got called through panic */
+ nolock = !spin_trylock(&call_lock);
+ __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
+ if (!nolock)
+ spin_unlock(&call_lock);
}
void smp_call_function_interrupt(void)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
new file mode 100644
index 000000000000..238aa63ced8f
--- /dev/null
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -0,0 +1,58 @@
+/*
+ * This file, when run through CPP, produces a list of syscall numbers
+ * in the order of systbl.h. That way we can check for gaps and syscalls
+ * that are out of order.
+ *
+ * Unfortunately, we cannot check for the correct ordering of entries
+ * using SYSX().
+ *
+ * Copyright © IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/unistd.h>
+
+#define SYSCALL(func) __NR_##func
+#define COMPAT_SYS(func) __NR_##func
+#define PPC_SYS(func) __NR_##func
+#ifdef CONFIG_PPC64
+#define OLDSYS(func) -1
+#define SYS32ONLY(func) -1
+#else
+#define OLDSYS(func) __NR_old##func
+#define SYS32ONLY(func) __NR_##func
+#endif
+#define SYSX(f, f3264, f32) -1
+
+#define SYSCALL_SPU(func) SYSCALL(func)
+#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
+#define PPC_SYS_SPU(func) PPC_SYS(func)
+#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
+
+/* Just insert a marker for ni_syscalls */
+#define __NR_ni_syscall -1
+
+/*
+ * These are the known exceptions.
+ * Hopefully, there will be no more.
+ */
+#define __NR_llseek __NR__llseek
+#undef __NR_umount
+#define __NR_umount __NR_umount2
+#define __NR_old_getrlimit __NR_getrlimit
+#define __NR_newstat __NR_stat
+#define __NR_newlstat __NR_lstat
+#define __NR_newfstat __NR_fstat
+#define __NR_newuname __NR_uname
+#define __NR_sysctl __NR__sysctl
+#define __NR_olddebug_setcontext __NR_sys_debug_setcontext
+
+/* We call sys_ugetrlimit for syscall number __NR_getrlimit */
+#define getrlimit ugetrlimit
+
+START_TABLE
+#include <asm/systbl.h>
+END_TABLE __NR_syscalls
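
A hedged illustration of what the preprocessed systbl_chk.i roughly looks like: a monotonically increasing list of syscall numbers, with -1 markers for SYSX and ni_syscall slots, which the shell/awk checker below walks. The concrete numbers are placeholders.

/*
 *	START_TABLE
 *	0
 *	1
 *	2
 *	-1
 *	4
 *	...
 *	END_TABLE 313
 */
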
diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh
new file mode 100644
index 000000000000..19415e7674a5
--- /dev/null
+++ b/arch/powerpc/kernel/systbl_chk.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Just process the CPP output from systbl_chk.c and complain
+# if anything is out of order.
+#
+# Copyright © 2008 IBM Corporation
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+awk 'BEGIN { num = -1; } # Ignore the beginning of the file
+ /^#/ { next; }
+ /^[ \t]*$/ { next; }
+ /^START_TABLE/ { num = 0; next; }
+ /^END_TABLE/ {
+ if (num != $2) {
+ printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n",
+ $2, num - 1;
+ exit(1);
+ }
+ num = -1; # Ignore the rest of the file
+ }
+ {
+ if (num == -1) next;
+ if (($1 != -1) && ($1 != num)) {
+ printf "Syscall %s out of order (expected %s)\n",
+ $1, num;
+ exit(1);
+ };
+ num++;
+ }' "$1"
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a925a8eae121..5cd3db5cae41 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -116,9 +116,12 @@ static struct clock_event_device decrementer_clockevent = {
.features = CLOCK_EVT_FEAT_ONESHOT,
};
-static DEFINE_PER_CPU(struct clock_event_device, decrementers);
-void init_decrementer_clockevent(void);
-static DEFINE_PER_CPU(u64, decrementer_next_tb);
+struct decrementer_clock {
+ struct clock_event_device event;
+ u64 next_tb;
+};
+
+static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
@@ -216,7 +219,11 @@ static u64 read_purr(void)
*/
static u64 read_spurr(u64 purr)
{
- if (cpu_has_feature(CPU_FTR_SPURR))
+ /*
+ * cpus without PURR won't have a SPURR
+ * We already know the former when we use this, so tell gcc
+ */
+ if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
return purr;
}
@@ -227,29 +234,30 @@ static u64 read_spurr(u64 purr)
*/
void account_system_vtime(struct task_struct *tsk)
{
- u64 now, nowscaled, delta, deltascaled;
+ u64 now, nowscaled, delta, deltascaled, sys_time;
unsigned long flags;
local_irq_save(flags);
now = read_purr();
- delta = now - get_paca()->startpurr;
- get_paca()->startpurr = now;
nowscaled = read_spurr(now);
+ delta = now - get_paca()->startpurr;
deltascaled = nowscaled - get_paca()->startspurr;
+ get_paca()->startpurr = now;
get_paca()->startspurr = nowscaled;
if (!in_interrupt()) {
/* deltascaled includes both user and system time.
* Hence scale it based on the purr ratio to estimate
* the system time */
+ sys_time = get_paca()->system_time;
if (get_paca()->user_time)
- deltascaled = deltascaled * get_paca()->system_time /
- (get_paca()->system_time + get_paca()->user_time);
- delta += get_paca()->system_time;
+ deltascaled = deltascaled * sys_time /
+ (sys_time + get_paca()->user_time);
+ delta += sys_time;
get_paca()->system_time = 0;
}
account_system_time(tsk, 0, delta);
- get_paca()->purrdelta = delta;
account_system_time_scaled(tsk, deltascaled);
+ get_paca()->purrdelta = delta;
get_paca()->spurrdelta = deltascaled;
local_irq_restore(flags);
}
@@ -326,11 +334,9 @@ void calculate_steal_time(void)
s64 stolen;
struct cpu_purr_data *pme;
- if (!cpu_has_feature(CPU_FTR_PURR))
- return;
- pme = &per_cpu(cpu_purr_data, smp_processor_id());
+ pme = &__get_cpu_var(cpu_purr_data);
if (!pme->initialized)
- return; /* this can happen in early boot */
+		return;	/* !CPU_FTR_PURR or early in boot */
tb = mftb();
purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr);
@@ -353,7 +359,7 @@ static void snapshot_purr(void)
if (!cpu_has_feature(CPU_FTR_PURR))
return;
local_irq_save(flags);
- pme = &per_cpu(cpu_purr_data, smp_processor_id());
+ pme = &__get_cpu_var(cpu_purr_data);
pme->tb = mftb();
pme->purr = mfspr(SPRN_PURR);
pme->initialized = 1;
@@ -556,8 +562,8 @@ void __init iSeries_time_init_early(void)
void timer_interrupt(struct pt_regs * regs)
{
struct pt_regs *old_regs;
- int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(decrementers, cpu);
+ struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
+ struct clock_event_device *evt = &decrementer->event;
u64 now;
/* Ensure a positive value is written to the decrementer, or else
@@ -570,9 +576,9 @@ void timer_interrupt(struct pt_regs * regs)
#endif
now = get_tb_or_rtc();
- if (now < per_cpu(decrementer_next_tb, cpu)) {
+ if (now < decrementer->next_tb) {
/* not time for this event yet */
- now = per_cpu(decrementer_next_tb, cpu) - now;
+ now = decrementer->next_tb - now;
if (now <= DECREMENTER_MAX)
set_dec((int)now);
return;
@@ -623,6 +629,45 @@ void wakeup_decrementer(void)
set_dec(ticks);
}
+#ifdef CONFIG_SUSPEND
+void generic_suspend_disable_irqs(void)
+{
+ preempt_disable();
+
+ /* Disable the decrementer, so that it doesn't interfere
+ * with suspending.
+ */
+
+ set_dec(0x7fffffff);
+ local_irq_disable();
+ set_dec(0x7fffffff);
+}
+
+void generic_suspend_enable_irqs(void)
+{
+ wakeup_decrementer();
+
+ local_irq_enable();
+ preempt_enable();
+}
+
+/* Overrides the weak version in kernel/power/main.c */
+void arch_suspend_disable_irqs(void)
+{
+ if (ppc_md.suspend_disable_irqs)
+ ppc_md.suspend_disable_irqs();
+ generic_suspend_disable_irqs();
+}
+
+/* Overrides the weak version in kernel/power/main.c */
+void arch_suspend_enable_irqs(void)
+{
+ generic_suspend_enable_irqs();
+ if (ppc_md.suspend_enable_irqs)
+ ppc_md.suspend_enable_irqs();
+}
+#endif
+
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
@@ -811,7 +856,7 @@ void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
- __get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
+ __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
set_dec(evt);
return 0;
}
@@ -825,7 +870,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
static void register_decrementer_clockevent(int cpu)
{
- struct clock_event_device *dec = &per_cpu(decrementers, cpu);
+ struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of_cpu(cpu);
@@ -836,7 +881,7 @@ static void register_decrementer_clockevent(int cpu)
clockevents_register_device(dec);
}
-void init_decrementer_clockevent(void)
+static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
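
A hedged sketch of how a platform would hook the new suspend IRQ paths above through ppc_md; every function name here is invented for the example.

static void myplat_suspend_quiesce(void)
{
	/* hypothetical: mask wakeup-unsafe interrupt sources */
}

static void myplat_suspend_restore(void)
{
	/* hypothetical: undo the quiesce step */
}

static int __init myplat_init(void)
{
	ppc_md.suspend_disable_irqs = myplat_suspend_quiesce;
	ppc_md.suspend_enable_irqs  = myplat_suspend_restore;
	return 0;
}
arch_initcall(myplat_init);
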
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 59c464e26f38..848a20475db8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -334,18 +334,25 @@ static inline int check_io_access(struct pt_regs *regs)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif
-static int generic_machine_check_exception(struct pt_regs *regs)
+#if defined(CONFIG_4xx)
+int machine_check_4xx(struct pt_regs *regs)
{
unsigned long reason = get_mc_reason(regs);
-#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
if (reason & ESR_IMCP) {
printk("Instruction");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
} else
printk("Data");
printk(" machine check in kernel mode.\n");
-#elif defined(CONFIG_440A)
+
+ return 0;
+}
+
+int machine_check_440A(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+
printk("Machine check in kernel mode.\n");
if (reason & ESR_IMCP){
printk("Instruction Synchronous Machine Check exception\n");
@@ -375,7 +382,13 @@ static int generic_machine_check_exception(struct pt_regs *regs)
/* Clear MCSR */
mtspr(SPRN_MCSR, mcsr);
}
-#elif defined (CONFIG_E500)
+ return 0;
+}
+#elif defined(CONFIG_E500)
+int machine_check_e500(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
@@ -403,7 +416,14 @@ static int generic_machine_check_exception(struct pt_regs *regs)
printk("Bus - Instruction Parity Error\n");
if (reason & MCSR_BUS_RPERR)
printk("Bus - Read Parity Error\n");
-#elif defined (CONFIG_E200)
+
+ return 0;
+}
+#elif defined(CONFIG_E200)
+int machine_check_e200(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
@@ -421,7 +441,14 @@ static int generic_machine_check_exception(struct pt_regs *regs)
printk("Bus - Read Bus Error on data load\n");
if (reason & MCSR_BUS_WRERR)
printk("Bus - Write Bus Error on buffered store or cache line push\n");
-#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
+
+ return 0;
+}
+#else
+int machine_check_generic(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", reason);
switch (reason & 0x601F0000) {
@@ -451,22 +478,26 @@ static int generic_machine_check_exception(struct pt_regs *regs)
default:
printk("Unknown values in msr\n");
}
-#endif /* CONFIG_4xx */
-
return 0;
}
+#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
- /* See if any machine dependent calls */
+ /* See if any machine dependent calls. In theory, we would want
+ * to call the CPU first, and call the ppc_md. one if the CPU
+ * one returns a positive number. However there is existing code
+ * that assumes the board gets a first chance, so let's keep it
+ * that way for now and fix things later. --BenH.
+ */
if (ppc_md.machine_check_exception)
recover = ppc_md.machine_check_exception(regs);
- else
- recover = generic_machine_check_exception(regs);
+ else if (cur_cpu_spec->machine_check)
+ recover = cur_cpu_spec->machine_check(regs);
- if (recover)
+ if (recover > 0)
return;
if (user_mode(regs)) {
@@ -476,7 +507,12 @@ void machine_check_exception(struct pt_regs *regs)
}
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
- /* the qspan pci read routines can cause machine checks -- Cort */
+ /* the qspan pci read routines can cause machine checks -- Cort
+ *
+ * yuck !!! that totally needs to go away ! There are better ways
+ * to deal with that than having a wart in the mcheck handler.
+ * -- BenH
+ */
bad_page_fault(regs, regs->dar, SIGBUS);
return;
#endif
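With the split above, each CPU family gets its own handler (machine_check_4xx, machine_check_440A, machine_check_e500, machine_check_e200, or machine_check_generic), and machine_check_exception() gives the board hook (ppc_md.machine_check_exception) first chance before falling back to the handler hung off cur_cpu_spec; only a positive return counts as a recovered event. A hypothetical excerpt of how a cputable.c entry (see the diffstat) is assumed to wire one of these handlers; the PVR values shown are placeholders:

#include <asm/cputable.h>

struct pt_regs;
extern int machine_check_4xx(struct pt_regs *regs);

static struct cpu_spec example_cpu_spec = {
	.pvr_mask	= 0xffff0000,
	.pvr_value	= 0x40110000,
	.cpu_name	= "405GP",
	.machine_check	= machine_check_4xx,	/* new per-family hook */
};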
@@ -622,6 +658,9 @@ static void parse_fpe(struct pt_regs *regs)
#define INST_POPCNTB 0x7c0000f4
#define INST_POPCNTB_MASK 0xfc0007fe
+#define INST_ISEL 0x7c00001e
+#define INST_ISEL_MASK 0xfc00003e
+
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
u8 rT = (instword >> 21) & 0x1f;
@@ -707,6 +746,23 @@ static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
return 0;
}
+static int emulate_isel(struct pt_regs *regs, u32 instword)
+{
+ u8 rT = (instword >> 21) & 0x1f;
+ u8 rA = (instword >> 16) & 0x1f;
+ u8 rB = (instword >> 11) & 0x1f;
+ u8 BC = (instword >> 6) & 0x1f;
+ u8 bit;
+ unsigned long tmp;
+
+ tmp = (rA == 0) ? 0 : regs->gpr[rA];
+ bit = (regs->ccr >> (31 - BC)) & 0x1;
+
+ regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
+
+ return 0;
+}
+
static int emulate_instruction(struct pt_regs *regs)
{
u32 instword;
@@ -749,6 +805,11 @@ static int emulate_instruction(struct pt_regs *regs)
return emulate_popcntb_inst(regs, instword);
}
+ /* Emulate isel (Integer Select) instruction */
+ if ((instword & INST_ISEL_MASK) == INST_ISEL) {
+ return emulate_isel(regs, instword);
+ }
+
return -EINVAL;
}
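The new emulate_isel() handles the optional isel (Integer Select) instruction on cores that trap it: rT receives rA (or 0 when the rA field names r0) if CR bit BC is set, and rB otherwise. A quick user-space model of that selection logic, useful only as an illustration of the semantics emulated above:

#include <assert.h>
#include <stdint.h>

static uint64_t isel_model(uint64_t ra, uint64_t rb, int ra_is_r0,
			   uint32_t ccr, unsigned int bc)
{
	uint64_t a = ra_is_r0 ? 0 : ra;
	/* CR bits are numbered from the most significant bit, as in the
	 * kernel's (regs->ccr >> (31 - BC)) & 0x1 */
	unsigned int bit = (ccr >> (31 - bc)) & 0x1;

	return bit ? a : rb;
}

int main(void)
{
	/* CR0.EQ is CR bit 2: when set, the first operand is selected */
	assert(isel_model(5, 9, 0, 1u << (31 - 2), 2) == 5);
	/* when the bit is clear, the second operand is selected */
	assert(isel_model(5, 9, 0, 0, 2) == 9);
	/* an rA field of 0 reads as the constant zero, not GPR0 */
	assert(isel_model(5, 9, 1, 1u << (31 - 2), 2) == 0);
	return 0;
}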
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index d723070c9a33..7aad6203e411 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -54,9 +54,16 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_44x)
/* PPC44x debug */
udbg_init_44x_as1();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_40x)
+ /* PPC40x debug */
+ udbg_init_40x_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM)
udbg_init_cpm();
#endif
+
+#ifdef CONFIG_PPC_EARLY_DEBUG
+ console_loglevel = 10;
+#endif
}
/* udbg library, used by xmon et al */
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 833a3d0bcfa7..cb01ebc59387 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -46,7 +46,7 @@ struct NS16550 {
#define LCR_DLAB 0x80
-static volatile struct NS16550 __iomem *udbg_comport;
+static struct NS16550 __iomem *udbg_comport;
static void udbg_550_putc(char c)
{
@@ -117,7 +117,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
{
unsigned int dll, dlm, divisor, prescaler, speed;
u8 old_lcr;
- volatile struct NS16550 __iomem *port = comport;
+ struct NS16550 __iomem *port = comport;
old_lcr = in_8(&port->lcr);
@@ -162,7 +162,7 @@ void udbg_maple_real_putc(char c)
void __init udbg_init_maple_realmode(void)
{
- udbg_comport = (volatile struct NS16550 __iomem *)0xf40003f8;
+ udbg_comport = (struct NS16550 __iomem *)0xf40003f8;
udbg_putc = udbg_maple_real_putc;
udbg_getc = NULL;
@@ -184,7 +184,7 @@ void udbg_pas_real_putc(char c)
void udbg_init_pas_realmode(void)
{
- udbg_comport = (volatile struct NS16550 __iomem *)0xfcff03f8UL;
+ udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL;
udbg_putc = udbg_pas_real_putc;
udbg_getc = NULL;
@@ -219,9 +219,42 @@ static int udbg_44x_as1_getc(void)
void __init udbg_init_44x_as1(void)
{
udbg_comport =
- (volatile struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
+ (struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
udbg_putc = udbg_44x_as1_putc;
udbg_getc = udbg_44x_as1_getc;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_40x
+static void udbg_40x_real_putc(char c)
+{
+ if (udbg_comport) {
+ while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
+ /* wait for idle */;
+ real_writeb(c, &udbg_comport->thr); eieio();
+ if (c == '\n')
+ udbg_40x_real_putc('\r');
+ }
+}
+
+static int udbg_40x_real_getc(void)
+{
+ if (udbg_comport) {
+ while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0)
+ ; /* wait for char */
+ return real_readb(&udbg_comport->rbr);
+ }
+ return -1;
+}
+
+void __init udbg_init_40x_realmode(void)
+{
+ udbg_comport = (struct NS16550 __iomem *)
+ CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR;
+
+ udbg_putc = udbg_40x_real_putc;
+ udbg_getc = udbg_40x_real_getc;
+ udbg_getc_poll = NULL;
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
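The 40x real-mode path mirrors the existing Maple and PA Semi helpers: the UART is accessed through real_readb()/real_writeb() at the physical address supplied by the new CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR option, and udbg_early_init() (see the udbg.c hunk above) selects it when CONFIG_PPC_EARLY_DEBUG_40x is set. A matching declaration is presumably added to include/asm-powerpc/udbg.h, which is not part of this diffstat; roughly:

/* assumed companion declaration; header change not shown in this diff */
extern void __init udbg_init_40x_realmode(void);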